code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.optim
import com.intel.analytics.bigdl.dataset.{DataSet, Sample, SampleToMiniBatch}
import com.intel.analytics.bigdl.models.lenet.LeNet5
import com.intel.analytics.bigdl.nn.CrossEntropyCriterion
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.Engine
import com.intel.analytics.bigdl.utils.RandomGenerator._
import com.intel.analytics.bigdl._
import org.apache.spark.{SparkConf, SparkContext}
import org.scalatest.{BeforeAndAfter, FlatSpec, Matchers}
class ValidatorSpec extends FlatSpec with Matchers with BeforeAndAfter {

  // SparkContext shared by the tests; created before and stopped after each test.
  var sc: SparkContext = null
  val nodeNumber = 1
  val coreNumber = 1

  before {
    Engine.init(nodeNumber, coreNumber, true)
    val conf = new SparkConf().setMaster("local[1]").setAppName("validator")
    sc = new SparkContext(conf)
  }

  after {
    if (sc != null) {
      sc.stop()
    }
  }

  /** Drops a leading drive/scheme prefix (e.g. "C:") from a path, otherwise returns it as-is. */
  private def processPath(path: String): String =
    if (path.contains(":")) path.substring(1) else path

  /** Builds 100 identical samples: a 28x28 input filled with 0.8f, labelled as class 1. */
  private def constantSamples(): Array[Sample[Float]] =
    Array.fill(100) {
      Sample(Tensor[Float](28, 28).fill(0.8f), Tensor[Float](1).fill(1.0f))
    }

  "DistriValidator" should "be correct" in {
    RNG.setSeed(100)
    val samples = constantSamples()
    val model = LeNet5(classNum = 10)
    val dataSet = DataSet.array(samples, sc).transform(SampleToMiniBatch(1))
    val validator = Validator(model, dataSet)
    val result = validator.test(Array(new Top1Accuracy[Float](), new Top5Accuracy[Float](),
      new Loss[Float](CrossEntropyCriterion[Float]())))
    result(0)._1 should be (new AccuracyResult(0, 100))
    result(1)._1 should be (new AccuracyResult(100, 100))
    result(2)._1 should be (new LossResult(230.67628f, 100))
    result(0)._1.result()._1 should be (0f)
    result(1)._1.result()._1 should be (1f)
    result(2)._1.result()._1 should be (2.306763f +- 0.000001f)
  }

  "LocalValidator" should "be correct" in {
    RNG.setSeed(100)
    val samples = constantSamples()
    val model = LeNet5(classNum = 10)
    val dataSet = DataSet.array(samples).transform(SampleToMiniBatch(1))
    val validator = Validator(model, dataSet)
    val result = validator.test(Array(new Top1Accuracy[Float](), new Top5Accuracy[Float](),
      new Loss[Float](CrossEntropyCriterion[Float]())))
    result(0)._1 should be (new AccuracyResult(0, 100))
    result(1)._1 should be (new AccuracyResult(100, 100))
    result(2)._1 should be (new LossResult(230.67628f, 100))
  }
}
| jenniew/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/optim/ValidatorSpec.scala | Scala | apache-2.0 | 3,375 |
/* __ *\\
** ________ ___ / / ___ __ ____ Scala.js Test Suite **
** / __/ __// _ | / / / _ | __ / // __/ (c) 2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ |/_// /_\\ \\ http://scala-js.org/ **
** /____/\\___/_/ |_/____/_/ | |__/ /____/ **
** |/____/ **
\\* */
package org.scalajs.testsuite.utils
import scala.scalajs.runtime
object Platform {

  /** Returns `true` if and only if the code is executing on a JVM.
   *  Note: Returns `false` when executing on any JS VM.
   */
  final val executingInJVM = false

  final val executingInJVMOnJDK6 = false
  final val executingInJVMOnJDK7OrLower = false

  // Members that are only accessible from testSuite/js
  // (i.e. do no link on the JVM).

  /** Whether the underlying JS VM supports typed arrays. */
  def areTypedArraysSupported: Boolean =
    runtime.Bits.areTypedArraysSupported

  // Which JS environment is running the test suite.
  def executingInRhino: Boolean = flag("rhino")
  def executingInNodeJS: Boolean = flag("nodejs")
  def executingInPhantomJS: Boolean = flag("phantomjs")
  def typedArrays: Boolean = flag("typedarray")

  // Linker stage / mode flags.
  def isInFastOpt: Boolean = flag("fastopt-stage")
  def isInFullOpt: Boolean = flag("fullopt-stage")
  def isInProductionMode: Boolean = flag("production-mode")
  def isInDevelopmentMode: Boolean = flag("development-mode")

  // Semantics compliance flags.
  def hasCompliantAsInstanceOfs: Boolean = flag("compliant-asinstanceofs")
  def hasCompliantModule: Boolean = flag("compliant-moduleinit")
  def hasStrictFloats: Boolean = flag("strict-floats")

  /** Reads the boolean system property `scalajs.<key>`, defaulting to `false`. */
  private def flag(key: String): Boolean =
    System.getProperty(s"scalajs.$key", "false") == "true"
}
| mdedetrich/scala-js | test-suite/js/src/test/scala/org/scalajs/testsuite/utils/Platform.scala | Scala | bsd-3-clause | 1,808 |
package info.armado.ausleihe.client.remote.services
import info.armado.ausleihe.client.transport.dataobjects.inuse._
import info.armado.ausleihe.client.transport.requests.IssueIdentityCardRequestDTO
import info.armado.ausleihe.client.transport.results._
import info.armado.ausleihe.client.util.DTOExtensions._
import info.armado.ausleihe.database.access.{EnvelopeDao, IdentityCardDao, LendIdentityCardDao}
import info.armado.ausleihe.database.barcode._
import info.armado.ausleihe.database.entities.{Envelope, IdentityCard, LendIdentityCard}
import javax.enterprise.context.RequestScoped
import javax.inject.Inject
import javax.transaction.Transactional
import javax.ws.rs._
import javax.ws.rs.core.MediaType
@Path("/issue")
@RequestScoped
class IssueIdentityCardsService {

  // DAOs injected by CDI; the `_` initializers are placeholders filled at injection time.
  @Inject var identityCardDao: IdentityCardDao = _
  @Inject var envelopeDao: EnvelopeDao = _
  @Inject var lendIdentityCardDao: LendIdentityCardDao = _

  /**
   * Looks up an identity card by barcode.
   *
   * @return `Some(Left(lend))` when the card is currently lent out,
   *         `Some(Right(card))` when an activated card exists but is not lent,
   *         `None` when no activated card with this barcode exists.
   */
  def findIdentityCard(
      identityCardBarcode: Barcode
  ): Option[Either[LendIdentityCard, IdentityCard]] =
    lendIdentityCardDao.selectCurrentByIdentityCardBarcode(identityCardBarcode) match {
      case Some(lendIdentityCard) => Some(Left(lendIdentityCard))
      case None =>
        // Not currently lent: fall back to the plain identity-card table.
        identityCardDao.selectActivatedByBarcode(identityCardBarcode) match {
          case Some(identityCard) => Some(Right(identityCard))
          case None => None
        }
    }

  /**
   * Looks up an envelope by barcode.
   *
   * @return `Some(Left(lend))` when the envelope belongs to a current lend record,
   *         `Some(Right(envelope))` when an activated envelope exists but is unused,
   *         `None` when no activated envelope with this barcode exists.
   */
  def findEnvelope(envelopeBarcode: Barcode): Option[Either[LendIdentityCard, Envelope]] =
    lendIdentityCardDao.selectCurrentByEnvelopeBarcode(envelopeBarcode) match {
      case Some(lendIdentityCard) => Some(Left(lendIdentityCard))
      case None =>
        // Not part of a current lend: fall back to the plain envelope table.
        envelopeDao.selectActivatedByBarcode(envelopeBarcode) match {
          case Some(envelopeBarcode) => Some(Right(envelopeBarcode))
          case None => None
        }
    }

  /**
   * Issues an identity card together with an envelope.
   *
   * Both barcodes must be syntactically valid, refer to existing activated
   * entities, and neither may currently be in use; otherwise a dedicated
   * failure DTO is returned. Requests with missing barcodes are rejected
   * with HTTP 400.
   */
  @POST
  @Consumes(Array(MediaType.APPLICATION_XML))
  @Produces(Array(MediaType.APPLICATION_XML))
  @Path("/identitycard")
  @Transactional
  def issueIdentityCard(issueIdentityCardRequest: IssueIdentityCardRequestDTO): AbstractResultDTO =
    issueIdentityCardRequest match {
      case IssueIdentityCardRequestDTO(Some(identityCardBarcode), Some(envelopeBarcode)) =>
        (ValidateBarcode(identityCardBarcode), ValidateBarcode(envelopeBarcode)) match {
          // both given barcodes (the identitycard barcode and the envelope barcode) are valid
          case (ValidBarcode(identityCardBarcode), ValidBarcode(envelopeBarcode)) =>
            (findIdentityCard(identityCardBarcode), findEnvelope(envelopeBarcode)) match {
              // both the identitycard and the envelope are currently not issued
              case (Some(Right(identityCard)), Some(Right(envelope))) => {
                lendIdentityCardDao.issueIdentityCard(identityCard, envelope)
                IssueIdentityCardSuccessDTO(identityCard.toIdentityCardDTO, envelope.toEnvelopeDTO)
              }
              // the identity card is already issued
              case (Some(Left(lic @ LendIdentityCard(_, _, _, _, _))), _) =>
                LendingEntityInUseDTO(
                  lic.toIdentityCardDTO,
                  IdentityCardInUseDTO(lic.toEnvelopeDTO, lic.toGameDTO)
                )
              // the envelope is already in use
              case (_, Some(Left(lic @ LendIdentityCard(_, _, _, _, _)))) =>
                LendingEntityInUseDTO(
                  lic.toEnvelopeDTO,
                  EnvelopeInUseDTO(lic.toIdentityCardDTO, lic.toGameDTO)
                )
              // unknown identity card or envelope barcode
              case (None, _) => LendingEntityNotExistsDTO(identityCardBarcode.toString)
              case (_, None) => LendingEntityNotExistsDTO(envelopeBarcode.toString)
            }
          case (ValidBarcode(_), InvalidBarcode(_)) => IncorrectBarcodeDTO(envelopeBarcode)
          case (InvalidBarcode(_), _) => IncorrectBarcodeDTO(identityCardBarcode)
          case _ => throw new BadRequestException("Invalid input request")
        }
      // wrong input
      case _ => throw new BadRequestException()
    }
}
| Spielekreis-Darmstadt/lending | lending-client-backend/src/main/scala/info/armado/ausleihe/client/remote/services/IssueIdentityCardsService.scala | Scala | apache-2.0 | 3,987 |
import sbt._
object Dependencies {

  // Additional resolvers (none required at present).
  val resolutionRepos = Seq()

  // Versions shared by several modules below.
  val Http4sVersion = "0.18.12"
  val CirceVersion = "0.9.3"

  // Helpers that tag a group of modules with an sbt/ivy configuration.
  def compile(deps: ModuleID*): Seq[ModuleID] = deps.map(_ % "compile")
  def provided(deps: ModuleID*): Seq[ModuleID] = deps.map(_ % "provided")
  def test(deps: ModuleID*): Seq[ModuleID] = deps.map(_ % "test")
  def runtime(deps: ModuleID*): Seq[ModuleID] = deps.map(_ % "runtime")
  def container(deps: ModuleID*): Seq[ModuleID] = deps.map(_ % "container")

  // include for core
  // JSON and BSON encodings
  val jackson = "com.fasterxml.jackson.core" % "jackson-core" % "2.7.4"
  val jacksonData = "com.fasterxml.jackson.core" % "jackson-databind" % "2.7.4"
  val jacksonBson = "de.undercouch" % "bson4jackson" % "2.7.0"
  val jacksonJsr353 = "com.fasterxml.jackson.datatype" % "jackson-datatype-jsr353" % "2.7.4"

  // http4s server/client stack plus circe JSON support.
  val http4sBlazeServer = "org.http4s" %% "http4s-blaze-server" % Http4sVersion
  val http4sBlazeClient = "org.http4s" %% "http4s-blaze-client" % Http4sVersion
  val http4sCirce = "org.http4s" %% "http4s-circe" % Http4sVersion
  val http4sDsl = "org.http4s" %% "http4s-dsl" % Http4sVersion
  val http4sClient = "org.http4s" %% "http4s-client" % Http4sVersion
  val circeParser = "io.circe" %% "circe-parser" % CirceVersion
  val http4s = Seq(http4sBlazeServer, http4sDsl, http4sClient)

  // Logging Config and Metrics
  val slf4j = "org.slf4j" % "slf4j-log4j12" % "1.7.21"
  val slf4jAPI = "org.slf4j" % "slf4j-api" % "1.7.21"
  val lbConfig = "com.typesafe" % "config" % "1.3.1"
  val micrometer = "io.micrometer" % "micrometer-core" % "1.0.4"

  // Various dependencies for implementations of Event and Binder servers
  // EventSource and/or Sink implementations
  val nats = "io.nats" % "java-nats-streaming" % "0.4.1"
  val artemis = "org.apache.activemq" % "artemis-jms-client" % "2.4.0"
  val kafka = "org.apache.kafka" % "kafka-clients" % "1.0.0"
  val hbaseCommon = "org.apache.hbase" % "hbase-common" % "2.0.1"
  val hbaseClient = "org.apache.hbase" % "hbase-client" % "2.0.1"
  val hadoop = "org.apache.hadoop" % "hadoop-common" % "2.2.0"

  // Binder implementations
  val lmdb = "org.lmdbjava" % "lmdbjava" % "0.6.0"
  val postgres = "org.postgresql" % "postgresql" % "42.1.4"

  // REST framework
  val vertxWeb = "io.vertx" % "vertx-web" % "3.4.2"
  val vertx = "io.vertx" % "vertx-core" % "3.4.2"
  val vertxAuth = "io.vertx" % "vertx-auth-common" % "3.4.2"

  // Java Test frameworks
  val junit = "junit" % "junit" % "4.12"
  val junitIntf = "com.novocode" % "junit-interface" % "0.11"
  val vertxUnit = "io.vertx" % "vertx-unit" % "3.4.2"
  val restAssured = "io.rest-assured" % "rest-assured" % "3.0.6"
  val commonsIo = "commons-io" % "commons-io" % "2.6"

  // Scala Test
  val scalatest = "org.scalatest" %% "scalatest" % "3.0.5"
}
| Tesco/mewbase | project/Dependencies.scala | Scala | mit | 2,952 |
package org.jetbrains.plugins.scala
package annotator
import com.intellij.codeInspection.ProblemHighlightType
import com.intellij.lang.annotation.AnnotationHolder
import org.jetbrains.plugins.scala.annotator.quickfix.ReportHighlightingErrorQuickFix
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.lang.psi.api.base.{ScConstructor, ScPrimaryConstructor}
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScConstrBlock
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScFunction
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.psi.types.api.ScTypePresentation
import org.jetbrains.plugins.scala.lang.psi.types.result.TypingContext
import org.jetbrains.plugins.scala.lang.resolve.ScalaResolveResult
trait ConstructorAnnotator {
  // TODO duplication with application annotator.

  /**
   * Annotates a constructor invocation: reports an error when the constructor
   * cannot be resolved (or resolution is ambiguous) and, for a uniquely
   * resolved constructor, maps each applicability problem to a dedicated
   * error annotation.
   */
  def annotateConstructor(constructor: ScConstructor, holder: AnnotationHolder) {
    //in case if constructor is function
    constructor.reference match {
      case None => return
      case _ =>
    }
    val resolved = constructor.reference.toList.flatMap(_.resolveAllConstructors)
    resolved match {
      case Nil =>
        holder.createErrorAnnotation(constructor.typeElement, "Cannot resolve constructor")
      case List(r: ScalaResolveResult) =>
        // Collect all value parameters that were not supplied, with their types.
        val missed = for (MissedValueParameter(p) <- r.problems) yield p.name + ": " + p.paramType.presentableText
        // Anchor argument-related errors on the argument list when present,
        // otherwise on the type element itself.
        val argsElement = constructor.args.getOrElse(constructor.typeElement)
        if (missed.nonEmpty)
          holder.createErrorAnnotation(argsElement,
            "Unspecified value parameters: " + missed.mkString(", "))
        r.problems.foreach {
          case ExcessArgument(argument) =>
            holder.createErrorAnnotation(argument, "Too many arguments for constructor")
          case TypeMismatch(expression, expectedType) =>
            if (expression != null)
              for (t <- expression.getType(TypingContext.empty)) {
                //TODO show parameter name
                val (expectedText, actualText) = ScTypePresentation.different(expectedType, t)
                val message = ScalaBundle.message("type.mismatch.expected.actual", expectedText, actualText)
                val annotation = holder.createErrorAnnotation(expression, message)
                annotation.registerFix(ReportHighlightingErrorQuickFix)
              }
            else {
              //TODO investigate case when expression is null. It's possible when new Expression(ScType)
            }
          case MissedValueParameter(_) => // simultaneously handled above
          case UnresolvedParameter(_) => // don't show function inapplicability, unresolved
          case MalformedDefinition() =>
            holder.createErrorAnnotation(constructor.typeElement, "Constructor has malformed definition")
          case ExpansionForNonRepeatedParameter(expression) =>
            holder.createErrorAnnotation(expression, "Expansion for non-repeated parameter")
          case PositionalAfterNamedArgument(argument) =>
            holder.createErrorAnnotation(argument, "Positional after named argument")
          case ParameterSpecifiedMultipleTimes(assignment) =>
            holder.createErrorAnnotation(assignment.getLExpression, "Parameter specified multiple times")
          case WrongTypeParameterInferred => //todo: ?
          case ExpectedTypeMismatch => //will be reported later
          case DefaultTypeParameterMismatch(expected, actual) => constructor.typeArgList match {
            case Some(tpArgList) =>
              val message: String = ScalaBundle.message("type.mismatch.default.args.expected.actual", expected, actual)
              holder.createErrorAnnotation(tpArgList, message)
            case _ =>
          }
          case _ => holder.createErrorAnnotation(argsElement, "Not applicable." /* TODO + signatureOf(f)*/)
        }
      case _ =>
        // More than one candidate constructor: resolution is ambiguous.
        // (was `case results =>` — the binding was never used)
        holder.createErrorAnnotation(constructor.typeElement, "Cannot resolve constructor")
    }
  }

  /**
   * Checks an auxiliary constructor: its self invocation must target the
   * primary constructor or a *preceding* auxiliary constructor, and a missing
   * self invocation in source files is an error.
   */
  def annotateAuxiliaryConstructor(constr: ScConstrBlock, holder: AnnotationHolder) {
    val selfInvocation = constr.selfInvocation
    selfInvocation match {
      case Some(self) =>
        self.bind match {
          case Some(_: ScPrimaryConstructor) => //it's ok
          case Some(fun: ScFunction) =>
            //check order: the called auxiliary constructor must be defined before this one
            if (fun.getTextRange.getStartOffset > constr.getTextRange.getStartOffset) {
              val annotation = holder.createErrorAnnotation(self,
                ScalaBundle.message("called.constructor.definition.must.precede"))
              annotation.setHighlightType(ProblemHighlightType.GENERIC_ERROR_OR_WARNING)
            }
          case _ =>
        }
      case None =>
        constr.getContainingFile match {
          case file: ScalaFile if !file.isCompiled =>
            val annotation = holder.createErrorAnnotation(constr,
              ScalaBundle.message("constructor.invocation.expected"))
            annotation.setHighlightType(ProblemHighlightType.GENERIC_ERROR_OR_WARNING)
          case _ => //nothing to do in decompiled stuff
        }
    }
  }
}
} | whorbowicz/intellij-scala | src/org/jetbrains/plugins/scala/annotator/ConstructorAnnotator.scala | Scala | apache-2.0 | 5,176 |
/*
* Copyright 2015 Webtrends (http://www.webtrends.com)
*
* See the LICENCE.txt file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.webtrends.harness.component.spray
import java.util.concurrent.atomic.AtomicInteger
import com.webtrends.harness.app.HarnessActor.SystemReady
import com.webtrends.harness.component.spray.client.SprayClient
import com.webtrends.harness.component.{ComponentStarted, Component}
import com.webtrends.harness.utils.ConfigUtil
import spray.can.server.ServerSettings
// Readiness signals for the HTTP and WebSocket servers.
// NOTE(review): `receive` in SprayManager matches on the companion objects
// (`case HttpRunning =>`), not on instances (`HttpRunning()`); senders must
// therefore send the companion object itself for the match to fire —
// confirm against the server implementations.
case class HttpRunning()
case class WebSocketRunning()
/**
 * Wookiee component that manages the Spray HTTP server, WebSocket server and
 * HTTP client. It waits until all configured servers report readiness before
 * telling the HTTP server to start processing and notifying the parent.
 */
class SprayManager(name: String) extends Component(name)
    with SprayHttpServer
    with SprayClient
    with SprayWebSocketServer {

  val spSettings = ServerSettings(config)

  // Internal HTTP port, defaulting to 8080 when not configured.
  val internalHttpPort = ConfigUtil.getDefaultValue(s"$name.http-port", config.getInt, 8080)

  val externalHttpPortPath: String = s"$name.http-external-port"
  // Optional external HTTP port: only present when explicitly configured.
  // (was a match on a Boolean — plain if/else is the idiomatic form)
  val externalHttpPort =
    if (config.hasPath(externalHttpPortPath)) Some(config.getInt(externalHttpPortPath)) else None

  // WebSocket port, defaulting to 8081 when not configured.
  val websocketPort = ConfigUtil.getDefaultValue(s"$name.websocket-port", config.getInt, 8081)

  // Counts how many servers have reported that they are running.
  var rCount = new AtomicInteger(0)

  override def receive = super.receive orElse {
    case HttpRunning =>
      checkRunning()
    case WebSocketRunning =>
      checkRunning()
    case HttpReloadRoutes =>
      sendHttpServerMessage(HttpReloadRoutes)
  }

  // Two servers must report (internal HTTP + WebSocket); three when an
  // external HTTP port is configured as well.
  private def expectedRunningCount =
    if (externalHttpPort.isDefined) 3 else 2

  // Records one readiness notification; once every expected server has
  // reported, start HTTP processing and notify the parent component.
  private def checkRunning() = {
    // incrementAndGet is a single atomic step, so the threshold comparison
    // cannot be missed even if notifications ever arrived concurrently
    // (the original getAndIncrement-then-get pair was not atomic).
    if (rCount.incrementAndGet() == expectedRunningCount) {
      sendHttpServerMessage(HttpStartProcessing)
      context.parent ! ComponentStarted(self.path.name)
    }
  }

  override def start = {
    startSprayServer(internalHttpPort, externalHttpPort, Some(spSettings))
    startWebSocketServer(websocketPort, Some(spSettings))
    // start the HttpClient actor
    startSprayClient
  }

  override def stop = {
    super.stop
    sendHttpServerMessage(ShutdownServer)
    stopWebSocketServer
  }
}
object SprayManager {
  /** Component identifier used as the root of all config paths below. */
  // Constants: `val` instead of `def` so the strings are built once,
  // not re-concatenated on every call. Call sites are unchanged.
  val ComponentName = "wookiee-spray"

  /** Config path for the HTTP client timeout. */
  val KeyHttpClientTimeout = s"$ComponentName.client.timeout"
  /** Config path for the static content root directory. */
  val KeyStaticRoot = s"$ComponentName.static-content.root-path"
  /** Config path for the static content type. */
  val KeyStaticType = s"$ComponentName.static-content.type"
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples
import java.util.Random
import org.apache.spark.util.Vector
import org.apache.spark.SparkContext._
import scala.collection.mutable.HashMap
import scala.collection.mutable.HashSet
/**
* K-means clustering.
*/
object LocalKMeans {
  val N = 1000
  val R = 1000 // Scaling factor
  val D = 10
  val K = 10
  val convergeDist = 0.001
  val rand = new Random(42)

  /** Generates N random D-dimensional points with coordinates in [0, R). */
  def generateData: Array[Vector] =
    Array.tabulate(N)(_ => Vector(D, _ => rand.nextDouble * R))

  /**
   * Returns the 1-based index of the center closest (by squared distance) to `p`.
   * Assumes `centers` is keyed 1..centers.size, as built in `main`.
   */
  def closestPoint(p: Vector, centers: HashMap[Int, Vector]): Int = {
    // (removed an unused `var index` and the non-idiomatic `return`)
    var bestIndex = 0
    var closest = Double.PositiveInfinity
    for (i <- 1 to centers.size) {
      val tempDist = p.squaredDist(centers(i))
      if (tempDist < closest) {
        closest = tempDist
        bestIndex = i
      }
    }
    bestIndex
  }

  def main(args: Array[String]) {
    val data = generateData

    // Pick K distinct random data points as the initial centers (1-based keys).
    val points = new HashSet[Vector]
    val kPoints = new HashMap[Int, Vector]
    var tempDist = 1.0

    while (points.size < K) {
      points.add(data(rand.nextInt(N)))
    }

    val iter = points.iterator
    for (i <- 1 to points.size) {
      kPoints.put(i, iter.next())
    }

    println("Initial centers: " + kPoints)

    // Lloyd's iterations: repeat until the total center movement is small.
    while (tempDist > convergeDist) {
      // Assign every point to its closest center, tagging it with a count of 1.
      val closest = data.map(p => (closestPoint(p, kPoints), (p, 1)))
      val mappings = closest.groupBy[Int](x => x._1)
      // Per center: sum of assigned points and their count.
      val pointStats = mappings.map(pair => pair._2.reduceLeft[(Int, (Vector, Int))] {
        case ((id1, (x1, y1)), (id2, (x2, y2))) => (id1, (x1 + x2, y1 + y2))
      })
      // New center = centroid of its assigned points.
      val newPoints = pointStats.map(mapping => (mapping._1, mapping._2._1 / mapping._2._2))

      // Total squared movement of the centers in this iteration.
      tempDist = 0.0
      for (mapping <- newPoints) {
        tempDist += kPoints(mapping._1).squaredDist(mapping._2)
      }

      for (newP <- newPoints) {
        kPoints.put(newP._1, newP._2)
      }
    }

    println("Final centers: " + kPoints)
  }
}
| mkolod/incubator-spark | examples/src/main/scala/org/apache/spark/examples/LocalKMeans.scala | Scala | apache-2.0 | 2,814 |
package com.wavesplatform.settings
// Configuration of the unconfirmed-transaction (UTX) pool.
// NOTE(review): field meanings below are inferred from the names — confirm
// against the UTX pool implementation that consumes these settings.
case class UtxSettings(
    maxSize: Int,                                // presumably max number of transactions in the pool
    maxBytesSize: Long,                          // presumably max total size of pooled transactions, in bytes
    maxScriptedSize: Int,                        // presumably max number of scripted transactions
    blacklistSenderAddresses: Set[String],       // senders whose transactions are blacklisted
    allowBlacklistedTransferTo: Set[String],     // recipients exempt from the sender blacklist
    fastLaneAddresses: Set[String],              // addresses given fast-lane treatment
    allowTransactionsFromSmartAccounts: Boolean, // whether smart-account transactions are allowed
    allowSkipChecks: Boolean                     // whether some checks may be skipped
)
| wavesplatform/Waves | node/src/main/scala/com/wavesplatform/settings/UtxSettings.scala | Scala | mit | 332 |
package graf.gremlin.structure.convert
import java.util.{Optional, Comparator}
import java.util.function.{ Function ⇒ JFunction, _ }
import graf.gremlin.structure.convert.WrapAsScala._
import scala.collection.convert
import scala.language.implicitConversions
/**
 * Extends `scala.collection.convert.DecorateAsScala` with `AsScala` decorators
 * for `java.util.function` types (plus `Comparator` and `Optional`), so such
 * Java values can be converted to their Scala counterparts via `.asScala`.
 * Each converter simply wraps the corresponding `WrapAsScala` conversion.
 */
trait DecorateAsScala extends convert.DecorateAsScala {

  import Decorators._

  implicit def asScalaBiConsumerConverter[T, U](f: BiConsumer[T, U]): AsScala[(T, U) ⇒ Unit] =
    new AsScala(asScalaBiConsumer(f))

  implicit def asScalaBiFunctionConverter[T, U, R](f: BiFunction[T, U, R]): AsScala[(T, U) ⇒ R] =
    new AsScala(asScalaBiFunction(f))

  implicit def asScalaBinaryOperatorConverter[T](f: BinaryOperator[T]): AsScala[(T, T) ⇒ T] =
    new AsScala(asScalaBinaryOperator(f))

  implicit def asScalaBiPredicateConverter[T, U](f: BiPredicate[T, U]): AsScala[(T, U) ⇒ Boolean] =
    new AsScala(asScalaBiPredicate(f))

  implicit def asScalaConsumerConverter[T](f: Consumer[T]): AsScala[T ⇒ Unit] =
    new AsScala(asScalaConsumer(f))

  implicit def asScalaJFunctionConverter[T, U](f: JFunction[T, U]): AsScala[T ⇒ U] =
    new AsScala(asScalaJFunction(f))

  implicit def asScalaPredicateConverter[T](f: Predicate[T]): AsScala[T ⇒ Boolean] =
    new AsScala(asScalaPredicate(f))

  // Note: a Java Supplier becomes a `Unit ⇒ T` function, not a by-name value.
  implicit def asScalaSupplierConverter[T](f: Supplier[T]): AsScala[Unit ⇒ T] =
    new AsScala(asScalaSupplier(f))

  implicit def asScalaUnaryOperatorConverter[T](f: UnaryOperator[T]): AsScala[T ⇒ T] =
    new AsScala(asScalaUnaryOperator(f))

  implicit def asScalaComparatorConverter[T](f: Comparator[T]): AsScala[Ordering[T]] =
    new AsScala(asScalaComparator(f))

  implicit def asScalaOptionConverter[T](f: Optional[T]): AsScala[Option[T]] =
    new AsScala(asScalaOption(f))
}
| dkrieg/Graf | src/main/scala/graf/gremlin/structure/convert/DecorateAsScala.scala | Scala | apache-2.0 | 1,760 |
package com.atomist.tree.pathexpression
import com.atomist.graph.GraphNode
import com.atomist.rug.runtime.js.{DefaultExecutionContext, ExecutionContext}
import com.atomist.tree.pathexpression.ExecutionResult.ExecutionResult
object ExpressionEngine {

  /**
   * A function applied to nodes before any methods (including navigation)
   * are called on them; can be used to attach state to a node.
   */
  type NodePreparer = GraphNode => GraphNode
}
/**
 * Evaluates path expressions, whether as raw strings or parsed.
 */
trait ExpressionEngine {

  import ExpressionEngine._

  /**
   * Return the result of evaluating the expression. If the expression is invalid
   * return a message, otherwise the result of invoking the valid expression.
   *
   * @param node root node to evaluate the path against
   * @param parsed Parsed path expression. It's already been validated
   * @param executionContext context used to resolve repos or anything else
   *                         that needs user context
   * @param nodePreparer called on nodes before any methods (including navigation)
   *                     are called on them. This can be used to set state.
   * @return the [[ExecutionResult]] of evaluating `parsed` against `node`
   */
  def evaluate(node: GraphNode,
               parsed: PathExpression,
               executionContext: ExecutionContext = DefaultExecutionContext,
               nodePreparer: Option[NodePreparer] = None
              ): ExecutionResult
}
| atomist/rug | src/main/scala/com/atomist/tree/pathexpression/ExpressionEngine.scala | Scala | gpl-3.0 | 1,303 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2.json
import scala.collection.JavaConverters._
import org.apache.hadoop.fs.Path
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.expressions.{Expression, ExprUtils}
import org.apache.spark.sql.catalyst.json.JSONOptionsInRead
import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap
import org.apache.spark.sql.connector.read.PartitionReaderFactory
import org.apache.spark.sql.errors.QueryCompilationErrors
import org.apache.spark.sql.execution.datasources.PartitioningAwareFileIndex
import org.apache.spark.sql.execution.datasources.json.JsonDataSource
import org.apache.spark.sql.execution.datasources.v2.TextBasedFileScan
import org.apache.spark.sql.sources.Filter
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.util.CaseInsensitiveStringMap
import org.apache.spark.util.SerializableConfiguration
/**
 * DataSource V2 scan over JSON files. Carries the resolved schemas, scan
 * options and pushed-down filters, and produces the partition reader factory
 * used by executors to read the files.
 */
case class JsonScan(
    sparkSession: SparkSession,
    fileIndex: PartitioningAwareFileIndex,
    dataSchema: StructType,
    readDataSchema: StructType,
    readPartitionSchema: StructType,
    options: CaseInsensitiveStringMap,
    pushedFilters: Array[Filter],
    partitionFilters: Seq[Expression] = Seq.empty,
    dataFilters: Seq[Expression] = Seq.empty)
  extends TextBasedFileScan(sparkSession, options) {

  // JSON read options resolved against the session's local time zone and the
  // configured corrupt-record column name.
  private val parsedOptions = new JSONOptionsInRead(
    CaseInsensitiveMap(options.asScala.toMap),
    sparkSession.sessionState.conf.sessionLocalTimeZone,
    sparkSession.sessionState.conf.columnNameOfCorruptRecord)

  // A JSON file can be split only when the data source itself is splittable
  // (i.e. not in multiLine mode) and the base class allows splitting.
  override def isSplitable(path: Path): Boolean = {
    JsonDataSource(parsedOptions).isSplitable && super.isSplitable(path)
  }

  // Explains why the file cannot be split; only valid for unsplittable files.
  override def getFileUnSplittableReason(path: Path): String = {
    assert(!isSplitable(path))
    if (!super.isSplitable(path)) {
      super.getFileUnSplittableReason(path)
    } else {
      "the json datasource is set multiLine mode"
    }
  }

  override def createReaderFactory(): PartitionReaderFactory = {
    // Check a field requirement for corrupt records here to throw an exception in a driver side
    ExprUtils.verifyColumnNameOfCorruptRecord(dataSchema, parsedOptions.columnNameOfCorruptRecord)
    // Selecting only the corrupt-record column is disallowed.
    if (readDataSchema.length == 1 &&
      readDataSchema.head.name == parsedOptions.columnNameOfCorruptRecord) {
      throw QueryCompilationErrors.queryFromRawFilesIncludeCorruptRecordColumnError()
    }
    val caseSensitiveMap = options.asCaseSensitiveMap.asScala.toMap
    // Hadoop Configurations are case sensitive.
    val hadoopConf = sparkSession.sessionState.newHadoopConfWithOptions(caseSensitiveMap)
    // Broadcast the Hadoop conf once so each task does not re-serialize it.
    val broadcastedConf = sparkSession.sparkContext.broadcast(
      new SerializableConfiguration(hadoopConf))
    // The partition values are already truncated in `FileScan.partitions`.
    // We should use `readPartitionSchema` as the partition schema here.
    JsonPartitionReaderFactory(sparkSession.sessionState.conf, broadcastedConf,
      dataSchema, readDataSchema, readPartitionSchema, parsedOptions, pushedFilters)
  }

  // Equality must also account for the full data schema, the options and the
  // pushed filters, on top of the base scan equality.
  override def equals(obj: Any): Boolean = obj match {
    case j: JsonScan => super.equals(j) && dataSchema == j.dataSchema && options == j.options &&
      equivalentFilters(pushedFilters, j.pushedFilters)
    case _ => false
  }

  override def hashCode(): Int = super.hashCode()

  override def description(): String = {
    super.description() + ", PushedFilters: " + pushedFilters.mkString("[", ", ", "]")
  }
}
| ueshin/apache-spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/json/JsonScan.scala | Scala | apache-2.0 | 4,258 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package iht.views.filter
import iht.views.ViewTestHelper
import iht.views.html.filter.no_assets
import play.api.test.Helpers.{contentAsString, _}
/** View-level tests for the "no assets" filter page: title, IHT-400 guidance
  * content, external form link, and the "Previous answers" summary rows/links. */
class NoAssetsViewTest extends ViewTestHelper {
  // Unauthenticated request: this filter page is reachable before sign-in.
  val fakeRequest = createFakeRequest(isAuthorised = false)
  lazy val noAssetsView: no_assets = app.injector.instanceOf[no_assets]
  val iht400PaperFormLink = "https://www.gov.uk/government/publications/inheritance-tax-inheritance-tax-account-iht400"
  "No Assets view" must {
    "have no message keys in html" in {
      val result = noAssetsView()(fakeRequest, messages)
      val view = asDocument(contentAsString(result)).toString
      noMessageKeysShouldBePresent(view)
    }
    "display the correct title" in {
      val result = noAssetsView()(fakeRequest, messages)
      val doc = asDocument(contentAsString(result))
      val title = doc.getElementsByTag("h1").first
      title.text must be(messagesApi("page.iht.filter.noAssets.title"))
    }
    "display the correct browser title" in {
      val result = noAssetsView()(fakeRequest, messages)
      val doc = asDocument(contentAsString(result))
      val browserTitle = doc.getElementsByTag("title").first
      browserTitle.text must be(messagesApi("page.iht.filter.noAssets.title") + " - GOV.UK")
    }
    "contain content advising why you must use IHT400 form" in {
      val result = noAssetsView()(fakeRequest, messages)
      val content = contentAsString(result)
      content must include(messagesApi("page.iht.filter.noAssets.label.b"))
      content must include(messagesApi("page.iht.filter.noAssets.label.c"))
    }
    "contain link to IHT-400 paper form" in {
      val result = noAssetsView()(fakeRequest, messages)
      val doc = asDocument(contentAsString(result))
      val linkElement = doc.getElementById("form-link")
      linkElement.attr("href") must be(iht400PaperFormLink)
    }
    "contain a 'Previous answers' section" in {
      val result = noAssetsView()(fakeRequest, messages)
      val doc = asDocument(contentAsString(result))
      assertRenderedById(doc, "previous-answers")
    }
    "contain a 'Start again' link to go back to the domicile page" in {
      val result = noAssetsView()(fakeRequest, messages)
      val doc = asDocument(contentAsString(result))
      val link = doc.getElementById("start-again")
      link.text() must be(messagesApi("iht.startAgain"))
      link.attr("href") must be(iht.controllers.filter.routes.DomicileController.onPageLoad().url)
    }
    "contain a row showing the user's answer to the previous domicile question" in {
      val result = noAssetsView()(fakeRequest, messages)
      val doc = asDocument(contentAsString(result))
      val row = doc.getElementById("domicile-row")
      row.text() must include(messagesApi("page.iht.registration.deceasedPermanentHome.title"))
      row.text() must include(messagesApi("iht.countries.englandOrWales"))
    }
    "contain a 'Change' link to go back to the domicile page" in {
      val result = noAssetsView()(fakeRequest, messages)
      val doc = asDocument(contentAsString(result))
      val link = doc.getElementById("change-domicile")
      link.text() must include(messagesApi("iht.change"))
      link.attr("href") must be(iht.controllers.filter.routes.DomicileController.onPageLoad().url)
    }
    // NOTE(review): despite the name, no "under 32500 parameter" is passed —
    // `noAssetsView()` takes no arguments here. Confirm whether the view is
    // supposed to accept an estimate parameter.
    "contain a row showing the user's answer to the previous estimate question when given the under 32500 parameter" in {
      val result = noAssetsView()(fakeRequest, messages)
      val doc = asDocument(contentAsString(result))
      val row = doc.getElementById("estimate-row")
      row.text() must include(messagesApi("iht.roughEstimateEstateWorth"))
      row.text() must include(messagesApi("page.iht.filter.estimate.choice.under"))
    }
    // NOTE(review): this test body is byte-identical to the one above — it
    // claims to cover the "between" answer but still asserts the
    // `...estimate.choice.under` message key. Looks like a copy-paste defect;
    // verify the intended key (e.g. `...choice.between`) and the view input.
    "contain a row showing the user's answer to the previous estimate question when given the between parameter" in {
      val result = noAssetsView()(fakeRequest, messages)
      val doc = asDocument(contentAsString(result))
      val row = doc.getElementById("estimate-row")
      row.text() must include(messagesApi("iht.roughEstimateEstateWorth"))
      row.text() must include(messagesApi("page.iht.filter.estimate.choice.under"))
    }
    "contain a 'Change' link to go back to the estimate page" in {
      val result = noAssetsView()(fakeRequest, messages)
      val doc = asDocument(contentAsString(result))
      val link = doc.getElementById("change-estimate")
      link.text() must include(messagesApi("iht.change"))
      link.attr("href") must be(iht.controllers.filter.routes.EstimateController.onPageLoadWithoutJointAssets().url)
    }
  }
}
| hmrc/iht-frontend | test/iht/views/filter/NoAssetsViewTest.scala | Scala | apache-2.0 | 5,223 |
/**
* Copyright (C) 2014 TU Berlin (peel@dima.tu-berlin.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.peelframework.core.results.etl
import java.io.File
import java.nio.file.Path
import java.sql.Connection
import akka.actor._
import akka.routing.{Broadcast, FromConfig}
import com.typesafe.config.Config
import org.peelframework.core.results.etl.extractor.EventExtractorCompanion
import org.peelframework.core.results.model.ExperimentRun
import org.springframework.context.ApplicationContext
import scala.collection.JavaConverters._
import scala.language.{existentials, postfixOps}
/** EventExtractorManager actor.
*
* Handles `ProcessFile` messages.
*/
class EventExtractorManager(appContext: ApplicationContext, config: Config, conn: Connection) extends Actor with ActorLogging {
  import EventExtractorManager._
  import FileProcessor.Process
  /** Writer actor: single sink for extracted events; watched so we can detect its termination. */
  val writer = context.watch(context.actorOf(Writer.props(appContext, conn), "writer"))
  /** Processor pool actor (router configured via deployment config); watched for shutdown ordering. */
  val processor = context.watch(context.actorOf(FromConfig.props(FileProcessor.props(appContext)), "processor"))
  /** Registered extractor companion objects, discovered from the Spring application context. */
  val companions = appContext.getBeansOfType(classOf[EventExtractorCompanion]).asScala.values.toSeq
  override def preStart() = {
    log.info(s"Staring EventExtractorManager")
  }
  override def postStop() = {
    log.info(s"Stopped EventExtractorManager")
  }
  /** Normal state message handler. */
  override def receive: Receive = {
    case process@ProcessFile(basePath, file, run) =>
      // find extractors for this file; eligibility is decided on the path
      // relative to basePath (see ProcessFile.relativeFile)
      val extractors = for (companion <- companions; if companion.canProcess(process.relativeFile)) yield {
        val p = companion.props(run, appContext, writer) // construct extractor props
        val r = companion.reader(file) // construct reader required for this actor
        r -> p // return (reader, extractor props) pair
      }
      // send process message if at least one extractor exists
      if (extractors.nonEmpty) {
        val noOfRs = extractors.map(_._1).distinct.size
        val noOfEs = extractors.map(_._2).distinct.size
        log.info(s"Processing file '$file' with $noOfEs extractors and $noOfRs readers")
        // process the underlying file for each distinct file reader, so each
        // file is read once per reader and fanned out to all its extractors
        for ((reader, extractors) <- extractors.groupBy(_._1)) {
          processor ! Process(reader, for ((_, props) <- extractors) yield props)
        }
      }
    case Shutdown =>
      // broadcast shutdown to every routee, then refuse further work
      processor ! Broadcast(FileProcessor.Shutdown)
      context become shuttingDown
  }
  /** "Shutting Down" state message handler: waits for processor, then writer, to terminate. */
  def shuttingDown: Receive = {
    case ProcessFile(_, _, _) =>
      log.warning("Cannot handle 'ProcessFile' message in EventExtractorManager who is shutting down.")
    case Terminated(actor) if actor == processor =>
      writer ! PoisonPill // all processors are done now, it is safe to send the PoisonPill to the writer
    case Terminated(actor) if actor == writer =>
      context stop self // the writer is the last child to terminate
      // NOTE(review): ActorSystem.shutdown() is deprecated in Akka 2.4+ in
      // favour of terminate() — fine for the Akka version this project pins,
      // but verify before upgrading.
      context.system.shutdown() // after that we can shutdown the whole ActorSystem
  }
}
/** Companion object. */
object EventExtractorManager {
  /** Request to process `file` (belonging to experiment `run`); `basePath` is the
    * root against which the file path is relativized for extractor matching. */
  case class ProcessFile(basePath: Path, file: File, run: ExperimentRun) {
    // cached relative path, used by EventExtractorCompanion.canProcess
    lazy val relativeFile = basePath.relativize(file.toPath).toFile
  }
  /** Shutdown message for FileProcessor actors.
    * NOTE(review): a zero-argument `case class` is a smell — `case object Shutdown`
    * would be idiomatic; kept as-is since external senders may construct `Shutdown()`. */
  case class Shutdown()
  /** Props factory, keeping actor construction (and its closed-over state) in one place. */
  def props(context: ApplicationContext, config: Config, conn: Connection): Props = {
    Props(new EventExtractorManager(context, config, conn))
  }
} | carabolic/peel | peel-core/src/main/scala/org/peelframework/core/results/etl/EventExtractorManager.scala | Scala | apache-2.0 | 4,247 |
package models.snapshots
import scalaz._
import Scalaz._
import scalaz.effect.IO
import scalaz.Validation
import scalaz.Validation.FlatMap._
import scalaz.NonEmptyList._
import scalaz.syntax.SemigroupOps
import cache._
import db._
import models.base.RequestInput
import models.tosca.{ KeyValueField, KeyValueList}
import models.Constants._
import io.megam.auth.funnel.FunnelErrors._
import com.datastax.driver.core.{ ResultSet, Row }
import com.websudos.phantom.dsl._
import scala.concurrent.{ Future => ScalaFuture }
import com.websudos.phantom.connectors.{ ContactPoint, KeySpaceDef }
import scala.concurrent.Await
import scala.concurrent.duration._
import utils.{DateHelper, StringStuff}
import io.megam.util.Time
import org.joda.time.{DateTime, DateTimeZone}
import org.joda.time.format.{DateTimeFormat,ISODateTimeFormat}
import io.megam.common.uid.UID
import net.liftweb.json._
import net.liftweb.json.scalaz.JsonScalaz._
import java.nio.charset.Charset
import controllers.stack.ImplicitJsonFormats
/**
* @author ranjitha
*
*/
case class SnapshotsInput( asm_id: String, org_id: String, account_id: String, name: String, status: String, tosca_type: String)
/** Fully-populated snapshot row as stored in / read from the `snapshots` table. */
case class SnapshotsResult(
    id: String,
    asm_id: String,
    org_id: String,
    account_id: String,
    name: String,
    status: String,
    image_id: String,
    tosca_type: String,
    inputs: models.tosca.KeyValueList,
    outputs: models.tosca.KeyValueList,
    json_claz: String,
    created_at: DateTime)
object SnapshotsResult {
  // Convenience constructor: fixes json_claz to "Megam::Snapshots" and stamps created_at with now.
  def apply(id: String, asm_id: String, org_id: String, account_id: String, name: String, status: String, image_id: String, tosca_type: String, inputs: models.tosca.KeyValueList, outputs: models.tosca.KeyValueList) = new SnapshotsResult(id, asm_id, org_id, account_id, name, status, image_id, tosca_type, inputs, outputs, "Megam::Snapshots", DateTime.now())
}
/** phantom table mapping for `snapshots`: declares the columns, the key layout
  * (partition key `id`; clustering keys `asm_id`, `account_id`) and the
  * row-to-case-class conversion. */
sealed class SnapshotsSacks extends CassandraTable[SnapshotsSacks, SnapshotsResult] with ImplicitJsonFormats {
  object id extends StringColumn(this) with PartitionKey[String]
  object asm_id extends StringColumn(this) with PrimaryKey[String]
  object org_id extends StringColumn(this)
  object account_id extends StringColumn(this) with PrimaryKey[String]
  object name extends StringColumn(this)
  object status extends StringColumn(this)
  object image_id extends StringColumn(this)
  object tosca_type extends StringColumn(this)
  // inputs/outputs are stored as lists of JSON-serialized KeyValueField entries.
  object inputs extends JsonListColumn[SnapshotsSacks, SnapshotsResult, KeyValueField](this) {
    override def fromJson(obj: String): KeyValueField = {
      JsonParser.parse(obj).extract[KeyValueField]
    }
    override def toJson(obj: KeyValueField): String = {
      compactRender(Extraction.decompose(obj))
    }
  }
  object outputs extends JsonListColumn[SnapshotsSacks, SnapshotsResult, KeyValueField](this) {
    override def fromJson(obj: String): KeyValueField = {
      JsonParser.parse(obj).extract[KeyValueField]
    }
    override def toJson(obj: KeyValueField): String = {
      compactRender(Extraction.decompose(obj))
    }
  }
  object created_at extends DateTimeColumn(this)
  object json_claz extends StringColumn(this)
  // Materialize a Cassandra row into the API-facing case class.
  def fromRow(row: Row): SnapshotsResult = {
    SnapshotsResult(
      id(row),
      asm_id(row),
      org_id(row),
      account_id(row),
      name(row),
      status(row),
      image_id(row),
      tosca_type(row),
      inputs(row),
      outputs(row),
      json_claz(row),
      created_at(row))
  }
}
/** CRUD layer over the `snapshots` table. Every operation blocks on the phantom
  * future with a hard 5-second timeout and wraps the result in a successful
  * ValidationNel; a timeout or driver error surfaces as a thrown exception
  * from Await.result rather than a Failure — callers should be aware. */
abstract class ConcreteSnapshots extends SnapshotsSacks with RootConnector {
  override lazy val tableName = "snapshots"
  override implicit def space: KeySpace = scyllaConnection.space
  override implicit def session: Session = scyllaConnection.session
  /** Insert a full snapshot row. */
  def insertNewRecord(sps: SnapshotsResult): ValidationNel[Throwable, ResultSet] = {
    val res = insert.value(_.id, sps.id)
      .value(_.asm_id, sps.asm_id)
      .value(_.org_id, sps.org_id)
      .value(_.account_id, sps.account_id)
      .value(_.name, sps.name)
      .value(_.status, sps.status)
      .value(_.image_id, sps.image_id)
      .value(_.tosca_type, sps.tosca_type)
      .value(_.inputs, sps.inputs)
      .value(_.outputs, sps.outputs)
      .value(_.json_claz, sps.json_claz)
      .value(_.created_at, sps.created_at)
      .future()
    Await.result(res, 5.seconds).successNel
  }
  /** Update mutable fields; empty status/image_id in `rip` fall back to the old
    * values via StringStuff.NilOrNot, while inputs/outputs are overwritten. */
  def updateRecord(email: String, rip: SnapshotsResult, aor: Option[SnapshotsResult]): ValidationNel[Throwable, ResultSet] = {
    val oldstatus = aor.get.status
    val newstatus = rip.status
    val oldimage_id= aor.get.image_id
    val newimage_id = rip.image_id
    val res = update.where(_.id eqs rip.id)
      .and(_.account_id eqs email)
      .and(_.asm_id eqs rip.asm_id)
      .modify(_.status setTo StringStuff.NilOrNot(newstatus, oldstatus))
      .and(_.image_id setTo StringStuff.NilOrNot(newimage_id, oldimage_id))
      .and(_.inputs setTo rip.inputs)
      .and(_.outputs setTo rip.outputs)
      .future()
    Await.result(res, 5.seconds).successNel
  }
  /** All snapshots owned by an account (uses ALLOW FILTERING). */
  def listRecords(email: String): ValidationNel[Throwable, Seq[SnapshotsResult]] = {
    val res = select.allowFiltering().where(_.account_id eqs email).fetch()
    Await.result(res, 5.seconds).successNel
  }
  /** Full table scan — admin-only usage; see Snapshots.list. */
  def listAllRecords: ValidationNel[Throwable, Seq[SnapshotsResult]] = {
    val res = select.fetch
    Await.result(res, 5.seconds).successNel
  }
  /** Single snapshot by (id, assembly, account). */
  def getRecord(id: String, assembly_id: String, email: String): ValidationNel[Throwable, Option[SnapshotsResult]] = {
    val res = select.allowFiltering().where(_.id eqs id).and(_.account_id eqs email).and(_.asm_id eqs assembly_id).one()
    Await.result(res, 5.seconds).successNel
  }
  /** All snapshots for one assembly of an account. */
  def getRecords(assembly_id: String, email: String): ValidationNel[Throwable, Seq[SnapshotsResult]] = {
    val res = select.allowFiltering().where(_.account_id eqs email).and(_.asm_id eqs assembly_id).fetch()
    Await.result(res, 5.seconds).successNel
  }
  /** All snapshots with a given id for an account (id may span assemblies). */
  def getSnapRecords(id: String, email: String): ValidationNel[Throwable, Seq[SnapshotsResult]] = {
    val res = select.allowFiltering().where(_.id eqs id).and(_.account_id eqs email).fetch()
    Await.result(res, 5.seconds).successNel
  }
  /** Delete one snapshot row identified by account, assembly and id. */
  def deleteRecord(acc_id: String, asm_id: String, id: String): ValidationNel[Throwable, ResultSet] = {
    val res = delete.where(_.account_id eqs acc_id).and(_.id eqs id).and(_.asm_id eqs asm_id).future()
    Await.result(res,5.seconds).successNel
  }
}
/** Public snapshot service: JSON parsing, id generation, persistence via
  * ConcreteSnapshots, and request publication (atPub/dePub) so downstream
  * workers actually create/remove the snapshot. */
object Snapshots extends ConcreteSnapshots {
  /** Parse the request JSON and build a new SnapshotsResult with a generated
    * "sps"-prefixed UID; image_id starts empty and the name gets a 5-char
    * uniquifying suffix from the UID. */
  private def mkSnapshotsSack(email: String, input: String): ValidationNel[Throwable, SnapshotsResult] = {
    val snapshotsInput: ValidationNel[Throwable, SnapshotsInput] = (Validation.fromTryCatchThrowable[SnapshotsInput, Throwable] {
      parse(input).extract[SnapshotsInput]
    } leftMap { t: Throwable => new MalformedBodyError(input, t.getMessage) }).toValidationNel
    for {
      snap <- snapshotsInput
      uir <- (UID("sps").get leftMap { ut: NonEmptyList[Throwable] => ut })
    } yield {
      val uname = uir.get._2.toString.substring(0, 5)
      val bvalue = Set(email) // NOTE(review): unused — candidate for removal
      val json = new SnapshotsResult(uir.get._1 + uir.get._2, snap.asm_id, snap.org_id, email, snap.name + uname, snap.status, "", snap.tosca_type, List(), List(), "Megam::Snapshots", DateHelper.now())
      json
    }
  }
  /** Create a snapshot record and publish a SNAPSHOT_CREATE request. */
  def create(email: String, input: String): ValidationNel[Throwable, Option[SnapshotsResult]] = {
    for {
      wa <- (mkSnapshotsSack(email, input) leftMap { err: NonEmptyList[Throwable] => err })
      set <- (insertNewRecord(wa) leftMap { t: NonEmptyList[Throwable] => t })
    } yield {
      play.api.Logger.warn(("%s%s%-20s%s").format(Console.GREEN, Console.BOLD, "Snapshots.created success", Console.RESET))
      atPub(email, wa)
      wa.some
    }
  }
  /** Delete one snapshot: verifies it exists first, then removes the row.
    * NOTE(review): unlike deleteByEmail, no SNAPSHOT_REMOVE request is
    * published here — confirm whether that is intentional. */
  def delete(email: String, asm_id: String, id: String): ValidationNel[Throwable, SnapshotsResult] = {
    for {
      wa <- (findBySnapId(id,asm_id, email) leftMap { t: NonEmptyList[Throwable] => t })
      set <- (deleteRecord(email, asm_id, id) leftMap { t: NonEmptyList[Throwable] => t })
    } yield {
      play.api.Logger.warn(("%s%s%-20s%s").format(Console.GREEN, Console.BOLD, "Snapshots.delete success", Console.RESET))
      wa
    }
  }
  /** Update an existing snapshot row from a full SnapshotsResult JSON payload.
    * Returns the PRE-update record (qor), not the updated state. */
  def update(email: String, input: String): ValidationNel[Throwable, SnapshotsResult] = {
    val ripNel: ValidationNel[Throwable, SnapshotsResult] = (Validation.fromTryCatchThrowable[SnapshotsResult,Throwable] {
      parse(input).extract[SnapshotsResult]
    } leftMap { t: Throwable => new MalformedBodyError(input, t.getMessage) }).toValidationNel
    for {
      rip <- ripNel
      qor <- (Snapshots.findBySnapId(rip.id, rip.asm_id, email) leftMap { t: NonEmptyList[Throwable] => t })
      set <- updateRecord(email, rip, qor.some)
    } yield {
      qor
    }
  }
  /** All snapshots for an account; an empty result is success with an empty list. */
  def findByEmail(accountID: String): ValidationNel[Throwable, Seq[SnapshotsResult]] = {
    (listRecords(accountID) leftMap { t: NonEmptyList[Throwable] =>
      new ResourceItemNotFound(accountID, "Snapshots = nothing found.")
    }).toValidationNel.flatMap { nm: Seq[SnapshotsResult] =>
      if (!nm.isEmpty)
        Validation.success[Throwable, Seq[SnapshotsResult]](nm).toValidationNel
      else
        Validation.success[Throwable, Seq[SnapshotsResult]](List[SnapshotsResult]()).toValidationNel
    }
  }
  /** Publish SNAPSHOT_REMOVE for the account's snapshots (see deleteFound). */
  def deleteByEmail(email: String): ValidationNel[Throwable, SnapshotsResult] = {
    for {
      sa <- (findByEmail(email) leftMap { t: NonEmptyList[Throwable] => t })
      df <- deleteFound(email, sa)
    } yield df
  }
  /** Lookup a single snapshot; DB errors become ServiceUnavailableError,
    * a missing row becomes ResourceItemNotFound. */
  def findBySnapId(id: String, assembly_id: String, email: String): ValidationNel[Throwable, SnapshotsResult] = {
    (getRecord(id, assembly_id, email) leftMap { t: NonEmptyList[Throwable] ⇒
      new ServiceUnavailableError(id, (t.list.map(m ⇒ m.getMessage)).mkString("\\n"))
    }).toValidationNel.flatMap { xso: Option[SnapshotsResult] ⇒
      xso match {
        case Some(xs) ⇒ {
          Validation.success[Throwable, SnapshotsResult](xs).toValidationNel
        }
        case None ⇒ Validation.failure[Throwable, SnapshotsResult](new ResourceItemNotFound(id, "")).toValidationNel
      }
    }
  }
  /** Snapshots of one assembly; empty result is a ResourceItemNotFound failure
    * (contrast with findByEmail, which treats empty as success). */
  def findById(assemblyID: String, email: String): ValidationNel[Throwable, Seq[SnapshotsResult]] = {
    (getRecords(assemblyID, email) leftMap { t: NonEmptyList[Throwable] =>
      new ResourceItemNotFound(assemblyID, "Snapshots = nothing found.")
    }).toValidationNel.flatMap { nm: Seq[SnapshotsResult] =>
      if (!nm.isEmpty)
        Validation.success[Throwable, Seq[SnapshotsResult]](nm).toValidationNel
      else
        Validation.failure[Throwable, Seq[SnapshotsResult]](new ResourceItemNotFound(assemblyID, "Snapshots = nothing found.")).toValidationNel
    }
  }
  //Admin authority can list all snapshots for 1.5.
  // NOTE(review): failures are swallowed and reported as an empty list —
  // a DB outage is indistinguishable from "no snapshots".
  def list: ValidationNel[Throwable, Seq[SnapshotsResult]] = {
    listAllRecords match {
      case Success(value) => Validation.success[Throwable, Seq[SnapshotsResult]](value).toValidationNel
      case Failure(err) => Validation.success[Throwable, Seq[SnapshotsResult]](List()).toValidationNel
    }
  }
  /** Snapshots by id across assemblies; empty result is a failure. */
  def getById(id: String, email: String): ValidationNel[Throwable, Seq[SnapshotsResult]] = {
    (getSnapRecords(id, email) leftMap { t: NonEmptyList[Throwable] =>
      new ResourceItemNotFound(id, "Snapshots = nothing found.")
    }).toValidationNel.flatMap { nm: Seq[SnapshotsResult] =>
      if (!nm.isEmpty)
        Validation.success[Throwable, Seq[SnapshotsResult]](nm).toValidationNel
      else
        Validation.failure[Throwable, Seq[SnapshotsResult]](new ResourceItemNotFound(id, "Snapshots = nothing found.")).toValidationNel
    }
  }
  // Publishes a remove request for every found snapshot but returns only the
  // FIRST result (or an empty placeholder when nothing matched).
  private def deleteFound(email: String, sn: Seq[SnapshotsResult]) = {
    val output = (sn.map { sas =>
      play.api.Logger.warn(("%s%s%-20s%s").format(Console.GREEN, Console.BOLD, "Snapshots.delete success", Console.RESET))
      dePub(email, sas)
    })
    if (!output.isEmpty)
      output.head
    else
      SnapshotsResult("","","","","","","", "", models.tosca.KeyValueList.empty, models.tosca.KeyValueList.empty).successNel
  }
  //We support attaching disks for a VM. When we do containers we need to rethink.
  // Publish a SNAPSHOT_CREATE request to the request queue for async processing.
  private def atPub(email: String, wa: SnapshotsResult): ValidationNel[Throwable, SnapshotsResult] = {
    models.base.Requests.createAndPub(email, RequestInput(email, wa.id, CATTYPE_TORPEDO, "", SNAPSHOT_CREATE, SNAPSHOT).json)
    wa.successNel[Throwable]
  }
  //We support attaching disks for a VM. When we do containers we need to rethink.
  // Publish a SNAPSHOT_REMOVE request to the request queue for async processing.
  private def dePub(email: String, wa: SnapshotsResult): ValidationNel[Throwable, SnapshotsResult] = {
    models.base.Requests.createAndPub(email, RequestInput(email, wa.id, CATTYPE_TORPEDO, "", SNAPSHOT_REMOVE, SNAPSHOT).json)
    wa.successNel[Throwable]
  }
}
| indykish/vertice_gateway | app/models/snapshots/Snapshots.scala | Scala | mit | 12,662 |
package dotty.tools.dotc
package transform
import core._
import Contexts.Context
import Flags._
import dotty.tools.dotc.ast.tpd
import MegaPhase.MiniPhase
import dotty.tools.dotc.core.Types.{ThisType, TermRef}
/** Replace This references to module classes in static methods by global identifiers to the
* corresponding modules.
*/
class ElimStaticThis extends MiniPhase {
  import ast.tpd._
  def phaseName: String = "elimStaticThis"
  // Inside a JVM-static method there is no `this`; a This reference to a
  // module class is rewritten to a reference to the module's source symbol.
  override def transformThis(tree: This)(implicit ctx: Context): Tree =
    if (!tree.symbol.is(Package) && ctx.owner.enclosingMethod.is(JavaStatic)) {
      assert(tree.symbol.is(ModuleClass))
      ref(tree.symbol.sourceModule)
    }
    else tree
  // Same fix for idents whose type prefixes a ThisType of a Java-defined
  // module class: reroute the selection through the global module reference.
  override def transformIdent(tree: tpd.Ident)(implicit ctx: Context): tpd.Tree =
    if (ctx.owner.enclosingMethod.is(JavaStatic))
      tree.tpe match {
        case TermRef(thiz: ThisType, _) if thiz.cls.is(ModuleClass, JavaDefined) =>
          ref(thiz.cls.sourceModule).select(tree.symbol)
        case TermRef(thiz: ThisType, _) =>
          // must already be reachable statically, otherwise the rewrite above missed a case
          assert(tree.symbol.is(Flags.JavaStatic) || thiz.cls.is(JavaDefined))
          tree
        case _ => tree
      }
    else tree
}
| som-snytt/dotty | compiler/src/dotty/tools/dotc/transform/ElimStaticThis.scala | Scala | apache-2.0 | 1,171 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.index.bloom
import java.util.{Random, UUID}
import scala.collection.JavaConverters._
import org.apache.commons.io.FileUtils
import org.apache.spark.sql.{CarbonEnv, SaveMode, SparkSession}
import org.apache.spark.sql.test.SparkTestQueryExecutor
import org.apache.spark.sql.test.util.QueryTest
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
import org.apache.carbondata.core.constants.{CarbonCommonConstants, CarbonV3DataFormatConstants}
import org.apache.carbondata.core.index.status.IndexStatus
import org.apache.carbondata.core.metadata.index.IndexType
import org.apache.carbondata.core.util.CarbonProperties
import org.apache.carbondata.core.util.path.CarbonTablePath
import org.apache.carbondata.index.bloom.BloomCoarseGrainIndexTestUtil.{checkBasicQuery, createFile, deleteFile}
class BloomCoarseGrainIndexFunctionSuite
extends QueryTest with BeforeAndAfterAll with BeforeAndAfterEach {
  // Generated CSV fixture (2000 rows, created in beforeAll) shared by all tests.
  val bigFile = s"$resourcesPath/bloom_index_function_test_big.csv"
  // Baseline table without a bloom index, used as the query-result oracle.
  val normalTable = "carbon_normal"
  // Table carrying the bloom index under test.
  val bloomSampleTable = "carbon_bloom"
  val indexName = "bloom_dm"
  // Regenerate the CSV fixture from scratch and ensure a clean table state.
  override protected def beforeAll(): Unit = {
    deleteFile(bigFile)
    createFile(bigFile, line = 2000)
    sql(s"DROP TABLE IF EXISTS $normalTable")
    sql(s"DROP TABLE IF EXISTS $bloomSampleTable")
  }
  // Drop both tables after every test and restore the default date format,
  // since several tests override CARBON_DATE_FORMAT.
  override def afterEach(): Unit = {
    sql(s"DROP TABLE IF EXISTS $normalTable")
    sql(s"DROP TABLE IF EXISTS $bloomSampleTable")
    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT,
      CarbonCommonConstants.CARBON_DATE_DEFAULT_FORMAT)
  }
  // Bloom index on (city, id) where id is also the table's sort column.
  // Index status is asserted both after creation and after load, then results
  // are compared against the index-less baseline table.
  test("test bloom index: index column is integer, dictionary, sort_column") {
    sql(
      s"""
         | CREATE TABLE $normalTable(id INT, name STRING, city STRING, age INT,
         | s1 STRING, s2 STRING, s3 STRING, s4 STRING, s5 STRING, s6 STRING, s7 STRING, s8 STRING)
         | STORED AS carbondata TBLPROPERTIES('table_blocksize'='128')
         | """.stripMargin)
    sql(
      s"""
         | CREATE TABLE $bloomSampleTable(id INT, name STRING, city STRING, age INT,
         | s1 STRING, s2 STRING, s3 STRING, s4 STRING, s5 STRING, s6 STRING, s7 STRING, s8 STRING)
         | STORED AS carbondata TBLPROPERTIES('table_blocksize'='128', 'sort_columns'='id')
         | """.stripMargin)
    sql(
      s"""
         | CREATE INDEX $indexName
         | ON $bloomSampleTable (city, id)
         | AS 'bloomfilter'
         | properties('BLOOM_SIZE'='640000')
      """.stripMargin)
    IndexStatusUtil.checkIndexStatus(bloomSampleTable, indexName, IndexStatus.ENABLED.name(),
      sqlContext.sparkSession, IndexType.BLOOMFILTER)
    sql(
      s"""
         | LOAD DATA LOCAL INPATH '$bigFile' INTO TABLE $normalTable
         | OPTIONS('header'='false')
      """.stripMargin)
    sql(
      s"""
         | LOAD DATA LOCAL INPATH '$bigFile' INTO TABLE $bloomSampleTable
         | OPTIONS('header'='false')
      """.stripMargin)
    IndexStatusUtil.checkIndexStatus(bloomSampleTable, indexName, IndexStatus.ENABLED.name(),
      sqlContext.sparkSession, IndexType.BLOOMFILTER)
    sql(s"SHOW INDEXES ON TABLE $bloomSampleTable").collect()
    checkExistence(sql(s"SHOW INDEXES ON TABLE $bloomSampleTable"), true, indexName)
    sql(s"select * from $bloomSampleTable where id = 1").collect()
    sql(s"select * from $bloomSampleTable where city = 'city_1'").collect()
    checkBasicQuery(indexName, bloomSampleTable, normalTable)
    // only this test drops the index explicitly; the others rely on table drops
    sql(s"DROP INDEX $indexName ON TABLE $bloomSampleTable")
    sql(s"DROP TABLE IF EXISTS $normalTable")
    sql(s"DROP TABLE IF EXISTS $bloomSampleTable")
  }
  // Same scenario as the previous test except the sort column is `name`,
  // so neither bloom-indexed column (city, id) is a sort column.
  test("test bloom index: index column is integer, dictionary, not sort_column") {
    sql(
      s"""
         | CREATE TABLE $normalTable(id INT, name STRING, city STRING, age INT,
         | s1 STRING, s2 STRING, s3 STRING, s4 STRING, s5 STRING, s6 STRING, s7 STRING, s8 STRING)
         | STORED AS carbondata TBLPROPERTIES('table_blocksize'='128')
         | """.stripMargin)
    sql(
      s"""
         | CREATE TABLE $bloomSampleTable(id INT, name STRING, city STRING, age INT,
         | s1 STRING, s2 STRING, s3 STRING, s4 STRING, s5 STRING, s6 STRING, s7 STRING, s8 STRING)
         | STORED AS carbondata TBLPROPERTIES('table_blocksize'='128', 'sort_columns'='name')
         | """.stripMargin)
    sql(
      s"""
         | CREATE INDEX $indexName
         | ON $bloomSampleTable (city, id)
         | AS 'bloomfilter'
         | properties('BLOOM_SIZE'='640000')
      """.stripMargin)
    IndexStatusUtil.checkIndexStatus(bloomSampleTable, indexName, IndexStatus.ENABLED.name(),
      sqlContext.sparkSession, IndexType.BLOOMFILTER)
    sql(
      s"""
         | LOAD DATA LOCAL INPATH '$bigFile' INTO TABLE $normalTable
         | OPTIONS('header'='false')
      """.stripMargin)
    sql(
      s"""
         | LOAD DATA LOCAL INPATH '$bigFile' INTO TABLE $bloomSampleTable
         | OPTIONS('header'='false')
      """.stripMargin)
    IndexStatusUtil.checkIndexStatus(bloomSampleTable, indexName, IndexStatus.ENABLED.name(),
      sqlContext.sparkSession, IndexType.BLOOMFILTER)
    sql(s"SHOW INDEXES ON TABLE $bloomSampleTable").collect()
    checkExistence(sql(s"SHOW INDEXES ON TABLE $bloomSampleTable"), true, indexName)
    sql(s"select * from $bloomSampleTable where id = 1").collect()
    sql(s"select * from $bloomSampleTable where city = 'city_1'").collect()
    checkBasicQuery(indexName, bloomSampleTable, normalTable)
    sql(s"DROP TABLE IF EXISTS $normalTable")
    sql(s"DROP TABLE IF EXISTS $bloomSampleTable")
  }
  // NOTE(review): this test body is identical to the first ("integer,
  // dictionary, sort_column") test apart from the missing explicit DROP INDEX —
  // the "no dictionary" variation implied by the name is not expressed in DDL.
  test("test bloom index: index column is integer, sort_column") {
    sql(
      s"""
         | CREATE TABLE $normalTable(id INT, name STRING, city STRING, age INT,
         | s1 STRING, s2 STRING, s3 STRING, s4 STRING, s5 STRING, s6 STRING, s7 STRING, s8 STRING)
         | STORED AS carbondata TBLPROPERTIES('table_blocksize'='128')
         | """.stripMargin)
    sql(
      s"""
         | CREATE TABLE $bloomSampleTable(id INT, name STRING, city STRING, age INT,
         | s1 STRING, s2 STRING, s3 STRING, s4 STRING, s5 STRING, s6 STRING, s7 STRING, s8 STRING)
         | STORED AS carbondata TBLPROPERTIES('table_blocksize'='128', 'sort_columns'='id')
         | """.stripMargin)
    sql(
      s"""
         | CREATE INDEX $indexName
         | ON $bloomSampleTable (city, id)
         | AS 'bloomfilter'
         | properties('BLOOM_SIZE'='640000')
      """.stripMargin)
    IndexStatusUtil.checkIndexStatus(bloomSampleTable, indexName, IndexStatus.ENABLED.name(),
      sqlContext.sparkSession, IndexType.BLOOMFILTER)
    sql(
      s"""
         | LOAD DATA LOCAL INPATH '$bigFile' INTO TABLE $normalTable
         | OPTIONS('header'='false')
      """.stripMargin)
    sql(
      s"""
         | LOAD DATA LOCAL INPATH '$bigFile' INTO TABLE $bloomSampleTable
         | OPTIONS('header'='false')
      """.stripMargin)
    IndexStatusUtil.checkIndexStatus(bloomSampleTable, indexName, IndexStatus.ENABLED.name(),
      sqlContext.sparkSession, IndexType.BLOOMFILTER)
    sql(s"SHOW INDEXES ON TABLE $bloomSampleTable").collect()
    checkExistence(sql(s"SHOW INDEXES ON TABLE $bloomSampleTable"), true, indexName)
    sql(s"select * from $bloomSampleTable where id = 1").collect()
    sql(s"select * from $bloomSampleTable where city = 'city_1'").collect()
    checkBasicQuery(indexName, bloomSampleTable, normalTable)
    sql(s"DROP TABLE IF EXISTS $normalTable")
    sql(s"DROP TABLE IF EXISTS $bloomSampleTable")
  }
  // Bloom index on a float column; exact-match lookups on the indexed table
  // must agree with the baseline for both fractional and integral values.
  test("test bloom index: index column is float, not dictionary") {
    val floatCsvPath = s"$resourcesPath/datasamplefordate.csv"
    // fixture dates use yyyy-MM-dd; restored by afterEach
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT, "yyyy-MM-dd")
    sql(
      s"""
         | CREATE TABLE $normalTable(empno string, doj date, salary float)
         | STORED AS carbondata
         | TBLPROPERTIES('SORT_COLUMNS'='empno')
      """.stripMargin)
    sql(
      s"""
         | LOAD DATA INPATH '$floatCsvPath' INTO TABLE $normalTable OPTIONS(
         | 'DELIMITER'=',', 'QUOTECHAR'='"', 'BAD_RECORDS_ACTION'='FORCE')
      """.stripMargin)
    sql(
      s"""
         | CREATE TABLE $bloomSampleTable(empno string, doj date, salary float)
         | STORED AS carbondata
         | TBLPROPERTIES('SORT_COLUMNS'='empno')
      """.stripMargin)
    sql(
      s"""
         | CREATE INDEX $indexName
         | ON $bloomSampleTable (salary)
         | AS 'bloomfilter'
      """.stripMargin)
    sql(
      s"""
         | LOAD DATA INPATH '$floatCsvPath' INTO TABLE $bloomSampleTable OPTIONS(
         | 'DELIMITER'=',', 'QUOTECHAR'='"', 'BAD_RECORDS_ACTION'='FORCE')
      """.stripMargin)
    sql(s"SELECT * FROM $bloomSampleTable WHERE salary='1040.56'").collect()
    sql(s"SELECT * FROM $bloomSampleTable WHERE salary='1040'").collect()
    checkAnswer(sql(s"SELECT * FROM $bloomSampleTable WHERE salary='1040.56'"),
      sql(s"SELECT * FROM $normalTable WHERE salary='1040.56'"))
    checkAnswer(sql(s"SELECT * FROM $bloomSampleTable WHERE salary='1040'"),
      sql(s"SELECT * FROM $normalTable WHERE salary='1040'"))
  }
  // NOTE(review): this body is byte-identical to the preceding
  // "float, not dictionary" test — the DDL never enables a dictionary for
  // `salary`, so the "dictionary" variant in the name is not actually tested.
  test("test bloom index: index column is float, dictionary") {
    val floatCsvPath = s"$resourcesPath/datasamplefordate.csv"
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT, "yyyy-MM-dd")
    sql(
      s"""
         | CREATE TABLE $normalTable(empno string, doj date, salary float)
         | STORED AS carbondata
         | TBLPROPERTIES('SORT_COLUMNS'='empno')
      """.stripMargin)
    sql(
      s"""
         | LOAD DATA INPATH '$floatCsvPath' INTO TABLE $normalTable OPTIONS(
         | 'DELIMITER'=',', 'QUOTECHAR'='"', 'BAD_RECORDS_ACTION'='FORCE')
      """.stripMargin)
    sql(
      s"""
         | CREATE TABLE $bloomSampleTable(empno string, doj date, salary float)
         | STORED AS carbondata
         | TBLPROPERTIES('SORT_COLUMNS'='empno')
      """.stripMargin)
    sql(
      s"""
         | CREATE INDEX $indexName
         | ON $bloomSampleTable (salary)
         | AS 'bloomfilter'
      """.stripMargin)
    sql(
      s"""
         | LOAD DATA INPATH '$floatCsvPath' INTO TABLE $bloomSampleTable OPTIONS(
         | 'DELIMITER'=',', 'QUOTECHAR'='"', 'BAD_RECORDS_ACTION'='FORCE')
      """.stripMargin)
    sql(s"SELECT * FROM $bloomSampleTable WHERE salary='1040.56'").collect()
    sql(s"SELECT * FROM $bloomSampleTable WHERE salary='1040'").collect()
    checkAnswer(sql(s"SELECT * FROM $bloomSampleTable WHERE salary='1040.56'"),
      sql(s"SELECT * FROM $normalTable WHERE salary='1040.56'"))
    checkAnswer(sql(s"SELECT * FROM $bloomSampleTable WHERE salary='1040'"),
      sql(s"SELECT * FROM $normalTable WHERE salary='1040'"))
  }
  // Float columns cannot be SORT_COLUMNS, so there is no "float, sort_column"
  // variant of the test above.
  // Bloom filter pruning on a DATE index column; the indexed table must return
  // the same rows as the plain table for both a matching and a missing date.
  test("test bloom index: index column is date") {
    val dateCsvPath = s"$resourcesPath/datasamplefordate.csv"
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT, "yyyy-MM-dd")
    // Plain reference table.
    sql(
      s"""
         | CREATE TABLE $normalTable(empno string, doj date, salary float)
         | STORED AS carbondata
         | TBLPROPERTIES('SORT_COLUMNS'='empno')
       """.stripMargin)
    sql(
      s"""
         | LOAD DATA INPATH '$dateCsvPath' INTO TABLE $normalTable OPTIONS(
         | 'DELIMITER'=',', 'QUOTECHAR'='"', 'BAD_RECORDS_ACTION'='FORCE')
       """.stripMargin)
    // Indexed table: bloom index on doj is created before loading.
    sql(
      s"""
         | CREATE TABLE $bloomSampleTable(empno string, doj date, salary float)
         | STORED AS carbondata
         | TBLPROPERTIES('SORT_COLUMNS'='empno')
       """.stripMargin)
    sql(
      s"""
         | CREATE INDEX $indexName
         | ON $bloomSampleTable (doj)
         | AS 'bloomfilter'
       """.stripMargin)
    sql(
      s"""
         | LOAD DATA INPATH '$dateCsvPath' INTO TABLE $bloomSampleTable OPTIONS(
         | 'DELIMITER'=',', 'QUOTECHAR'='"', 'BAD_RECORDS_ACTION'='FORCE')
       """.stripMargin)
    // Exercise the pruning path, then compare indexed vs non-indexed answers.
    sql(s"SELECT * FROM $bloomSampleTable WHERE doj='2016-03-14'").collect()
    sql(s"SELECT * FROM $bloomSampleTable WHERE doj='2016-03-15'").collect()
    checkAnswer(sql(s"SELECT * FROM $bloomSampleTable WHERE doj='2016-03-14'"),
      sql(s"SELECT * FROM $normalTable WHERE doj='2016-03-14'"))
    checkAnswer(sql(s"SELECT * FROM $bloomSampleTable WHERE doj='2016-03-15'"),
      sql(s"SELECT * FROM $normalTable WHERE doj='2016-03-15'"))
  }
test("test bloom index: index column is date, dictionary, sort column") {
val dateCsvPath = s"$resourcesPath/datasamplefordate.csv"
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT, "yyyy-MM-dd")
sql(
s"""
| CREATE TABLE $normalTable(empno string, doj date, salary float)
| STORED AS carbondata
| TBLPROPERTIES('SORT_COLUMNS'='empno')
""".stripMargin)
sql(
s"""
| LOAD DATA INPATH '$dateCsvPath' INTO TABLE $normalTable OPTIONS(
| 'DELIMITER'=',', 'QUOTECHAR'='"', 'BAD_RECORDS_ACTION'='FORCE')
""".stripMargin)
sql(
s"""
| CREATE TABLE $bloomSampleTable(empno string, doj date, salary float)
| STORED AS carbondata
| TBLPROPERTIES('SORT_COLUMNS'='empno', 'sort_columns'='doj')
""".stripMargin)
sql(
s"""
| CREATE INDEX $indexName
| ON $bloomSampleTable (doj)
| AS 'bloomfilter'
""".stripMargin)
sql(
s"""
| LOAD DATA INPATH '$dateCsvPath' INTO TABLE $bloomSampleTable OPTIONS(
| 'DELIMITER'=',', 'QUOTECHAR'='"', 'BAD_RECORDS_ACTION'='FORCE')
""".stripMargin)
sql(s"SELECT * FROM $bloomSampleTable WHERE doj='2016-03-14'").collect()
sql(s"SELECT * FROM $bloomSampleTable WHERE doj='2016-03-15'").collect()
checkAnswer(sql(s"SELECT * FROM $bloomSampleTable WHERE doj='2016-03-14'"),
sql(s"SELECT * FROM $normalTable WHERE doj='2016-03-14'"))
checkAnswer(sql(s"SELECT * FROM $bloomSampleTable WHERE doj='2016-03-15'"),
sql(s"SELECT * FROM $normalTable WHERE doj='2016-03-15'"))
}
// timestamp is naturally not dictionary
  // Bloom filter pruning on a TIMESTAMP index column; answers from the indexed
  // table must match the plain table for a present and an absent timestamp.
  test("test bloom index: index column is timestamp") {
    val timeStampData = s"$resourcesPath/timeStampFormatData1.csv"
    sql(
      s"""
         | CREATE TABLE IF NOT EXISTS $normalTable (
         | ID Int, date date, starttime Timestamp, country String, name String, phonetype String,
         | serialname String, salary Int)
         | STORED AS carbondata
       """.stripMargin)
    sql(
      s"""
         | LOAD DATA LOCAL INPATH '$timeStampData' into table $normalTable
         | OPTIONS('dateformat' = 'yyyy/MM/dd','timestampformat'='yyyy-MM-dd HH:mm:ss')
       """.stripMargin)
    sql(
      s"""
         | CREATE TABLE IF NOT EXISTS $bloomSampleTable (
         | ID Int, date date, starttime Timestamp, country String, name String, phonetype String,
         | serialname String, salary Int)
         | STORED AS carbondata
       """.stripMargin)
    // Index is created before the load, so it is built during data loading.
    sql(
      s"""
         | CREATE INDEX $indexName
         | ON $bloomSampleTable (starttime)
         | AS 'bloomfilter'
       """.stripMargin)
    sql(
      s"""
         | LOAD DATA LOCAL INPATH '$timeStampData' into table $bloomSampleTable
         | OPTIONS('dateformat' = 'yyyy/MM/dd','timestampformat'='yyyy-MM-dd HH:mm:ss')
       """.stripMargin)
    // Exercise the pruning path, then compare indexed vs non-indexed answers.
    sql(s"SELECT * FROM $bloomSampleTable WHERE starttime='2016-07-25 01:03:30.0'").collect()
    sql(s"SELECT * FROM $bloomSampleTable WHERE starttime='2016-07-25 01:03:31.0'").collect()
    checkAnswer(sql(s"SELECT * FROM $bloomSampleTable WHERE starttime='2016-07-25 01:03:30.0'"),
      sql(s"SELECT * FROM $normalTable WHERE starttime='2016-07-25 01:03:30.0'"))
    checkAnswer(sql(s"SELECT * FROM $bloomSampleTable WHERE starttime='2016-07-25 01:03:31.0'"),
      sql(s"SELECT * FROM $normalTable WHERE starttime='2016-07-25 01:03:31.0'"))
  }
  // Bloom filter pruning on a TIMESTAMP column that is also a sort column.
  test("test bloom index: index column is timestamp, dictionary, sort_column") {
    val timeStampData = s"$resourcesPath/timeStampFormatData1.csv"
    sql(
      s"""
         | CREATE TABLE IF NOT EXISTS $normalTable (
         | ID Int, date date, starttime Timestamp, country String, name String, phonetype String,
         | serialname String, salary Int)
         | STORED AS carbondata
       """.stripMargin)
    sql(
      s"""
         | LOAD DATA LOCAL INPATH '$timeStampData' into table $normalTable
         | OPTIONS('dateformat' = 'yyyy/MM/dd','timestampformat'='yyyy-MM-dd HH:mm:ss')
       """.stripMargin)
    // NOTE(review): 'dictionary_column' is not a standard Carbon table property
    // (the historical property was 'DICTIONARY_INCLUDE'); it is likely ignored
    // here — confirm whether the "dictionary" part of this test still applies.
    sql(
      s"""
         | CREATE TABLE IF NOT EXISTS $bloomSampleTable (
         | ID Int, date date, starttime Timestamp, country String, name String, phonetype String,
         | serialname String, salary Int)
         | STORED AS carbondata
         | TBLPROPERTIES('dictionary_column'='starttime', 'sort_columns'='starttime')
       """.stripMargin)
    sql(
      s"""
         | CREATE INDEX $indexName
         | ON $bloomSampleTable (starttime)
         | AS 'bloomfilter'
       """.stripMargin)
    sql(
      s"""
         | LOAD DATA LOCAL INPATH '$timeStampData' into table $bloomSampleTable
         | OPTIONS('dateformat' = 'yyyy/MM/dd','timestampformat'='yyyy-MM-dd HH:mm:ss')
       """.stripMargin)
    // 'starttime=null' is never true in SQL, so this only verifies that a
    // null-literal filter on the indexed column executes without error.
    sql(s"SELECT * FROM $bloomSampleTable WHERE starttime=null").collect()
    sql(s"SELECT * FROM $bloomSampleTable WHERE starttime='2016-07-25 01:03:30.0'").collect()
    sql(s"SELECT * FROM $bloomSampleTable WHERE starttime='2016-07-25 01:03:31.0'").collect()
    checkAnswer(sql(s"SELECT * FROM $bloomSampleTable WHERE starttime='2016-07-25 01:03:30.0'"),
      sql(s"SELECT * FROM $normalTable WHERE starttime='2016-07-25 01:03:30.0'"))
    checkAnswer(sql(s"SELECT * FROM $bloomSampleTable WHERE starttime='2016-07-25 01:03:31.0'"),
      sql(s"SELECT * FROM $normalTable WHERE starttime='2016-07-25 01:03:31.0'"))
  }
// it seems the CI env will be timeout on this test, just ignore it here
  // Empty strings and nulls on the index columns must not break loading or
  // querying; every filter must agree with the plain table.
  ignore("test bloom index: loading and querying with empty values on index column") {
    sql(s"CREATE TABLE $normalTable(c1 string, c2 int, c3 string) STORED AS carbondata")
    sql(s"CREATE TABLE $bloomSampleTable(c1 string, c2 int, c3 string) STORED AS carbondata")
    // Index on both a string and an int column, created before loading.
    sql(
      s"""
         | CREATE INDEX $indexName
         | on $bloomSampleTable (c1, c2)
         | as 'bloomfilter'
       """.stripMargin)
    // load data with empty value
    sql(s"INSERT INTO $normalTable SELECT '', 1, 'xxx'")
    sql(s"INSERT INTO $bloomSampleTable SELECT '', 1, 'xxx'")
    sql(s"INSERT INTO $normalTable SELECT '', null, 'xxx'")
    sql(s"INSERT INTO $bloomSampleTable SELECT '', null, 'xxx'")
    // query on null fields
    checkAnswer(sql(s"SELECT * FROM $bloomSampleTable"),
      sql(s"SELECT * FROM $normalTable"))
    // 'c1 = null' is never true in SQL — both sides are empty; this only checks
    // the indexed table handles the filter without error.
    checkAnswer(sql(s"SELECT * FROM $bloomSampleTable WHERE c1 = null"),
      sql(s"SELECT * FROM $normalTable WHERE c1 = null"))
    checkAnswer(sql(s"SELECT * FROM $bloomSampleTable WHERE c1 = ''"),
      sql(s"SELECT * FROM $normalTable WHERE c1 = ''"))
    checkAnswer(sql(s"SELECT * FROM $bloomSampleTable WHERE isNull(c1)"),
      sql(s"SELECT * FROM $normalTable WHERE isNull(c1)"))
    checkAnswer(sql(s"SELECT * FROM $bloomSampleTable WHERE isNull(c2)"),
      sql(s"SELECT * FROM $normalTable WHERE isNull(c2)"))
  }
test("test bloom index: querying with longstring index column") {
sql(s"CREATE TABLE $normalTable(c1 string, c2 int, c3 string) " +
"STORED AS carbondata TBLPROPERTIES('long_string_columns'='c3')")
sql(s"CREATE TABLE $bloomSampleTable(c1 string, c2 int, c3 string) " +
"STORED AS carbondata TBLPROPERTIES('long_string_columns'='c3')")
// create index on longstring columns
sql(
s"""
| CREATE INDEX $indexName
| on $bloomSampleTable (c3)
| as 'bloomfilter'
""".stripMargin)
sql(s"INSERT INTO $normalTable SELECT 'c1v1', 1, 'xxx'")
sql(s"INSERT INTO $bloomSampleTable SELECT 'c1v1', 1, 'xxx'")
sql(s"INSERT INTO $normalTable SELECT 'c1v1', 1, 'yyy'")
sql(s"INSERT INTO $bloomSampleTable SELECT 'c1v1', 1, 'yyy'")
checkAnswer(sql(s"SELECT * FROM $bloomSampleTable WHERE c3 = 'xxx'"),
sql(s"SELECT * FROM $normalTable WHERE c3 = 'xxx'"))
}
  // "rebuild" scenario: data is loaded FIRST and the bloom index is created
  // afterwards, so the index must be built over the already-existing segments.
  test("test rebuild bloom index: index column is integer, dictionary, sort_column") {
    sql(
      s"""
         | CREATE TABLE $normalTable(id INT, name STRING, city STRING, age INT,
         | s1 STRING, s2 STRING, s3 STRING, s4 STRING, s5 STRING, s6 STRING, s7 STRING, s8 STRING)
         | STORED AS carbondata
         | TBLPROPERTIES('table_blocksize'='128')
         | """.stripMargin)
    // Indexed table sorts on id, one of the future index columns.
    sql(
      s"""
         | CREATE TABLE $bloomSampleTable(id INT, name STRING, city STRING, age INT,
         | s1 STRING, s2 STRING, s3 STRING, s4 STRING, s5 STRING, s6 STRING, s7 STRING, s8 STRING)
         | STORED AS carbondata
         | TBLPROPERTIES('table_blocksize'='128', 'sort_columns'='id')
         | """.stripMargin)
    sql(
      s"""
         | LOAD DATA LOCAL INPATH '$bigFile' INTO TABLE $normalTable
         | OPTIONS('header'='false')
       """.stripMargin)
    sql(
      s"""
         | LOAD DATA LOCAL INPATH '$bigFile' INTO TABLE $bloomSampleTable
         | OPTIONS('header'='false')
       """.stripMargin)
    // Index over four columns, created after the load, with an explicit size.
    sql(
      s"""
         | CREATE INDEX $indexName
         | ON $bloomSampleTable (city,id,age,name)
         | AS 'bloomfilter'
         | properties('BLOOM_SIZE'='640000')
       """.stripMargin)
    sql(s"SHOW INDEXES ON TABLE $bloomSampleTable").collect()
    checkExistence(sql(s"SHOW INDEXES ON TABLE $bloomSampleTable"), true, indexName)
    // checkBasicQuery (defined elsewhere in this suite) compares indexed vs plain results.
    checkBasicQuery(indexName, bloomSampleTable, normalTable)
    sql(s"DROP TABLE IF EXISTS $normalTable")
    sql(s"DROP TABLE IF EXISTS $bloomSampleTable")
  }
  // Same rebuild scenario as above, but the table is sorted on 'name', so the
  // integer index columns are NOT sort columns.
  test("test rebuild bloom index: index column is integer, dictionary, not sort_column") {
    sql(
      s"""
         | CREATE TABLE $normalTable(id INT, name STRING, city STRING, age INT,
         | s1 STRING, s2 STRING, s3 STRING, s4 STRING, s5 STRING, s6 STRING, s7 STRING, s8 STRING)
         | STORED AS carbondata
         | TBLPROPERTIES('table_blocksize'='128')
         | """.stripMargin)
    sql(
      s"""
         | CREATE TABLE $bloomSampleTable(id INT, name STRING, city STRING, age INT,
         | s1 STRING, s2 STRING, s3 STRING, s4 STRING, s5 STRING, s6 STRING, s7 STRING, s8 STRING)
         | STORED AS carbondata
         | TBLPROPERTIES('table_blocksize'='128', 'sort_columns'='name')
         | """.stripMargin)
    sql(
      s"""
         | LOAD DATA LOCAL INPATH '$bigFile' INTO TABLE $normalTable
         | OPTIONS('header'='false')
       """.stripMargin)
    sql(
      s"""
         | LOAD DATA LOCAL INPATH '$bigFile' INTO TABLE $bloomSampleTable
         | OPTIONS('header'='false')
       """.stripMargin)
    // Index created after the load (rebuild path).
    sql(
      s"""
         | CREATE INDEX $indexName
         | ON $bloomSampleTable (city,id,age,name)
         | AS 'bloomfilter'
         | properties('BLOOM_SIZE'='640000')
       """.stripMargin)
    sql(s"SHOW INDEXES ON TABLE $bloomSampleTable").collect()
    checkExistence(sql(s"SHOW INDEXES ON TABLE $bloomSampleTable"), true, indexName)
    checkBasicQuery(indexName, bloomSampleTable, normalTable)
    sql(s"DROP TABLE IF EXISTS $normalTable")
    sql(s"DROP TABLE IF EXISTS $bloomSampleTable")
  }
  // Rebuild scenario, sorted on id. NOTE(review): table properties here are
  // identical to the "integer, dictionary, sort_column" variant above — the
  // "dictionary" distinction appears to be legacy naming; confirm.
  test("test rebuild bloom index: index column is integer, sort_column") {
    sql(
      s"""
         | CREATE TABLE $normalTable(id INT, name STRING, city STRING, age INT,
         | s1 STRING, s2 STRING, s3 STRING, s4 STRING, s5 STRING, s6 STRING, s7 STRING, s8 STRING)
         | STORED AS carbondata
         | TBLPROPERTIES('table_blocksize'='128')
         | """.stripMargin)
    sql(
      s"""
         | CREATE TABLE $bloomSampleTable(id INT, name STRING, city STRING, age INT,
         | s1 STRING, s2 STRING, s3 STRING, s4 STRING, s5 STRING, s6 STRING, s7 STRING, s8 STRING)
         | STORED AS carbondata
         | TBLPROPERTIES('table_blocksize'='128', 'sort_columns'='id')
         | """.stripMargin)
    sql(
      s"""
         | LOAD DATA LOCAL INPATH '$bigFile' INTO TABLE $normalTable
         | OPTIONS('header'='false')
       """.stripMargin)
    sql(
      s"""
         | LOAD DATA LOCAL INPATH '$bigFile' INTO TABLE $bloomSampleTable
         | OPTIONS('header'='false')
       """.stripMargin)
    // Index created after the load (rebuild path).
    sql(
      s"""
         | CREATE INDEX $indexName
         | ON $bloomSampleTable (city,id,age,name)
         | AS 'bloomfilter'
         | properties('BLOOM_SIZE'='640000')
       """.stripMargin)
    sql(s"SHOW INDEXES ON TABLE $bloomSampleTable").collect()
    checkExistence(sql(s"SHOW INDEXES ON TABLE $bloomSampleTable"), true, indexName)
    checkBasicQuery(indexName, bloomSampleTable, normalTable)
    sql(s"DROP TABLE IF EXISTS $normalTable")
    sql(s"DROP TABLE IF EXISTS $bloomSampleTable")
  }
  // Rebuild scenario for a FLOAT index column: both tables are loaded first,
  // then the bloom index is created over the existing segment.
  test("test rebuild bloom index: index column is float, not dictionary") {
    val floatCsvPath = s"$resourcesPath/datasamplefordate.csv"
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT, "yyyy-MM-dd")
    sql(
      s"""
         | CREATE TABLE $normalTable(empno string, doj date, salary float)
         | STORED AS carbondata
         | TBLPROPERTIES('SORT_COLUMNS'='empno')
       """.stripMargin)
    sql(
      s"""
         | CREATE TABLE $bloomSampleTable(empno string, doj date, salary float)
         | STORED AS carbondata
         | TBLPROPERTIES('SORT_COLUMNS'='empno')
       """.stripMargin)
    sql(
      s"""
         | LOAD DATA INPATH '$floatCsvPath' INTO TABLE $normalTable OPTIONS(
         | 'DELIMITER'=',', 'QUOTECHAR'='"', 'BAD_RECORDS_ACTION'='FORCE')
       """.stripMargin)
    sql(
      s"""
         | LOAD DATA INPATH '$floatCsvPath' INTO TABLE $bloomSampleTable OPTIONS(
         | 'DELIMITER'=',', 'QUOTECHAR'='"', 'BAD_RECORDS_ACTION'='FORCE')
       """.stripMargin)
    // Index created AFTER loading — exercises the deferred/rebuild build path.
    sql(
      s"""
         | CREATE INDEX $indexName
         | ON $bloomSampleTable (salary)
         | AS 'bloomfilter'
       """.stripMargin)
    // Exercise pruning, then compare indexed vs non-indexed answers.
    sql(s"SELECT * FROM $bloomSampleTable WHERE salary='1040.56'").collect()
    sql(s"SELECT * FROM $bloomSampleTable WHERE salary='1040'").collect()
    checkAnswer(sql(s"SELECT * FROM $bloomSampleTable WHERE salary='1040.56'"),
      sql(s"SELECT * FROM $normalTable WHERE salary='1040.56'"))
    checkAnswer(sql(s"SELECT * FROM $bloomSampleTable WHERE salary='1040'"),
      sql(s"SELECT * FROM $normalTable WHERE salary='1040'"))
  }
  // Dropping one of two bloom indexes must keep the other; after the last index
  // is dropped the table's 'indexexists' property must be reset to "false".
  test("test drop index when more than one bloom index exists") {
    sql(s"CREATE TABLE $bloomSampleTable " +
      "(id int,name string,salary int)STORED as carbondata TBLPROPERTIES('SORT_COLUMNS'='id')")
    // Two independent bloom indexes on different columns.
    sql(s"CREATE index index1 ON TABLE $bloomSampleTable(id) as 'bloomfilter' " +
      "PROPERTIES ( 'BLOOM_SIZE'='640000', 'BLOOM_FPP'='0.00001', 'BLOOM_COMPRESS'='true')")
    sql(s"CREATE index index2 ON TABLE $bloomSampleTable (name) as 'bloomfilter' " +
      "PROPERTIES ('BLOOM_SIZE'='640000', 'BLOOM_FPP'='0.00001', 'BLOOM_COMPRESS'='true')")
    sql(s"insert into $bloomSampleTable values(1,'nihal',20)")
    checkExistence(sql(s"SHOW INDEXES ON TABLE $bloomSampleTable"), true, "index1", "index2")
    // Drop the first index: the second must survive.
    sql(s"drop index index1 on $bloomSampleTable")
    checkExistence(sql(s"SHOW INDEXES ON TABLE $bloomSampleTable"), true, "index2")
    sql(s"drop index index2 on $bloomSampleTable")
    // With no indexes left, the table metadata must record that none exist.
    val carbonTable = CarbonEnv.getCarbonTable(
      Option("default"), bloomSampleTable)(sqlContext.sparkSession)
    val isIndexExists = carbonTable.getTableInfo
      .getFactTable
      .getTableProperties
      .get("indexexists")
    assertResult("false")(isIndexExists)
    assert(sql(s"SHOW INDEXES ON TABLE $bloomSampleTable").collect().isEmpty)
  }
  // Rebuild scenario for a FLOAT index column (dictionary-named variant):
  // load first, create the index afterwards, then cross-check query results.
  test("test rebuild bloom index: index column is float, dictionary") {
    val floatCsvPath = s"$resourcesPath/datasamplefordate.csv"
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT, "yyyy-MM-dd")
    sql(
      s"""
         | CREATE TABLE $normalTable(empno string, doj date, salary float)
         | STORED AS carbondata
         | TBLPROPERTIES('SORT_COLUMNS'='empno')
       """.stripMargin)
    sql(
      s"""
         | CREATE TABLE $bloomSampleTable(empno string, doj date, salary float)
         | STORED AS carbondata
         | TBLPROPERTIES('SORT_COLUMNS'='empno')
       """.stripMargin)
    sql(
      s"""
         | LOAD DATA INPATH '$floatCsvPath' INTO TABLE $normalTable OPTIONS(
         | 'DELIMITER'=',', 'QUOTECHAR'='"', 'BAD_RECORDS_ACTION'='FORCE')
       """.stripMargin)
    sql(
      s"""
         | LOAD DATA INPATH '$floatCsvPath' INTO TABLE $bloomSampleTable OPTIONS(
         | 'DELIMITER'=',', 'QUOTECHAR'='"', 'BAD_RECORDS_ACTION'='FORCE')
       """.stripMargin)
    // Index created AFTER loading (rebuild path).
    sql(
      s"""
         | CREATE INDEX $indexName
         | ON $bloomSampleTable (salary)
         | AS 'bloomfilter'
       """.stripMargin)
    sql(s"SELECT * FROM $bloomSampleTable WHERE salary='1040.56'").collect()
    sql(s"SELECT * FROM $bloomSampleTable WHERE salary='1040'").collect()
    checkAnswer(sql(s"SELECT * FROM $bloomSampleTable WHERE salary='1040.56'"),
      sql(s"SELECT * FROM $normalTable WHERE salary='1040.56'"))
    checkAnswer(sql(s"SELECT * FROM $bloomSampleTable WHERE salary='1040'"),
      sql(s"SELECT * FROM $normalTable WHERE salary='1040'"))
  }
  // Rebuild scenario for a DATE index column: load first, index afterwards.
  test("test rebuild bloom index: index column is date") {
    val dateCsvPath = s"$resourcesPath/datasamplefordate.csv"
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT, "yyyy-MM-dd")
    sql(
      s"""
         | CREATE TABLE $normalTable(empno string, doj date, salary float)
         | STORED AS carbondata
         | TBLPROPERTIES('SORT_COLUMNS'='empno')
       """.stripMargin)
    sql(
      s"""
         | CREATE TABLE $bloomSampleTable(empno string, doj date, salary float)
         | STORED AS carbondata
         | TBLPROPERTIES('SORT_COLUMNS'='empno')
       """.stripMargin)
    sql(
      s"""
         | LOAD DATA INPATH '$dateCsvPath' INTO TABLE $normalTable OPTIONS(
         | 'DELIMITER'=',', 'QUOTECHAR'='"', 'BAD_RECORDS_ACTION'='FORCE')
       """.stripMargin)
    sql(
      s"""
         | LOAD DATA INPATH '$dateCsvPath' INTO TABLE $bloomSampleTable OPTIONS(
         | 'DELIMITER'=',', 'QUOTECHAR'='"', 'BAD_RECORDS_ACTION'='FORCE')
       """.stripMargin)
    // Index created AFTER loading (rebuild path).
    sql(
      s"""
         | CREATE INDEX $indexName
         | ON $bloomSampleTable (doj)
         | AS 'bloomfilter'
       """.stripMargin)
    sql(s"SELECT * FROM $bloomSampleTable WHERE doj='2016-03-14'").collect()
    sql(s"SELECT * FROM $bloomSampleTable WHERE doj='2016-03-15'").collect()
    checkAnswer(sql(s"SELECT * FROM $bloomSampleTable WHERE doj='2016-03-14'"),
      sql(s"SELECT * FROM $normalTable WHERE doj='2016-03-14'"))
    checkAnswer(sql(s"SELECT * FROM $bloomSampleTable WHERE doj='2016-03-15'"),
      sql(s"SELECT * FROM $normalTable WHERE doj='2016-03-15'"))
  }
test("test rebuild bloom index: index column is date, dictionary, sort_colum") {
val dateCsvPath = s"$resourcesPath/datasamplefordate.csv"
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT, "yyyy-MM-dd")
sql(
s"""
| CREATE TABLE $normalTable(empno string, doj date, salary float)
| STORED AS carbondata
| TBLPROPERTIES('SORT_COLUMNS'='empno')
""".stripMargin)
sql(
s"""
| CREATE TABLE $bloomSampleTable(empno string, doj date, salary float)
| STORED AS carbondata
| TBLPROPERTIES('SORT_COLUMNS'='empno,doj')
""".stripMargin)
sql(
s"""
| LOAD DATA INPATH '$dateCsvPath' INTO TABLE $normalTable OPTIONS(
| 'DELIMITER'=',', 'QUOTECHAR'='"', 'BAD_RECORDS_ACTION'='FORCE')
""".stripMargin)
sql(
s"""
| LOAD DATA INPATH '$dateCsvPath' INTO TABLE $bloomSampleTable OPTIONS(
| 'DELIMITER'=',', 'QUOTECHAR'='"', 'BAD_RECORDS_ACTION'='FORCE')
""".stripMargin)
sql(
s"""
| CREATE INDEX $indexName
| ON $bloomSampleTable (doj)
| AS 'bloomfilter'
""".stripMargin)
sql(s"SELECT * FROM $bloomSampleTable WHERE doj='2016-03-14'").collect()
sql(s"SELECT * FROM $bloomSampleTable WHERE doj='2016-03-15'").collect()
checkAnswer(sql(s"SELECT * FROM $bloomSampleTable WHERE doj='2016-03-14'"),
sql(s"SELECT * FROM $normalTable WHERE doj='2016-03-14'"))
checkAnswer(sql(s"SELECT * FROM $bloomSampleTable WHERE doj='2016-03-15'"),
sql(s"SELECT * FROM $normalTable WHERE doj='2016-03-15'"))
}
  // Rebuild variant of the empty-values test: rows (including empty strings and
  // nulls on the index columns) are inserted BEFORE the index is created.
  ignore("test rebuild bloom index: loading and querying with empty values on index column") {
    sql(s"CREATE TABLE $normalTable(c1 string, c2 int, c3 string) STORED AS carbondata")
    sql(s"CREATE TABLE $bloomSampleTable(c1 string, c2 int, c3 string) STORED AS carbondata")
    // load data with empty value
    sql(s"INSERT INTO $normalTable SELECT '', 1, 'xxx'")
    sql(s"INSERT INTO $bloomSampleTable SELECT '', 1, 'xxx'")
    sql(s"INSERT INTO $normalTable SELECT '', null, 'xxx'")
    sql(s"INSERT INTO $bloomSampleTable SELECT '', null, 'xxx'")
    // Index created AFTER the inserts (rebuild path).
    sql(
      s"""
         | CREATE INDEX $indexName
         | on $bloomSampleTable (c1, c2)
         | as 'bloomfilter'
       """.stripMargin)
    // query on null fields
    checkAnswer(sql(s"SELECT * FROM $bloomSampleTable"),
      sql(s"SELECT * FROM $normalTable"))
    // 'c1 = null' is never true in SQL — both sides are empty; this only checks
    // the indexed table handles the filter without error.
    checkAnswer(sql(s"SELECT * FROM $bloomSampleTable WHERE c1 = null"),
      sql(s"SELECT * FROM $normalTable WHERE c1 = null"))
    checkAnswer(sql(s"SELECT * FROM $bloomSampleTable WHERE c1 = ''"),
      sql(s"SELECT * FROM $normalTable WHERE c1 = ''"))
    checkAnswer(sql(s"SELECT * FROM $bloomSampleTable WHERE isNull(c1)"),
      sql(s"SELECT * FROM $normalTable WHERE isNull(c1)"))
    checkAnswer(sql(s"SELECT * FROM $bloomSampleTable WHERE isNull(c2)"),
      sql(s"SELECT * FROM $normalTable WHERE isNull(c2)"))
  }
  // Deleting a segment and force-cleaning files must remove that segment's
  // bloom index files on disk while leaving the other segment's files intact.
  test("test bloom index: deleting & clearning segment will clear index files") {
    sql(s"CREATE TABLE $bloomSampleTable(c1 string, c2 int, c3 string) STORED AS carbondata")
    sql(
      s"""
         | CREATE INDEX $indexName
         | on $bloomSampleTable (c1, c2)
         | as 'bloomfilter'
       """.stripMargin)
    // Two inserts -> two segments (0 and 1).
    sql(s"INSERT INTO $bloomSampleTable SELECT 'c1v1', 1, 'c3v1'")
    sql(s"INSERT INTO $bloomSampleTable SELECT 'c1v2', 2, 'c3v2'")
    // two segments both has index files
    val carbonTable = CarbonEnv.getCarbonTable(Option("default"), bloomSampleTable)(
      SparkTestQueryExecutor.spark)
    import scala.collection.JavaConverters._
    (0 to 1).foreach { segId =>
      val indexPath = CarbonTablePath.getIndexesStorePath(carbonTable.getTablePath,
        segId.toString, indexName)
      // each segment should contain merged bloom index files after its load
      assert(FileUtils.listFiles(FileUtils.getFile(indexPath), Array("bloomindexmerge"), true)
        .asScala.nonEmpty)
    }
    // delete and clean the first segment, the corresponding index files should be cleaned too
    sql(s"DELETE FROM TABLE $bloomSampleTable WHERE SEGMENT.ID IN (0)")
    // force-clean is gated by a property; enable it just for this CLEAN FILES call
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.CARBON_CLEAN_FILES_FORCE_ALLOWED, "true")
    sql(s"CLEAN FILES FOR TABLE $bloomSampleTable options('force'='true')")
    CarbonProperties.getInstance()
      .removeProperty(CarbonCommonConstants.CARBON_CLEAN_FILES_FORCE_ALLOWED)
    // Segment 0's index directory must be gone; segment 1's must still exist.
    var indexPath = CarbonTablePath.getIndexesStorePath(carbonTable.getTablePath, "0", indexName)
    assert(!FileUtils.getFile(indexPath).exists(),
      "index file of this segment has been deleted, should not exist")
    indexPath = CarbonTablePath.getIndexesStorePath(carbonTable.getTablePath, "1", indexName)
    assert(FileUtils.listFiles(FileUtils.getFile(indexPath), Array("bloomindexmerge"), true)
      .asScala.nonEmpty)
  }
// two blocklets in one block are hit by bloom index while block cache level hit this block
  // CARBONDATA-2788: when several blocklets of one block are hit by the bloom
  // index while the cache works at block level, the query count must still
  // match the pre-index baseline.
  test("CARBONDATA-2788: enable block cache level and bloom index") {
    // minimum per page is 2000 rows
    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.BLOCKLET_SIZE, "2000")
    // minimum per blocklet is 16MB
    CarbonProperties.getInstance()
      .addProperty(CarbonV3DataFormatConstants.BLOCKLET_SIZE_IN_MB, "16")
    // these lines will result in 3 blocklets in one block and bloom will hit at least 2 of them
    val lines = 100000
    sql("drop table if exists test_rcd").collect()
    val r = new Random()
    import sqlContext.implicits._
    // Wide rows: twelve random-UUID columns plus repeating ID/country/city values.
    val df = sqlContext.sparkContext.parallelize(1 to lines)
      .map(x => ("No." + r.nextInt(10000), "country" + x % 10000, "city" + x % 10000, x % 10000,
        UUID.randomUUID().toString, UUID.randomUUID().toString, UUID.randomUUID().toString,
        UUID.randomUUID().toString, UUID.randomUUID().toString, UUID.randomUUID().toString,
        UUID.randomUUID().toString, UUID.randomUUID().toString, UUID.randomUUID().toString,
        UUID.randomUUID().toString, UUID.randomUUID().toString, UUID.randomUUID().toString))
      .toDF("ID", "country", "city", "population",
        "random1", "random2", "random3",
        "random4", "random5", "random6",
        "random7", "random8", "random9",
        "random10", "random11", "random12")
    df.write
      .format("carbondata")
      .option("tableName", "test_rcd")
      .option("SORT_COLUMNS", "id")
      .option("SORT_SCOPE", "LOCAL_SORT")
      .mode(SaveMode.Overwrite)
      .save()
    // Baseline count taken before the index exists; must be reproduced after indexing.
    val withoutBloom = sql("select count(*) from test_rcd where city = 'city40'").collect().toSeq
    sql("CREATE INDEX dm_rcd " +
      "ON TABLE test_rcd (city)" +
      "AS 'bloomfilter' " +
      "properties ('BLOOM_SIZE'='640000', 'BLOOM_FPP'='0.00001')")
    checkAnswer(sql("select count(*) from test_rcd where city = 'city40'"), withoutBloom)
    sql("drop table if exists test_rcd").collect()
    // Restore the default sizes so later tests are unaffected.
    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.BLOCKLET_SIZE,
      CarbonCommonConstants.BLOCKLET_SIZE_DEFAULT_VAL)
    CarbonProperties.getInstance().addProperty(CarbonV3DataFormatConstants.BLOCKLET_SIZE_IN_MB,
      CarbonV3DataFormatConstants.BLOCKLET_SIZE_IN_MB_DEFAULT_VALUE)
  }
  // Suite-level teardown: remove the generated big csv file and both test tables.
  override def afterAll(): Unit = {
    deleteFile(bigFile)
    sql(s"DROP TABLE IF EXISTS $normalTable")
    sql(s"DROP TABLE IF EXISTS $bloomSampleTable")
  }
}
/** Test helper for asserting the status recorded for a table's index. */
object IndexStatusUtil {

  /**
   * Asserts that the index `indexName`, registered under `indexProvider` on
   * `tableName` in the default database, carries `indexStatus`.
   * If the table has no index map for the provider, the check is skipped.
   */
  def checkIndexStatus(tableName: String,
      indexName: String,
      indexStatus: String,
      sparkSession: SparkSession,
      indexProvider: IndexType): Unit = {
    val table = CarbonEnv.getCarbonTable(Some(CarbonCommonConstants.DATABASE_DEFAULT_NAME),
      tableName)(sparkSession)
    Option(table.getIndexesMap.get(indexProvider.getIndexProviderName)).foreach { providerIndexes =>
      // First narrow to entries whose status matches (case-insensitively), then
      // require an exact-status entry under the expected index name.
      val withStatus = providerIndexes.asScala.filter { case (_, props) =>
        props.get(CarbonCommonConstants.INDEX_STATUS).equalsIgnoreCase(indexStatus)
      }
      assert(withStatus.exists { case (name, props) =>
        name.equals(indexName) && props.get(CarbonCommonConstants.INDEX_STATUS) == indexStatus
      })
    }
  }
}
| zzcclp/carbondata | integration/spark/src/test/scala/org/apache/carbondata/index/bloom/BloomCoarseGrainIndexFunctionSuite.scala | Scala | apache-2.0 | 40,093 |
package com.github.jpbetz.subspace
import java.nio.FloatBuffer
/** Factory helpers for [[Vector2]]. */
object Vector2 {

  /** A vector with both components set to `value`. */
  def fill(value: Float): Vector2 = Vector2(value, value)

  /** A fresh float buffer sized for one Vector2 (2 floats). */
  def allocateEmptyBuffer: FloatBuffer = Buffers.createFloatBuffer(2)

  /** Reads components (x, y) from the first two slots of `buffer`. */
  def fromBuffer(buffer: FloatBuffer): Vector2 = {
    val x = buffer.get(0)
    val y = buffer.get(1)
    Vector2(x, y)
  }
}
/**
 * An immutable 2-component single-precision vector.
 *
 * Every operation returns a new [[Vector2]]; the receiver is never mutated.
 */
case class Vector2(x: Float, y: Float) extends Bufferable {

  /** Component lookup: index 0 is x, index 1 is y. */
  def apply(index: Int): Float =
    index match {
      case 0 => x
      case 1 => y
      case _ => throw new ArrayIndexOutOfBoundsException(index)
    }

  /** Euclidean length of this vector. */
  def magnitude: Float = Math.sqrt(x * x + y * y).toFloat

  /** A unit-length vector pointing in the same direction as this one. */
  def normalize: Vector2 = {
    val inv = 1f / magnitude
    scale(inv)
  }

  /** Dot product: sum of the pairwise component products. */
  def dotProduct(vec: Vector2): Float = x * vec.x + y * vec.y

  def unary_- : Vector2 = negate

  /** Component-wise negation. */
  def negate: Vector2 = Vector2(-x, -y)

  def +(vec: Vector2): Vector2 = add(vec)
  def add(vec: Vector2): Vector2 = add(vec.x, vec.y)

  /** Component-wise addition. */
  def add(x: Float, y: Float): Vector2 = Vector2(this.x + x, this.y + y)

  def -(vec: Vector2): Vector2 = subtract(vec.x, vec.y)
  def subtract(vec: Vector2): Vector2 = subtract(vec.x, vec.y)

  /** Component-wise subtraction. */
  def subtract(x: Float, y: Float): Vector2 = Vector2(this.x - x, this.y - y)

  def *(f: Float): Vector2 = scale(f)
  def /(f: Float): Vector2 = scale(1 / f)

  /** Uniform scaling by `f`. */
  def scale(f: Float): Vector2 = scale(f, f)
  def scale(vec: Vector2): Vector2 = scale(vec.x, vec.y)

  /** Component-wise scaling. */
  def scale(x: Float, y: Float): Vector2 = Vector2(this.x * x, this.y * y)

  /**
   * Clamps both components into [min, max].
   * @throws IllegalArgumentException if min > max
   */
  def clamp(min: Float, max: Float): Vector2 = {
    if (min > max) throw new IllegalArgumentException("min must not be greater than max")
    Vector2(Floats.clamp(x, min, max), Floats.clamp(y, min, max))
  }

  /**
   * Clamps each component into the range given by the matching components of
   * `min` and `max`.
   * @throws IllegalArgumentException if any min component exceeds its max
   */
  def clamp(min: Vector2, max: Vector2): Vector2 = {
    if (min.x > max.x) throw new IllegalArgumentException("min.x must not be greater than max.x")
    if (min.y > max.y) throw new IllegalArgumentException("min.y must not be greater than max.y")
    Vector2(Floats.clamp(x, min.x, max.x), Floats.clamp(y, min.y, max.y))
  }

  /** Euclidean distance between this point and `vec`. */
  def distanceTo(vec: Vector2): Float = subtract(vec).magnitude

  /**
   * Linear interpolation between this vector (t = 0) and `vec` (t = 1).
   * @throws IllegalArgumentException if t is outside [0, 1]
   */
  def lerp(vec: Vector2, t: Float): Vector2 = {
    if (t < 0f || t > 1f) throw new IllegalArgumentException("t must be between 0 and 1, inclusively.")
    Vector2(Floats.lerp(x, vec.x, t), Floats.lerp(y, vec.y, t))
  }

  /** Each component rounded to the nearest whole number. */
  def round(): Vector2 = Vector2(scala.math.round(x), scala.math.round(y))

  //def rotate(): Vector2 = ???

  override def toString: String = s"($x, $y)"

  // swizzle operators
  def xx = Vector2(x, x)
  def xy = Vector2(x, y)
  def yx = Vector2(y, x)
  def yy = Vector2(y, y)

  /** Writes (x, y) into a freshly allocated buffer, flipped ready for reading. */
  def allocateBuffer: FloatBuffer = {
    val buf = Buffers.createFloatBuffer(2)
    buf.put(x).put(y)
    buf.flip()
    buf
  }

  /** Overwrites `buffer` with (x, y) and flips it, ready for reading. */
  def updateBuffer(buffer: FloatBuffer): Unit = {
    buffer.clear()
    buffer.put(x).put(y)
    buffer.flip()
  }
}
| jpbetz/subspace | subspace/src/main/scala/com/github/jpbetz/subspace/Vector2.scala | Scala | apache-2.0 | 2,961 |
/*
* Copyright (c) 2017 Aaron Levin
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package ca.aaronlevin.parsec
import cats.{ Functor, Id, Monad }
import cats.implicits._
import scala.language.higherKinds
import scala.annotation.tailrec
/**
 * Experiment: a stack-safe ("stackless") encoding of a stripped-down ParsecT.
 * map/flatMap build a description on the heap; evaluation runs through a
 * tail-recursive trampoline so long chains do not overflow the JVM stack.
 */
object StacklessTest {

  // easiest way to express rank-2 polymorphism...
  // (the continuation's result type B is quantified per call, not per instance)
  trait IFoo[S, M[_], A] {
    def apply[B](state: S, cont: A => M[B]): M[B]
  }

  // A stripped down ParsecT: consumes a state S and eventually feeds an A to a
  // continuation producing an M[B].
  sealed abstract class Foo[S, M[_], A] {

    // Runs this computation by handing it to a fresh trampoline environment.
    def apply[B](state: S, cont: A => M[B]): M[B] = {
      import Foo._
      val renv = new Foo.RunEnv[M, B]
      renv.eval(state, this, renv.ScalaFunc(cont)).get
    }

    // These only allocate AST nodes; nothing executes until apply is called.
    final def map[B](f: A => B): Foo[S, M, B] = Foo.Mapped(this, f)
    final def flatMap[B](f: A => Foo[S, M, B]): Foo[S, M, B] = Foo.FlatMapped(this, f)
  }

  object Foo {
    // The reified program: pure values, mapped/flatMapped nodes, and an opaque
    // rank-2 step F.
    case class Pure[S, M[_], A](value: A) extends Foo[S, M, A]
    case class Mapped[S, M[_], A, B](foo: Foo[S, M, A], fn: A => B) extends Foo[S, M, B]
    case class FlatMapped[S, M[_], A, B](foo: Foo[S, M, A], fn: A => Foo[S, M, B])
      extends Foo[S, M, B]
    case class F[S, M[_], A](f: IFoo[S, M, A]) extends Foo[S, M, A]

    // START: RunEnv
    private class RunEnv[M[_], B] {

      // Result and Fn represent the trampoline: Result is either a finished
      // M[B] or a suspended rank-2 step; Fn is the reified continuation stack.
      sealed abstract class Result {
        def get: M[B]
      }
      case class Complete(get: M[B]) extends Result
      // A suspension at an F node: resuming re-enters eval with the remaining
      // continuation stack.
      case class ContResult[S, A](state: S, runFun: IFoo[S, M, A], fn: Fn[A]) extends Result {
        def get: M[B] =
          fn match {
            case ScalaFunc(sfn) => runFun.apply(state, sfn)
            // state is assumed pure here?
            case _ =>
              runFun.apply(state, { a =>
                eval(state, Pure[S, M, A](a), fn).get
              })
          }
      }

      // next action. this is where we push user methods from the heap onto the stack.
      sealed trait Fn[A]
      case class ScalaFunc[A](fn: A => M[B]) extends Fn[A]
      case class MappedFunc[A, C](fn: A => C, next: Fn[C]) extends Fn[A]
      case class FlatMappedFunc[A, S, C](fn: A => Foo[S, M, C], next: Fn[C]) extends Fn[A]

      // Entry point: erases A to Any so the tail-recursive loop stays monomorphic.
      def eval[S, A](state: S, foo: Foo[S, M, A], fn: Fn[A]): Result =
        loop(state, foo.asInstanceOf[Foo[S, M, Any]], fn.asInstanceOf[Fn[Any]])

      // The trampoline itself: a single tail-recursive interpreter over the AST.
      // Mapped/FlatMapped nodes are unwound onto the Fn continuation stack;
      // Pure values pop it; F nodes suspend into a ContResult.
      @tailrec
      private def loop[S](state: S, foo: Foo[S, M, Any], fn: Fn[Any]): Result = foo match {
        case Pure(value) =>
          fn match {
            case ScalaFunc(sfn) => Complete(sfn(value))
            case MappedFunc(mfn, next) => loop(state, Pure[S, M, Any](mfn(value)), next)
            case FlatMappedFunc(fmn, next) =>
              loop(state, fmn(value).asInstanceOf[Foo[S, M, Any]], next)
          }
        case Mapped(foo, mfn) => loop(state, foo, MappedFunc(mfn, fn))
        case FlatMapped(foo, mfn) => loop(state, foo, FlatMappedFunc(mfn, fn))
        case F(f) => ContResult(state, f, fn)
      }
    }
    // END: RunEnv
  }

  // cats Functor instance: map simply wraps in a Mapped node.
  implicit def fooFunctor[S, M[_]] = new Functor[Foo[S, M, ?]] {
    import Foo._
    def map[A, B](fa: Foo[S, M, A])(f: A => B): Foo[S, M, B] = Mapped[S, M, A, B](fa, f)
  }

  // cats Monad instance.
  implicit def fooMonad[S, M[_]] = new Monad[Foo[S, M, ?]] {
    import Foo._
    def pure[A](a: A) = Pure(a)
    def flatMap[A, B](fa: Foo[S, M, A])(f: A => Foo[S, M, B]) = FlatMapped(fa, f)
    // NOTE(review): tailRecM re-enters apply for every Left, so its stack
    // safety rests entirely on the trampoline inside apply — confirm under
    // deeply recursive use.
    def tailRecM[A, B](a: A)(f: A => Foo[S, M, Either[A, B]]): Foo[S, M, B] = new Foo[S, M, B] {
      override def apply[X](state: S, cont: B => M[X]): M[X] =
        f(a).apply(state, {
          case Left(a) =>
            tailRecM(a)(f).apply(state, { b =>
              cont(b)
            })
          case Right(b) => cont(b)
        })
    }
  }

  // Runs a Foo to completion, lifting the final value into M.
  def runFoo[S, M[_], A](foo: Foo[S, M, A], state: S)(implicit monad: Monad[M]): M[A] =
    foo.apply[A](state, { a =>
      monad.pure(a)
    })

  // Builds a chain of n maps over Id and evaluates it; a naive (non-trampolined)
  // encoding would overflow the stack for large n.
  def stackTest(n: Int) = {
    import Foo._
    val monad = fooMonad[Unit, Id]
    val r: Foo[Unit, Id, Int] = monad.pure(333)
    val rr: Foo[Unit, Id, Int] = (1 to n).foldLeft(r)((r, _) => r.map(i => i + 1))
    // NOTE(review): `Unit` here is the companion object, adapted to () by the
    // compiler's value discarding — passing () directly would be clearer.
    runFoo[Unit, Id, Int](rr, Unit)
  }
}
| aaronlevin/scala-parsec | src/main/scala/ca/aaronlevin/parsec/foo.scala | Scala | mit | 5,271 |
//: ----------------------------------------------------------------------------
//: Copyright (C) 2015 Verizon. All Rights Reserved.
//:
//: Licensed under the Apache License, Version 2.0 (the "License");
//: you may not use this file except in compliance with the License.
//: You may obtain a copy of the License at
//:
//: http://www.apache.org/licenses/LICENSE-2.0
//:
//: Unless required by applicable law or agreed to in writing, software
//: distributed under the License is distributed on an "AS IS" BASIS,
//: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//: See the License for the specific language governing permissions and
//: limitations under the License.
//:
//: ----------------------------------------------------------------------------
package funnel
package elastic
import java.net.URI
import java.util.Date
import java.text.SimpleDateFormat
import scalaz.concurrent.Strategy.Executor
import scalaz.stream.{Process1,Process,time,wye}
/**
* This class provides a consumer for the funnel monitoring stream that takes
* the events emitted and constructs a JSON document that will be sent to elastic
* search. The names of the metrics on the stream are expected to be delimited
* by a `/`, which will then be exploded into a tree of fields.
*
 * WARNING: Only use this sink if you have a small-to-medium size system, as
 * Elasticsearch performs best when document cardinality is low. This format is
 * convenient for certain types of use case, but it is not expected to work at large scale.
*
* The resulting document structure looks like:
*
* {
* "cluster": "imqa-maestro-1-0-279-F6Euts", #This allows for a Kibana search, cluster: x
* "host": "ec2-107-22-118-178.compute-1.amazonaws.com",
* "jvm": {
* "memory": {
* "heap": {
* "committed": {
* "last": 250.99763712000001,
* "mean": 250.99763712000001,
* "standard_deviation": 0.0
* },
* "usage": {
* "last": 0.042628084023299997,
* "mean": 0.042445506024100001,
* "standard_deviation": 0.00018257799924300001
* }
* }
* }
* }
* }
*
*/
case class ElasticExploded(M: Monitoring, ISelfie: Instruments, H: HttpLayer = SharedHttpLayer.H) {
  import Process._
  import Elastic._
  import http.JSON._
  import argonaut._, Argonaut._

  // Metrics used to report this sink's own health/status.
  val metrics = new ElasticMetrics(ISelfie)

  /**
   * Data points grouped by mirror URL, experiment ID, experiment group,
   * and grouping key from config
   */
  type ESGroup[A] = Map[GroupKey, Map[Path, Datapoint[A]]]

  case class GroupKey(
    /* name of the group, e.g. now */
    name: String,
    /* source of this group, specifically a uri */
    source: Option[SourceURL],
    /* if applicable, an experiment id */
    experimentID: Option[ExperimentID],
    /* if applicable, an experiment group id */
    experimentGroup: Option[GroupID]
  )

  /**
   * Groups data points by key, mirror URL, and custom grouping from config.
   * Emits when it receives a key/mirror where the key is already in the group for the mirror.
   * That is, emits as few times as possible without duplicates
   * and without dropping any data.
   */
  def elasticGroup[A](groups: List[String]): Process1[Option[Datapoint[A]], ESGroup[A]] = {
    // sawDatapoint tracks whether the previous input was a real datapoint;
    // m is the accumulated group awaiting publication.
    def go(sawDatapoint: Boolean, m: ESGroup[A]): Process1[Option[Datapoint[A]], ESGroup[A]] =
      await1[Option[Datapoint[A]]].flatMap {
        case Some(pt) =>
          val name = pt.key.name
          val source = pt.key.attributes.get(AttributeKeys.source)
          val experimentID = pt.key.attributes.get(AttributeKeys.experimentID)
          val experimentGroup = pt.key.attributes.get(AttributeKeys.experimentGroup)
          // The configured group prefix (e.g. "now"); stripped from the name
          // so the remainder can be exploded into a path of JSON fields.
          val grouping = groups.find(name startsWith _) getOrElse ""
          val k = name.drop(grouping.length).split("/").toList.filterNot(_ == "")
          val groupKey = GroupKey(grouping, source, experimentID, experimentGroup)
          m.get(groupKey) match {
            case Some(g) => g.get(k) match {
              case Some(_) =>
                // Duplicate path for this group: flush the accumulated map and
                // start a fresh one seeded with the new datapoint.
                emit(m) ++ go(sawDatapoint = true, Map(groupKey -> Map(k -> pt)))
              case None =>
                go(sawDatapoint = true, m + (groupKey -> (g + (k -> pt))))
            }
            case None =>
              go(sawDatapoint = true, m + (groupKey -> Map(k -> pt)))
          }
        case None => // No Datapoint this time
          if (sawDatapoint) { // Saw one last time
            go(sawDatapoint = false, m) // Keep going with current Map
          } else { // Didn't see one last time, either
            M.log.info("I haven't seen any data points for a while. I hope everything's alright.")
            emit(m) ++ go(sawDatapoint = false, Map()) // Publish current Map
          }
      }
    go(false, Map())
  }

  /**
   * Emits one JSON document per mirror URL and window type, on the right,
   * first emitting the ES mapping properties for their keys, on the left.
   * Once grouped by `elasticGroup`, this process emits one document per
   * URL/window with all the key/value pairs that were seen for that mirror
   * in the group for that period.
   *
   * For the fixed fields `uri` and `host`, if we do not have a meaningful
   * value for this, we fallback to assuming this is coming from the local
   * monitoring instance, so just use the supplied flask name.
   */
  def elasticUngroup[A](flaskName: String, flaskCluster: String): Process1[ESGroup[A], Json] =
    await1[ESGroup[A]].flatMap { g =>
      M.log.debug(s"Publishing ${g.size} elements to ElasticSearch")
      emitAll(g.toSeq.map { case (groupKey, m) =>
        ("uri" := groupKey.source.getOrElse(flaskName)) ->:
        ("host" := groupKey.source.map(u => (new URI(u)).getHost).getOrElse(flaskName)) ->:
        ("experiment_id" :=? groupKey.experimentID ) ->?:
        ("experiment_group" :=? groupKey.experimentGroup ) ->?:
        ("@timestamp" :=
          new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSZZ").format(new Date)) ->:
        m.toList.foldLeft(("group" := groupKey.name) ->: jEmptyObject) {
          case (o, (ps, dp)) =>
            val attrs = dp.key.attributes
            val kind = attrs.get(AttributeKeys.kind)
            val clust = ("cluster" :=
              attrs.getOrElse(AttributeKeys.cluster, flaskCluster)) ->: jEmptyObject
            // Explode the path (plus kind) into nested objects, then merge
            // with the accumulated document and the cluster field.
            clust deepmerge (o deepmerge (ps ++ kind).foldRight((dp.asJson -| "value").get)(
              (a, b) => (a := b) ->: jEmptyObject))
        }
      })
    }.repeat

  /**
   * Builds and runs the Elasticsearch publishing task: merges a keep-alive
   * tick (emitting None on subscription timeout) with the metric subscription,
   * groups datapoints with `elasticGroup`, renders them as JSON documents with
   * `elasticUngroup`, and hands them to `bufferAndPublish` for HTTP delivery.
   */
  def publish(flaskName: String, flaskCluster: String): ES[Unit] = {
    val E = Executor(Monitoring.defaultPool)
    bufferAndPublish(flaskName, flaskCluster)(M, E, H, metrics){ cfg =>
      val subscription = Monitoring.subscribe(M){ k =>
        cfg.groups.exists(g => k.startsWith(g))}.map(Option.apply)
      time.awakeEvery(cfg.subscriptionTimeout)(E,
        Monitoring.schedulingPool).map(_ => Option.empty[Datapoint[Any]]
      ).wye(subscription)(wye.merge)(E) |>
      elasticGroup(cfg.groups) |>
      elasticUngroup(flaskName, flaskCluster)
    }
  }
}
} | neigor/funnel | elastic/src/main/scala/ElasticExploded.scala | Scala | apache-2.0 | 7,260 |
/**
* Created by Romain Reuillon on 22/09/16.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package org.openmole.core
package expansion {
import org.openmole.core.context.Val
import org.openmole.core.fileservice.FileService
import org.openmole.core.workspace.TmpDirectory
  // A composable validation: applied to a set of inputs it returns every
  // failure found (empty sequence when validation succeeds).
  sealed trait Validate {
    def apply(inputs: Seq[Val[_]])(implicit newFile: TmpDirectory, fileService: FileService): Seq[Throwable]
    // Combines two validations; see Validate.++ for neutral-element handling.
    def ++(v: Validate) = Validate.++(this, v)
  }
object Validate {
class Parameters(val inputs: Seq[Val[_]])(implicit val tmpDirectory: TmpDirectory, implicit val fileService: FileService)
case class LeafValidate(validate: Parameters ⇒ Seq[Throwable]) extends Validate {
def apply(inputs: Seq[Val[_]])(implicit newFile: TmpDirectory, fileService: FileService): Seq[Throwable] = validate(new Parameters(inputs))
}
case class SeqValidate(validate: Seq[Validate]) extends Validate {
def apply(inputs: Seq[Val[_]])(implicit newFile: TmpDirectory, fileService: FileService): Seq[Throwable] = validate.flatMap(_.apply(inputs))
}
def apply(f: Parameters ⇒ Seq[Throwable]): Validate = LeafValidate(f)
def apply(vs: Validate*): Validate = SeqValidate(vs)
case object success extends Validate {
def apply(inputs: Seq[Val[_]])(implicit newFile: TmpDirectory, fileService: FileService): Seq[Throwable] = Seq()
}
def ++(v1: Validate, v2: Validate) =
(v1, v2) match {
case (Validate.success, Validate.success) ⇒ Validate.success
case (v, Validate.success) ⇒ v
case (Validate.success, v) ⇒ v
case (v1, v2) ⇒ SeqValidate(toIterable(v1).toSeq ++ toIterable(v2))
}
implicit def fromSeqValidate(v: Seq[Validate]) = apply(v: _*)
implicit def fromThrowables(t: Seq[Throwable]) = Validate { _ ⇒ t }
implicit def toIterable(v: Validate) =
v match {
case s: SeqValidate ⇒ s.validate
case l: LeafValidate ⇒ Iterable(l)
case success ⇒ Iterable.empty
}
}
  // DSL mix-in: re-exports the Condition alias and lifts plain sequences into
  // context-dependent values.
  trait ExpansionPackage {
    // Lifts a plain Seq[T] into a Seq of FromContext values so that literal
    // sequences can be used where context-dependent values are expected.
    implicit def seqToSeqOfFromContext[T](s: Seq[T])(implicit toFromContext: ToFromContext[T, T]): Seq[FromContext[T]] = s.map(e ⇒ toFromContext(e))
    type Condition = expansion.Condition
    lazy val Condition = expansion.Condition
  }
}
package object expansion {
  // A Condition is simply a context-dependent Boolean.
  type Condition = FromContext[Boolean]
}
| openmole/openmole | openmole/core/org.openmole.core.expansion/src/main/scala/org/openmole/core/expansion/package.scala | Scala | agpl-3.0 | 3,056 |
package com.typesafe.sbt
package packager
import java.io.File
object Hashing {

  /** Lower-case hex SHA-1 digest of the file's contents. */
  def sha1Sum(t: File): String =
    messageDigestHex(java.security.MessageDigest.getInstance("SHA-1"))(t)

  /** Lower-case hex SHA-512 digest of the file's contents. */
  def sha512(t: File): String =
    messageDigestHex(java.security.MessageDigest.getInstance("SHA-512"))(t)

  /** Lower-case hex MD5 digest of the file's contents. */
  def md5Sum(t: File): String =
    messageDigestHex(java.security.MessageDigest.getInstance("MD5"))(t)

  /**
   * Streams `file` through `md` in 8 KiB chunks and returns the hex-encoded
   * digest. The input stream is always closed, even if reading fails.
   */
  def messageDigestHex(md: java.security.MessageDigest)(file: File): String = {
    val in = new java.io.FileInputStream(file)
    val buffer = new Array[Byte](8192)
    try {
      @annotation.tailrec
      def read(): Unit = in.read(buffer) match {
        case n if n <= 0 => () // end of stream
        case n           => md.update(buffer, 0, n); read()
      }
      read()
    } finally in.close()
    convertToHex(md.digest)
  }

  /**
   * Lower-case hexadecimal rendering of `data`, two characters per byte.
   * Uses the standard formatter ("%02x" treats negative bytes as unsigned),
   * replacing the previous hand-rolled nibble conversion.
   */
  def convertToHex(data: Array[Byte]): String =
    data.map(b => "%02x".format(b)).mkString
}
| fsat/sbt-native-packager | src/main/scala/com/typesafe/sbt/packager/Hashing.scala | Scala | bsd-2-clause | 1,207 |
/*
* @author Philip Stutz
*
* Copyright 2010 University of Zurich
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.signalcollect
/**
* StateForwarderEdge is an edge implementation that signals
* the state of its source vertex.
*
* @param targetId id of this edges's target vertex
*/
class StateForwarderEdge[TargetIdType](targetId: TargetIdType)
  extends DefaultEdge(targetId) {

  // The signal sent along this edge is simply the source vertex's current state.
  // NOTE(review): the cast assumes the source's state type matches this edge's
  // Signal type — guaranteed by graph construction, not by the compiler.
  def signal = source.state.asInstanceOf[Signal]
} | danihegglin/DynDCO | src/main/scala/com/signalcollect/StateForwarderEdge.scala | Scala | apache-2.0 | 987 |
/*
* Copyright 2013 ChronoTrack
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.chronotrack.flurry.service
import com.chronotrack.flurry.gen.{IdDetailed, Flurry}
import com.chronotrack.flurry.Generator
import com.chronotrack.flurry.util.BitUtils
/**
* User: ilya
* Date: 8/16/13
* Time: 11:33 AM
*/
/**
 * Thrift service handler exposing the id generator over the Flurry interface.
 * Method names are fixed by the generated Flurry.Iface contract.
 */
class FlurryThriftHandler(val generator: Generator) extends Flurry.Iface {

  /** Worker id of the underlying generator. */
  def get_worker_id() = generator.workerId

  /** A freshly generated id. */
  def get_id() = generator.getId

  /** A freshly generated id together with its decoded components. */
  def get_id_detailed() = {
    val generated = generator.getId
    val decoded = BitUtils.idToTokens(generated, generator.workerIdBits, generator.sequenceBits)
    new IdDetailed(generated, decoded.time, decoded.worker.toInt, decoded.sequence.toInt)
  }
}
| isterin/flurry | src/main/scala/com/chronotrack/flurry/service/FlurryThriftHandler.scala | Scala | apache-2.0 | 1,265 |
/*
* Copyright 2019 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.scio.examples.extra
import com.spotify.scio.avro._
import com.spotify.scio.examples.extra.AvroExample.{AccountFromSchema, AccountToSchema}
import com.spotify.scio.io._
import com.spotify.scio.testing._
// Pipeline tests for AvroExample: each JobTest wires fake Avro/text IO to the
// job's --input/--output arguments and asserts on the produced collections.
class AvroExampleTest extends PipelineSpec {
  "AvroExample" should "work for specific input" in {
    // Specific-record input: two Avro Account records.
    val input =
      Seq(
        new Account(1, "checking", "Alice", 1000.0, AccountStatus.Active),
        new Account(2, "checking", "Bob", 1500.0, AccountStatus.Active)
      )
    // The job writes each record's toString to the text output.
    val expected = input.map(_.toString)
    JobTest[com.spotify.scio.examples.extra.AvroExample.type]
      .args("--input=in.avro", "--output=out.txt", "--method=specificIn")
      .input(AvroIO[Account]("in.avro"), input)
      .output(TextIO("out.txt"))(coll => coll should containInAnyOrder(expected))
      .run()
  }

  it should "work for specific output" in {
    // The job is expected to emit 100 builder-constructed Account records.
    val expected = (1 to 100)
      .map { i =>
        Account
          .newBuilder()
          .setId(i)
          .setAmount(i.toDouble)
          .setName("account" + i)
          .setType("checking")
          .build()
      }
    JobTest[com.spotify.scio.examples.extra.AvroExample.type]
      .args("--output=out.avro", "--method=specificOut")
      .output(AvroIO[Account]("out.avro"))(coll => coll should containInAnyOrder(expected))
      .run()
  }

  "AvroExample" should "work for typed input" in {
    // Typed (case-class) Avro input via the schema-derived AccountFromSchema.
    val input = Seq(
      AccountFromSchema(1, "checking", "Alice", 1000.0),
      AccountFromSchema(2, "checking", "Bob", 1500.0)
    )
    val expected = input.map(_.toString)
    JobTest[com.spotify.scio.examples.extra.AvroExample.type]
      .args("--input=in.avro", "--output=out.txt", "--method=typedIn")
      .input(AvroIO[AccountFromSchema]("in.avro"), input)
      .output(TextIO("out.txt"))(coll => coll should containInAnyOrder(expected))
      .run()
  }

  it should "work for typed output" in {
    // Typed Avro output via AccountToSchema; `type` is backquoted because it
    // is a Scala keyword.
    val expected = (1 to 100).map { i =>
      AccountToSchema(id = i, amount = i.toDouble, name = "account" + i, `type` = "checking")
    }
    JobTest[com.spotify.scio.examples.extra.AvroExample.type]
      .args("--output=out.avro", "--method=typedOut")
      .output(AvroIO[AccountToSchema]("out.avro")) { coll =>
        coll should containInAnyOrder(expected)
      }
      .run()
  }
}
| spotify/scio | scio-examples/src/test/scala/com/spotify/scio/examples/extra/AvroExampleTest.scala | Scala | apache-2.0 | 2,897 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.cluster
import kafka.log.UnifiedLog
import kafka.server.LogOffsetMetadata
import kafka.utils.Logging
import org.apache.kafka.common.TopicPartition
/**
 * Tracks the leader's view of one follower replica of a topic partition:
 * its log start/end offsets and how recently it has caught up to the
 * leader's log end offset. Fields are @volatile because they are updated
 * by fetch-handling threads and read by others (e.g. ISR evaluation).
 */
class Replica(val brokerId: Int, val topicPartition: TopicPartition) extends Logging {
  // the log end offset value, kept in all replicas;
  // for local replica it is the log's end offset, for remote replicas its value is only updated by follower fetch
  @volatile private[this] var _logEndOffsetMetadata = LogOffsetMetadata.UnknownOffsetMetadata

  // the log start offset value, kept in all replicas;
  // for local replica it is the log's start offset, for remote replicas its value is only updated by follower fetch
  @volatile private[this] var _logStartOffset = UnifiedLog.UnknownOffset

  // The log end offset value at the time the leader received the last FetchRequest from this follower
  // This is used to determine the lastCaughtUpTimeMs of the follower
  @volatile private[this] var lastFetchLeaderLogEndOffset = 0L

  // The time when the leader received the last FetchRequest from this follower
  // This is used to determine the lastCaughtUpTimeMs of the follower
  @volatile private[this] var lastFetchTimeMs = 0L

  // lastCaughtUpTimeMs is the largest time t such that the offset of most recent FetchRequest from this follower >=
  // the LEO of leader at time t. This is used to determine the lag of this follower and ISR of this partition.
  @volatile private[this] var _lastCaughtUpTimeMs = 0L

  def logStartOffset: Long = _logStartOffset

  def logEndOffsetMetadata: LogOffsetMetadata = _logEndOffsetMetadata

  def logEndOffset: Long = logEndOffsetMetadata.messageOffset

  def lastCaughtUpTimeMs: Long = _lastCaughtUpTimeMs

  /*
   * If the FetchRequest reads up to the log end offset of the leader when the current fetch request is received,
   * set `lastCaughtUpTimeMs` to the time when the current fetch request was received.
   *
   * Else if the FetchRequest reads up to the log end offset of the leader when the previous fetch request was received,
   * set `lastCaughtUpTimeMs` to the time when the previous fetch request was received.
   *
   * This is needed to enforce the semantics of ISR, i.e. a replica is in ISR if and only if it lags behind leader's LEO
   * by at most `replicaLagTimeMaxMs`. These semantics allow a follower to be added to the ISR even if the offset of its
   * fetch request is always smaller than the leader's LEO, which can happen if small produce requests are received at
   * high frequency.
   */
  def updateFetchState(followerFetchOffsetMetadata: LogOffsetMetadata,
                       followerStartOffset: Long,
                       followerFetchTimeMs: Long,
                       leaderEndOffset: Long): Unit = {
    // Note: _lastCaughtUpTimeMs is evaluated against the *previous* fetch's
    // snapshot before lastFetchLeaderLogEndOffset/lastFetchTimeMs are
    // overwritten below — the statement order matters.
    if (followerFetchOffsetMetadata.messageOffset >= leaderEndOffset)
      _lastCaughtUpTimeMs = math.max(_lastCaughtUpTimeMs, followerFetchTimeMs)
    else if (followerFetchOffsetMetadata.messageOffset >= lastFetchLeaderLogEndOffset)
      _lastCaughtUpTimeMs = math.max(_lastCaughtUpTimeMs, lastFetchTimeMs)

    _logStartOffset = followerStartOffset
    _logEndOffsetMetadata = followerFetchOffsetMetadata
    lastFetchLeaderLogEndOffset = leaderEndOffset
    lastFetchTimeMs = followerFetchTimeMs
  }

  // Re-seeds the caught-up bookkeeping, e.g. when leadership or follower
  // state changes and previous fetch history is no longer meaningful.
  def resetLastCaughtUpTime(curLeaderLogEndOffset: Long, curTimeMs: Long, lastCaughtUpTimeMs: Long): Unit = {
    lastFetchLeaderLogEndOffset = curLeaderLogEndOffset
    lastFetchTimeMs = curTimeMs
    _lastCaughtUpTimeMs = lastCaughtUpTimeMs
    trace(s"Reset state of replica to $this")
  }

  override def toString: String = {
    val replicaString = new StringBuilder
    replicaString.append("Replica(replicaId=" + brokerId)
    replicaString.append(s", topic=${topicPartition.topic}")
    replicaString.append(s", partition=${topicPartition.partition}")
    replicaString.append(s", lastCaughtUpTimeMs=$lastCaughtUpTimeMs")
    replicaString.append(s", logStartOffset=$logStartOffset")
    replicaString.append(s", logEndOffset=$logEndOffset")
    replicaString.append(s", logEndOffsetMetadata=$logEndOffsetMetadata")
    replicaString.append(s", lastFetchLeaderLogEndOffset=$lastFetchLeaderLogEndOffset")
    replicaString.append(s", lastFetchTimeMs=$lastFetchTimeMs")
    replicaString.append(")")
    replicaString.toString
  }

  // Identity is (brokerId, topicPartition) only; the mutable offset fields
  // deliberately do not participate in equals/hashCode.
  override def equals(that: Any): Boolean = that match {
    case other: Replica => brokerId == other.brokerId && topicPartition == other.topicPartition
    case _ => false
  }

  override def hashCode: Int = 31 + topicPartition.hashCode + 17 * brokerId
}
| guozhangwang/kafka | core/src/main/scala/kafka/cluster/Replica.scala | Scala | apache-2.0 | 5,359 |
/*
* The Computer Language Shootout
* http://shootout.alioth.debian.org/
*
* contributed by Andrei Formiga
*/
/* functional version */
object sumcol
{
  /**
   * Sums integers read one per line from standard input, starting from `res`.
   * Stops at end of input (null line). Tail-recursive, so input length is
   * bounded by memory rather than stack depth.
   *
   * Uses `Int` (the lowercase `int` alias was removed from Scala long ago)
   * and `scala.io.StdIn.readLine` (Console.readLine is deprecated).
   */
  @scala.annotation.tailrec
  def sumFile(res: Int): Int =
  {
    val line = scala.io.StdIn.readLine()
    if (line == null) res else sumFile(res + line.toInt)
  }

  def main(args: Array[String]) =
  {
    Console.println(sumFile(0).toString())
  }
}
| kragen/shootout | bench/sumcol/sumcol.scala-2.scala | Scala | bsd-3-clause | 387 |
package keystoneml.nodes.images
import breeze.linalg._
import breeze.numerics._
import breeze.stats.mean
import keystoneml.nodes.learning.{GaussianMixtureModelEstimator, GaussianMixtureModel}
import org.apache.spark.rdd.RDD
import keystoneml.utils.MatrixUtils
import keystoneml.workflow.{Pipeline, OptimizableEstimator, Estimator, Transformer}
/**
 * Abstract interface for Fisher Vector implementations: a transformer from a
 * matrix of descriptors (one descriptor per column) to a Fisher Vector matrix.
 */
trait FisherVectorInterface extends Transformer[DenseMatrix[Float], DenseMatrix[Float]]
/**
* Implements a fisher vector.
*
* @param gmm A trained Gaussian Mixture Model
*/
case class FisherVector(gmm: GaussianMixtureModel)
  extends FisherVectorInterface {

  private val gmmMeans = gmm.means
  private val gmmVars = gmm.variances
  private val gmmWeights = gmm.weights

  /**
   * Encodes a set of descriptors as a Fisher Vector.
   *
   * @param in matrix of size numSiftDimensions by numSiftDescriptors
   * @return the first- and second-order Fisher Vector terms, horizontally
   *         concatenated (D x 2K for K mixture components)
   */
  override def apply(in: DenseMatrix[Float]): DenseMatrix[Float] = {
    val nDesc = in.cols.toDouble

    // Get the fisher vector posterior assignments
    val x = convert(in, Double)
    val q = gmm.apply(x.t) // numSiftDescriptors x K

    /* here is the Fisher Vector in all of its beauty. This is directly
     from the FV survey by Sanchez et al: */
    // s0: mean posterior per component; s1/s2: posterior-weighted first and
    // second moments of the descriptors, normalized by the descriptor count.
    val s0 = mean(q, Axis._0) // 1 x K, but really K x 1 because it's a dense vector
    val s1 = (x * q) :/= nDesc // D x K
    val s2 = ((x :* x) * q) :/= nDesc // D x K

    val fv1 = (s1 - gmmMeans * diag(s0).t) :/ (sqrt(gmmVars) * diag(sqrt(gmmWeights)))
    val fv2 = (s2 - (gmmMeans * 2.0 :* s1) + (((gmmMeans :* gmmMeans) - gmmVars)*diag(s0)).t) :/
      (gmmVars * diag(sqrt(gmmWeights :* 2.0)))

    // concatenate the two fv terms
    convert(DenseMatrix.horzcat(fv1, fv2), Float)
  }
}
/**
* Trains a scala Fisher Vector implementation, via
* estimating a GMM by treating each column of the inputs as a separate
* DenseVector input to [[GaussianMixtureModelEstimator]]
*
* TODO: Pending philosophical discussions on how to best make it so you can
* swap in GMM, KMeans++, etc. for Fisher Vectors. For now just hard-codes GMM here
*
* @param k Number of centers to estimate.
*/
/**
 * Pure-Scala Fisher Vector estimator: fits a k-component GMM by treating each
 * column of every input matrix as one (Double-converted) training descriptor.
 */
case class ScalaGMMFisherVectorEstimator(k: Int) extends Estimator[DenseMatrix[Float], DenseMatrix[Float]] {
  def fit(data: RDD[DenseMatrix[Float]]): FisherVector = {
    // One training vector per matrix column, converted Float -> Double.
    val descriptorColumns = data.flatMap { mat =>
      MatrixUtils.matrixToColArray(mat).map(col => convert(col, Double))
    }
    val gmm = new GaussianMixtureModelEstimator(k).fit(descriptorColumns)
    FisherVector(gmm)
  }
}
/**
* Trains either a scala or an `enceval` Fisher Vector implementation, via
* estimating a GMM by treating each column of the inputs as a separate
* DenseVector input to [[GaussianMixtureModelEstimator]]
*
* Automatically decides which implementation to use when node-level optimization is enabled.
*
* @param k Number of centers to estimate.
*/
/**
 * Optimizable Fisher Vector estimator. Without node-level optimization it
 * falls back to the pure-Scala estimator; with optimization enabled it picks
 * the external `enceval` implementation when k is large (>= 32).
 */
case class GMMFisherVectorEstimator(k: Int) extends OptimizableEstimator[DenseMatrix[Float], DenseMatrix[Float]] {
  // Default implementation used when optimization is disabled.
  val default = ScalaGMMFisherVectorEstimator(k)

  def optimize(sample: RDD[DenseMatrix[Float]], numPerPartition: Map[Int, Int])
    : Estimator[DenseMatrix[Float], DenseMatrix[Float]] =
    if (k >= 32) keystoneml.nodes.images.external.EncEvalGMMFisherVectorEstimator(k)
    else ScalaGMMFisherVectorEstimator(k)
} | amplab/keystone | src/main/scala/keystoneml/nodes/images/FisherVector.scala | Scala | apache-2.0 | 3,366 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding
import cascading.flow.hadoop.HadoopFlow
import cascading.flow.planner.BaseFlowStep
import org.apache.hadoop.conf.Configured
import org.apache.hadoop.mapred.JobConf
import org.apache.hadoop.util.{GenericOptionsParser, Tool => HTool, ToolRunner}
import scala.annotation.tailrec
import scala.collection.JavaConverters._
// Hadoop Tool adapter for scalding Jobs: parses the command line into a Mode
// and Args, instantiates the job, and runs it (plus any follow-on jobs it
// spawns via Job.next).
class Tool extends Configured with HTool {
  // This mutable state is not my favorite, but we are constrained by the Hadoop API:
  var rootJob: Option[(Args) => Job] = None

  // Allows you to set the job for the Tool to run
  def setJobConstructor(jobc: (Args) => Job): Unit =
    if (rootJob.isDefined) {
      sys.error("Job is already defined")
    } else {
      rootJob = Some(jobc)
    }

  // Resolves the Job to run: an explicitly set constructor wins; otherwise the
  // first positional argument is treated as the job class name.
  protected def getJob(args: Args): Job = rootJob match {
    case Some(job) => job(args)
    case None if args.positional.isEmpty =>
      throw ArgsException("Usage: Tool <jobClass> --local|--hdfs [args...]")
    case None => // has at least one arg
      val jobName = args.positional.head
      // Remove the job name from the positional arguments:
      val nonJobNameArgs = args + ("" -> args.positional.tail)
      Job(jobName, nonJobNameArgs)
  }

  // This both updates the jobConf with hadoop arguments
  // and returns all the non-hadoop arguments. Should be called once if
  // you want to process hadoop arguments (like -libjars).
  protected def nonHadoopArgsFrom(args: Array[String]): Array[String] =
    (new GenericOptionsParser(getConf, args)).getRemainingArgs

  // Splits the command line into the execution Mode (e.g. --local/--hdfs)
  // and the remaining job Args.
  def parseModeArgs(args: Array[String]): (Mode, Args) = {
    val a = Args(nonHadoopArgsFrom(args))
    (Mode(a, getConf), a)
  }

  // Parse the hadoop args, and if job has not been set, instantiate the job
  def run(args: Array[String]): Int = {
    val (mode, jobArgs) = parseModeArgs(args)
    // Connect mode with job Args
    run(getJob(Mode.putMode(mode, jobArgs)))
  }

  // Runs a job (or, with --tool.graph, only writes its DOT graphs) and then
  // any jobs it chains via Job.next. Returns 0 on success; throws on failure.
  protected def run(job: Job): Int = {

    val onlyPrintGraph = job.args.boolean("tool.graph")
    if (onlyPrintGraph) {
      // TODO use proper logging
      println("Only printing the job graph, NOT executing. Run without --tool.graph to execute the job")
    }

    /*
     * This is a tail recursive loop that runs all the
     * jobs spawned from this one
     */
    val jobName = job.getClass.getName
    @tailrec
    def start(j: Job, cnt: Int): Unit = {
      val successful = if (onlyPrintGraph) {
        val flow = j.buildFlow
        /*
         * This just writes out the graph representing
         * all the cascading elements that are created for this
         * flow. Use graphviz to render it as a PDF.
         * The job is NOT run in this case.
         */
        val thisDot = jobName + cnt + ".dot"
        println("writing DOT: " + thisDot)

        /* We add descriptions if they exist to the stepName so it appears in the .dot file */
        flow match {
          case hadoopFlow: HadoopFlow =>
            val flowSteps = hadoopFlow.getFlowSteps.asScala
            flowSteps.foreach { step =>
              val baseFlowStep: BaseFlowStep[JobConf] = step.asInstanceOf[BaseFlowStep[JobConf]]
              val descriptions = baseFlowStep.getConfig.get(Config.StepDescriptions, "")
              if (!descriptions.isEmpty) {
                val stepXofYData = """\\(\\d+/\\d+\\)""".r.findFirstIn(baseFlowStep.getName).getOrElse("")
                // Reflection is only temporary. Latest cascading has setName public: https://github.com/cwensel/cascading/commit/487a6e9ef#diff-0feab84bc8832b2a39312dbd208e3e69L175
                // https://github.com/twitter/scalding/issues/1294
                val x = classOf[BaseFlowStep[JobConf]].getDeclaredMethod("setName", classOf[String])
                x.setAccessible(true)
                x.invoke(step, "%s %s".format(stepXofYData, descriptions))
              }
            }
          case _ => // descriptions not yet supported in other modes
        }

        flow.writeDOT(thisDot)

        val thisStepsDot = jobName + cnt + "_steps.dot"
        println("writing Steps DOT: " + thisStepsDot)
        flow.writeStepsDOT(thisStepsDot)
        true
      } else {
        j.validate()
        j.run()
      }
      j.clear()

      //When we get here, the job is finished
      if (successful) {
        // we need to use match not foreach to get tail recursion
        j.next match { // linter:disable:UseOptionForeachNotPatMatch
          case Some(nextj) => start(nextj, cnt + 1)
          case None => ()
        }
      } else {
        throw new RuntimeException(
          "Job failed to run: " + jobName +
            (if (cnt > 0) { " child: " + cnt.toString + ", class: " + j.getClass.getName }
             else { "" })
        )
      }
    }

    //start a counter to see how deep we recurse:
    start(job, 0)
    0
  }
}
object Tool {
  /**
   * Command-line entry point: expands any glob patterns in the arguments
   * (e.g. for -libjars) and delegates to Hadoop's ToolRunner. Any failure is
   * re-thrown wrapped with extra diagnostic info, keeping the original
   * exception as the cause.
   */
  def main(args: Array[String]): Unit =
    try {
      ToolRunner.run(new JobConf, new Tool, ExpandLibJarsGlobs(args))
    } catch {
      case err: Throwable =>
        throw new Throwable(RichXHandler(err), err)
    }
}
| twitter/scalding | scalding-core/src/main/scala/com/twitter/scalding/Tool.scala | Scala | apache-2.0 | 5,674 |
/*
* Copyright (C) 2005, The OpenURP Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openurp.std.transfer.model
import java.time.LocalDate
import org.beangle.data.model.LongId
import org.beangle.data.model.pojo.Remark
import org.openurp.base.model.{Department, User}
import org.openurp.base.edu.model.{Direction, Major}
/** 转专业招收专业
* */
class TransferOption extends LongId with Remark {
/** 转专业招生方案 */
var scheme: TransferScheme = _
/** 院系 */
var depart: Department = _
/** 专业 */
var major: Major = _
/** 方向 */
var direction: Option[Direction] = None
/** 计划人数 */
var planCount: Int = _
/** 报名人数 */
var currentCount: Int = _
/** 负责联络的老师 */
var manager: Option[User] = None
/** 联络方式 */
var contactInfo: Option[String] = None
/** 咨询日期 */
var consultOn: Option[LocalDate] = None
/** 咨询地址 */
var consultAddr: Option[String] = None
/** 考核日期 */
var examOn: Option[LocalDate] = None
/** 考核地址 */
var examAddr: Option[String] = None
/** 面试内容 */
var auditionContent: Option[String] = None
/** 笔试内容 */
var writtenContent: Option[String] = None
/** 面试分数占总分比例 */
var auditionPercent: Int = _
/** 笔试分数占总分比例 */
var writtenPercent: Int = _
}
| openurp/api | std/src/main/scala/org/openurp/std/transfer/model/TransferOption.scala | Scala | lgpl-3.0 | 2,019 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package controllers.businessactivities
import javax.inject.Inject
import cats.data.OptionT
import cats.implicits._
import connectors.DataCacheConnector
import controllers.{AmlsBaseController, CommonPlayDependencies}
import forms.{EmptyForm, Form2, InvalidForm, ValidForm}
import models.businessactivities.{BusinessActivities, TransactionTypes}
import play.api.mvc.MessagesControllerComponents
import utils.AuthAction
import views.html.businessactivities.transaction_types
import scala.concurrent.Future
class TransactionTypesController @Inject()(val authAction: AuthAction,
                                           val ds: CommonPlayDependencies,
                                           val cacheConnector: DataCacheConnector,
                                           val cc: MessagesControllerComponents,
                                           transaction_types: transaction_types) extends AmlsBaseController(ds, cc) {

  /**
   * Renders the transaction-types form, pre-populated from the cached
   * BusinessActivities record. Returns a 500 when no record can be fetched.
   */
  def get(edit: Boolean = false) = authAction.async {
    implicit request => {
      // Pre-populate the form with any previously saved answer; empty form otherwise.
      def form(ba: BusinessActivities) = ba.transactionRecordTypes.fold[Form2[TransactionTypes]](EmptyForm)(Form2(_))

      for {
        ba <- OptionT(cacheConnector.fetch[BusinessActivities](request.credId, BusinessActivities.key))
      } yield Ok(transaction_types(form(ba), edit))
    } getOrElse InternalServerError("Cannot fetch business activities")
  }

  /**
   * Validates the submitted form; on success saves the selected transaction
   * types back into the cache and redirects (summary page when editing,
   * otherwise the next question). Invalid forms are re-rendered with errors.
   */
  def post(edit: Boolean = false) = authAction.async {
    implicit request => {
      lazy val redirect = Redirect(if(edit) {
        routes.SummaryController.get
      } else {
        routes.IdentifySuspiciousActivityController.get()
      })

      Form2[TransactionTypes](request.body) match {
        case ValidForm(_, data) => {
          for {
            bm <- OptionT(cacheConnector.fetch[BusinessActivities](request.credId, BusinessActivities.key))
            _ <- OptionT.liftF(cacheConnector.save[BusinessActivities](request.credId, BusinessActivities.key, bm.transactionRecordTypes(data)))
          } yield redirect
        } getOrElse InternalServerError("Unable to update Business Activities Transaction Types")
        case f: InvalidForm =>
          Future.successful(BadRequest(transaction_types(f, edit)))
      }
    }
  }
}
| hmrc/amls-frontend | app/controllers/businessactivities/TransactionTypesController.scala | Scala | apache-2.0 | 2,834 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.types
import org.json4s.JsonAST.JValue
import org.json4s.JsonDSL._
import org.apache.spark.annotation.DeveloperApi
/**
* ::DeveloperApi::
* The data type for User Defined Types (UDTs).
*
* This interface allows a user to make their own classes more interoperable with SparkSQL;
* e.g., by creating a [[UserDefinedType]] for a class X, it becomes possible to create
* a `DataFrame` which has class X in the schema.
*
* For SparkSQL to recognize UDTs, the UDT must be annotated with
* [[SQLUserDefinedType]].
*
* The conversion via `serialize` occurs when instantiating a `DataFrame` from another RDD.
* The conversion via `deserialize` occurs when reading from a `DataFrame`.
*/
@DeveloperApi
@DeveloperApi
abstract class UserDefinedType[UserType] extends DataType with Serializable {

  /** Underlying storage type for this UDT */
  def sqlType: DataType

  /** Paired Python UDT class, if exists. */
  def pyUDT: String = null

  /**
   * Convert the user type to a SQL datum
   *
   * TODO: Can we make this take obj: UserType?  The issue is in
   * CatalystTypeConverters.convertToCatalyst, where we need to convert Any to UserType.
   */
  def serialize(obj: Any): Any

  /** Convert a SQL datum to the user type */
  def deserialize(datum: Any): UserType

  // JSON representation used in schema serialization: records the UDT class,
  // its Python counterpart, and the underlying SQL type.
  override private[sql] def jsonValue: JValue = {
    ("type" -> "udt") ~
      ("class" -> this.getClass.getName) ~
      ("pyClass" -> pyUDT) ~
      ("sqlType" -> sqlType.jsonValue)
  }

  /**
   * Class object for the UserType
   */
  def userClass: java.lang.Class[UserType]

  /**
   * The default size of a value of the UserDefinedType is 4096 bytes.
   */
  override def defaultSize: Int = 4096

  /**
   * For UDT, asNullable will not change the nullability of its internal sqlType and just returns
   * itself.
   */
  private[spark] override def asNullable: UserDefinedType[UserType] = this
}
| andrewor14/iolap | sql/catalyst/src/main/scala/org/apache/spark/sql/types/UserDefinedType.scala | Scala | apache-2.0 | 2,704 |
package com.typesafe.slick.testkit.util
import com.typesafe.config.Config
import java.io._
import java.net.{URL, URLClassLoader}
import java.sql.{Connection, Driver}
import java.util.Properties
import java.util.concurrent.ExecutionException
import java.util.zip.GZIPInputStream
import scala.collection.mutable
import scala.concurrent.{Await, Future, ExecutionContext}
import slick.basic.{BasicProfile, Capability}
import slick.dbio.{NoStream, DBIOAction, DBIO}
import slick.jdbc.{JdbcProfile, ResultSetAction, JdbcDataSource}
import slick.jdbc.GetResult._
import slick.relational.RelationalProfile
import slick.sql.SqlProfile
import slick.util.AsyncExecutor
object TestDB {
  /** Test-kit-specific capability flags that extend the profile capabilities. */
  object capabilities {
    /** Marks a driver which is specially supported by the test kit for plain SQL queries. */
    val plainSql = new Capability("test.plainSql")
    /** Supports JDBC metadata in general */
    val jdbcMeta = new Capability("test.jdbcMeta")
    /** Supports JDBC metadata getClientInfoProperties method */
    val jdbcMetaGetClientInfoProperties = new Capability("test.jdbcMetaGetClientInfoProperties")
    /** Supports JDBC metadata getFunctions method */
    val jdbcMetaGetFunctions = new Capability("test.jdbcMetaGetFunctions")
    /** Supports JDBC metadata getIndexInfo method */
    val jdbcMetaGetIndexInfo = new Capability("test.jdbcMetaGetIndexInfo")
    /** Supports all tested transaction isolation levels */
    val transactionIsolation = new Capability("test.transactionIsolation")
    /** Supports select for update row locking */
    val selectForUpdateRowLocking = new Capability("test.selectForUpdateRowLocking")

    /** Every test capability known to the test kit. */
    val all = Set(plainSql, jdbcMeta, jdbcMetaGetClientInfoProperties, jdbcMetaGetFunctions, jdbcMetaGetIndexInfo,
      transactionIsolation, selectForUpdateRowLocking)
  }

  /** Copy a file, expanding it if the source name ends with .gz */
  def copy(src: File, dest: File): Unit = {
    dest.createNewFile()
    val out = new FileOutputStream(dest)
    try {
      var in: InputStream = new FileInputStream(src)
      try {
        // Transparently decompress gzipped sources.
        if(src.getName.endsWith(".gz")) in = new GZIPInputStream(in)
        val buf = new Array[Byte](4096)
        var cont = true
        while(cont) {
          val len = in.read(buf)
          if(len < 0) cont = false
          else out.write(buf, 0, len)
        }
      } finally in.close()
    } finally out.close()
  }

  /** Delete files in the testDB directory */
  def deleteDBFiles(prefix: String): Unit = {
    assert(!prefix.isEmpty, "prefix must not be empty")
    // Recursively delete a file or directory; true on full success.
    def deleteRec(f: File): Boolean = {
      if(f.isDirectory()) f.listFiles.forall(deleteRec _) && f.delete()
      else f.delete()
    }
    val dir = new File(TestkitConfig.testDir)
    if(!dir.isDirectory) throw new IOException("Directory "+TestkitConfig.testDir+" not found")
    for(f <- dir.listFiles if f.getName startsWith prefix) {
      val p = TestkitConfig.testDir+"/"+f.getName
      if(deleteRec(f)) println("[Deleted database file "+p+"]")
      else throw new IOException("Couldn't delete database file "+p)
    }
  }

  /** Convert a String map to a Properties object, skipping null keys/values. */
  def mapToProps(m: Map[String, String]) = {
    val p = new Properties
    if(m ne null)
      for((k,v) <- m) if(k.ne(null) && v.ne(null)) p.setProperty(k, v)
    p
  }
}
/**
 * Describes a database against which you can run TestKit tests. It includes
 * features such as reading the configuration file, setting up a DB connection,
 * removing DB files left over by a test run, etc.
 */
trait TestDB {
  /** The Slick profile type used to talk to this database. */
  type Profile <: BasicProfile

  /** The test database name */
  val confName: String

  /** The test configuration */
  lazy val config: Config = TestkitConfig.testConfig(confName)

  /** Check if this test database is enabled */
  def isEnabled = TestkitConfig.testDBs.map(_.contains(confName)).getOrElse(true)

  /** This method is called to clean up before running all tests. */
  def cleanUpBefore(): Unit = {}

  /** This method is called to clean up after running all tests. It
   * defaults to cleanUpBefore(). */
  def cleanUpAfter() = cleanUpBefore()

  /** The profile for the database */
  val profile: Profile

  /** Indicates whether the database persists after closing the last connection */
  def isPersistent = true

  /** This method is called between individual test methods to remove all
   * database artifacts that were created by the test. */
  def dropUserArtifacts(implicit session: profile.Backend#Session): Unit

  /** Create the Database object for this test database configuration */
  def createDB(): profile.Backend#Database

  /** Indicates whether the database's sessions have shared state. When a
   * database is shared but not persistent, Testkit keeps a session open
   * to make it persistent. */
  def isShared = true

  /** The capabilities of the Slick profile, possibly modified for this
   * test configuration. */
  def capabilities: Set[Capability] = profile.capabilities ++ TestDB.capabilities.all

  /** Read an optional string value from the test configuration. */
  def confOptionalString(path: String) = if(config.hasPath(path)) Some(config.getString(path)) else None

  /** Read a string value from the test configuration, or null when absent. */
  def confString(path: String) = confOptionalString(path).getOrElse(null)

  /** Read a list of strings from the test configuration (Nil when absent). */
  def confStrings(path: String) = TestkitConfig.getStrings(config, path).getOrElse(Nil)

  /** The tests to run for this configuration. */
  def testClasses: Seq[Class[_ <: GenericTest[_ >: Null <: TestDB]]] = TestkitConfig.testClasses
}
/** A TestDB for relational profiles, adding table-existence assertions. */
trait RelationalTestDB extends TestDB {
  type Profile <: RelationalProfile

  /** Action that succeeds only when all of the given tables exist. */
  def assertTablesExist(tables: String*): DBIO[Unit]

  /** Action that succeeds only when none of the given tables exist. */
  def assertNotTablesExist(tables: String*): DBIO[Unit]
}

/** A TestDB for SQL-based profiles. */
trait SqlTestDB extends RelationalTestDB { type Profile <: SqlProfile }
/** Base class for JDBC-based test databases, identified by a configuration name. */
abstract class JdbcTestDB(val confName: String) extends SqlTestDB {
  import profile.api.actionBasedSQLInterpolation

  type Profile = JdbcProfile
  lazy val database = profile.backend.Database

  /** Fully qualified class name of the JDBC driver. */
  val jdbcDriver: String

  /** Synchronously list all local table names using the given session. */
  final def getLocalTables(implicit session: profile.Backend#Session) = blockingRunOnSession(ec => localTables(ec))

  /** Synchronously list all local sequence names using the given session. */
  final def getLocalSequences(implicit session: profile.Backend#Session) = blockingRunOnSession(ec => localSequences(ec))

  def canGetLocalTables = true

  /** Names of all TABLE objects reported by JDBC metadata, sorted. */
  def localTables(implicit ec: ExecutionContext): DBIO[Vector[String]] =
    ResultSetAction[(String,String,String, String)](_.conn.getMetaData().getTables("", "", null, null)).map { ts =>
      ts.filter(_._4.toUpperCase == "TABLE").map(_._3).sorted
    }

  /** Names of all SEQUENCE objects reported by JDBC metadata, sorted. */
  def localSequences(implicit ec: ExecutionContext): DBIO[Vector[String]] =
    ResultSetAction[(String,String,String, String)](_.conn.getMetaData().getTables("", "", null, null)).map { ts =>
      ts.filter(_._4.toUpperCase == "SEQUENCE").map(_._3).sorted
    }

  /** Drop all tables and sequences created by a test. */
  def dropUserArtifacts(implicit session: profile.Backend#Session) = blockingRunOnSession { implicit ec =>
    for {
      tables <- localTables
      sequences <- localSequences
      _ <- DBIO.seq((tables.map(t => sqlu"""drop table if exists #${profile.quoteIdentifier(t)} cascade""") ++
        sequences.map(t => sqlu"""drop sequence if exists #${profile.quoteIdentifier(t)} cascade""")): _*)
    } yield ()
  }

  def assertTablesExist(tables: String*) =
    DBIO.seq(tables.map(t => sql"""select 1 from #${profile.quoteIdentifier(t)} where 1 < 0""".as[Int]): _*)

  def assertNotTablesExist(tables: String*) =
    DBIO.seq(tables.map(t => sql"""select 1 from #${profile.quoteIdentifier(t)} where 1 < 0""".as[Int].failed): _*)

  /** Wrap the session's connection in a single-connection Database. The
   * connection's close() is suppressed so the session remains usable. */
  def createSingleSessionDatabase(implicit session: profile.Backend#Session, executor: AsyncExecutor = AsyncExecutor.default()): profile.Backend#Database = {
    val wrappedConn = new DelegateConnection(session.conn) {
      override def close(): Unit = ()
    }
    profile.backend.Database.forSource(new JdbcDataSource {
      def createConnection(): Connection = wrappedConn
      def close(): Unit = ()
      val maxConnections: Option[Int] = Some(1)
    }, executor)
  }

  /** Run a DBIO action synchronously on the given session, using a
   * same-thread ExecutionContext so no other threads are involved. */
  final def blockingRunOnSession[R](f: ExecutionContext => DBIOAction[R, NoStream, Nothing])(implicit session: profile.Backend#Session): R = {
    val ec = new ExecutionContext {
      def execute(runnable: Runnable): Unit = runnable.run()
      def reportFailure(t: Throwable): Unit = throw t
    }
    val db = createSingleSessionDatabase(session, new AsyncExecutor {
      def executionContext: ExecutionContext = ec
      def close(): Unit = ()
    })
    // Everything ran on this thread, so the Future is already completed here.
    db.run(f(ec)).value.get.get
  }

  /** Await a Future, unwrapping ExecutionException to its underlying cause. */
  protected[this] def await[T](f: Future[T]): T =
    try Await.result(f, TestkitConfig.asyncTimeout)
    catch { case ex: ExecutionException => throw ex.getCause }
}
/** A test database that is opened in-process directly through a JDBC URL. */
abstract class InternalJdbcTestDB(confName: String) extends JdbcTestDB(confName) { self =>
  /** The JDBC URL used to open the database. */
  val url: String
  def createDB(): profile.Backend#Database = database.forURL(url, driver = jdbcDriver)
  override def toString = url
}
/** A test database backed by an external server, configured (and optionally
 * created/dropped around the test run) via the test configuration file. */
abstract class ExternalJdbcTestDB(confName: String) extends JdbcTestDB(confName) {
  import profile.api.actionBasedSQLInterpolation

  val jdbcDriver = confString("driver")
  val testDB = confString("testDB")

  /** SQL statements run on the admin connection to create the database. */
  val create = confStrings("create")
  /** SQL statements run on the test connection after creation. */
  val postCreate = confStrings("postCreate")
  /** SQL statements run on the admin connection to drop the database. */
  val drop = confStrings("drop")

  override def toString = confString("testConn.url")

  override def isEnabled = super.isEnabled && config.getBoolean("enabled")

  // Allow the configuration to restrict/override which test classes run.
  override lazy val testClasses: Seq[Class[_ <: GenericTest[_ >: Null <: TestDB]]] =
    TestkitConfig.getStrings(config, "testClasses")
      .map(_.map(n => Class.forName(n).asInstanceOf[Class[_ <: GenericTest[_ >: Null <: TestDB]]]))
      .getOrElse(super.testClasses)

  /** Create a Database for the given config path, loading a custom driver jar if configured. */
  def databaseFor(path: String) = database.forConfig(path, config, loadCustomDriver().getOrElse(null))

  override def createDB() = databaseFor("testConn")

  override def cleanUpBefore(): Unit = {
    // Drop any leftovers, then (re-)create the database on the admin connection.
    if(!drop.isEmpty || !create.isEmpty) {
      println("[Creating test database "+this+"]")
      await(databaseFor("adminConn").run(
        DBIO.seq((drop ++ create).map(s => sqlu"#$s"): _*).withPinnedSession
      ))
    }
    // Post-create statements run against the freshly created test database.
    if(!postCreate.isEmpty) {
      await(createDB().run(
        DBIO.seq(postCreate.map(s => sqlu"#$s"): _*).withPinnedSession
      ))
    }
  }

  override def cleanUpAfter(): Unit = {
    if(!drop.isEmpty) {
      println("[Dropping test database "+this+"]")
      await(databaseFor("adminConn").run(
        DBIO.seq(drop.map(s => sqlu"#$s"): _*).withPinnedSession
      ))
    }
  }

  def loadCustomDriver() = confOptionalString("driverJar").map { jar =>
    ExternalTestDB.getCustomDriver(jar, jdbcDriver)
  }
}
object ExternalTestDB {
  // A cache for custom drivers to avoid excessive reloading and memory leaks
  private[this] val driverCache = new mutable.HashMap[(String, String), Driver]()

  /** Load a JDBC Driver from an external jar, caching one instance per (url, class).
   * NOTE(review): the reflective addURL call assumes the system classloader is a
   * URLClassLoader, which only holds on Java 8 and earlier — confirm supported JDKs. */
  def getCustomDriver(url: String, driverClass: String) = synchronized {
    val sysloader = java.lang.ClassLoader.getSystemClassLoader
    val sysclass = classOf[URLClassLoader]
    // Add the supplied jar onto the system classpath
    // Doing this allows Hikari to initialise the driver, if needed
    try {
      val method = sysclass.getDeclaredMethod("addURL", classOf[URL])
      method.setAccessible(true)
      method.invoke(sysloader, new URL(url))
    } catch {
      case t: Throwable =>
        t.printStackTrace()
        throw new IOException(s"Error, could not add URL $url to system classloader");
    }
    // The driver itself is loaded in a child classloader so it can be GC'd with it.
    driverCache.getOrElseUpdate((url, driverClass),
      new URLClassLoader(Array(new URL(url)), getClass.getClassLoader).loadClass(driverClass).newInstance.asInstanceOf[Driver]
    )
  }
}
| nafg/slick | slick-testkit/src/main/scala/com/typesafe/slick/testkit/util/TestDB.scala | Scala | bsd-2-clause | 11,400 |
package tastytest
/** Exercises method overloading: a nullary `foo` plus two parameterised overloads. */
class Overloader {

  /** Nullary overload: returns a fixed constant. */
  def foo: Int = 13

  /** Single-argument overload: the string concatenated with itself. */
  def foo(a: String): String = s"$a$a"

  /** Two-argument overload: the string followed by the boolean's text form. */
  def foo(a: String, b: Boolean): String = s"$a$b"
}
| scala/scala | test/tasty/run/src-3/tastytest/Overloader.scala | Scala | apache-2.0 | 146 |
package de.tototec.sbuild.addons.java
import java.io.File
import java.net.URLClassLoader
import scala.Array.canBuildFrom
import de.tototec.sbuild.CmdlineMonitor
import de.tototec.sbuild.ExecutionFailedException
import de.tototec.sbuild.Logger
import de.tototec.sbuild.Path
import de.tototec.sbuild.Project
import de.tototec.sbuild.RichFile
import de.tototec.sbuild.addons.support.ForkSupport
/**
 * Java Compiler Addon.
 *
 * Use [[de.tototec.sbuild.addons.java.Javac$#apply]] to configure and execute it in one go.
 *
 */
object Javac {

  /**
   * Creates, configures and executes the Javac Addon.
   *
   * For parameter documentation see the [[Javac]] constructor.
   *
   * @since 0.4.0
   */
  def apply(compilerClasspath: Seq[File] = null,
            classpath: Seq[File] = null,
            sources: Seq[File] = null,
            srcDir: File = null,
            srcDirs: Seq[File] = null,
            destDir: File = null,
            encoding: String = "UTF-8",
            deprecation: java.lang.Boolean = null,
            verbose: java.lang.Boolean = null,
            source: String = null,
            target: String = null,
            debugInfo: String = null,
            fork: Boolean = false,
            additionalJavacArgs: Seq[String] = null)(implicit project: Project) =
    // Forward every setting unchanged to a new Javac instance and run it immediately.
    new Javac(
      compilerClasspath = compilerClasspath,
      classpath = classpath,
      sources = sources,
      srcDir = srcDir,
      srcDirs = srcDirs,
      destDir = destDir,
      encoding = encoding,
      deprecation = deprecation,
      verbose = verbose,
      source = source,
      target = target,
      debugInfo = debugInfo,
      fork = fork,
      additionalJavacArgs = additionalJavacArgs
    ).execute
}
/**
 * Java Compiler addon.
 *
 * The compiler can be configured via constructor parameter or `var`s.
 * To actually start the compilation use [[Javac#execute]].
 *
 * To easily configure and execute the compiler in one go, see [[Javac$#apply]].
 *
 * @since 0.4.0
 *
 * @constructor
 * Creates a new Javac Compiler addon instance.
 * All parameters can be omitted and set later.
 *
 * The source files can be given via multiple parameters, '''sources''', '''srcDir''' and '''srcDirs''', and will be joined.
 *
 * @param compilerClasspath The classpath which contains the compiler and its dependencies. If not given, the environment variable `JAVA_HOME` will be checked, and if it points to a installed JDK, this one will be used.
 * @param classpath The classpath used to load dependencies of the sources.
 * @param srcDir A directory containing Java source files.
 * @param srcDirs Multiple directories containing Java source files.
 * @param sources Source files to be compiled.
 * @param destDir The directory, where the compiled class files will be stored. If the directory does not exist, it will be created.
 * @param encoding The encoding of the source files.
 * @param deprecation Output source locations where deprecated APIs are used.
 * @param verbose Output messages about what the compiler is doing.
 * @param source Provide source compatibility with specified release.
 * @param target Generate class files for the specified VM version.
 * @param debugInfo If specified generate debugging info. Supported values: none, lines, vars, source, all.
 * @param fork Run the compile in a separate process (if `true`).
 * @param additionalJavacArgs Additional arguments directly passed to the Java compiler. Refer to the javac manual or inspect `javac -help` output.
 *
 */
class Javac(
  var compilerClasspath: Seq[File] = null,
  var classpath: Seq[File] = null,
  var sources: Seq[File] = null,
  var srcDir: File = null,
  var srcDirs: Seq[File] = null,
  var destDir: File = null,
  var encoding: String = "UTF-8",
  var deprecation: java.lang.Boolean = null,
  var verbose: java.lang.Boolean = null,
  var source: String = null,
  var target: String = null,
  var debugInfo: String = null,
  var fork: Boolean = false,
  var additionalJavacArgs: Seq[String] = null)(implicit project: Project) {

  private[this] val log = Logger[Javac]

  /** Entry-point class of javac inside the JDK's tools.jar. */
  val javacClassName = "com.sun.tools.javac.Main"

  override def toString(): String = getClass.getSimpleName +
    "(compilerClasspath=" + compilerClasspath +
    ",classpath=" + classpath +
    ",sources=" + sources +
    ",srcDir=" + srcDir +
    ",srdDirs=" + srcDirs +
    ",destDir=" + destDir +
    ",encoding=" + encoding +
    ",deprecation=" + deprecation +
    ",verbose=" + verbose +
    ",source=" + source +
    ",target=" + target +
    ",debugInfo=" + debugInfo +
    ",fork=" + fork +
    ",additionalJavacArgs=" + additionalJavacArgs +
    ")"

  /**
   * Execute the Java compiler.
   */
  def execute {
    log.debug("About to execute " + this)
    var args = Array[String]()

    // Assemble the javac command line from the configured options.
    if (classpath != null) {
      val cPath = ForkSupport.pathAsArg(classpath)
      log.debug("Using classpath: " + cPath)
      args ++= Array("-classpath", cPath)
    }
    if (destDir != null) args ++= Array("-d", destDir.getAbsolutePath)
    if (encoding != null) args ++= Array("-encoding", encoding)
    if (deprecation != null && deprecation.booleanValue) args ++= Array("-deprecation")
    if (verbose != null && verbose.booleanValue) args ++= Array("-verbose")
    if (source != null) args ++= Array("-source", source)
    if (target != null) args ++= Array("-target", target)
    if (debugInfo != null) {
      debugInfo.trim match {
        case "" | "all" => args ++= Array("-g")
        case arg => args ++= Array("-g:" + arg)
      }
    }
    if (additionalJavacArgs != null && !additionalJavacArgs.isEmpty) args ++= additionalJavacArgs

    // Collect source files from the explicit list and all configured source dirs.
    var allSrcDirs = Seq[File]()
    if (srcDir != null) allSrcDirs ++= Seq(srcDir)
    if (srcDirs != null) allSrcDirs ++= srcDirs
    // NOTE(review): if allSrcDirs is empty and `sources` is null this throws a
    // NullPointerException instead of the intended require message — confirm.
    require(!allSrcDirs.isEmpty || !sources.isEmpty, "No source path(s) and no sources set.")

    val sourceFiles: Seq[File] =
      (if (sources == null) Seq() else sources) ++
        allSrcDirs.flatMap { dir =>
          log.debug("Search files in dir: " + dir)
          val files = RichFile.listFilesRecursive(dir, """.*\\.java$""".r)
          log.debug("Found files: " + files.mkString(", "))
          files
        }

    if (!sourceFiles.isEmpty) {
      val absSourceFiles = sourceFiles.map(f => f.getAbsolutePath)
      log.debug("Found source files: " + absSourceFiles.mkString(", "))
      args ++= absSourceFiles
    }

    project.monitor.info(CmdlineMonitor.Default, s"Compiling ${sourceFiles.size} Java source files to ${destDir}")

    if (destDir != null && !sourceFiles.isEmpty) destDir.mkdirs

    // Fall back to JAVA_HOME's tools.jar when no compiler classpath was given.
    if (compilerClasspath == null || compilerClasspath == Seq()) {
      val javaHome = System.getenv("JAVA_HOME")
      if (javaHome != null) compilerClasspath = Seq(Path(javaHome, "lib", "tools.jar"))
      else compilerClasspath = Seq()
    }

    val result = try {
      if (fork) compileExternal(args)
      else compileInternal(args)
    } catch {
      case e: ClassNotFoundException if e.getMessage == javacClassName =>
        // The compiler class was not found: give an OS-specific JAVA_HOME hint.
        val os = System.getProperty("os.name").toLowerCase
        val extraMsg =
          if (os.indexOf("win") >= 0) "On Windows systems, the content of the JAVA_HOME environment variable lokks typically like this: JAVA_HOME=C:\\\\Program Files\\\\Java\\\\jdk1.7.0_21"
          else if (os.indexOf("mac") >= 0)
            " On Mac OSX, the content of the JAVA_HOME environment variable looks typically like this: JAVA_HOME=/Library/Java/JavaVirtualMachines/jdk1.7.0_21.jdk/Contents/Home." +
              " An common trick is, to export it with \\"export JAVA_HOME=$(/usr/libexec/java_home)\\"."
          else ""
        val ex = new ExecutionFailedException("Could not find the compiler \\"" + javacClassName + "\\". Either specify a valid compilerClasspath or make sure, the JAVA_HOME environment varibale is set properly." + extraMsg)
        ex.buildScript = Some(project.projectFile)
        throw ex
    }

    if (result != 0) {
      val ex = new ExecutionFailedException("Compile Errors. See compiler output.")
      ex.buildScript = Some(project.projectFile)
      throw ex
    }
  }

  /** Run javac in a forked JVM. */
  protected def compileExternal(args: Array[String]): Int =
    ForkSupport.runJavaAndWait(compilerClasspath, Array(javacClassName) ++ args)

  /** Run javac in-process via reflection, loading it from the configured
   * compiler classpath when one is set. */
  protected def compileInternal(args: Array[String]): Int = {
    val compilerClassLoader = compilerClasspath match {
      case Seq() =>
        classOf[Javac].getClassLoader
      case cp =>
        val cl = new URLClassLoader(cp.map { f => f.toURI().toURL() }.toArray, classOf[Javac].getClassLoader)
        log.debug("Using additional compiler classpath: " + cl.getURLs().mkString(", "))
        cl
    }
    // Build the String[] argument reflectively for the invoke call below.
    val arrayInstance = java.lang.reflect.Array.newInstance(classOf[String], args.size)
    0.to(args.size - 1).foreach { i =>
      java.lang.reflect.Array.set(arrayInstance, i, args(i))
    }
    val arrayClass = arrayInstance.getClass
    val compilerClass = compilerClassLoader.loadClass(javacClassName)
    val compiler = compilerClass.newInstance
    val compileMethod = compilerClass.getMethod("compile", Array(arrayClass): _*)
    compileMethod.invoke(compiler, arrayInstance).asInstanceOf[java.lang.Integer].intValue
  }
}
| SBuild-org/sbuild | de.tototec.sbuild.addons/src/main/scala/de/tototec/sbuild/addons/java/Javac.scala | Scala | apache-2.0 | 9,223 |
package org.jetbrains.plugins.scala
package lang
package parser
package parsing
package top.params
import _root_.org.jetbrains.plugins.scala.lang.parser.parsing.expressions.{Annotation, Expr}
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.parser.parsing.base.Modifier
import org.jetbrains.plugins.scala.lang.parser.parsing.builder.ScalaPsiBuilder
import org.jetbrains.plugins.scala.lang.parser.parsing.types.ParamType
/**
* @author Alexander Podkhalyuzin
* Date: 08.02.2008
*/
/*
* ClassParam ::= {Annotation} [{Modifier} ('val' | 'var')] id ':' ParamType ['=' Expr]
*/
/** Default ClassParam parser, wired to the concrete sub-parsers. */
object ClassParam extends ClassParam {
  override protected def expr = Expr
  override protected def annotation = Annotation
  override protected def paramType = ParamType
}
trait ClassParam {
  protected def expr: Expr
  protected def annotation: Annotation
  protected def paramType: ParamType

  /** Parses one class parameter. Returns false (after rolling back) when no
   * identifier is present; otherwise true, reporting errors to the builder. */
  def parse(builder: ScalaPsiBuilder): Boolean = {
    val classParamMarker = builder.mark
    // Leading annotations
    val annotationsMarker = builder.mark
    while (annotation.parse(builder)) {}
    annotationsMarker.done(ScalaElementTypes.ANNOTATIONS)
    //parse modifiers
    val modifierMarker = builder.mark
    var isModifier = false
    while (Modifier.parse(builder)) {
      isModifier = true
    }
    modifierMarker.done(ScalaElementTypes.MODIFIERS)
    //Look for var or val
    builder.getTokenType match {
      case ScalaTokenTypes.kVAR |
           ScalaTokenTypes.kVAL =>
        builder.advanceLexer() //Let's ate this!
      case _ =>
        // Modifiers are only legal before 'val'/'var', so report an error here.
        if (isModifier) {
          builder error ScalaBundle.message("val.var.expected")
        }
    }
    //Look for identifier
    builder.getTokenType match {
      case ScalaTokenTypes.tIDENTIFIER =>
        builder.advanceLexer() //Ate identifier
      case _ =>
        // No identifier: this is not a class parameter; undo everything.
        classParamMarker.rollbackTo()
        return false
    }
    //Try to parse tale
    builder.getTokenType match {
      case ScalaTokenTypes.tCOLON =>
        builder.advanceLexer() //Ate ':'
        if (!paramType.parse(builder)) {
          builder.error(ScalaBundle.message("parameter.type.expected"))
        }
      case _ =>
        builder.error(ScalaBundle.message("colon.expected"))
    }
    //default param
    builder.getTokenType match {
      case ScalaTokenTypes.tASSIGN =>
        builder.advanceLexer() //Ate '='
        if (!expr.parse(builder)) {
          builder error ScalaBundle.message("wrong.expression")
        }
      case _ =>
    }
    classParamMarker.done(ScalaElementTypes.CLASS_PARAM)
    true
  }
} | ilinum/intellij-scala | src/org/jetbrains/plugins/scala/lang/parser/parsing/top/params/ClassParam.scala | Scala | apache-2.0 | 2,573 |
package gov.uk.dvla.vehicles.dispose.runner
import cucumber.api.CucumberOptions
import cucumber.api.junit.Cucumber
import org.junit.runner.RunWith
/** JUnit entry point that runs the `ChangeTraderDetails` Cucumber feature
 * (scenarios tagged `@working`) with the project's step definitions. */
@RunWith(classOf[Cucumber])
@CucumberOptions(
  features = Array("acceptance-tests/src/test/resources/ChangeTraderDetails.feature"),
  glue = Array("gov.uk.dvla.vehicles.dispose.stepdefs"),
  tags = Array("@working")
)
class ChangeTraderDetails
| dvla/vehicles-online | acceptance-tests/src/test/scala/gov/uk/dvla/vehicles/dispose/runner/ChangeTraderDetails.scala | Scala | mit | 399 |
package io.citrine.lolo.trees.classification
import java.io.{File, FileOutputStream, ObjectOutputStream}
import io.citrine.lolo.TestUtils
import io.citrine.lolo.stats.functions.Friedman
import org.junit.Test
import org.scalatest.Assertions._
import scala.util.Random
/**
 * Unit tests for [[ClassificationTree]] training and prediction.
 *
 * Created by maxhutch on 12/2/16.
 */
@Test
class ClassificationTreeTest {

  /**
   * Trivial models with no splits should have finite feature importance.
   */
  @Test
  def testFeatureImportanceNaN(): Unit = {
    // 100 identical rows: the tree cannot split, importances must still be finite.
    val X = Vector.fill(100) {
      val input = Vector.fill(10)(1.0)
      (input, 2.0)
    }
    val DTLearner = new ClassificationTreeLearner()
    val DTMeta = DTLearner.train(X)
    val DT = DTMeta.getModel()
    assert(DTMeta.getFeatureImportance().get.forall(v => !v.isNaN))
  }

  /** Train on a two-class binned Friedman-Silverman problem; the tree should
   * memorize the training set and rank the first features as most important. */
  @Test
  def testBinary(): Unit = {
    val rnd = new Random(seed = 0L)
    // Guards against a change in scala.util.Random's seeding behaviour.
    assert(rnd.nextLong() == -4962768465676381896L)
    val trainingData = TestUtils.binTrainingData(
      TestUtils.generateTrainingData(2048, 12, noise = 0.1, function = Friedman.friedmanSilverman),
      responseBins = Some(2)
    )
    val DTLearner = ClassificationTreeLearner()
    val DTMeta = DTLearner.train(trainingData)
    val DT = DTMeta.getModel()

    /* We should be able to memorize the inputs */
    val output = DT.transform(trainingData.map(_._1))
    trainingData.zip(output.getExpected()).foreach {
      case ((x, a), p) =>
        assert(a == p, s"${a} != ${p} for ${x}")
    }
    assert(output.getGradient().isEmpty)
    output.getDepth().foreach(d => assert(d > 0))

    /* The first features should be the most important */
    val importances = DTMeta.getFeatureImportance().get
    assert(importances.slice(0, 5).min > importances.slice(5, importances.size).max)
  }

  /**
   * Test a larger case and time it as a benchmark guideline
   */
  @Test
  def longerTest(): Unit = {
    val rnd = new Random(seed = 0L)
    // Guards against a change in scala.util.Random's seeding behaviour.
    assert(rnd.nextLong() == -4962768465676381896L)
    val trainingData = TestUtils.binTrainingData(
      TestUtils.generateTrainingData(1024, 12, noise = 0.1, function = Friedman.friedmanSilverman),
      responseBins = Some(16)
    )
    val DTLearner = ClassificationTreeLearner()
    val N = 100
    val start = System.nanoTime()
    val DTMeta = DTLearner.train(trainingData)
    val DT = DTMeta.getModel()
    (0 until N).map(i => DTLearner.train(trainingData))
    val duration = (System.nanoTime() - start) / 1.0e9

    println(s"Training large case took ${duration / N} s")

    /* We should be able to memorize the inputs */
    val output = DT.transform(trainingData.map(_._1))
    trainingData.zip(output.getExpected()).foreach {
      case ((x, a), p) =>
        assert(a == p, s"${a} != ${p} for ${x}")
    }
    assert(output.getGradient().isEmpty)
    output.getDepth().foreach(d => assert(d > 4 && d < 17, s"Depth is ${d}"))

    /* The first feature should be the most important */
    val importances = DTMeta.getFeatureImportance().get
    assert(importances.slice(0, 5).min > importances.slice(5, importances.size).max)
  }

  /**
   * Test a larger case and time it as a benchmark guideline
   */
  @Test
  def testCategorical(): Unit = {
    // Bin the first input feature into 8 categories to exercise categorical splits.
    val trainingData = TestUtils.binTrainingData(
      TestUtils.generateTrainingData(1024, 12, noise = 0.1, function = Friedman.friedmanSilverman),
      inputBins = Seq((0, 8)),
      responseBins = Some(16)
    )
    val DTLearner = ClassificationTreeLearner()
    val N = 100
    val start = System.nanoTime()
    val DT = DTLearner.train(trainingData).getModel()
    (0 until N).map(i => DTLearner.train(trainingData))
    val duration = (System.nanoTime() - start) / 1.0e9

    println(s"Training large case took ${duration / N} s")

    /* We should be able to memorize the inputs */
    val output = DT.transform(trainingData.map(_._1))
    trainingData.zip(output.getExpected()).foreach {
      case ((x, a), p) =>
        assert(a == p)
    }
    assert(output.getGradient().isEmpty)
    output.getDepth().foreach(d => assert(d > 3 && d < 18, s"Depth is ${d}"))
  }
}
/** Companion object exposing a standalone driver for the test suite. */
object ClassificationTreeTest {

  /**
   * Entry point: runs each test case on a fresh suite instance, mirroring
   * how the JUnit runner isolates test methods.
   *
   * @param argv command-line arguments (unused)
   */
  def main(argv: Array[String]): Unit = {
    val cases: Seq[ClassificationTreeTest => Unit] =
      Seq(_.testBinary(), _.longerTest(), _.testCategorical())
    cases.foreach(testCase => testCase(new ClassificationTreeTest()))
  }
}
| CitrineInformatics/lolo | src/test/scala/io/citrine/lolo/trees/classification/ClassificationTreeTest.scala | Scala | apache-2.0 | 4,354 |
package com.monovore.coast.wire
import java.io._
import scala.language.implicitConversions
/**
 * A round-trip codec between values of type `A` and byte arrays.
 */
trait Serializer[A] extends Serializable {
  def toArray(value: A): Array[Byte]
  def fromArray(bytes: Array[Byte]): A
}

object Serializer {

  /** Decode a value from `input` using the implicitly available serializer. */
  def fromArray[A](input: Array[Byte])(implicit reader: Serializer[A]): A = reader.fromArray(input)

  /** Encode `value` using the implicitly available serializer. */
  def toArray[A](value: A)(implicit writer: Serializer[A]): Array[Byte] = writer.toArray(value)

  /** A serializer backed by standard Java object serialization. */
  def fromJavaSerialization[A] = new Serializer[A] {

    override def toArray(value: A): Array[Byte] = {
      val buffer = new ByteArrayOutputStream()
      val objectOut = new ObjectOutputStream(buffer)
      try objectOut.writeObject(value)
      finally objectOut.close()
      buffer.toByteArray
    }

    override def fromArray(bytes: Array[Byte]): A = {
      val objectIn = new ObjectInputStream(new ByteArrayInputStream(bytes))
      try objectIn.readObject().asInstanceOf[A]
      finally objectIn.close()
    }
  }
}
| bkirwi/coast | core/src/main/scala/com/monovore/coast/wire/Serializer.scala | Scala | apache-2.0 | 997 |
package org.jetbrains.plugins.scala.lang.refactoring.introduceVariable
import com.intellij.openapi.util.TextRange
import com.intellij.psi.PsiFile
import com.intellij.psi.util.PsiTreeUtil
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScTypeAlias
/**
* Created by Kate Ustyuzhanina
* on 9/7/15
*/
/**
 * Mutable holder for the state of the "introduce type alias" refactoring:
 * the scope being operated on, the original type element's range, the set
 * of candidate scopes, and the location of the created alias.
 * `null` marks "not yet set"; [[clearData]] resets everything.
 */
class IntroduceTypeAliasData {
  var currentScope: ScopeItem = null
  var initialTypeElement: TextRange = null
  var possibleScopes: Array[ScopeItem] = null
  var typeAliasInfo: (PsiFile, TextRange) = null
  var isCallModalDialogInProgress: Boolean = false

  /** Records the file and text range of the created alias; null input is ignored. */
  def setTypeAlias(inTypeAlias: ScTypeAlias): Unit =
    if (inTypeAlias != null) {
      typeAliasInfo = (inTypeAlias.getContainingFile, inTypeAlias.getTextRange)
    }

  /** Resets every field back to its initial (unset) state. */
  def clearData(): Unit = {
    currentScope = null
    initialTypeElement = null
    possibleScopes = null
    typeAliasInfo = null
    isCallModalDialogInProgress = false
  }

  /** True when any piece of refactoring state has been recorded. */
  def isData: Boolean =
    currentScope != null || initialTypeElement != null ||
      possibleScopes != null || typeAliasInfo != null

  /** Remembers the scope currently being operated on. */
  def addScopeElement(item: ScopeItem): Unit = currentScope = item

  /** Stores the original type element's range; first writer wins. (Name typo kept: public API.) */
  def setInintialInfo(textRange: TextRange): Unit =
    if (initialTypeElement == null) {
      initialTypeElement = textRange
    }

  /** Candidate scopes the alias may be introduced into. */
  def setPossibleScopes(inPossibleScopes: Array[ScopeItem]): Unit =
    possibleScopes = inPossibleScopes

  /** Re-resolves the created alias from its recorded file/offset, or null if it is gone. */
  def getNamedElement: ScTypeAlias =
    PsiTreeUtil.findElementOfClassAtOffset(typeAliasInfo._1,
      typeAliasInfo._2.getStartOffset, classOf[ScTypeAlias], false) match {
      case typeAlias: ScTypeAlias => typeAlias
      case _ => null
    }
}
| ilinum/intellij-scala | src/org/jetbrains/plugins/scala/lang/refactoring/introduceVariable/IntroduceTypeAliasData.scala | Scala | apache-2.0 | 1,685 |
package beam.utils.scenario
/**
 * Source of the raw entities needed to assemble a simulation scenario:
 * persons, their plan elements, households and (optionally) vehicles.
 */
trait ScenarioSource {
  /** All persons in the scenario. */
  def getPersons: Iterable[PersonInfo]
  /** All plan elements for the scenario's persons. */
  def getPlans: Iterable[PlanElement]
  // NOTE(review): empty parens here but not on the other getters — inconsistent,
  // kept as-is for source compatibility with existing implementations.
  def getHousehold(): Iterable[HouseholdInfo]
  /** Vehicles; defaults to empty so legacy sources need not implement it. */
  def getVehicles: Iterable[VehicleInfo] = Iterable()
}
| colinsheppard/beam | src/main/scala/beam/utils/scenario/ScenarioSource.scala | Scala | gpl-3.0 | 231 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.classification
import org.apache.spark.SparkFunSuite
import org.apache.spark.ml.classification.LogisticRegressionSuite._
import org.apache.spark.ml.linalg.{Vector, Vectors}
import org.apache.spark.ml.util.DefaultReadWriteTest
import org.apache.spark.ml.util.MLTestingUtils
import org.apache.spark.ml.util.TestingUtils._
import org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS
import org.apache.spark.mllib.evaluation.MulticlassMetrics
import org.apache.spark.mllib.linalg.{Vectors => OldVectors}
import org.apache.spark.mllib.regression.{LabeledPoint => OldLabeledPoint}
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.sql.{Dataset, Row}
class MultilayerPerceptronClassifierSuite
  extends SparkFunSuite with MLlibTestSparkContext with DefaultReadWriteTest {

  import testImplicits._

  // Four-point XOR dataset shared across tests; populated in beforeAll once
  // the SparkSession is available.
  @transient var dataset: Dataset[_] = _

  override def beforeAll(): Unit = {
    super.beforeAll()
    dataset = Seq(
      (Vectors.dense(0.0, 0.0), 0.0),
      (Vectors.dense(0.0, 1.0), 1.0),
      (Vectors.dense(1.0, 0.0), 1.0),
      (Vectors.dense(1.0, 1.0), 0.0)
    ).toDF("features", "label")
  }

  test("Input Validation") {
    val mlpc = new MultilayerPerceptronClassifier()
    // A layer spec needs at least two entries (input + output), all positive.
    intercept[IllegalArgumentException] {
      mlpc.setLayers(Array.empty[Int])
    }
    intercept[IllegalArgumentException] {
      mlpc.setLayers(Array[Int](1))
    }
    intercept[IllegalArgumentException] {
      mlpc.setLayers(Array[Int](0, 1))
    }
    intercept[IllegalArgumentException] {
      mlpc.setLayers(Array[Int](1, 0))
    }
    mlpc.setLayers(Array[Int](1, 1)) // smallest accepted topology
  }

  test("XOR function learning as binary classification problem with two outputs.") {
    val layers = Array[Int](2, 5, 2)
    val trainer = new MultilayerPerceptronClassifier()
      .setLayers(layers)
      .setBlockSize(1)
      .setSeed(123L)
      .setMaxIter(100)
      .setSolver("l-bfgs")
    val model = trainer.fit(dataset)
    val result = model.transform(dataset)
    val predictionAndLabels = result.select("prediction", "label").collect()
    // XOR is tiny, so the trained net is expected to reproduce every label exactly.
    predictionAndLabels.foreach { case Row(p: Double, l: Double) =>
      assert(p == l)
    }
  }

  test("Test setWeights by training restart") {
    val dataFrame = Seq(
      (Vectors.dense(0.0, 0.0), 0.0),
      (Vectors.dense(0.0, 1.0), 1.0),
      (Vectors.dense(1.0, 0.0), 1.0),
      (Vectors.dense(1.0, 1.0), 0.0)
    ).toDF("features", "label")
    val layers = Array[Int](2, 5, 2)
    val trainer = new MultilayerPerceptronClassifier()
      .setLayers(layers)
      .setBlockSize(1)
      .setSeed(12L)
      .setMaxIter(1)
      .setTol(1e-6)
    // Training twice from the same explicit initial weights with the same
    // step count must be deterministic.
    val initialWeights = trainer.fit(dataFrame).weights
    trainer.setInitialWeights(initialWeights.copy)
    val weights1 = trainer.fit(dataFrame).weights
    trainer.setInitialWeights(initialWeights.copy)
    val weights2 = trainer.fit(dataFrame).weights
    assert(weights1 ~== weights2 absTol 10e-5,
      "Training should produce the same weights given equal initial weights and number of steps")
  }

  test("3 class classification with 2 hidden layers") {
    val nPoints = 1000

    // The following coefficients are taken from OneVsRestSuite.scala
    // they represent 3-class iris dataset
    val coefficients = Array(
      -0.57997, 0.912083, -0.371077, -0.819866, 2.688191,
      -0.16624, -0.84355, -0.048509, -0.301789, 4.170682)

    val xMean = Array(5.843, 3.057, 3.758, 1.199)
    val xVariance = Array(0.6856, 0.1899, 3.116, 0.581)
    // the input seed is somewhat magic, to make this test pass
    val data = generateMultinomialLogisticInput(
      coefficients, xMean, xVariance, true, nPoints, 1).toDS()
    val dataFrame = data.toDF("label", "features")

    val numClasses = 3
    val numIterations = 100
    val layers = Array[Int](4, 5, 4, numClasses)
    val trainer = new MultilayerPerceptronClassifier()
      .setLayers(layers)
      .setBlockSize(1)
      .setSeed(11L) // currently this seed is ignored
      .setMaxIter(numIterations)
    val model = trainer.fit(dataFrame)
    val numFeatures = dataFrame.select("features").first().getAs[Vector](0).size
    assert(model.numFeatures === numFeatures)
    val mlpPredictionAndLabels = model.transform(dataFrame).select("prediction", "label").rdd.map {
      case Row(p: Double, l: Double) => (p, l)
    }
    // train multinomial logistic regression as a reference classifier
    val lr = new LogisticRegressionWithLBFGS()
      .setIntercept(true)
      .setNumClasses(numClasses)
    lr.optimizer.setRegParam(0.0)
      .setNumIterations(numIterations)
    val lrModel = lr.run(data.rdd.map(OldLabeledPoint.fromML))
    val lrPredictionAndLabels =
      lrModel.predict(data.rdd.map(p => OldVectors.fromML(p.features))).zip(data.rdd.map(_.label))
    // MLP's predictions should not differ a lot from LR's.
    val lrMetrics = new MulticlassMetrics(lrPredictionAndLabels)
    val mlpMetrics = new MulticlassMetrics(mlpPredictionAndLabels)
    assert(mlpMetrics.confusionMatrix.asML ~== lrMetrics.confusionMatrix.asML absTol 100)
  }

  test("read/write: MultilayerPerceptronClassifier") {
    // Estimator params must survive an ML persistence round-trip.
    val mlp = new MultilayerPerceptronClassifier()
      .setLayers(Array(2, 3, 2))
      .setMaxIter(5)
      .setBlockSize(2)
      .setSeed(42)
      .setTol(0.1)
      .setFeaturesCol("myFeatures")
      .setLabelCol("myLabel")
      .setPredictionCol("myPrediction")
    testDefaultReadWrite(mlp, testParams = true)
  }

  test("read/write: MultilayerPerceptronClassificationModel") {
    // A fitted model must round-trip with identical topology and weights.
    val mlp = new MultilayerPerceptronClassifier().setLayers(Array(2, 3, 2)).setMaxIter(5)
    val mlpModel = mlp.fit(dataset)
    val newMlpModel = testDefaultReadWrite(mlpModel, testParams = true)
    assert(newMlpModel.layers === mlpModel.layers)
    assert(newMlpModel.weights === mlpModel.weights)
  }

  test("should support all NumericType labels and not support other types") {
    val layers = Array(3, 2)
    val mpc = new MultilayerPerceptronClassifier().setLayers(layers).setMaxIter(1)
    MLTestingUtils.checkNumericTypes[
        MultilayerPerceptronClassificationModel, MultilayerPerceptronClassifier](
      mpc, spark) { (expected, actual) =>
      assert(expected.layers === actual.layers)
      assert(expected.weights === actual.weights)
    }
  }
}
| Panos-Bletsos/spark-cost-model-optimizer | mllib/src/test/scala/org/apache/spark/ml/classification/MultilayerPerceptronClassifierSuite.scala | Scala | apache-2.0 | 7,071 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package org.apache.toree.kernel.protocol.v5.handler
import akka.actor.{ActorRef, ActorSelection, ActorSystem, Props}
import akka.testkit.{TestProbe, ImplicitSender, TestKit}
import org.apache.toree.kernel.protocol.v5.content.KernelInfoReply
import org.apache.toree.kernel.protocol.v5.kernel.ActorLoader
import org.apache.toree.kernel.protocol.v5.{SystemActorType, Header, KernelMessage}
import org.mockito.AdditionalMatchers.{not => mockNot}
import org.mockito.Matchers.{eq => mockEq}
import com.typesafe.config.ConfigFactory
import org.mockito.Mockito._
import org.scalatest.mock.MockitoSugar
import org.scalatest.{FunSpecLike, Matchers}
import play.api.libs.json.Json
import scala.concurrent.duration._
object KernelInfoRequestHandlerSpec {
  // Config for the test ActorSystem: raise the log level so test output stays quiet.
  val config = """
    akka {
      loglevel = "WARNING"
    }"""
}
class KernelInfoRequestHandlerSpec extends TestKit(
  ActorSystem("KernelInfoRequestHandlerSpec",
    ConfigFactory.parseString(KernelInfoRequestHandlerSpec.config))
) with ImplicitSender with FunSpecLike with Matchers with MockitoSugar {
  val actorLoader: ActorLoader = mock[ActorLoader]
  val actor = system.actorOf(Props(classOf[KernelInfoRequestHandler], actorLoader))

  // Route lookups of the KernelMessageRelay to our probe; every other actor
  // type resolves to an empty (dead-letter) selection.
  val relayProbe : TestProbe = TestProbe()
  val relaySelection : ActorSelection =
    system.actorSelection(relayProbe.ref.path)
  when(actorLoader.load(SystemActorType.KernelMessageRelay))
    .thenReturn(relaySelection)
  when(actorLoader.load(mockNot(mockEq(SystemActorType.KernelMessageRelay))))
    .thenReturn(system.actorSelection(""))

  // Minimal kernel message; header contents are irrelevant to this handler.
  val header = Header("","","","","")
  val kernelMessage = new KernelMessage(
    Seq[String](), "test message", header, header, Map[String, String](), "{}"
  )

  describe("Kernel Info Request Handler") {
    it("should return a KernelMessage containing kernel info response") {
      actor ! kernelMessage
      // The handler replies via the relay, so the probe receives the reply.
      val reply = relayProbe.receiveOne(1.seconds).asInstanceOf[KernelMessage]
      val kernelInfo = Json.parse(reply.contentString).as[KernelInfoReply]
      kernelInfo.implementation should be ("spark")
    }
  }
}
| asorianostratio/incubator-toree | kernel/src/test/scala/org/apache/toree/kernel/protocol/v5/handler/KernelInfoRequestHandlerSpec.scala | Scala | apache-2.0 | 2,877 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.tools.testkit
import java.lang.management.ManagementFactory
import org.junit.Assert.{assertEquals, assertTrue, fail}
import scala.annotation.{ nowarn, tailrec }
import scala.reflect.{ClassTag, classTag}
object AllocationTest {
  // HotSpot-specific MXBean exposing per-thread allocated-byte counters;
  // the whole measurement mechanism requires it to be present and enabled.
  val threadMXBean = ManagementFactory.getThreadMXBean.asInstanceOf[com.sun.management.ThreadMXBean]
  assertTrue(threadMXBean.isThreadAllocatedMemorySupported)
  threadMXBean.setThreadAllocatedMemoryEnabled(true)

  @nowarn("cat=lint-nullary-unit")
  private object coster extends AllocationTest {
    // Probes used to measure the fixed overhead of producing (and boxing)
    // each primitive result type; these baselines are subtracted from real
    // measurements below.
    def byte = 99.toByte
    def short = 9999.toShort
    def int = 100000000
    def long = 100000000000000L
    def boolean = true
    def char = 's'
    def float = 123456F
    def double = 123456D
    def unit = ()
    def sizeOf[T <: AnyRef](fn: => T): T = fn
  }

  // Baseline allocation cost of the tracking machinery per result type,
  // computed lazily (and therefore once) on first use.
  lazy val costObject = costOf(coster, "Object")
  lazy val costByte = costOf(coster.byte, "Byte")
  lazy val costShort = costOf(coster.short, "Short")
  lazy val costInt = costOf(coster.int, "Int")
  lazy val costLong = costOf(coster.long, "Long")
  lazy val costBoolean = costOf(coster.boolean, "Boolean")
  lazy val costChar = costOf(coster.char, "Char")
  lazy val costFloat = costOf(coster.float, "Float")
  lazy val costDouble = costOf(coster.double, "Double")
  lazy val costUnit = costOf(coster.unit, "Unit")

  /** Measures the allocated size of the object produced by `fn` (minimum over runs, minus the Object baseline). */
  def sizeOf[T <: AnyRef](fn: => T, msg: String, ignoreEqualCheck: Boolean = false): Long = {
    val size = coster.calcAllocationInfo(coster.sizeOf(fn), costObject, msg, ignoreEqualCheck).min
    println(s"size of $msg = $size")
    size
  }

  /** Minimum allocation observed when evaluating `fn`, used as a per-type baseline. */
  private def costOf[T](fn: => T, tpe: String): Long = {
    val cost = coster.calcAllocationInfo(fn, 0, "", false).min
    println(s"cost of tracking allocations - cost of $tpe = $cost")
    cost
  }
}
trait AllocationTest {
  import AllocationTest._

  /** Asserts that `a == b` evaluates to `expected` AND that evaluating it allocates no memory. */
  def nonAllocatingEqual(expected: Boolean, a: AnyRef, b: AnyRef): Unit = {
    assertEquals(expected, nonAllocating(Boolean.box(a == b)))
  }

  /** Asserts that the execution of `fn` does not allocate any memory. */
  def nonAllocating[T: ClassTag](fn: => T, text: String = "", ignoreEqualCheck: Boolean = false)(implicit execution: AllocationExecution = AllocationExecution()): T = {
    onlyAllocates(0, text, ignoreEqualCheck)(fn)
  }

  /**
   * Renders the per-iteration allocation counts as run-length-encoded lines
   * (" allocation N (k times)") for failure messages.
   */
  private def showAllocations(allocations: List[Long]): String = allocations match {
    case Nil => ""
    case a :: tail =>
      val sb = new StringBuilder
      def append(a: Long, count: Int) = sb.append(s" allocation $a ($count times)\n")
      @tailrec def loop(allocations: List[Long], last: Long, count: Int): String = allocations match {
        case Nil => append(last, count).result()
        // Fix: flush the finished run with *its* value (`last`), not the value
        // that starts the next run (`a`) — the original mislabeled every run
        // except the final one.
        case a :: tail if a != last => append(last, count); loop(tail, a, 1)
        case a :: tail => loop(tail, a, count + 1)
      }
      loop(tail, a, 1)
  }

  /** Asserts that the execution of `fn` allocates `size` bytes or less. */
  def onlyAllocates[T: ClassTag](size: Long, text: String = "", ignoreEqualCheck: Boolean = false)(fn: => T)(implicit execution: AllocationExecution = AllocationExecution()): T = {
    val result = allocationInfo(fn, text, ignoreEqualCheck)

    if (result.min > size) failTest(size, text, result)
    result.result
  }

  /** Asserts that the execution of `fn` allocates exactly `size` bytes. */
  def exactAllocates[T: ClassTag](size: Long, text: String = "", ignoreEqualCheck: Boolean = false)(fn: => T)(implicit execution: AllocationExecution = AllocationExecution()): T = {
    val result = allocationInfo(fn, text, ignoreEqualCheck)

    if (result.min != size) failTest(size, text, result)
    result.result
  }

  /** Fails the test with the measured minimum, the allowed size and the full allocation breakdown. */
  private def failTest[T](size: Long, text: String, result: AllocationInfo[T]) = {
    val extraText = if (text.isEmpty) "" else s" -- $text"
    def show(x: T) = if (x == null) "null" else s"$x (${x.getClass})"
    fail(s"""allocating min = ${result.min} allowed = $size$extraText
            | result = ${show(result.result)}
            |${showAllocations(result.allocations.toList)}""".stripMargin)
  }

  /** Measures `fn`, subtracting the boxing/tracking baseline for the result type `T`. */
  def allocationInfo[T: ClassTag](fn: => T, text: String = "", ignoreEqualCheck: Boolean = false)(implicit execution: AllocationExecution = AllocationExecution()): AllocationInfo[T] = {
    val cost = classTag[T].runtimeClass match {
      case cls if cls == classOf[Byte] => costByte
      case cls if cls == classOf[Short] => costShort
      case cls if cls == classOf[Int] => costInt
      case cls if cls == classOf[Long] => costLong
      case cls if cls == classOf[Boolean] => costBoolean
      case cls if cls == classOf[Char] => costChar
      case cls if cls == classOf[Float] => costFloat
      case cls if cls == classOf[Double] => costDouble
      case cls if cls == classOf[Unit] => costUnit
      case cls if cls.isPrimitive => sys.error(s"Unexpected primitive $cls")
      case _ => costObject
    }
    calcAllocationInfo(fn, cost, text, ignoreEqualCheck)
  }

  /** Calculates memory allocation exempting `cost` expected bytes (e.g. java.lang.Object overhead) */
  private[AllocationTest] def calcAllocationInfo[T](fn: => T, cost: Long, text: String, ignoreEqualCheck: Boolean)(implicit execution: AllocationExecution = AllocationExecution()): AllocationInfo[T] = {
    val expected = fn
    val extraText = if (text.isEmpty) "" else s" -- $text"
    val id = Thread.currentThread().getId
    val counts = new Array[Long](execution.executionCount)

    // Warmup runs let the JIT settle so the measured runs are stable; unless
    // ignoreEqualCheck is set, every run must reproduce the first result.
    @tailrec def warmupLoop(i: Int): Unit = if (i < execution.warmupCount) {
      val actual = fn
      if (!ignoreEqualCheck && actual != expected)
        assertEquals(s"warmup at index $i $expected $actual$extraText", expected, actual)
      warmupLoop(i + 1)
    }

    // Measured runs: record bytes allocated by this thread around each call,
    // net of the supplied baseline cost.
    @tailrec def testLoop(i: Int): Unit = if (i < execution.executionCount) {
      val before = threadMXBean.getThreadAllocatedBytes(id)
      val actual = fn
      val after = threadMXBean.getThreadAllocatedBytes(id)
      counts(i) = after - cost - before
      if (!ignoreEqualCheck && actual != expected)
        assertEquals(s"at index $i $expected $actual$extraText", expected, actual)
      testLoop(i + 1)
    }

    warmupLoop(0)
    testLoop(0)
    AllocationInfo(expected, counts)
  }
}
/** Measurement knobs: number of warmup iterations, then measured iterations. */
case class AllocationExecution(executionCount: Int = 1000, warmupCount: Int = 1000)
/** Result of a measured run plus the per-iteration allocated byte counts. */
case class AllocationInfo[T](result: T, allocations: Array[Long]) {
  /** Smallest observed allocation across iterations — the least noisy estimate. */
  def min: Long = allocations.min
}
| scala/scala | src/testkit/scala/tools/testkit/AllocationTest.scala | Scala | apache-2.0 | 6,873 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ly.stealth.latencies
import java.util.{Properties, UUID}
import _root_.kafka.serializer.DefaultDecoder
import com.datastax.spark.connector._
import com.datastax.spark.connector.cql.CassandraConnector
import io.confluent.kafka.serializers.KafkaAvroSerializer
import org.apache.avro.Schema
import org.apache.avro.generic.GenericData.Record
import org.apache.avro.generic.{GenericData, GenericRecord}
import org.apache.avro.util.Utf8
import org.apache.kafka.clients.producer.ProducerConfig._
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.apache.spark.SparkConf
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming._
import org.apache.spark.streaming.kafka._
object Main extends App {
  // NOTE(review): `extends App` delays field initialization (DelayedInit);
  // a plain `def main` would be safer with Spark — confirm before changing.

  // ---- CLI definition (all options required) ------------------------------
  val parser = new scopt.OptionParser[AppConfig]("spark-analysis") {
    head("Latencies calculation job", "1.0")
    opt[String]("topics") unbounded() required() action { (value, config) =>
      config.copy(topic = value)
    } text ("Comma separated list of topics to read data from")
    opt[String]("zookeeper") unbounded() required() action { (value, config) =>
      config.copy(zookeeper = value)
    } text ("Zookeeper connection string - host:port")
    opt[String]("broker.list") unbounded() required() action { (value, config) =>
      config.copy(brokerList = value)
    } text ("Comma separated string of host:port")
    opt[String]("schema.registry.url") unbounded() required() action { (value, config) =>
      config.copy(schemaRegistryUrl = value)
    } text ("Schema registry URL")
    opt[Int]("partitions") unbounded() required() action { (value, config) =>
      config.copy(partitions = value)
    } text ("Initial amount of RDD partitions")
    checkConfig { c =>
      if (c.topic.isEmpty || c.brokerList.isEmpty) {
        failure("You haven't provided all required parameters")
      } else {
        success
      }
    }
  }
  val appConfig = parser.parse(args, AppConfig()) match {
    case Some(c) => c
    case None => sys.exit(1) // scopt has already printed the usage/error
  }

  val sparkConfig = new SparkConf().setAppName("spark-analysis").set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
  val ssc = new StreamingContext(sparkConfig, Seconds(1))
  ssc.checkpoint("spark-analysis")

  // Create the target keyspace/table up front; latency columns are Cassandra
  // counters so values accumulate across micro-batches.
  val cassandraConnector = CassandraConnector(sparkConfig)
  cassandraConnector.withSessionDo(session => {
    session.execute("CREATE KEYSPACE IF NOT EXISTS spark_analysis WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': 1}")
    session.execute("CREATE TABLE IF NOT EXISTS spark_analysis.events(framework text, second bigint, message_size bigint, eventname text, latency counter, received_count counter, sent_count counter, PRIMARY KEY(framework, second, message_size, eventname)) WITH CLUSTERING ORDER BY (second DESC)")
  })

  // Fresh random group id per run, so each run starts from the latest offsets.
  val consumerConfig = Map(
    "group.id" -> "spark-analysis-%s".format(UUID.randomUUID.toString),
    "zookeeper.connect" -> appConfig.zookeeper,
    "auto.offset.reset" -> "largest",
    "schema.registry.url" -> appConfig.schemaRegistryUrl)
  val producerConfig = new Properties()
  producerConfig.put(BOOTSTRAP_SERVERS_CONFIG, appConfig.brokerList)
  producerConfig.put(KEY_SERIALIZER_CLASS_CONFIG, classOf[KafkaAvroSerializer])
  producerConfig.put(VALUE_SERIALIZER_CLASS_CONFIG, classOf[KafkaAvroSerializer])
  producerConfig.put("schema.registry.url", appConfig.schemaRegistryUrl)

  start(ssc, consumerConfig, producerConfig, appConfig.topic, appConfig.partitions)
  ssc.start()
  ssc.awaitTermination()

  /**
   * Wires the streaming pipeline: read Avro records from Kafka, aggregate
   * per-second latencies, then publish each aggregate to a Kafka
   * "<topic>-latencies" topic and to Cassandra.
   */
  def start(ssc: StreamingContext, consumerConfig: Map[String, String], producerConfig: Properties, topics: String, partitions: Int) = {
    val topicMap = topics.split(",").map(_ -> partitions).toMap
    val latencyStream = KafkaUtils.createStream[Array[Byte], SchemaAndData, DefaultDecoder, AvroDecoder](ssc, consumerConfig, topicMap, StorageLevel.MEMORY_ONLY).map(value => {
      val record = value._2.deserialize().asInstanceOf[GenericRecord]
      import scala.collection.JavaConversions._
      val timings = record.get("timings").asInstanceOf[GenericData.Array[Record]]
      val topic = record.get("tag").asInstanceOf[java.util.Map[Utf8, Utf8]].get(new Utf8("topic")).toString
      // Tuple layout: (eventPairName, firstSecond, lastSecond, latencyMs,
      //                source, messageSize, topic) — timing values are in ns.
      (timings.head.get("eventName").asInstanceOf[Utf8].toString + "-" + timings.last.get("eventName").asInstanceOf[Utf8].toString,
        timings.head.get("value").asInstanceOf[Long] / 1000000000,
        timings.last.get("value").asInstanceOf[Long] / 1000000000,
        (timings.last.get("value").asInstanceOf[Long] - timings.head.get("value").asInstanceOf[Long]) / 1000000,
        record.get("source").asInstanceOf[Utf8].toString,
        record.get("size").asInstanceOf[Long],
        topic)
    }).transform( rdd => {
      // Group by (eventPair, lastSecond, source, size, topic) and aggregate:
      // summed latency, count received in the same second, total sent.
      rdd.groupBy(entry => (entry._1, entry._3, entry._5, entry._6, entry._7))
    }.map( entry => {
      val key = entry._1
      val values = entry._2
      // "received" here means first and last timing fall into the same second.
      val receivedValuesCount = values.count(item => item._2 == item._3).toLong
      (key._3, key._2, key._4, key._1, values.map(_._4).sum.toLong, receivedValuesCount, values.size.toLong, key._5)
    })).persist()
    // Avro schema for the aggregated latency event published back to Kafka.
    val schema = "{\"type\":\"record\",\"name\":\"event\",\"fields\":[{\"name\":\"framework\",\"type\":\"string\"},{\"name\":\"second\",\"type\":\"long\"},{\"name\":\"message_size\",\"type\":\"long\"},{\"name\":\"eventname\",\"type\":\"string\"},{\"name\":\"latency\",\"type\":\"long\"},{\"name\":\"received_count\",\"type\":\"long\"},{\"name\":\"sent_count\",\"type\":\"long\"}]}"
    latencyStream.foreachRDD(rdd => {
      rdd.foreachPartition(events => {
        // One producer per partition per batch; closed in finally to avoid leaks.
        val producer = new KafkaProducer[Any, AnyRef](producerConfig)
        val eventSchema = new Schema.Parser().parse(schema)
        try {
          for (event <- events) {
            val latencyRecord = new GenericData.Record(eventSchema)
            latencyRecord.put("framework", event._1)
            latencyRecord.put("second", event._2)
            latencyRecord.put("message_size", event._3)
            latencyRecord.put("eventname", event._4)
            latencyRecord.put("latency", event._5)
            latencyRecord.put("received_count", event._6)
            latencyRecord.put("sent_count", event._7)
            val record = new ProducerRecord[Any, AnyRef]("%s-latencies".format(event._8), latencyRecord)
            // .get() blocks per message — synchronous sends; throughput tradeoff.
            producer.send(record).get()
          }
        } finally {
          producer.close()
        }
      })
    })
    latencyStream.foreachRDD(rdd => {
      rdd.saveToCassandra("spark_analysis", "events", SomeColumns("framework", "second", "message_size", "eventname", "latency", "received_count", "sent_count"))
    })
  }
}
case class AppConfig(topic: String = "", brokerList: String = "", zookeeper: String = "", partitions: Int = 1, schemaRegistryUrl: String = "")
| stealthly/spark-streaming | latencies-calc/src/main/scala/ly/stealth/latencies/Main.scala | Scala | apache-2.0 | 7,565 |
package org.faker
import scala.util.Random
/**
* Generates internet related fake data.
*
* {{{
* scala> Faker.Internet.domainName
* res1: String = westkihn.info
*
* scala> Faker.Internet.userName
* res2: String = cleora.kreiger
*
* scala> Faker.Internet.email
* res3: String = anissa.feil@vandervort.org
*
* scala> Faker.Internet.freeEmail
* res4: String = terrance_ruecker@yahoo.com
*
* scala> Faker.Internet.safeEmail
* res5: String = cathrine_gottlieb@example.com
*
* scala> Faker.Internet.domainWord
* res7: String = coleabbott
*
* scala> Faker.Internet.domainSuffix
* res8: String = com
*
* scala> Faker.Internet.password
* res9: String = consequaturautemoccaecati
*
* scala> Faker.Internet.macAddress()
* res13: String = 44:d5:ee:d0:f7:fb
*
* scala> Faker.Internet.ipV4Address
* res14: String = 142.246.100.144
*
* scala> Faker.Internet.ipV6Address
* res15: String = 77e2:40cb:3d2a:5697:d6cf:43ae:5cae:e343
* }}}
*/
object Internet extends Base {
  // Separators inserted between name parts when building user names.
  val SEPARATORS = Array(".", "_")

  /**
   * generates a domain name built of a domain word (a company name without spaces or special chars)
   * and a domain suffix (ex.: com, biz, org, ...)
   */
  def domainName(implicit locale: FakerLocale = FakerLocale.default): String = s"${domainWord.fixUmlauts}.$domainSuffix"

  /**
   * generates a user name built of a person's first and last name delimited by a separator (dot or underscore)
   */
  def userName(implicit locale: FakerLocale = FakerLocale.default): String = {
    // TODO: Make more sophisticated
    // Strip non-word characters from each name part before joining.
    val nameParts = Array(Name.firstName, Name.lastName).map { _.replaceAll("""\W""", "") }
    nameParts.mkString(SEPARATORS.randomElement).fixUmlauts.toLowerCase()
  }

  /**
   * generates a person's company email address
   */
  def email(implicit locale: FakerLocale = FakerLocale.default): String = s"${userName()}@${domainName()}"

  /**
   * generates a person's email address at a free email provider (yahoo, gmail and the like)
   */
  def freeEmail(implicit locale: FakerLocale = FakerLocale.default): String = {
    val d = parse("internet.free_email")
    s"$userName@$d"
  }

  /**
   * generates a person's email address at one of the sites example.[com,org,net]
   */
  def safeEmail(implicit locale: FakerLocale = FakerLocale.default): String = {
    val domain = Array("com", "org", "net").randomElement
    s"$userName@example.$domain"
  }

  /**
   * generates the domain word from a generated company's name
   */
  def domainWord(implicit locale: FakerLocale = FakerLocale.default): String = Company.name.split(" ").head.removeNonWordChars.toLowerCase

  /**
   * generates a domain suffix (com, org, net, ...) without leading period
   */
  def domainSuffix(implicit locale: FakerLocale = FakerLocale.default): String = parse("internet.domain_suffix")

  /**
   * generates a password
   */
  def password(implicit locale: FakerLocale = FakerLocale.default): String = Lorem.words().mkString

  /**
   * generates a mac address like `44:d5:ee:d0:f7:fb`.
   * `prefix` may pin the leading octets (e.g. "aa:bb"); the rest are random.
   */
  def macAddress(prefix: String = ""): String = {
    val prefixDigits = prefix.split(":").filterNot(_.isEmpty).map(s => Integer.parseInt(s, 16))
    val addressDigits = (1 to (6 - prefixDigits.size)).map(_ => Random.nextInt(256))
    (prefixDigits ++ addressDigits).map(i => f"$i%02x").mkString(":")
  }

  /**
   * generates an ip address like `142.246.100.144`
   * (octets drawn from 2..254 — presumably to avoid reserved values like 0, 1 and 255)
   */
  def ipV4Address: String = {
    val ary = 2 to 254
    Array(ary.randomElement, ary.randomElement, ary.randomElement, ary.randomElement).mkString(".")
  }

  /**
   * generates an IP V6 address like `77e2:40cb:3d2a:5697:d6cf:43ae:5cae:e343`
   */
  def ipV6Address: String = {
    val ary = 0 to 65535
    (1 to 8).map(_ => f"${ary.randomElement}%x").mkString(":")
  }
}
| ralli/faker_scala | src/main/scala/org/faker/Internet.scala | Scala | bsd-3-clause | 3,774 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding
import cascading.property.ConfigDef.Getter
import cascading.pipe._
import cascading.flow._
import cascading.operation._
import cascading.operation.filter._
import cascading.tuple._
import scala.util.Random
import java.util.concurrent.atomic.AtomicInteger
import scala.collection.immutable.Queue
object RichPipe extends java.io.Serializable {
  // Monotonic counter backing getNextName; starts at -1 so the first name is "_pipe_0".
  private val nextPipe = new AtomicInteger(-1)

  def apply(p: Pipe): RichPipe = new RichPipe(p)

  // Allows a RichPipe to be used anywhere a cascading Pipe is expected.
  implicit def toPipe(rp: RichPipe): Pipe = rp.pipe

  /** Returns a fresh, process-unique pipe name. */
  def getNextName: String = "_pipe_" + nextPipe.incrementAndGet.toString

  /** Wraps `p` in a new Pipe carrying a fresh unique name. */
  def assignName(p: Pipe) = new Pipe(getNextName, p)

  private val REDUCER_KEY = "mapred.reduce.tasks"

  /**
   * Gets the underlying config for this pipe and sets the number of reducers
   * useful for cascading GroupBy/CoGroup pipes.
   * A value of -1 leaves the cluster default; any other negative value is rejected.
   */
  def setReducers(p: Pipe, reducers: Int): Pipe = {
    if (reducers > 0) {
      p.getStepConfigDef()
        .setProperty(REDUCER_KEY, reducers.toString)
      p.getStepConfigDef()
        .setProperty(Config.WithReducersSetExplicitly, "true")
    } else if (reducers != -1) {
      throw new IllegalArgumentException(s"Number of reducers must be non-negative. Got: ${reducers}")
    }
    p
  }

  // A pipe can have more than one description when merged together, so we store them delimited with 255.toChar.
  // Cannot use 1.toChar as we get an error if it is not a printable character.
  private def encodePipeDescriptions(descriptions: Seq[String]): String = {
    descriptions.map(_.replace(255.toChar, ' ')).filter(_.nonEmpty).mkString(255.toChar.toString)
  }

  private def decodePipeDescriptions(encoding: String): Seq[String] = {
    encoding.split(255.toChar).toSeq
  }

  /** Reads the descriptions stored on `p`'s step config, or Nil when none are set. */
  def getPipeDescriptions(p: Pipe): Seq[String] = {
    if (p.getStepConfigDef.isEmpty)
      Nil
    else {
      // We use empty getter so we can get latest config value of Config.PipeDescriptions in the step ConfigDef.
      val encodedResult = p.getStepConfigDef.apply(Config.PipeDescriptions, new Getter {
        override def update(s: String, s1: String): String = ???
        override def get(s: String): String = null
      })
      Option(encodedResult)
        .filterNot(_.isEmpty)
        .map(decodePipeDescriptions)
        .getOrElse(Nil)
    }
  }

  /** Appends `descriptions` to whatever descriptions `p` already carries. */
  def setPipeDescriptions(p: Pipe, descriptions: Seq[String]): Pipe = {
    p.getStepConfigDef().setProperty(
      Config.PipeDescriptions,
      encodePipeDescriptions(getPipeDescriptions(p) ++ descriptions))
    p
  }

  /** Records the call site (if known) as a pipe description, for diagnostics. */
  def setPipeDescriptionFrom(p: Pipe, ste: Option[StackTraceElement]): Pipe = {
    ste.foreach { ste =>
      setPipeDescriptions(p, List(ste.toString))
    }
    p
  }
}
/**
* This is an enrichment-pattern class for cascading.pipe.Pipe.
* The rule is to never use this class directly in input or return types, but
* only to add methods to Pipe.
*/
class RichPipe(val pipe: Pipe) extends java.io.Serializable with JoinAlgorithms {
// We need this for the implicits
import Dsl._
import RichPipe.assignName
/**
 * Rename the current pipe; the original pipe becomes the parent of the renamed one.
 */
def name(s: String): Pipe = new Pipe(s, pipe)
/**
* Beginning of block with access to expensive nonserializable state. The state object should
* contain a function release() for resource management purpose.
*/
def using[C <: { def release(): Unit }](bf: => C) = new {
/**
* For pure side effect.
*/
def foreach[A](f: Fields)(fn: (C, A) => Unit)(implicit conv: TupleConverter[A], set: TupleSetter[Unit], flowDef: FlowDef, mode: Mode) = {
conv.assertArityMatches(f)
val newPipe = new Each(pipe, f, new SideEffectMapFunction(bf, fn,
new Function1[C, Unit] with java.io.Serializable {
def apply(c: C): Unit = { c.release() }
},
Fields.NONE, conv, set))
NullSource.writeFrom(newPipe)(flowDef, mode)
newPipe
}
/**
* map with state
*/
def map[A, T](fs: (Fields, Fields))(fn: (C, A) => T)(implicit conv: TupleConverter[A], set: TupleSetter[T]) = {
conv.assertArityMatches(fs._1)
set.assertArityMatches(fs._2)
val mf = new SideEffectMapFunction(bf, fn,
new Function1[C, Unit] with java.io.Serializable {
def apply(c: C): Unit = { c.release() }
},
fs._2, conv, set)
new Each(pipe, fs._1, mf, defaultMode(fs._1, fs._2))
}
/**
* flatMap with state
*/
def flatMap[A, T](fs: (Fields, Fields))(fn: (C, A) => TraversableOnce[T])(implicit conv: TupleConverter[A], set: TupleSetter[T]) = {
conv.assertArityMatches(fs._1)
set.assertArityMatches(fs._2)
val mf = new SideEffectFlatMapFunction(bf, fn,
new Function1[C, Unit] with java.io.Serializable {
def apply(c: C): Unit = { c.release() }
},
fs._2, conv, set)
new Each(pipe, fs._1, mf, defaultMode(fs._1, fs._2))
}
}
/**
* Keep only the given fields, and discard the rest.
* takes any number of parameters as long as we can convert
* them to a fields object
*/
def project(fields: Fields): Pipe =
new Each(pipe, fields, new Identity(fields))
/**
* Discard the given fields, and keep the rest.
* Kind of the opposite of project method.
*/
def discard(f: Fields): Pipe =
new Each(pipe, f, new NoOp, Fields.SWAP)
/**
* Insert a function into the pipeline:
*/
def thenDo[T, U](pfn: (T) => U)(implicit in: (RichPipe) => T): U = pfn(in(this))
/**
* group the Pipe based on fields
*
* builder is typically a block that modifies the given GroupBuilder
* the final OUTPUT of the block is used to schedule the new pipe
* each method in GroupBuilder returns this, so it is recommended
* to chain them and use the default input:
*
* {{{
* _.size.max('f1) etc...
* }}}
*/
def groupBy(f: Fields)(builder: GroupBuilder => GroupBuilder): Pipe =
builder(new GroupBuilder(f)).schedule(pipe.getName, pipe)
/**
* Returns the set of distinct tuples containing the specified fields
*/
def distinct(f: Fields): Pipe =
groupBy(f) { _.size('__uniquecount__) }.project(f)
/**
* Returns the set of unique tuples containing the specified fields. Same as distinct
*/
def unique(f: Fields): Pipe = distinct(f)
/**
* Merge or Concatenate several pipes together with this one:
*/
def ++(that: Pipe): Pipe = {
if (this.pipe == that) {
// Cascading fails on self merge:
// solution by Jack Guo
new Merge(assignName(this.pipe), assignName(new Each(that, new Identity)))
} else {
new Merge(assignName(this.pipe), assignName(that))
}
}
/**
* Group all tuples down to one reducer.
* (due to cascading limitation).
* This is probably only useful just before setting a tail such as Database
* tail, so that only one reducer talks to the DB. Kind of a hack.
*/
def groupAll: Pipe = groupAll { _.pass }
/**
* == Warning ==
* This kills parallelism. All the work is sent to one reducer.
*
* Only use this in the case that you truly need all the data on one
* reducer.
*
* Just about the only reasonable case of this method is to reduce all values of a column
* or count all the rows.
*/
def groupAll(gs: GroupBuilder => GroupBuilder) =
map(() -> '__groupAll__) { (u: Unit) => 1 }
.groupBy('__groupAll__) { gs(_).reducers(1) }
.discard('__groupAll__)
/**
* Force a random shuffle of all the data to exactly n reducers
*/
def shard(n: Int): Pipe = groupRandomly(n) { _.pass }
/**
* Force a random shuffle of all the data to exactly n reducers,
* with a given seed if you need repeatability.
*/
def shard(n: Int, seed: Int): Pipe = groupRandomly(n, seed) { _.pass }
/**
* Like groupAll, but randomly groups data into n reducers.
*
* you can provide a seed for the random number generator
* to get reproducible results
*/
def groupRandomly(n: Int)(gs: GroupBuilder => GroupBuilder): Pipe =
groupRandomlyAux(n, None)(gs)
/**
* like groupRandomly(n : Int) with a given seed in the randomization
*/
def groupRandomly(n: Int, seed: Long)(gs: GroupBuilder => GroupBuilder): Pipe =
groupRandomlyAux(n, Some(seed))(gs)
// achieves the behavior that reducer i gets i_th shard
// by relying on cascading to use java's hashCode, which hash ints
// to themselves
protected def groupRandomlyAux(n: Int, optSeed: Option[Long])(gs: GroupBuilder => GroupBuilder): Pipe = {
using(statefulRandom(optSeed))
.map(() -> '__shard__) { (r: Random, _: Unit) => r.nextInt(n) }
.groupBy('__shard__) { gs(_).reducers(n) }
.discard('__shard__)
}
private def statefulRandom(optSeed: Option[Long]): Random with Stateful = {
val random = new Random with Stateful
optSeed.foreach { x => random.setSeed(x) }
random
}
/**
* Put all rows in random order
*
* you can provide a seed for the random number generator
* to get reproducible results
*/
def shuffle(shards: Int): Pipe = groupAndShuffleRandomly(shards) { _.pass }
def shuffle(shards: Int, seed: Long): Pipe = groupAndShuffleRandomly(shards, seed) { _.pass }
/**
* Like shard, except do some operation im the reducers
*/
def groupAndShuffleRandomly(reducers: Int)(gs: GroupBuilder => GroupBuilder): Pipe =
groupAndShuffleRandomlyAux(reducers, None)(gs)
/**
* Like groupAndShuffleRandomly(reducers : Int) but with a fixed seed.
*/
def groupAndShuffleRandomly(reducers: Int, seed: Long)(gs: GroupBuilder => GroupBuilder): Pipe =
groupAndShuffleRandomlyAux(reducers, Some(seed))(gs)
private def groupAndShuffleRandomlyAux(reducers: Int, optSeed: Option[Long])(gs: GroupBuilder => GroupBuilder): Pipe = {
using(statefulRandom(optSeed))
.map(() -> ('__shuffle__)) { (r: Random, _: Unit) => r.nextDouble() }
.groupRandomlyAux(reducers, optSeed){ g: GroupBuilder =>
gs(g.sortBy('__shuffle__))
}
.discard('__shuffle__)
}
/**
* Adds a field with a constant value.
*
* == Usage ==
* {{{
* insert('a, 1)
* }}}
*/
def insert[A](fs: Fields, value: A)(implicit setter: TupleSetter[A]): Pipe =
map[Unit, A](() -> fs) { _: Unit => value }(implicitly[TupleConverter[Unit]], setter)
/**
* Rename some set of N fields as another set of N fields
*
* == Usage ==
* {{{
* rename('x -> 'z)
* rename(('x,'y) -> ('X,'Y))
* }}}
*
* == Warning ==
* `rename('x,'y)` is interpreted by scala as `rename(Tuple2('x,'y))`
* which then does `rename('x -> 'y)`. This is probably not what is intended
* but the compiler doesn't resolve the ambiguity. YOU MUST CALL THIS WITH
* A TUPLE2! If you don't, expect the unexpected.
*/
def rename(fields: (Fields, Fields)): Pipe = {
val (fromFields, toFields) = fields
val in_arity = fromFields.size
val out_arity = toFields.size
assert(in_arity == out_arity, "Number of field names must match for rename")
new Each(pipe, fromFields, new Identity(toFields), Fields.SWAP)
}
/**
* Keep only items that satisfy this predicate.
*/
def filter[A](f: Fields)(fn: (A) => Boolean)(implicit conv: TupleConverter[A]): Pipe = {
conv.assertArityMatches(f)
new Each(pipe, f, new FilterFunction(fn, conv))
}
/**
* Keep only items that don't satisfy this predicate.
* `filterNot` is equal to negating a `filter` operation.
*
* {{{ filterNot('name) { name: String => name contains "a" } }}}
*
* is the same as:
*
* {{{ filter('name) { name: String => !(name contains "a") } }}}
*/
def filterNot[A](f: Fields)(fn: (A) => Boolean)(implicit conv: TupleConverter[A]): Pipe =
filter[A](f)(!fn(_))
/**
* Text files can have corrupted data. If you use this function and a
* cascading trap you can filter out corrupted data from your pipe.
*/
def verifyTypes[A](f: Fields)(implicit conv: TupleConverter[A]): Pipe = {
pipe.filter(f) { (a: A) => true }
}
/**
* Given a function, partitions the pipe into several groups based on the
* output of the function. Then applies a GroupBuilder function on each of the
* groups.
*
* Example:
* pipe
* .mapTo(()->('age, 'weight) { ... }
* .partition('age -> 'isAdult) { _ > 18 } { _.average('weight) }
* pipe now contains the average weights of adults and minors.
*/
def partition[A, R](fs: (Fields, Fields))(fn: (A) => R)(
builder: GroupBuilder => GroupBuilder)(
implicit conv: TupleConverter[A],
ord: Ordering[R],
rset: TupleSetter[R]): Pipe = {
val (fromFields, toFields) = fs
conv.assertArityMatches(fromFields)
rset.assertArityMatches(toFields)
val tmpFields = new Fields("__temp__")
tmpFields.setComparator("__temp__", ord)
map(fromFields -> tmpFields)(fn)(conv, TupleSetter.singleSetter[R])
.groupBy(tmpFields)(builder)
.map[R, R](tmpFields -> toFields){ (r: R) => r }(TupleConverter.singleConverter[R], rset)
.discard(tmpFields)
}
/**
* If you use a map function that does not accept TupleEntry args,
* which is the common case, an implicit conversion in GeneratedConversions
* will convert your function into a `(TupleEntry => T)`. The result type
* T is converted to a cascading Tuple by an implicit `TupleSetter[T]`.
* acceptable T types are primitive types, cascading Tuples of those types,
* or `scala.Tuple(1-22)` of those types.
*
* After the map, the input arguments will be set to the output of the map,
* so following with filter or map is fine without a new using statement if
* you mean to operate on the output.
*
* {{{
* map('data -> 'stuff)
* }}}
*
* * if output equals input, REPLACE is used.
* * if output or input is a subset of the other SWAP is used.
* * otherwise we append the new fields (cascading Fields.ALL is used)
*
* {{{
* mapTo('data -> 'stuff)
* }}}
*
* Only the results (stuff) are kept (cascading Fields.RESULTS)
*
* == Note ==
* Using mapTo is the same as using map followed by a project for
* selecting just the output fields
*/
def map[A, T](fs: (Fields, Fields))(fn: A => T)(implicit conv: TupleConverter[A], setter: TupleSetter[T]): Pipe = {
conv.assertArityMatches(fs._1)
setter.assertArityMatches(fs._2)
each(fs)(new MapFunction[A, T](fn, _, conv, setter))
}
def mapTo[A, T](fs: (Fields, Fields))(fn: A => T)(implicit conv: TupleConverter[A], setter: TupleSetter[T]): Pipe = {
conv.assertArityMatches(fs._1)
setter.assertArityMatches(fs._2)
eachTo(fs)(new MapFunction[A, T](fn, _, conv, setter))
}
def flatMap[A, T](fs: (Fields, Fields))(fn: A => TraversableOnce[T])(implicit conv: TupleConverter[A], setter: TupleSetter[T]): Pipe = {
conv.assertArityMatches(fs._1)
setter.assertArityMatches(fs._2)
each(fs)(new FlatMapFunction[A, T](fn, _, conv, setter))
}
def flatMapTo[A, T](fs: (Fields, Fields))(fn: A => TraversableOnce[T])(implicit conv: TupleConverter[A], setter: TupleSetter[T]): Pipe = {
conv.assertArityMatches(fs._1)
setter.assertArityMatches(fs._2)
eachTo(fs)(new FlatMapFunction[A, T](fn, _, conv, setter))
}
/**
* Filters all data that is defined for this partial function and then applies that function
*/
def collect[A, T](fs: (Fields, Fields))(fn: PartialFunction[A, T])(implicit conv: TupleConverter[A], setter: TupleSetter[T]): Pipe = {
conv.assertArityMatches(fs._1)
setter.assertArityMatches(fs._2)
pipe.each(fs)(new CollectFunction[A, T](fn, _, conv, setter))
}
def collectTo[A, T](fs: (Fields, Fields))(fn: PartialFunction[A, T])(implicit conv: TupleConverter[A], setter: TupleSetter[T]): Pipe = {
conv.assertArityMatches(fs._1)
setter.assertArityMatches(fs._2)
pipe.eachTo(fs)(new CollectFunction[A, T](fn, _, conv, setter))
}
/**
* the same as
*
* {{{
* flatMap(fs) { it : TraversableOnce[T] => it }
* }}}
*
* Common enough to be useful.
*/
def flatten[T](fs: (Fields, Fields))(implicit conv: TupleConverter[TraversableOnce[T]], setter: TupleSetter[T]): Pipe =
flatMap[TraversableOnce[T], T](fs)({ it: TraversableOnce[T] => it })(conv, setter)
/**
* the same as
*
* {{{
* flatMapTo(fs) { it : TraversableOnce[T] => it }
* }}}
*
* Common enough to be useful.
*/
def flattenTo[T](fs: (Fields, Fields))(implicit conv: TupleConverter[TraversableOnce[T]], setter: TupleSetter[T]): Pipe =
flatMapTo[TraversableOnce[T], T](fs)({ it: TraversableOnce[T] => it })(conv, setter)
/**
* Force a materialization to disk in the flow.
* This is useful before crossWithTiny if you filter just before. Ideally scalding/cascading would
* see this (and may in future versions), but for now it is here to aid in hand-tuning jobs
*/
lazy val forceToDisk: Pipe = new Checkpoint(pipe)
/**
* Convenience method for integrating with existing cascading Functions
*/
def each(fs: (Fields, Fields))(fn: Fields => Function[_]) = {
new Each(pipe, fs._1, fn(fs._2), defaultMode(fs._1, fs._2))
}
/**
* Same as above, but only keep the results field.
*/
def eachTo(fs: (Fields, Fields))(fn: Fields => Function[_]) = {
new Each(pipe, fs._1, fn(fs._2), Fields.RESULTS)
}
/**
* This is an analog of the SQL/Excel unpivot function which converts columns of data
* into rows of data. Only the columns given as input fields are expanded in this way.
* For this operation to be reversible, you need to keep some unique key on each row.
* See GroupBuilder.pivot to reverse this operation assuming you leave behind a grouping key
* == Example ==
* {{{
* pipe.unpivot(('w,'x,'y,'z) -> ('feature, 'value))
* }}}
*
* takes rows like:
* {{{
* key, w, x, y, z
* 1, 2, 3, 4, 5
* 2, 8, 7, 6, 5
* }}}
* to:
* {{{
* key, feature, value
* 1, w, 2
* 1, x, 3
* 1, y, 4
* }}}
* etc...
*/
def unpivot(fieldDef: (Fields, Fields)): Pipe = {
assert(fieldDef._2.size == 2, "Must specify exactly two Field names for the results")
// toKeyValueList comes from TupleConversions
pipe.flatMap(fieldDef) { te: TupleEntry => TupleConverter.KeyValueList(te) }
.discard(fieldDef._1)
}
/**
* Keep at most n elements. This is implemented by keeping
* approximately n/k elements on each of the k mappers or reducers (whichever we wind
* up being scheduled on).
*/
def limit(n: Long): Pipe = new Each(pipe, new Limit(n))
/**
* Sample a fraction of elements. fraction should be between 0.00 (0%) and 1.00 (100%)
* you can provide a seed to get reproducible results
*
*/
def sample(fraction: Double): Pipe = new Each(pipe, new Sample(fraction))
def sample(fraction: Double, seed: Long): Pipe = new Each(pipe, new Sample(seed, fraction))
/**
* Sample fraction of elements with return. fraction should be between 0.00 (0%) and 1.00 (100%)
* you can provide a seed to get reproducible results
*
*/
def sampleWithReplacement(fraction: Double): Pipe = new Each(pipe, new SampleWithReplacement(fraction), Fields.ALL)
def sampleWithReplacement(fraction: Double, seed: Int): Pipe = new Each(pipe, new SampleWithReplacement(fraction, seed), Fields.ALL)
/**
* Print all the tuples that pass to stderr
*/
def debug: Pipe = debug(PipeDebug())
/**
* Print the tuples that pass with the options configured in debugger
* For instance:
* {{{ debug(PipeDebug().toStdOut.printTuplesEvery(100)) }}}
*/
def debug(dbg: PipeDebug): Pipe = dbg(pipe)
/**
* Write all the tuples to the given source and return this Pipe
*/
def write(outsource: Source)(implicit flowDef: FlowDef, mode: Mode) = {
/* This code is to hack around a known Cascading bug that they have decided not to fix. In a graph:
A -> FlatMap -> write(tsv) -> FlatMap
in the second flatmap cascading will read from the written tsv for running it. However TSV's use toString and so is not a bijection.
here we stick in an identity function before the tsv write to keep to force cascading to do any fork/split beforehand.
*/
val writePipe: Pipe = outsource match {
case t: Tsv => new Each(pipe, Fields.ALL, IdentityFunction, Fields.REPLACE)
case _ => pipe
}
outsource.writeFrom(writePipe)(flowDef, mode)
pipe
}
/**
* Adds a trap to the current pipe,
* which will capture all exceptions that occur in this pipe
* and save them to the trapsource given
*
* Traps do not include the original fields in a tuple,
* only the fields seen in an operation.
* Traps also do not include any exception information.
*
* There can only be at most one trap for each pipe.
*/
def addTrap(trapsource: Source)(implicit flowDef: FlowDef, mode: Mode) = {
flowDef.addTrap(pipe, trapsource.createTap(Write)(mode))
pipe
}
/**
* Divides sum of values for this variable by their sum; assumes without checking that division is supported
* on this type and that sum is not zero
*
* If those assumptions do not hold, will throw an exception -- consider checking sum sepsarately and/or using addTrap
*
* in some cases, crossWithTiny has been broken, the implementation supports a work-around
*/
def normalize(f: Fields, useTiny: Boolean = true): Pipe = {
val total = groupAll { _.sum[Double](f -> '__total_for_normalize__) }
(if (useTiny) {
crossWithTiny(total)
} else {
crossWithSmaller(total)
})
.map(Fields.merge(f, '__total_for_normalize__) -> f) { args: (Double, Double) =>
args._1 / args._2
}
}
/**
* Maps the input fields into an output field of type T. For example:
*
* {{{
* pipe.pack[(Int, Int)] (('field1, 'field2) -> 'field3)
* }}}
*
* will pack fields 'field1 and 'field2 to field 'field3, as long as 'field1 and 'field2
* can be cast into integers. The output field 'field3 will be of tupel `(Int, Int)`
*
*/
def pack[T](fs: (Fields, Fields))(implicit packer: TuplePacker[T], setter: TupleSetter[T]): Pipe = {
val (fromFields, toFields) = fs
assert(toFields.size == 1, "Can only output 1 field in pack")
val conv = packer.newConverter(fromFields)
pipe.map(fs) { input: T => input } (conv, setter)
}
/**
* Same as pack but only the to fields are preserved.
*/
def packTo[T](fs: (Fields, Fields))(implicit packer: TuplePacker[T], setter: TupleSetter[T]): Pipe = {
val (fromFields, toFields) = fs
assert(toFields.size == 1, "Can only output 1 field in pack")
val conv = packer.newConverter(fromFields)
pipe.mapTo(fs) { input: T => input } (conv, setter)
}
/**
* The opposite of pack. Unpacks the input field of type `T` into
* the output fields. For example:
*
* {{{
* pipe.unpack[(Int, Int)] ('field1 -> ('field2, 'field3))
* }}}
*
* will unpack 'field1 into 'field2 and 'field3
*/
def unpack[T](fs: (Fields, Fields))(implicit unpacker: TupleUnpacker[T], conv: TupleConverter[T]): Pipe = {
val (fromFields, toFields) = fs
assert(fromFields.size == 1, "Can only take 1 input field in unpack")
val fields = (fromFields, unpacker.getResultFields(toFields))
val setter = unpacker.newSetter(toFields)
pipe.map(fields) { input: T => input } (conv, setter)
}
/**
* Same as unpack but only the to fields are preserved.
*/
def unpackTo[T](fs: (Fields, Fields))(implicit unpacker: TupleUnpacker[T], conv: TupleConverter[T]): Pipe = {
val (fromFields, toFields) = fs
assert(fromFields.size == 1, "Can only take 1 input field in unpack")
val fields = (fromFields, unpacker.getResultFields(toFields))
val setter = unpacker.newSetter(toFields)
pipe.mapTo(fields) { input: T => input } (conv, setter)
}
/**
* Set of pipes reachable from this pipe (transitive closure of 'Pipe.getPrevious')
*/
def upstreamPipes: Set[Pipe] =
Iterator
.iterate(Seq(pipe))(pipes => for (p <- pipes; prev <- p.getPrevious) yield prev)
.takeWhile(_.length > 0)
.flatten
.toSet
/**
* This finds all the boxed serializations stored in the flow state map for this
* flowdef. We then find all the pipes back in the DAG from this pipe and apply
* those serializations.
*/
private[scalding] def applyFlowConfigProperties(flowDef: FlowDef): Pipe = {
case class ToVisit[T](queue: Queue[T], inQueue: Set[T]) {
def maybeAdd(t: T): ToVisit[T] = if (inQueue(t)) this else {
ToVisit(queue :+ t, inQueue + t)
}
def next: Option[(T, ToVisit[T])] =
if (inQueue.isEmpty) None
else Some((queue.head, ToVisit(queue.tail, inQueue - queue.head)))
}
@annotation.tailrec
def go(p: Pipe, visited: Set[Pipe], toVisit: ToVisit[Pipe]): Set[Pipe] = {
val notSeen: Set[Pipe] = p.getPrevious.filter(i => !visited.contains(i)).toSet
val nextVisited: Set[Pipe] = visited + p
val nextToVisit = notSeen.foldLeft(toVisit) { case (prev, n) => prev.maybeAdd(n) }
nextToVisit.next match {
case Some((h, innerNextToVisit)) => go(h, nextVisited, innerNextToVisit)
case _ => nextVisited
}
}
val allPipes = go(pipe, Set[Pipe](), ToVisit[Pipe](Queue.empty, Set.empty))
FlowStateMap.get(flowDef).foreach { fstm =>
fstm.flowConfigUpdates.foreach {
case (k, v) =>
allPipes.foreach { p =>
p.getStepConfigDef().setProperty(k, v)
}
}
}
pipe
}
}
/**
* A simple trait for releasable resource. Provides noop implementation.
*/
trait Stateful {
  // Default is a no-op; override when the resource needs real cleanup.
  def release(): Unit = {}
}
| cchepelov/scalding | scalding-core/src/main/scala/com/twitter/scalding/RichPipe.scala | Scala | apache-2.0 | 26,247 |
package net.vanfleteren.objectvalidation
/** A single validation failure: a human-readable message plus the path of the
  * offending field (empty string when the error applies to the whole object). */
case class Error(message: String, path:String = "") | cvanfleteren/objectvalidation | src/main/scala/net/vanfleteren/objectvalidation/Error.scala | Scala | apache-2.0 | 93
/*
* Copyright (c) 2013 Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see http://www.gnu.org/licenses/agpl.html.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package generated.scala
/**
 * Supervised training set pairing Long-valued feature data (_data) with Boolean
 * labels (_labels).
 * NOTE(review): lives under src/generated — presumably emitted by a code generator
 * for each type combination; prefer editing the generator over this file.
 */
class LongBooleanSupervisedTrainingSet(val _data: LongDenseMatrix, val _labels: BooleanDenseVector)
| tesendic/Relite | src/generated/scala/LongBooleanSupervisedTrainingSet.scala | Scala | agpl-3.0 | 1,114 |
// Console/REPL script. The fake request pins the Host header to the production domain
// so that absolute URLs rendered into the emails point at tickets.studiocredo.be
// (NOTE(review): assumption based on the host value — confirm how Mailer builds links).
implicit val request = play.api.test.FakeRequest("GET", "/", play.api.test.FakeHeaders(List((play.api.http.HeaderNames.HOST,List("tickets.studiocredo.be")))), None)
import controllers.auth.Mailer

// Sends the "profile created" email to each user in `users` — defaulting to
// `us.listInactive` (presumably the user service in console scope — TODO confirm) —
// and prints a progress line to stdout after every send.
def notifyProfileCreated(users: List[RichUser] = us.listInactive) = {
  val length = users.length
  users.zipWithIndex.foreach{ case (user,index) =>
    Mailer.sendProfileCreatedEmail(user)
    println(s"Processed ${user.name} (${index+1} out of $length)")
  }
}
| studiocredo/ticket-reservation | scripts/mailer.scala | Scala | apache-2.0 | 453 |
package mesosphere.marathon
package api.forwarder
import akka.NotUsed
import akka.stream.Attributes
import akka.stream.scaladsl.Source
import akka.stream.stage.{GraphStage, GraphStageLogic, OutHandler}
import akka.stream.{Outlet, SourceShape}
import akka.util.ByteString
import com.typesafe.scalalogging.StrictLogging
import java.util.concurrent.atomic.AtomicBoolean
import javax.servlet.{ReadListener, ServletInputStream}
/**
* Graph stage which implements an non-blocking IO ServletInputStream reader, following the protocol outlined here:
*
* http://www.oracle.com/webfolder/technetwork/tutorials/obe/java/HTML5andServlet31/HTML5andServlet%203.1.html#section4
*
* The following runtime restrictions apply:
*
* - This Source cannot be materialized twice
* - No other readers for this inputStream may exist (IE no other component may register a readListener)
* - The associated context must be put in to async mode, first.
*
* Source will fail if httpServletRequest.startAsync() is not called beforehand, or if a readListener is
* already registered for the provided ServletInputStream.
*
* The inputStream IS NOT closed when the stage completes (exception or not).
*
* @param inputStream ServletInputStream for a HttpServletRequest which has been put in async mode.
* @param maxChunkSize The maximum number of bytes to read at a time
*/
class ServletInputStreamSource(inputStream: ServletInputStream, maxChunkSize: Int = 16384)
  extends GraphStage[SourceShape[ByteString]]
  with StrictLogging {

  // Flipped on first materialization; a second materialization would try to register
  // a second ReadListener on the same stream, so it is failed instead (see preStart).
  private val started = new AtomicBoolean(false)

  private val outlet: Outlet[ByteString] = Outlet("ServletInputStreamSource")
  override val shape: SourceShape[ByteString] = SourceShape(outlet)

  override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
    new GraphStageLogic(shape) {

      /**
       * Downstream has asked for data, and we have not yet pushed yet
       */
      private var pullPending = false

      /**
       * The inputStream has advertised that more data is available, and we have not read it yet.
       */
      private var readPending = false

      // Reused for every read; at most maxChunkSize bytes are read and pushed per pull.
      private val readBuffer = Array.ofDim[Byte](maxChunkSize)

      // Async callbacks marshal ReadListener notifications (which arrive on the servlet
      // container's thread) onto the stage's own execution context.
      val readPossible = createAsyncCallback[Unit] { _ =>
        readPending = true
        maybeReadAndPush()
      }

      val allDone = createAsyncCallback[Unit] { _ =>
        completeStage()
      }

      val readerFailed = createAsyncCallback[Throwable] { ex =>
        doFail(ex)
      }

      private def doFail(ex: Throwable): Unit = {
        failStage(ex)
      }

      override def preStart(): Unit =
        if (started.compareAndSet(false, true)) {
          try {
            inputStream.setReadListener(new ReadListener {
              override def onDataAvailable(): Unit = {
                readPossible.invoke(())
              }

              override def onAllDataRead(): Unit = {
                allDone.invoke(())
              }

              override def onError(t: Throwable): Unit = {
                logger.error("Error in inputStream", t)
                readerFailed.invoke(t)
              }
            })
          } catch {
            // Per the class contract above, setReadListener can throw when the request
            // is not in async mode or a listener is already registered; surface that
            // as a stage failure rather than letting it escape preStart.
            case ex: Throwable =>
              doFail(ex)
          }
        } else {
          doFail(new IllegalStateException("This source can only be materialized once."))
        }

      setHandler(
        outlet,
        new OutHandler {
          override def onPull(): Unit = {
            pullPending = true
            maybeReadAndPush()
          }

          override def onDownstreamFinish(): Unit = {
            completeStage()
          }
        }
      )

      // Reads and pushes only when both conditions hold: downstream has pulled AND the
      // stream has signalled readable data. Invoked from both onPull and readPossible.
      private def maybeReadAndPush(): Unit = {
        if (readPending && pullPending) {
          val len = inputStream.read(readBuffer)
          if (len == -1)
            completeStage()
          else {
            push(outlet, ByteString.fromArray(readBuffer, 0, len))
            pullPending = false
            // Per the servlet non-blocking IO protocol, keep readPending true only while
            // isReady reports more data; otherwise wait for the next onDataAvailable.
            readPending = inputStream.isReady()
          }
        }
      }
    }
}
object ServletInputStreamSource {

  /**
   * Convenience constructor. See [[ServletInputStreamSource]] for the preconditions
   * on `inputStream` (async mode enabled, no other ReadListener, materialize once).
   */
  def apply(inputStream: ServletInputStream): Source[ByteString, NotUsed] = {
    val stage = new ServletInputStreamSource(inputStream)
    Source.fromGraph(stage)
  }
}
| mesosphere/marathon | src/main/scala/mesosphere/marathon/api/forwarder/ServletInputStreamSource.scala | Scala | apache-2.0 | 4,309 |
/*
* Copyright (C) 2009-2018 Lightbend Inc. <https://www.lightbend.com>
*/
package scalaguide.i18n.scalai18n {

  import org.junit.runner.RunWith
  import org.specs2.runner.JUnitRunner
  import play.api._
  import play.api.http.HttpConfiguration
  import play.api.inject.guice.GuiceApplicationBuilder
  import play.api.mvc._
  import play.api.test._

  // NOTE: the `//#name ... //#name` comment pairs below are Play documentation
  // snippet markers; everything between a pair is published verbatim in the docs,
  // so keep those regions unchanged when editing this file.

  // Stand-in for a compiled Twirl template, so the controller snippets can call
  // views.html.formpage() without a real template on the classpath.
  package views.html {
    object formpage {
      def apply()(implicit messages: play.api.i18n.Messages): String = {
        ""
      }
    }
  }

  //#i18n-messagescontroller
  import javax.inject.Inject

  import play.api.i18n._

  class MyMessagesController @Inject()(mcc: MessagesControllerComponents)
    extends MessagesAbstractController(mcc) {

    def index = Action { implicit request: MessagesRequest[AnyContent] =>
      val messages: Messages = request.messages
      val message: String = messages("info.error")
      Ok(message)
    }

    def messages2 = Action { implicit request: MessagesRequest[AnyContent] =>
      val lang: Lang = request.messages.lang
      val message: String = messagesApi("info.error")(lang)
      Ok(message)
    }

    def messages4 = Action { implicit request: MessagesRequest[AnyContent] =>
      // MessagesRequest is an implicit MessagesProvider
      Ok(views.html.formpage())
    }
  }
  //#i18n-messagescontroller

  //#i18n-support
  import javax.inject.Inject

  import play.api.i18n._

  class MySupportController @Inject()(val controllerComponents: ControllerComponents)
    extends BaseController
    with I18nSupport {

    def index = Action { implicit request =>
      // type enrichment through I18nSupport
      val messages: Messages = request.messages
      val message: String = messages("info.error")
      Ok(message)
    }

    def messages2 = Action { implicit request =>
      // type enrichment through I18nSupport
      val lang: Lang = request.lang
      val message: String = messagesApi("info.error")(lang)
      Ok(message)
    }

    def messages3 = Action { request =>
      // direct access with no implicits required
      val messages: Messages = messagesApi.preferred(request)
      val lang = messages.lang
      val message: String = messages("info.error")
      Ok(message)
    }

    def messages4 = Action { implicit request =>
      // takes implicit Messages, converted using request2messages
      // template defined with @()(implicit messages: Messages)
      Ok(views.html.formpage())
    }
  }
  //#i18n-support

  @RunWith(classOf[JUnitRunner])
  class ScalaI18nSpec extends AbstractController(Helpers.stubControllerComponents()) with PlaySpecification {

    // Point play.i18n.path at the scalaguide/i18n resources so the sample messages
    // file backs the assertions below.
    val conf = Configuration.reference ++ Configuration.from(Map("play.i18n.path" -> "scalaguide/i18n"))

    "An i18nsupport controller" should {
      "return the right message" in new WithApplication(GuiceApplicationBuilder().loadConfig(conf).build()) {
        val controller = app.injector.instanceOf[MySupportController]
        val result = controller.index(FakeRequest())
        contentAsString(result) must contain("You aren't logged in!")
      }
    }

    "An messages controller" should {
      "return the right message" in new WithApplication(GuiceApplicationBuilder().loadConfig(conf).build()) {
        val controller = app.injector.instanceOf[MyMessagesController]
        val result = controller.index(FakeRequest())
        contentAsString(result) must contain("You aren't logged in!")
      }
    }

    // NOTE(review): the bare Boolean comparisons below rely on specs2 converting
    // a Boolean into an example result — presumably a false comparison fails the
    // example; confirm before restructuring.
    "A Scala translation" should {
      val env = Environment.simple()
      val langs = new DefaultLangsProvider(conf).get
      val httpConfiguration = HttpConfiguration.fromConfiguration(conf, env)
      val messagesApi = new DefaultMessagesApiProvider(env, conf, langs, httpConfiguration).get
      implicit val lang = Lang("en")

      "escape single quotes" in {
        //#apostrophe-messages
        messagesApi("info.error") == "You aren't logged in!"
        //#apostrophe-messages
      }

      "escape parameter substitution" in {
        //#parameter-escaping
        messagesApi("example.formatting") == "When using MessageFormat, '{0}' is replaced with the first parameter."
        //#parameter-escaping
      }
    }
  }
}
| Shenker93/playframework | documentation/manual/working/scalaGuide/main/i18n/code/ScalaI18N.scala | Scala | apache-2.0 | 4,164 |
/*
* Copyright (c) 2018 OVO Energy
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package com.ovoenergy.comms.model
package sms
import com.ovoenergy.comms.model.types.ProgressedEvent
import com.ovoenergy.comms.model.{Gateway, InternalMetadata, Metadata}
import com.sksamuel.avro4s.AvroDoc
/**
 * Event recording a change in the delivery status of a single SMS, as reported
 * by the originating gateway. Field-level documentation is carried in the
 * @AvroDoc annotations, which end up in the published Avro schema.
 */
case class SMSProgressed(
  metadata: Metadata,
  internalMetadata: InternalMetadata,
  @AvroDoc("Status of given SMS (Queued, Delivered)") status: SMSStatus,
  @AvroDoc("The Gateway from which the given event originated") gateway: Gateway,
  @AvroDoc("The ID given to the event by the origin gateway") gatewayMessageId: String,
  @AvroDoc("Providing context to the progression, i.e. a reason for a delivery failure") reason: Option[String] = None
) extends ProgressedEvent
| ovotech/comms-kafka-messages | modules/core/src/main/scala/com/ovoenergy/comms/model/sms/SMSProgressed.scala | Scala | mit | 1,815 |
package com.plasmaconduit.waterhouse
import java.util
/**
 * Hash-list construction and verification: content is split into fixed-size
 * blocks, each block is hashed, and a top hash is computed over the
 * concatenation of all block digests.
 */
object HashList {

  /** The top (root) hash together with the per-block digests it covers. */
  case class Container(top: HashDigest, list: Seq[HashDigest])

  /** One content block and its index (offset) within the hash list. */
  case class Block(content: Array[Byte], offset: Int)

  // Concatenates the raw bytes of all digests. Array.concat is linear and
  // yields an empty array for an empty list, unlike the previous
  // `map(_.bytes).reduce(_ ++ _)` which was quadratic and threw
  // UnsupportedOperationException on empty input.
  private def concatDigestBytes(list: Seq[HashDigest]): Array[Byte] =
    Array.concat(list.map(_.bytes): _*)

  /**
   * Verifies that `top` equals the digest (under `top`'s own algorithm) of
   * the concatenated block digests in `list`.
   */
  def verifyTopHash(top: HashDigest, list: Seq[HashDigest]): Boolean = {
    util.Arrays.equals(
      top.bytes,
      Hash.digest(top.algorithm, concatDigestBytes(list)).bytes
    )
  }

  /**
   * Verifies that `block`'s content hashes to the digest stored at its
   * offset in `list`. Returns false for an out-of-range offset.
   */
  def verifyBlock(block: Block, list: Seq[HashDigest]): Boolean = {
    list
      .lift(block.offset)
      .exists { hash =>
        util.Arrays.equals(hash.bytes, Hash.digest(hash.algorithm, block.content).bytes)
      }
  }

  /**
   * Builds a complete hash-list container for `content`, split into
   * `blockSize`-byte blocks and hashed with `algorithm`.
   */
  def generate(algorithm: HashAlgorithm, content: Array[Byte], blockSize: Int): Container = {
    val list = generateList(algorithm, content, blockSize)
    Container(generateTopHash(algorithm, list), list)
  }

  /** Hashes each `blockSize`-byte chunk of `content` (last chunk may be shorter). */
  def generateList(algorithm: HashAlgorithm, content: Array[Byte], blockSize: Int): Seq[HashDigest] = {
    content.grouped(blockSize).map(chunk => Hash.digest(algorithm, chunk)).toList
  }

  /** Computes the top hash over the concatenation of all block digests. */
  def generateTopHash(algorithm: HashAlgorithm, list: Seq[HashDigest]): HashDigest = {
    Hash.digest(algorithm, concatDigestBytes(list))
  }
}
package com.readclosely.model
/*
Copyright 2009-2010 Karl Pichotta
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import _root_.net.liftweb.mapper._
import _root_.net.liftweb.util._
import com.readclosely.util._
// A single inline comment ("annotation") attached to one sentence of a
// passage, persisted through Lift's Mapper ORM. The companion object below
// is the meta/singleton holding table-level configuration.
class Annotation extends LongKeyedMapper[Annotation] with IdPK{
  def getSingleton = Annotation // what's the "meta" server

  // Comment body, capped at Annotation.MAX_LEN characters.
  object commentText extends MappedTextarea(this, Annotation.MAX_LEN) {
    override def textareaRows = 10
    override def textareaCols = 50
  }

  // Vote score of this annotation.
  object score extends MappedInt(this)

  // Author and location: the writing user, the passage, and the sentence
  // index within that passage.
  object authorID extends MappedLongForeignKey(this, User)
  object passageID extends MappedLongForeignKey(this, Passage)
  object sentenceID extends MappedInt(this)

  //had to change since MappedDateTime sets time = 00:00:000
  //object submissionDatetime extends MappedDateTime(this)
  object submissionDateTimeMillis extends MappedLong(this)

  //may be null.
  object lastEditDateTimeMillis extends MappedLong(this)

  //@todo: add dependency
  // Id of the annotation this one replies to; indexed for thread lookups.
  object inReplyTo extends MappedLong(this) {
    override def dbIndexed_? = true
  }

  // Root annotation of the conversation this reply belongs to.
  object parentAnn extends MappedLong(this)

  /**
   *Int dictating which element in a "conversation" list this annotation appears.
   *Everything that is not a reply to something else will have replyOrder 0;
   *the first reply to a comment will have replyOrder 1, ....
   */
  object replyOrder extends MappedInt(this) {
    override def defaultValue = 0
  }

  // Cached count of direct replies to this annotation.
  object numReplies extends MappedInt(this) {
    override def defaultValue = 0
  }
}
/**
 * The singleton that has methods for accessing the database
 * (meta-mapper for [[Annotation]]: table configuration and indexes).
 */
object Annotation extends Annotation with LongKeyedMetaMapper[Annotation] {
  //override def dbTableName = "annotation" // define the DB table name

  // Maximum length of commentText, enforced by the textarea mapping above.
  val MAX_LEN = 2000

  //index for fast lookup on (psgID, sentID):
  override def dbIndexes = Index(IndexField(sentenceID), IndexField(passageID)) :: super.dbIndexes
}
| kpich/readclosely | src/main/scala/com/readclosely/model/Annotation.scala | Scala | apache-2.0 | 2,463 |
package com.typesafe.slick.testkit.tests
import com.typesafe.slick.testkit.util.{AsyncTest, JdbcTestDB}
import slick.jdbc.TransactionIsolation
// Exercises Slick's `transactionally` combinator: commit, rollback on
// failure, nested transactions, action fusion, and (where the backend
// supports it) transaction-isolation overrides. The `andThen` chain is
// deliberately sequential — each stage asserts the database state left by
// the previous one, so the ordering must not be changed.
class TransactionTest extends AsyncTest[JdbcTestDB] {
  import tdb.profile.api._

  def testTransactions = {
    // Minimal single-column table used as the transaction target.
    class T(tag: Tag) extends Table[Int](tag, "t") {
      def a = column[Int]("a", O.PrimaryKey)
      def * = a
    }
    val ts = TableQuery[T]
    // Reads the JDBC connection's current transaction isolation level.
    val getTI = SimpleDBIO(_.connection.getTransactionIsolation)
    // Marker exception so the test can distinguish deliberate aborts.
    class ExpectedException extends RuntimeException

    ts.schema.create andThen { // failed transaction
      (for {
        _ <- ts += 1
        _ <- ts.result.map(_ shouldBe Seq(1))
        // GetTransactionality reports (nesting depth, autocommit).
        _ <- GetTransactionality.map(_ shouldBe (1, false))
        _ = throw new ExpectedException
      } yield ()).transactionally.failed.map(_ should (_.isInstanceOf[ExpectedException]))
    } andThen {
      // The failed transaction must have rolled back the insert.
      ts.result.map(_ shouldBe Nil) andThen
        GetTransactionality.map(_ shouldBe (0, true))
    } andThen { // successful transaction
      (for {
        _ <- ts += 2
        _ <- ts.result.map(_ shouldBe Seq(2))
        _ <- GetTransactionality.map(_ shouldBe (1, false))
      } yield ()).transactionally
    } andThen {
      ts.result.map(_ shouldBe Seq(2))
    } andThen { // nested successful transaction
      (for {
        _ <- ts += 3
        _ <- ts.to[Set].result.map(_ shouldBe Set(2, 3))
        // Nesting two `transactionally` raises the reported depth to 2.
        _ <- GetTransactionality.map(_ shouldBe (2, false))
      } yield ()).transactionally.transactionally
    } andThen {
      ts.to[Set].result.map(_ shouldBe Set(2, 3))
    } andThen { // failed nested transaction
      (for {
        _ <- ts += 4
        _ <- ts.to[Set].result.map(_ shouldBe Set(2, 3, 4))
        _ <- GetTransactionality.map(_ shouldBe (2, false))
        _ = throw new ExpectedException
      } yield ()).transactionally.transactionally.failed.map(_ should (_.isInstanceOf[ExpectedException]))
    } andThen { // fused successful transaction
      (ts += 5).andThen(ts += 6).transactionally
    } andThen {
      ts.to[Set].result.map(_ shouldBe Set(2, 3, 5, 6)) andThen
        GetTransactionality.map(_ shouldBe (0, true))
    } andThen { // fused failed transaction
      // Inserting 7 then duplicate key 6 must roll back both inserts.
      (ts += 7).andThen(ts += 6).transactionally.failed
    } andThen {
      ts.to[Set].result.map(_ shouldBe Set(2, 3, 5, 6)) andThen
        GetTransactionality.map(_ shouldBe (0, true))
    } andThen { ifCap(tcap.transactionIsolation) {
      // Isolation level can be raised inside a scope but never lowered,
      // and is restored once the pinned session ends.
      (for {
        ti1 <- getTI
        _ <- (for {
          _ <- getTI.map(_ should(_ >= TransactionIsolation.ReadUncommitted.intValue))
          _ <- getTI.withTransactionIsolation(TransactionIsolation.Serializable).map(_ should(_ >= TransactionIsolation.Serializable.intValue))
          _ <- getTI.map(_ should(_ >= TransactionIsolation.ReadUncommitted.intValue))
        } yield ()).withTransactionIsolation(TransactionIsolation.ReadUncommitted)
        _ <- getTI.map(_ shouldBe ti1)
      } yield ()).withPinnedSession
    }}
  }
}
| jkutner/slick | slick-testkit/src/main/scala/com/typesafe/slick/testkit/tests/TransactionTest.scala | Scala | bsd-2-clause | 2,955 |
package io.skysail.core.model
/** Factory for [[EntityDescription]] instances derived from an [[EntityModel]]. */
object EntityDescription {
  /**
   * Builds a description by converting every field of the model into a
   * [[FieldDescription]]. (The previous implementation bound an unused
   * local `val fields`, now removed.)
   */
  def apply(model: EntityModel): EntityDescription =
    new EntityDescription(model.fields.map(m => FieldDescription(m)).toList)
}

/** Flat description of an entity: one [[FieldDescription]] per model field. */
case class EntityDescription(fields: List[FieldDescription])
package s2s
import org.apache.poi.ss.usermodel
import usermodel.{Row, Sheet, WorkbookFactory}
import java.nio.file.FileSystems
import java.io.FileInputStream
import Cells._
// Test fixture: loads cell A3 from a spreadsheet containing a simple SUM
// formula. All members are evaluated eagerly at object initialization.
object Noodle {
  val path = FileSystems.getDefault().getPath("src/test/resources/simple-sum.xlsx")
  val in = new FileInputStream(path.toFile)
  val sheet = WorkbookFactory.create(in).getSheetAt(0)
  // Row index 2, column index 0 == cell A3; asCell comes from Cells._.
  val cellA3: Cell = sheet.getRow(2).getCell(0).asCell
  // NOTE(review): if WorkbookFactory.create throws, the stream is never
  // closed — consider wrapping in try/finally.
  in.close
}
// Entry point: prints cell A3 and, when it holds a formula, parses the
// formula text with FormulaParser and prints the resulting parse tree.
object Main extends App {
  println(Noodle.cellA3)
  Noodle.cellA3 match {
    // FormulaCell extracts (formula string, cached value); only the
    // formula string f is parsed here.
    case FormulaCell(f, v) =>
      println(s"Parsing $f ...")
      val tree = new FormulaParser(f).FORMULA.run()
      println(tree)
    case _ => println("That wasn't a formula")
  }
} | d6y/s2s | src/main/scala/s2s/noodle.scala | Scala | apache-2.0 | 710 |
package es.alvsanand.spark_recommender.recommender
import com.mongodb.casbah.Imports._
import es.alvsanand.spark_recommender.model._
import es.alvsanand.spark_recommender.parser.DatasetIngestion
import es.alvsanand.spark_recommender.trainer.ALSTrainer
import es.alvsanand.spark_recommender.utils.{ESConfig, HashUtils, MongoConfig}
import org.elasticsearch.action.search.SearchResponse
import org.elasticsearch.client.Client
import org.elasticsearch.index.query.{MoreLikeThisQueryBuilder, QueryBuilders}
import org.elasticsearch.search.SearchHits
/**
* Created by alvsanand on 11/05/16.
*/
/**
 * Serves product/user recommendations from three sources: precomputed
 * collaborative-filtering (CF) results stored in MongoDB, content-based
 * similarity queries against Elasticsearch, and a hybrid blend of both.
 */
object RecommenderService {
  // Maximum number of recommendations returned by the public methods.
  private val MAX_RECOMMENDATIONS = 10
  // Weight of CF scores in the hybrid blend; content-based scores get
  // the complementary weight (1 - CF_RATING_FACTOR).
  private val CF_RATING_FACTOR = 0.8

  // Shared parser for the "recs" array of a Mongo recommendations document,
  // sorted by rating descending and truncated to maxItems. (The previous
  // parseUserRecs/parseProductRecs bodies were identical; both now delegate.)
  private def parseRecs(o: DBObject, maxItems: Int): List[Recommendation] =
    o.getAs[MongoDBList]("recs").getOrElse(MongoDBList())
      .map { case (o: DBObject) => parseRec(o) }
      .toList.sortBy(x => x.rating).reverse.take(maxItems)

  private def parseUserRecs(o: DBObject, maxItems: Int): List[Recommendation] =
    parseRecs(o, maxItems)

  private def parseProductRecs(o: DBObject, maxItems: Int): List[Recommendation] =
    parseRecs(o, maxItems)

  // Maps ES hits to recommendations: hit id -> productId, score -> rating.
  private def parseESResponse(response: SearchResponse): List[Recommendation] = {
    response.getHits match {
      case null => List[Recommendation]()
      case hits: SearchHits if hits.getTotalHits == 0 => List[Recommendation]()
      case hits: SearchHits if hits.getTotalHits > 0 => hits.getHits.map { hit => new Recommendation(hit.getId.toInt, hit.getScore) }.toList
    }
  }

  // Single recommendation entry: pid = product id, r = rating.
  private def parseRec(o: DBObject): Recommendation =
    new Recommendation(o.getAs[Int]("pid").getOrElse(0), o.getAs[Double]("r").getOrElse(0))

  // Precomputed CF recommendations for a product. Fix: maxItems was
  // previously ignored in favour of the hard-coded MAX_RECOMMENDATIONS.
  private def findProductCFRecs(productId: Int, maxItems: Int)(implicit mongoClient: MongoClient, mongoConf: MongoConfig): List[Recommendation] = {
    val listRecs = mongoClient(mongoConf.db)(ALSTrainer.PRODUCT_RECS_COLLECTION_NAME).findOne(MongoDBObject("id" -> productId)).getOrElse(MongoDBObject())
    parseProductRecs(listRecs, maxItems)
  }

  // Precomputed CF recommendations for a user. Same maxItems fix as above.
  private def findUserCFRecs(userId: Int, maxItems: Int)(implicit mongoClient: MongoClient, mongoConf: MongoConfig): List[Recommendation] = {
    val listRecs = mongoClient(mongoConf.db)(ALSTrainer.USER_RECS_COLLECTION_NAME).findOne(MongoDBObject("id" -> userId)).getOrElse(MongoDBObject())
    parseUserRecs(listRecs, maxItems)
  }

  // "More like this" similarity over the product's feature text. Fix:
  // the result size now honours maxItems instead of MAX_RECOMMENDATIONS.
  private def findContentBasedMoreLikeThisRecommendations(productId: Int, maxItems: Int)(implicit esClient: Client, esConf: ESConfig): List[Recommendation] = {
    val indexName = esConf.index
    val query = QueryBuilders.moreLikeThisQuery(Array("id"),
      Array("features"),
      Array(new MoreLikeThisQueryBuilder.Item(indexName, DatasetIngestion.PRODUCTS_INDEX_NAME, productId.toString)))
    parseESResponse(esClient.prepareSearch().setQuery(query).setSize(maxItems).execute().actionGet())
  }

  // Free-text search over product name and features.
  private def findContentBasedSearchRecommendations(text: String, maxItems: Int)(implicit esClient: Client, esConf: ESConfig): List[Recommendation] = {
    val indexName = esConf.index
    val query = QueryBuilders.multiMatchQuery(text, "name", "features")
    parseESResponse(esClient.prepareSearch().setIndices(indexName).setTypes(DatasetIngestion.PRODUCTS_INDEX_NAME).setQuery(query).setSize(maxItems).execute().actionGet())
  }

  // Blends CF and content-based results: each side's rating is scaled by
  // its factor and the union is ranked by the blended score.
  private def findHybridRecommendations(productId: Int, maxItems: Int, cfRatingFactor: Double)(implicit mongoClient: MongoClient, mongoConf: MongoConfig, esClient: Client, esConf: ESConfig): List[HybridRecommendation] = {
    val cbRatingFactor = 1 - cfRatingFactor
    val cfRecs = findProductCFRecs(productId, ALSTrainer.MAX_RECOMMENDATIONS)
      .map(x => new HybridRecommendation(x.productId, x.rating, x.rating * cfRatingFactor))
    val cbRecs = findContentBasedMoreLikeThisRecommendations(productId, ALSTrainer.MAX_RECOMMENDATIONS)
      .map(x => new HybridRecommendation(x.productId, x.rating, x.rating * cbRatingFactor))
    val finalRecs = cfRecs ::: cbRecs
    finalRecs.sortBy(x => -x.hybridRating).take(maxItems)
  }

  def getCollaborativeFilteringRecommendations(request: ProductRecommendationRequest)(implicit mongoClient: MongoClient, mongoConf: MongoConfig): List[Recommendation] =
    findProductCFRecs(request.productId, MAX_RECOMMENDATIONS)

  def getCollaborativeFilteringRecommendations(request: UserRecommendationRequest)(implicit mongoClient: MongoClient, mongoConf: MongoConfig): List[Recommendation] =
    findUserCFRecs(request.userId, MAX_RECOMMENDATIONS)

  def getContentBasedMoreLikeThisRecommendations(request: ProductRecommendationRequest)(implicit esClient: Client, esConf: ESConfig): List[Recommendation] =
    findContentBasedMoreLikeThisRecommendations(request.productId, MAX_RECOMMENDATIONS)

  def getContentBasedSearchRecommendations(request: SearchRecommendationRequest)(implicit esClient: Client, esConf: ESConfig): List[Recommendation] =
    findContentBasedSearchRecommendations(request.text, MAX_RECOMMENDATIONS)

  def getHybridRecommendations(request: ProductHybridRecommendationRequest)(implicit mongoClient: MongoClient, mongoConf: MongoConfig, esClient: Client, esConf: ESConfig): List[HybridRecommendation] =
    findHybridRecommendations(request.productId, MAX_RECOMMENDATIONS, CF_RATING_FACTOR)
}
| alvsanand/spark_recommender | src/main/scala/es/alvsanand/spark_recommender/recommender/RecommenderService.scala | Scala | apache-2.0 | 5,432 |
package features
import com.github.nscala_time.time.DurationBuilder
import com.github.nscala_time.time.Imports._
import org.joda.time.DateTime
import org.loudkicks.console._
import org.loudkicks.service.{PostDistributor, InMemoryWalls, InMemoryTimeLines, TestTime}
// Test harness mixin: wires the console application against in-memory
// stores and a controllable clock so features can assert time-dependent
// behaviour.
trait WithApp {
  // Mutable test clock shared by the test and the application under test.
  val time = TestTime()
  val thePresent = new DateTime

  // Moves the test clock to the given instant; returns this for chaining
  // (e.g. `in(thePast(2.hours)).parse(...)`).
  def in(when: DateTime) = {
    time.now = when
    this
  }

  // An instant `elapsed` before the fixed reference point thePresent.
  def thePast(elapsed: DurationBuilder): DateTime = thePresent - elapsed

  // Application wiring (cake pattern): every component shares the same
  // controllable time source; lazy vals resolve the dependency order.
  private val app = new ConsoleParser with AllParsers {
    lazy val timeSource = WithApp.this.time
    lazy val timeLines = InMemoryTimeLines(timeSource)
    lazy val walls = InMemoryWalls(timeLines)
    lazy val posts = PostDistributor(Seq(timeLines, walls), timeSource)
  }

  // Eta-expanded so tests can call `parse("command")` directly.
  val parse = app.parse _
}
| timothygordon32/loudkicks | src/test/scala/features/WithApp.scala | Scala | apache-2.0 | 788 |
package scala.util.control {
  // Library-based replacement for non-local `return` (compiler test file):
  // `returning` delimits a scope and `throwReturn` unwinds to it by
  // throwing a ControlThrowable carrying the result.
  object NonLocalReturns {
    class ReturnThrowable[T] extends ControlThrowable {
      // Result slot filled just before the throwable is thrown.
      private var myResult: T = compiletime.uninitialized
      def throwReturn(result: T): Nothing = {
        myResult = result
        throw this
      }
      def result: T = myResult
    }

    // Non-locally returns `result` to the enclosing `returning` block,
    // located through the implicit returner.
    def throwReturn[T](result: T)(implicit returner: ReturnThrowable[T]): Nothing =
      returner.throwReturn(result)

    // Runs `op`; catches only the ReturnThrowable allocated here (checked
    // by reference equality) so unrelated control throwables propagate.
    def returning[T](op: ReturnThrowable[T] ?=> T): T = {
      val returner = new ReturnThrowable[T]
      try op(using returner)
      catch {
        case ex: ReturnThrowable[_] =>
          if (ex `eq` returner) ex.result.asInstanceOf[T] else throw ex
      }
    }
  }
}
// Exercises NonLocalReturns: early exit from plain and yielding for-loops.
object Test extends App {
  import scala.util.control.NonLocalReturns.*
  import scala.collection.mutable.ListBuffer

  // Early exit with `true` as soon as elem is found; falls through to false.
  def has(xs: List[Int], elem: Int) =
    returning {
      for (x <- xs)
        if (x == elem) throwReturn(true)
      false
    }

  // Collects elements before the first occurrence of elem; the non-local
  // return short-circuits out of the yielding loop with the buffer so far.
  def takeUntil(xs: List[Int], elem: Int) =
    returning {
      var buf = new ListBuffer[Int]
      for (x <- xs)
      yield {
        if (x == elem) throwReturn(buf.toList)
        buf += x
        x
      }
    }

  assert(has(List(1, 2, 3), 2))
  assert(takeUntil(List(1, 2, 3), 3) == List(1, 2))
} | dotty-staging/dotty | tests/run/returning.scala | Scala | apache-2.0 | 1,222 |
package domain.accesspass
import com.softwaremill.tagging._
import domain.SchedulerAggregate
import akka.util.ByteString
import akka.actor._
import org.bouncycastle.openpgp._
import dit4c.protobuf.tokens.ClusterAccessPass
import scala.util._
import java.time.Instant
import domain.BaseResponse
import org.apache.commons.lang3.CharUtils
import java.util.Base64
import dit4c.common.KeyHelpers.PGPFingerprint
import akka.event.LoggingReceive
// Result ADT for a single access-pass verification attempt.
object PassVerificationWorker {
  trait Result extends BaseResponse
  trait GoodResult extends Result
  trait BadResult extends Result

  // Signature verified: carries the decoded pass, the latest expiry among
  // valid signatures (None = non-expiring), and the signing key fingerprint.
  case class ValidPass(
      cap: ClusterAccessPass,
      expiry: Option[Instant],
      signedBy: PGPFingerprint) extends GoodResult
  // All signatures on the pass have expired.
  case class ExpiredPass(
      cap: ClusterAccessPass) extends BadResult
  // The pass could not be verified at all; reason is human-readable.
  case class UnverifiablePass(
      reason: String) extends BadResult
}
// Short-lived actor that verifies one PGP-signed ClusterAccessPass:
// fetches the scheduler's public keys, checks the signature(s), replies to
// its parent with a PassVerificationWorker.Result, then stops itself.
class PassVerificationWorker(
    scheduler: ActorRef @@ SchedulerAggregate,
    signedData: ByteString)
    extends Actor with ActorLogging {
  import PassVerificationWorker._
  import dit4c.common.KeyHelpers._

  override def preStart = {
    // Kick off verification by requesting the scheduler's current keys.
    scheduler ! SchedulerAggregate.GetKeys
  }

  val receive: Receive = LoggingReceive {
    case SchedulerAggregate.CurrentKeys(keyBlock) =>
      context.parent ! resultFromKeyBlock(keyBlock)
      context.stop(self)
    case SchedulerAggregate.NoKeysAvailable =>
      context.parent ! UnverifiablePass("No keys are available to do validation")
      context.stop(self)
  }

  // Resolves which signing keys the signature references, then verifies.
  def resultFromKeyBlock(primaryKeyBlock: String): Result =
    keysForVerification(primaryKeyBlock) match {
      case Left(msg) =>
        UnverifiablePass(msg)
      case Right(Nil) =>
        UnverifiablePass("No known key IDs found in signature")
      case Right(keys) =>
        resultFromKeys(keys)
    }

  // Verifies the signed data and classifies the outcome.
  def resultFromKeys(keys: List[PGPPublicKey]): Result =
    verify(keys) match {
      case Right((cap, expiries)) if expiries.isEmpty =>
        ExpiredPass(cap)
      case Right((cap, expiries)) =>
        // Pick the signature with the longest expiry
        // (None means non-expiring and always wins).
        val (signedBy, expires) =
          expiries.reduce[(PGPPublicKey, Option[Instant])] {
            case (p1 @ (k1, None), _) => p1
            case (_, p2 @ (k2, None)) => p2
            case (p1 @ (k1, Some(e1)), p2 @ (k2, Some(e2))) =>
              if (e1.isBefore(e2)) p2
              else p1
          }
        ValidPass(cap, expires, signedBy.fingerprint)
      case Left(msg) =>
        UnverifiablePass(s"Verification failed: $msg")
    }

  // Parses the armored key ring and keeps only the signing keys whose
  // IDs actually appear in the signature block.
  def keysForVerification(primaryKeyBlock: String): Either[String, List[PGPPublicKey]] =
    parseArmoredPublicKeyRing(primaryKeyBlock)
      .right.flatMap { kr =>
        extractSignatureKeyIds(signedData)
          .right
          .map((_, kr))
      }
      .right.map { case (keyIds: List[Long], keys: PGPPublicKeyRing) =>
        // Only test with signing keys found in signature block
        keys.signingKeys.filter(k => keyIds.contains(k.getKeyID))
      }

  // Verifies the signature and decodes the protobuf payload, converting
  // parse failures into a Left with the error message.
  def verify(keys: List[PGPPublicKey]): Either[String, (ClusterAccessPass, Map[PGPPublicKey, Option[Instant]])] =
    verifyData(signedData, keys)
      .right.flatMap { case (data: ByteString, expiries) =>
        // Extract token data
        Try(ClusterAccessPass.parseFrom(data.toArray))
          .transform[Either[String, ClusterAccessPass]](
            { cap => Success(Right(cap)) },
            { e => Success(Left(e.getMessage)) }
          )
          .get
          .right.map((_, expiries))
      }

  /**
   * Get a safe form of the signed data for logging.
   * Falls back to base64 when the bytes are not printable ASCII text.
   */
  protected lazy val dataForLog: String =
    Try(signedData.decodeString("UTF-8"))
      .filter(_.forall(CharUtils.isAsciiPrintable))
      .getOrElse(Base64.getEncoder.encodeToString(signedData.toArray))
} | dit4c/dit4c | dit4c-portal/app/domain/accesspass/PassVerificationWorker.scala | Scala | mit | 3,766 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.avocado.util
import org.scalatest.FunSuite
import scala.math.log
/** Checks natural-log error probabilities convert to the expected phred scores. */
class LogPhredSuite extends FunSuite {

  // Allowed deviation from the exact phred score.
  private val tolerance = 0.001

  test("convert log error probabilities to phred scores") {
    // P(error) = 0.1 maps to phred 10.
    assert(math.abs(LogPhred.logErrorToPhred(log(0.1)) - 10.0) < tolerance)
    // P(error) = 1e-5 maps to phred 50.
    assert(math.abs(LogPhred.logErrorToPhred(log(0.00001)) - 50.0) < tolerance)
  }
}
| heuermh/bdg-avocado | avocado-core/src/test/scala/org/bdgenomics/avocado/util/LogPhredSuite.scala | Scala | apache-2.0 | 1,197 |
// Copyright 2017 EPFL DATA Lab (data.epfl.ch)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package squid
package feature
import utils._
// Tests Squid's handling of type representations: unquoting existential
// CodeType values into quasiquotes, ClassTag derivation, and inserting
// types via implicit conversion.
class TypeImplicits extends MyFunSuite {
  import TestDSL.Predef._

  test("Using Existential Type Representations") {
    val typs = List[CodeType[_]](codeTypeOf[Int], codeTypeOf[Double], codeTypeOf[String])

    // Each existential CodeType can be unquoted and also referenced by its
    // bound type variable `t` inside the quasiquote.
    assert((typs map {
      case typ: CodeType[t] =>
        code"Option.empty[($typ,t)]"
    }, List(
      code"Option.empty[(Int,Int)]",
      code"Option.empty[(Double,Double)]",
      code"Option.empty[(String,String)]"
    )).zipped forall (_ =~= _))

    typs match {
      case (t0:CodeType[t1]) :: (t1:CodeType[t0]) :: _ :: Nil => // makes sure resolution is no more based on names
        code"Map.empty[$t0,$t1]" eqt code"Map.empty[Int,Double]"
        code"Map.empty[ t0, t1]" eqt code"Map.empty[Double,Int]"
      case _ => fail
    }

    // Note how here we get an existential type `_$1` inserted, but the QQ now keeps track of inserted types and finds the corresponding tree
    code"Map.empty[${typs.head},${typs.tail.head}]" eqt code"Map.empty[Int,Double]"
  }

  test("ClassTag") {
    import scala.reflect.{classTag, ClassTag}

    assert(codeTypeOf[Int].classTag == classTag[Int])
    // Equal despite differing element types: ClassTags reflect erasure.
    assert(codeTypeOf[List[Int]].classTag == classTag[List[String]])

    // Building a runtime ClassTag two ways — from the runtime class constant
    // and from the precomputed classTagCode — must yield equivalent code.
    def foo[T:CodeType](e: ClosedCode[T]) = {
      val a = code"Array.fill(10)($e)(ClassTag(${Const(codeTypeOf[T].runtimeClass)}))"
      val b = code"Array.fill(10)($e)(${codeTypeOf[T].classTagCode})"
      a eqt b
      a
    }
    val e = foo(c"42")
    assert(e.run.getClass == classOf[Array[Int]])
    assert(e.compile.getClass == classOf[Array[Int]])
  }

  import scala.language.implicitConversions

  val strCodeType = codeTypeOf[String]
  // Any String implicitly converts to the String CodeType below, letting
  // plain string literals be unquoted as types.
  implicit def toCode(str: String) = strCodeType

  test("Inserting Types by Implicit Conversion") {
    same(codeTypeOf("test"), strCodeType)

    eqt(code"Option.empty[${"test"}]", code"Option.empty[String]")

    assertDoesNotCompile(""" code"Option.empty[${42}]" """)
    // ^ Quasiquote Error: Cannot unquote object of type 'Int(42)' as a type: type mismatch;
    //  found   : Int(42)
    //  required: squid.TestDSL.CodeType[?]
    //       code"Option.empty[${42}]"
  }
}
| epfldata/squid | src/test/scala/squid/feature/TypeImplicits.scala | Scala | apache-2.0 | 2,852 |
/*
* Copyright (c) 2012 Miles Sabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shapeless.examples
/**
* Searching arbitrarily nested case classes, tuples, and lists.
*
* @author Travis Brown
*/
object DeepSearchExamples extends App {
  import shapeless._

  // Evidence that an A is something that we can look around in for Qs that
  // satisfy some predicate.
  trait Searchable[A, Q] {
    def find(p: Q => Boolean)(a: A): Option[Q]
  }

  // Base case: the value itself is a candidate.
  implicit def elemSearchable[A] = new Searchable[A, A] {
    def find(p: A => Boolean)(a: A) = if (p(a)) Some(a) else None
  }

  // Search each element of a list; first match wins.
  implicit def listSearchable[A, Q](implicit s: Searchable[A, Q]) =
    new Searchable[List[A], Q] {
      def find(p: Q => Boolean)(a: List[A]) = a.flatMap(s.find(p)).headOption
    }

  implicit def hnilSearchable[Q] = new Searchable[HNil, Q] {
    def find(p: Q => Boolean)(a: HNil) = None
  }

  // HList case. The `= null` default makes the head instance optional:
  // resolution succeeds even when the head type cannot contain a Q, and
  // Option(hs) guards the null at use time.
  implicit def hlistSearchable[H, T <: HList, Q](
    implicit hs: Searchable[H, Q] = null, ts: Searchable[T, Q]
  ) = new Searchable[H :: T, Q] {
    def find(p: Q => Boolean)(a: H :: T) =
      Option(hs).flatMap(_.find(p)(a.head)) orElse ts.find(p)(a.tail)
  }

  // Case classes and tuples: convert to an HList via Generic and recurse.
  implicit def hlistishSearchable[A, L <: HList, Q](
    implicit gen: GenericAux[A, L], s: Searchable[L, Q]
  ) = new Searchable[A, Q] {
    def find(p: Q => Boolean)(a: A) = s.find(p)(gen to a)
  }

  // Enrichment providing the deepFind syntax on any searchable value.
  case class SearchableWrapper[A](a: A) {
    def deepFind[Q](p: Q => Boolean)(implicit s: Searchable[A, Q]) =
      s.find(p)(a)
  }
  implicit def wrapSearchable[A](a: A) = SearchableWrapper(a)

  // An example predicate:
  val p = (_: String) endsWith "o"

  // On strings:
  assert("hello".deepFind(p) == Some("hello"))
  assert("hell".deepFind(p) == None)

  // On lists:
  assert(List("yes", "maybe", "no").deepFind(p) == Some("no"))

  // On arbitrarily sized and nested tuples:
  assert(("yes", "maybe", ("no", "why")).deepFind(p) == Some("no"))
  assert(("a", ("b", "c"), "d").deepFind(p) == None)

  // On tuples with non-string elements:
  assert((1, "two", ('three, '4')).deepFind(p) == Some("two"))

  // Search the same tuple for a specific character instead:
  assert((1, "two", ('three, '4')).deepFind((_: Char) == 52) == Some('4'))

  // Our case class:
  case class Foo(a: String, b: String, c: List[String])

  // And it works:
  assert(Foo("four", "three", List("two", "one")).deepFind(p) == Some("two"))
  assert(Foo("a", "b", "c" :: Nil).deepFind(p) == None)
}
| non/shapeless | examples/src/main/scala/shapeless/examples/deepsearch.scala | Scala | apache-2.0 | 2,952 |
package uk.gov.gds.ier.transaction.ordinary
// Mixin for components that require access to the ordinary-route controllers.
trait WithOrdinaryControllers {
  val ordinary: OrdinaryControllers
}
| michaeldfallen/ier-frontend | app/uk/gov/gds/ier/transaction/ordinary/WithOrdinaryControllers.scala | Scala | mit | 115 |
/*
* Copyright (c) 2016 SnappyData, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package org.apache.spark.sql.execution.columnar.encoding
import java.math.{BigDecimal, BigInteger}
import org.apache.spark.sql.catalyst.util.{SerializedArray, SerializedMap, SerializedRow}
import org.apache.spark.sql.collection.Utils
import org.apache.spark.sql.execution.columnar.encoding.ColumnEncoding.littleEndian
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.Platform
import org.apache.spark.unsafe.types.{CalendarInterval, UTF8String}
// Marker for the plain (no-compression) column encoding.
trait Uncompressed extends ColumnEncoding {
  // Type id 0 identifies the uncompressed encoding in stored column data.
  final def typeId: Int = 0

  // Every data type can be stored without compression.
  final def supports(dataType: DataType): Boolean = true
}
// Concrete decoder/encoder pairs: the *Nullable variants layer null-tracking
// behaviour (NullableDecoder/NullableEncoder) over the shared uncompressed
// read/write logic.
final class UncompressedDecoder
    extends UncompressedDecoderBase with NotNullDecoder

final class UncompressedDecoderNullable
    extends UncompressedDecoderBase with NullableDecoder

final class UncompressedEncoder
    extends NotNullEncoder with UncompressedEncoderBase

final class UncompressedEncoderNullable
    extends NullableEncoder with UncompressedEncoderBase
// Reads values sequentially from an uncompressed column buffer via unsafe
// Platform accesses. Cursor protocol: fixed-width types pre-adjust the
// cursor in initializeCursor so each next* call is a plain increment;
// variable-width types (String/Binary/Decimal/Array/Map/Struct) return 0
// from initializeCursor and the first next* call detects cursor == 0 and
// starts from baseCursor instead.
abstract class UncompressedDecoderBase
    extends ColumnDecoder with Uncompressed {

  // Start of variable-width data; set by initializeCursor for those types.
  protected final var baseCursor = 0L

  override protected def initializeCursor(columnBytes: AnyRef, cursor: Long,
      field: StructField): Long = {
    // adjust cursor for the first next call to avoid extra checks in next
    Utils.getSQLDataType(field.dataType) match {
      case BooleanType | ByteType => cursor - 1
      case ShortType => cursor - 2
      case IntegerType | FloatType | DateType => cursor - 4
      case LongType | DoubleType | TimestampType => cursor - 8
      case CalendarIntervalType => cursor - 12
      // decimals small enough to fit an unscaled Long are fixed-width
      case d: DecimalType if d.precision <= Decimal.MAX_LONG_DIGITS =>
        cursor - 8
      case StringType | BinaryType | _: DecimalType |
          _: ArrayType | _: MapType | _: StructType =>
        // these will check for zero value of cursor and adjust in first next
        baseCursor = cursor
        0L
      case NullType => 0L // no role of cursor for NullType
      case t => throw new UnsupportedOperationException(s"Unsupported type $t")
    }
  }

  // ---- fixed-width primitives: next* advances by the type's byte width,
  // ---- read* decodes at the given cursor ----

  override def nextBoolean(columnBytes: AnyRef, cursor: Long): Long =
    cursor + 1

  override def readBoolean(columnBytes: AnyRef, cursor: Long): Boolean =
    Platform.getByte(columnBytes, cursor) == 1

  override def nextByte(columnBytes: AnyRef, cursor: Long): Long =
    cursor + 1

  override def readByte(columnBytes: AnyRef, cursor: Long): Byte =
    Platform.getByte(columnBytes, cursor)

  override def nextShort(columnBytes: AnyRef, cursor: Long): Long =
    cursor + 2

  override def readShort(columnBytes: AnyRef, cursor: Long): Short =
    ColumnEncoding.readShort(columnBytes, cursor)

  override def nextInt(columnBytes: AnyRef, cursor: Long): Long =
    cursor + 4

  override def readInt(columnBytes: AnyRef, cursor: Long): Int =
    ColumnEncoding.readInt(columnBytes, cursor)

  override def nextLong(columnBytes: AnyRef, cursor: Long): Long =
    cursor + 8

  override def readLong(columnBytes: AnyRef, cursor: Long): Long =
    ColumnEncoding.readLong(columnBytes, cursor)

  override def nextFloat(columnBytes: AnyRef, cursor: Long): Long =
    cursor + 4

  override def readFloat(columnBytes: AnyRef, cursor: Long): Float =
    ColumnEncoding.readFloat(columnBytes, cursor)

  override def nextDouble(columnBytes: AnyRef, cursor: Long): Long =
    cursor + 8

  override def readDouble(columnBytes: AnyRef, cursor: Long): Double =
    ColumnEncoding.readDouble(columnBytes, cursor)

  // Fixed-width decimal stored as its unscaled Long value.
  override def nextLongDecimal(columnBytes: AnyRef, cursor: Long): Long =
    cursor + 8

  override def readLongDecimal(columnBytes: AnyRef, precision: Int,
      scale: Int, cursor: Long): Decimal =
    Decimal.createUnsafe(ColumnEncoding.readLong(columnBytes, cursor),
      precision, scale)

  // ---- variable-width values: stored as a 4-byte size prefix followed by
  // ---- the payload; next* reads the size to skip, first call (cursor == 0)
  // ---- jumps to baseCursor instead ----

  override def nextDecimal(columnBytes: AnyRef, cursor: Long): Long = {
    // cursor == 0 indicates first call so don't increment cursor
    if (cursor != 0) {
      val size = ColumnEncoding.readInt(columnBytes, cursor)
      cursor + 4 + size
    } else {
      baseCursor
    }
  }

  override def readDecimal(columnBytes: AnyRef, precision: Int,
      scale: Int, cursor: Long): Decimal = {
    // large decimal: unscaled value serialized as big-endian bytes
    Decimal.apply(new BigDecimal(new BigInteger(readBinary(columnBytes,
      cursor)), scale), precision, scale)
  }

  override def nextUTF8String(columnBytes: AnyRef, cursor: Long): Long = {
    // cursor == 0 indicates first call so don't increment cursor
    if (cursor != 0) {
      val size = ColumnEncoding.readInt(columnBytes, cursor)
      cursor + 4 + size
    } else {
      baseCursor
    }
  }

  override def readUTF8String(columnBytes: AnyRef, cursor: Long): UTF8String =
    ColumnEncoding.readUTF8String(columnBytes, cursor)

  // Intervals are fixed-width: 4-byte months + 8-byte microseconds.
  override def nextInterval(columnBytes: AnyRef, cursor: Long): Long =
    cursor + 12

  override def readInterval(columnBytes: AnyRef,
      cursor: Long): CalendarInterval = {
    val months = ColumnEncoding.readInt(columnBytes, cursor)
    val micros = ColumnEncoding.readLong(columnBytes, cursor + 4)
    new CalendarInterval(months, micros)
  }

  override def nextBinary(columnBytes: AnyRef, cursor: Long): Long = {
    // cursor == 0 indicates first call so don't increment cursor
    if (cursor != 0) {
      val size = ColumnEncoding.readInt(columnBytes, cursor)
      cursor + 4 + size
    } else {
      baseCursor
    }
  }

  override def readBinary(columnBytes: AnyRef, cursor: Long): Array[Byte] = {
    val size = ColumnEncoding.readInt(columnBytes, cursor)
    val b = new Array[Byte](size)
    Platform.copyMemory(columnBytes, cursor + 4, b,
      Platform.BYTE_ARRAY_OFFSET, size)
    b
  }

  override def nextArray(columnBytes: AnyRef, cursor: Long): Long = {
    // cursor == 0 indicates first call so don't increment cursor
    if (cursor != 0) {
      val size = ColumnEncoding.readInt(columnBytes, cursor)
      // size includes the 4 bytes for the size itself
      cursor + size
    } else {
      baseCursor
    }
  }

  override def readArray(columnBytes: AnyRef, cursor: Long): SerializedArray = {
    // 4 bytes for size and then 4 bytes for number of elements
    val result = new SerializedArray(8)
    val size = ColumnEncoding.readInt(columnBytes, cursor)
    result.pointTo(columnBytes, cursor, size)
    result
  }

  override def nextMap(columnBytes: AnyRef, cursor: Long): Long = {
    // cursor == 0 indicates first call so don't increment cursor
    if (cursor != 0) {
      var position = cursor
      // first read is of keyArraySize and second of valueArraySize
      position += ColumnEncoding.readInt(columnBytes, position)
      position + ColumnEncoding.readInt(columnBytes, position)
    } else {
      baseCursor
    }
  }

  override def readMap(columnBytes: AnyRef, cursor: Long): SerializedMap = {
    val result = new SerializedMap
    result.pointTo(columnBytes, cursor)
    result
  }

  // Structs are laid out like arrays (size-prefixed), so skipping is shared.
  override def nextStruct(columnBytes: AnyRef, cursor: Long): Long =
    nextArray(columnBytes, cursor)

  override def readStruct(columnBytes: AnyRef, numFields: Int,
      cursor: Long): SerializedRow = {
    // creates a SerializedRow with skipBytes = 4 and does not change the
    // cursor itself to get best 8-byte word alignment (the 4 bytes are
    // subsumed in the null bit mask at the start)
    val result = new SerializedRow(4, numFields)
    val size = ColumnEncoding.readInt(columnBytes, cursor)
    result.pointTo(columnBytes, cursor, size)
    result
  }
}
/**
 * Encoder side of the uncompressed column format. Values are written in
 * their plain binary layout via Platform/ColumnEncoding, the backing buffer
 * is grown with expand() when capacity would be exceeded, and min/max
 * statistics are fed as values are written. The *Unchecked variants assume
 * the caller has already ensured sufficient capacity.
 */
trait UncompressedEncoderBase extends ColumnEncoder with Uncompressed {

  /** Writes a boolean as one byte (1 = true, 0 = false); returns the cursor
   * just past the written byte. */
  override def writeBoolean(cursor: Long, value: Boolean): Long = {
    var position = cursor
    val b: Byte = if (value) 1 else 0
    if (position + 1 > columnEndPosition) {
      position = expand(position, 1)
    }
    Platform.putByte(columnBytes, position, b)
    updateLongStats(b)
    position + 1
  }

  /** Writes a single byte and updates the integer statistics. */
  override def writeByte(cursor: Long, value: Byte): Long = {
    var position = cursor
    if (position + 1 > columnEndPosition) {
      position = expand(position, 1)
    }
    Platform.putByte(columnBytes, position, value)
    updateLongStats(value)
    position + 1
  }

  /** Writes a short (2 bytes) and updates the integer statistics. */
  override def writeShort(cursor: Long, value: Short): Long = {
    var position = cursor
    if (position + 2 > columnEndPosition) {
      position = expand(position, 2)
    }
    ColumnEncoding.writeShort(columnBytes, position, value)
    updateLongStats(value)
    position + 2
  }

  /** Writes an int (4 bytes) and updates the integer statistics. */
  override def writeInt(cursor: Long, value: Int): Long = {
    var position = cursor
    if (position + 4 > columnEndPosition) {
      position = expand(position, 4)
    }
    ColumnEncoding.writeInt(columnBytes, position, value)
    updateLongStats(value)
    position + 4
  }

  /** Writes a long (8 bytes) and updates the integer statistics. */
  override def writeLong(cursor: Long, value: Long): Long = {
    var position = cursor
    if (position + 8 > columnEndPosition) {
      position = expand(position, 8)
    }
    ColumnEncoding.writeLong(columnBytes, position, value)
    updateLongStats(value)
    position + 8
  }

  /** Writes a float (4 bytes). NaN inputs are normalized to the canonical
   * Float.NaN bit pattern and excluded from the min/max statistics
   * (NOTE(review): the stats skip for NaN looks deliberate — confirm). The
   * big-endian branch byte-reverses the raw bits to keep on-disk layout
   * little-endian. */
  override def writeFloat(cursor: Long, value: Float): Long = {
    var position = cursor
    if (position + 4 > columnEndPosition) {
      position = expand(position, 4)
    }
    if (java.lang.Float.isNaN(value)) {
      if (littleEndian) Platform.putFloat(columnBytes, position, Float.NaN)
      else Platform.putInt(columnBytes, position,
        java.lang.Integer.reverseBytes(java.lang.Float.floatToIntBits(Float.NaN)))
    } else {
      if (littleEndian) Platform.putFloat(columnBytes, position, value)
      else Platform.putInt(columnBytes, position,
        java.lang.Integer.reverseBytes(java.lang.Float.floatToIntBits(value)))
      updateDoubleStats(value.toDouble)
    }
    position + 4
  }

  /** Writes a double (8 bytes); same NaN normalization and stats-skip policy
   * as writeFloat. */
  override def writeDouble(cursor: Long, value: Double): Long = {
    var position = cursor
    if (position + 8 > columnEndPosition) {
      position = expand(position, 8)
    }
    if (java.lang.Double.isNaN(value)) {
      if (littleEndian) Platform.putDouble(columnBytes, position, Double.NaN)
      else Platform.putLong(columnBytes, position,
        java.lang.Long.reverseBytes(java.lang.Double.doubleToLongBits(Double.NaN)))
    } else {
      if (littleEndian) Platform.putDouble(columnBytes, position, value)
      else Platform.putLong(columnBytes, position,
        java.lang.Long.reverseBytes(java.lang.Double.doubleToLongBits(value)))
      updateDoubleStats(value)
    }
    position + 8
  }

  /** Writes a decimal that fits in a long as its unscaled long. If the value
   * cannot be adjusted to the column's precision/scale, the field is marked
   * null instead and the cursor is returned unchanged. */
  override def writeLongDecimal(cursor: Long, value: Decimal,
      ordinal: Int, precision: Int, scale: Int): Long = {
    if ((value.precision != precision || value.scale != scale) &&
        !value.changePrecision(precision, scale)) {
      writeIsNull(ordinal)
      cursor
    } else {
      writeLong(cursor, value.toUnscaledLong)
    }
  }

  /** Writes an arbitrary-precision decimal as the length-prefixed big-endian
   * bytes of its unscaled BigInteger; nulls the field if the value cannot be
   * adjusted to the column's precision/scale. */
  override def writeDecimal(cursor: Long, value: Decimal,
      ordinal: Int, precision: Int, scale: Int): Long = {
    if ((value.precision != precision || value.scale != scale) &&
        !value.changePrecision(precision, scale)) {
      writeIsNull(ordinal)
      cursor
    } else {
      val b = value.toJavaBigDecimal.unscaledValue.toByteArray
      updateDecimalStats(value)
      writeBinary(cursor, b)
    }
  }

  /** Writes a CalendarInterval as a 4-byte month count followed by an 8-byte
   * microsecond count (the layout readInterval expects). */
  override def writeInterval(cursor: Long, value: CalendarInterval): Long = {
    val position = writeInt(cursor, value.months)
    writeLong(position, value.microseconds)
  }

  /** Writes a UTF8 string as [4-byte length][bytes], cloning it into the
   * string statistics first. */
  override def writeUTF8String(cursor: Long, value: UTF8String): Long = {
    var position = cursor
    val size = value.numBytes
    if (position + size + 4 > columnEndPosition) {
      position = expand(position, size + 4)
    }
    updateStringStatsClone(value)
    ColumnEncoding.writeUTF8String(columnBytes, position, value, size)
  }

  /** Writes a byte array as [4-byte length][bytes]. */
  override def writeBinary(cursor: Long, value: Array[Byte]): Long = {
    var position = cursor
    val size = value.length
    if (position + size + 4 > columnEndPosition) {
      position = expand(position, size + 4)
    }
    // cache the field in a local since expand() may have replaced the buffer
    val columnBytes = this.columnBytes
    ColumnEncoding.writeInt(columnBytes, position, size)
    position += 4
    Platform.copyMemory(value, Platform.BYTE_ARRAY_OFFSET, columnBytes,
      position, size)
    position + size
  }

  /** Like writeBoolean but without the capacity check or stats update. */
  override def writeBooleanUnchecked(cursor: Long, value: Boolean): Long = {
    val b: Byte = if (value) 1 else 0
    Platform.putByte(columnBytes, cursor, b)
    cursor + 1
  }

  /** Like writeByte but without the capacity check or stats update. */
  override def writeByteUnchecked(cursor: Long, value: Byte): Long = {
    Platform.putByte(columnBytes, cursor, value)
    cursor + 1
  }

  /** Like writeShort but without the capacity check or stats update. */
  override def writeShortUnchecked(cursor: Long, value: Short): Long = {
    ColumnEncoding.writeShort(columnBytes, cursor, value)
    cursor + 2
  }

  /** Like writeInt but without the capacity check or stats update. */
  override def writeIntUnchecked(cursor: Long, value: Int): Long = {
    ColumnEncoding.writeInt(columnBytes, cursor, value)
    cursor + 4
  }

  /** Like writeLong but without the capacity check or stats update. */
  override def writeLongUnchecked(cursor: Long, value: Long): Long = {
    ColumnEncoding.writeLong(columnBytes, cursor, value)
    cursor + 8
  }

  /** Like writeFloat but without the capacity check or stats update; NaNs
   * are still normalized to the canonical bit pattern. */
  override def writeFloatUnchecked(cursor: Long, value: Float): Long = {
    if (java.lang.Float.isNaN(value)) {
      if (littleEndian) Platform.putFloat(columnBytes, cursor, Float.NaN)
      else Platform.putInt(columnBytes, cursor,
        java.lang.Integer.reverseBytes(java.lang.Float.floatToIntBits(Float.NaN)))
    } else {
      if (littleEndian) Platform.putFloat(columnBytes, cursor, value)
      else Platform.putInt(columnBytes, cursor,
        java.lang.Integer.reverseBytes(java.lang.Float.floatToIntBits(value)))
    }
    cursor + 4
  }

  /** Like writeDouble but without the capacity check or stats update; NaNs
   * are still normalized to the canonical bit pattern. */
  override def writeDoubleUnchecked(cursor: Long, value: Double): Long = {
    if (java.lang.Double.isNaN(value)) {
      if (littleEndian) Platform.putDouble(columnBytes, cursor, Double.NaN)
      else Platform.putLong(columnBytes, cursor,
        java.lang.Long.reverseBytes(java.lang.Double.doubleToLongBits(Double.NaN)))
    } else {
      if (littleEndian) Platform.putDouble(columnBytes, cursor, value)
      else Platform.putLong(columnBytes, cursor,
        java.lang.Long.reverseBytes(java.lang.Double.doubleToLongBits(value)))
    }
    cursor + 8
  }

  /** Raw copy of `numBytes` from an arbitrary (object, offset) source into
   * the column buffer, growing it if necessary. No size prefix is written. */
  override def writeUnsafeData(cursor: Long, baseObject: AnyRef,
      baseOffset: Long, numBytes: Int): Long = {
    var position = cursor
    if (position + numBytes > columnEndPosition) {
      position = expand(position, numBytes)
    }
    // assume size is already written as per skipBytes in SerializedRowData
    Platform.copyMemory(baseObject, baseOffset, columnBytes, position, numBytes)
    position + numBytes
  }
}
| vjr/snappydata | core/src/main/scala/org/apache/spark/sql/execution/columnar/encoding/Uncompressed.scala | Scala | apache-2.0 | 15,061 |
package io.github.finaglecircuit
/**
* Circuit states. Of note is HalfOpen:
* - The first call attempted is allowed through without failing fast
* - All other calls fail-fast with an exception just as in Open state
* - If the first call succeeds, the breaker is reset back to Closed state
* - If the first call fails, the breaker is tripped again into the Open state for another full resetTimeout
*/
object CircuitStatus extends Enumeration {
  type CircuitStatus = Value
  // NOTE(review): declaration order fixes each Value's id
  // (Open=0, Closed=1, HalfOpen=2) — do not reorder if ids are ever
  // persisted or serialized. A sealed trait (or Scala 3 enum) would be the
  // modern replacement, but that would change the public API.
  val Open, Closed, HalfOpen = Value
}
| daviddenton/finagle-circuit | src/main/scala/io/github/finaglecircuit/CircuitStatus.scala | Scala | apache-2.0 | 518 |
package com.rocketfuel.sdbc.scalaz
import com.rocketfuel.sdbc.cassandra.datastax._
import com.rocketfuel.sdbc.scalaz.datastax._
import scalaz.concurrent.Task
import scalaz.stream._
/**
 * scalaz-stream adapters for the Cassandra (Datastax) Execute/Select query
 * types: each method lifts a query into a Process, Sink or Channel of Task.
 */
object DatastaxProcess {

  object datastax {

    /**
     * Create a stream from one query, whose result is ignored.
     * @param execute the statement to run
     * @param session implicit session the statement runs in
     * @return a stream of one () value.
     */
    def execute(execute: Execute)(implicit session: Session): Process[Task, Unit] = {
      Process.eval(runExecute(execute))
    }

    /**
     * Create a stream of values from a query's results.
     * @param select the query to run
     * @param session implicit session the query runs in
     * @tparam T result row type
     * @return a stream of the query results.
     */
    def select[T](select: Select[T])(implicit session: Session): Process[Task, T] = {
      runSelect(select)
    }

    object params {

      /**
       * Create a stream from parameter lists, which are independently
       * added to a query and executed, ignoring the results.
       *
       * The session is not closed when the stream completes.
       * @param execute The query to add parameters to.
       * @param session implicit session the statements run in
       * @return A stream of ().
       */
      def execute(
        execute: Execute
      )(implicit session: Session
      ): Sink[Task, ParameterList] = {
        sink.lift[Task, Seq[(String, Option[ParameterValue])]] { params =>
          runExecute(execute.on(params: _*))
        }
      }

      /**
       * Create a stream from parameter lists, which are independently
       * added to a query and executed, to streams of query results.
       *
       * Use merge.mergeN to run the queries in parallel, or
       * .flatMap(identity) to concatenate them.
       *
       * The session is not closed when the stream completes.
       * @param select The query to add parameters to.
       * @param session implicit session the queries run in
       * @tparam Value result row type
       * @return a channel from parameter lists to result streams
       */
      def select[Value](
        select: Select[Value]
      )(implicit session: Session
      ): Channel[Task, ParameterList, Process[Task, Value]] = {
        channel.lift[Task, Seq[(String, Option[ParameterValue])], Process[Task, Value]] { params =>
          Task.delay(runSelect[Value](select.on(params: _*)))
        }
      }

      /**
       * Create a stream from parameter lists, which are independently
       * added to a query and executed, ignoring the results.
       *
       * A session is created for the given keyspace, and is closed when the stream completes.
       * @param execute the statement to add parameters to
       * @param keyspace optional keyspace to connect to
       * @param cluster cluster to open the session against
       * @return a sink consuming parameter lists
       */
      def execute(
        execute: Execute,
        keyspace: Option[String] = None
      )(cluster: Cluster
      ): Sink[Task, ParameterList] = {
        Process.await(connect(cluster, keyspace)) {implicit session =>
          params.execute(execute).onComplete(Process.eval_(closeSession(session)))
        }
      }

      /**
       * Create a stream from keyspace names and parameter lists, which are
       * independently added to a query and executed, ignoring the results.
       *
       * A session is created for each keyspace in the source stream,
       * and they are closed when the stream completes.
       * @param execute the statement to add parameters to
       * @tparam Value NOTE(review): this type parameter is unused in the
       *               signature; removing it would break callers passing
       *               explicit type arguments, so it is kept.
       * @return a function from a cluster to a sink of (keyspace, params)
       */
      def executeWithKeyspace[Value](
        execute: Execute
      ): Cluster => Sink[Task, (String, ParameterList)] = {
        forClusterWithKeyspaceAux[ParameterList, Unit] { params => implicit session =>
          runExecute(execute.on(params: _*))
        }
      }

      /**
       * Create a stream from parameter lists, which are independently
       * added to a query and executed, to streams of query results.
       *
       * Use merge.mergeN to run the queries in parallel, or
       * .flatMap(identity) to concatenate them.
       *
       * A session is created for the given keyspace, and it is closed
       * when the stream completes.
       * @param select the query to add parameters to
       * @param keyspace optional keyspace to connect to
       * @param cluster cluster to open the session against
       * @tparam Value result row type
       * @return a channel from parameter lists to result streams
       */
      def select[Value](
        select: Select[Value],
        keyspace: Option[String] = None
      )(cluster: Cluster
      ): Channel[Task, ParameterList, Process[Task, Value]] = {
        Process.await(connect(cluster, keyspace)) {implicit session =>
          params.select[Value](select).onComplete(Process.eval_(closeSession(session)))
        }
      }

      /**
       * Create a stream from keyspace names and parameter lists, which
       * are independently added to a query and executed, to
       * streams of query results.
       *
       * Use merge.mergeN to run the queries in parallel, or
       * .flatMap(identity) to concatenate them.
       *
       * A session is created for each keyspace in the source stream,
       * and they are closed when the stream completes.
       * @param select the query to add parameters to
       * @tparam Value result row type
       * @return a function from a cluster to a channel of (keyspace, params)
       */
      def selectWithKeyspace[Value](
        select: Select[Value]
      ): Cluster => Channel[Task, (String, ParameterList), Process[Task, Value]] = {
        forClusterWithKeyspaceAux[ParameterList, Process[Task, Value]] { params => implicit session =>
          Task.delay(runSelect[Value](select.on(params: _*)))
        }
      }
    }

    object keys {

      /**
       * Use an instance of Executable to create a stream of queries, whose results are ignored.
       *
       * The session is not closed when the stream completes.
       * @param session session the statements run in
       * @param executable maps each key to an Execute statement
       * @tparam Key key type
       * @return a sink consuming keys
       */
      def execute[Key](
        session: Session
      )(implicit executable: Executable[Key]
      ): Sink[Task, Key] = {
        sink.lift[Task, Key] { key =>
          runExecute(executable.execute(key))(session)
        }
      }

      /**
       * Use an instance of Selectable to create a stream of query result streams.
       *
       * Use merge.mergeN on the result to run the queries in parallel, or .flatMap(identity)
       * to concatenate them.
       *
       * The session is not closed when the stream completes.
       * @param session session the queries run in
       * @param selectable maps each key to a Select query
       * @tparam Key key type
       * @tparam Value result row type
       * @return a channel from keys to result streams
       */
      def select[Key, Value](
        session: Session
      )(implicit selectable: Selectable[Key, Value]
      ): Channel[Task, Key, Process[Task, Value]] = {
        channel.lift[Task, Key, Process[Task, Value]] { key =>
          Task.delay(runSelect[Value](selectable.select(key))(session))
        }
      }

      /**
       * Use an instance of Executable to create a stream of queries, whose results are ignored.
       *
       * A session is created for the given namespace, which is closed when the stream completes.
       * @param cluster cluster to open the session against
       * @param keyspace optional keyspace to connect to
       * @param executable maps each key to an Execute statement
       * @tparam Key key type
       * @return a sink consuming keys
       */
      def execute[Key](
        cluster: Cluster,
        keyspace: Option[String] = None
      )(implicit executable: Executable[Key]
      ): Sink[Task, Key] = {
        Process.await(connect(cluster, keyspace)) { session =>
          execute(session).onComplete(Process.eval_(closeSession(session)))
        }
      }

      /**
       * Use an instance of Executable to create a stream of queries, whose results are ignored.
       *
       * A session is created for each keyspace in the source stream,
       * and they are closed when the stream completes.
       * @param cluster cluster to open the sessions against
       * @param executable maps each key to an Execute statement
       * @tparam Key key type
       * @tparam Value NOTE(review): unused in the signature; kept for
       *               source compatibility with callers supplying both
       *               type arguments.
       * @return a sink consuming (keyspace, key) pairs
       */
      def executeWithKeyspace[Key, Value](
        cluster: Cluster
      )(implicit executable: Executable[Key]
      ): Sink[Task, (String, Key)] = {
        forClusterWithKeyspaceAux[Key, Unit] { key => implicit session =>
          runExecute(executable.execute(key))
        }(cluster)
      }

      /**
       * Use an instance of Selectable to create a stream of query result streams.
       *
       * A session is created for the given namespace, which is closed when the stream completes.
       *
       * Use merge.mergeN on the result to run the queries in parallel, or .flatMap(identity)
       * to concatenate them.
       * @param cluster cluster to open the session against
       * @param keyspace optional keyspace to connect to
       * @param selectable maps each key to a Select query
       * @tparam Key key type
       * @tparam Value result row type
       * @return a channel from keys to result streams
       */
      def select[Key, Value](
        cluster: Cluster,
        keyspace: Option[String] = None
      )(implicit selectable: Selectable[Key, Value]
      ): Channel[Task, Key, Process[Task, Value]] = {
        Process.await(connect(cluster, keyspace)) { session =>
          select(session).onComplete(Process.eval_(closeSession(session)))
        }
      }

      /**
       * Use an instance of Selectable to create a stream of query result streams.
       *
       * A session is created for each keyspace in the source stream,
       * and they are closed when the stream completes.
       *
       * Use merge.mergeN on the result to run the queries in parallel, or .flatMap(identity)
       * to concatenate them.
       *
       * @param cluster cluster to open the sessions against
       * @param selectable maps each key to a Select query
       * @tparam Key key type
       * @tparam Value result row type
       * @return a channel from (keyspace, key) pairs to result streams
       */
      def selectWithKeyspace[Key, Value](
        cluster: Cluster
      )(implicit selectable: Selectable[Key, Value]
      ): Channel[Task, (String, Key), Process[Task, Value]] = {
        forClusterWithKeyspaceAux[Key, Process[Task, Value]] { key => implicit session =>
          Task.delay(runSelect[Value](selectable.select(key)))
        }(cluster)
      }
    }
  }
}
| wdacom/sdbc | cassandra.scalaz/src/main/scala/com/rocketfuel/sdbc/scalaz/DatastaxProcess.scala | Scala | bsd-3-clause | 9,589 |
package sbt
object Signals
{
  /** Signal name for SIGCONT (process resumed after a stop). */
  val CONT = "CONT"
  /** Signal name for SIGINT (interrupt, e.g. Ctrl-C). */
  val INT = "INT"

  /**
   * Runs `action` with `handler` registered for `signal`, restoring the
   * previous handler afterwards. If the `sun.misc.Signal` classes are
   * unavailable on this JVM (LinkageError), `action` runs without a handler.
   * A LinkageError thrown by `action` itself is re-thrown (see Signals0).
   */
  def withHandler[T](handler: () => Unit, signal: String = INT)(action: () => T): T =
  {
    val result =
      try
      {
        // Signals0 must only be touched inside this try: merely loading it
        // can fail with LinkageError on JVMs without sun.misc.Signal.
        val signals = new Signals0
        signals.withHandler(signal, handler, action)
      }
      catch { case e: LinkageError => Right(action()) }
    result match {
      case Left(e) => throw e
      case Right(v) => v
    }
  }

  /** Returns true when `signal` is a valid signal name on this platform;
   * false when it is unknown or sun.misc.Signal cannot be loaded. */
  def supported(signal: String): Boolean =
    try
    {
      val signals = new Signals0
      signals.supported(signal)
    }
    catch { case e: LinkageError => false }
}
// Must only be referenced using a
// try { } catch { case e: LinkageError => ... }
// block to
private final class Signals0
{
/** Probes whether `signal` names a valid platform signal by attempting to
 * construct a sun.misc.Signal for it; an IllegalArgumentException means
 * the name is not recognized. */
def supported(signal: String): Boolean =
{
  import sun.misc.Signal
  try { new Signal(signal); true }
  catch { case e: IllegalArgumentException => false }
}
// returns a LinkageError in `action` as Left(t) in order to avoid it being
// incorrectly swallowed as missing Signal/SignalHandler
/** Installs `handler` for `signal`, runs `action`, and always restores the
 * previous handler in the finally block. */
def withHandler[T](signal: String, handler: () => Unit, action: () => T): Either[Throwable, T] =
{
  import sun.misc.{Signal,SignalHandler}
  val intSignal = new Signal(signal)
  val newHandler = new SignalHandler {
    def handle(sig: Signal) { handler() }
  }
  // remember the previous handler so it can be restored afterwards
  val oldHandler = Signal.handle(intSignal, newHandler)
  try Right(action())
  catch { case e: LinkageError => Left(e) }
  finally Signal.handle(intSignal, oldHandler)
}
} | olove/xsbt | util/collection/src/main/scala/sbt/Signal.scala | Scala | bsd-3-clause | 1,423 |
package views.html
import play.templates._
import play.templates.TemplateMagic._
import play.api.templates._
import play.api.templates.PlayMagic._
import models._
import controllers._
import java.lang._
import java.util._
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
import play.api.i18n._
import play.core.j.PlayMagicForJava._
import play.mvc._
import play.data._
import play.api.data.Field
import play.mvc.Http.Context.Implicit._
import views.html._
/*******************************************************************************
* Licensed to the Apache Software Foundation (ASF) under one or more *
* contributor license agreements. See the NOTICE file distributed with *
* this work for additional information regarding copyright ownership. *
* The ASF licenses this file to You under the Apache License, Version 2.0 *
* (the "License"); you may not use this file except in compliance with *
* the License. You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
*******************************************************************************/
// NOTE(review): this object is auto-generated by the Play template compiler
// from flash_message.scala.html (see the GENERATED/MATRIX footer below).
// Do not edit by hand — change the .scala.html template and regenerate.
object flash_message extends BaseScalaTemplate[play.api.templates.HtmlFormat.Appendable,Format[play.api.templates.HtmlFormat.Appendable]](play.api.templates.HtmlFormat) with play.api.templates.Template0[play.api.templates.HtmlFormat.Appendable] {

/*******************************************************************************
 * Licensed to the Apache Software Foundation (ASF) under one or more          *
 * contributor license agreements.  See the NOTICE file distributed with       *
 * this work for additional information regarding copyright ownership.         *
 * The ASF licenses this file to You under the Apache License, Version 2.0     *
 * (the "License"); you may not use this file except in compliance with        *
 * the License.  You may obtain a copy of the License at                       *
 *                                                                             *
 *    http://www.apache.org/licenses/LICENSE-2.0                               *
 *                                                                             *
 * Unless required by applicable law or agreed to in writing, software         *
 * distributed under the License is distributed on an "AS IS" BASIS,           *
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.    *
 * See the License for the specific language governing permissions and         *
 * limitations under the License.                                              *
 *******************************************************************************/
def apply():play.api.templates.HtmlFormat.Appendable = {
_display_ {import helper._

Seq[Any](format.raw/*19.1*/("""
"""),_display_(Seq[Any](/*20.2*/if(flash.containsKey("error"))/*20.32*/ {_display_(Seq[Any](format.raw/*20.34*/("""
<div class="alert alert-danger">
<strong>Oops!</strong> """),_display_(Seq[Any](/*23.41*/flash/*23.46*/.get("error"))),format.raw/*23.59*/("""
<a class="close" data-dismiss="alert">x</a>
</div>
""")))})),format.raw/*27.2*/("""
"""),_display_(Seq[Any](/*29.2*/if(flash.containsKey("success"))/*29.34*/ {_display_(Seq[Any](format.raw/*29.36*/("""
<div class="alert alert-success">
"""),_display_(Seq[Any](/*32.18*/flash/*32.23*/.get("success"))),format.raw/*32.38*/("""
<a class="close" data-dismiss="alert">x</a>
</div>
""")))})),format.raw/*36.2*/("""
"""))}
}

def render(): play.api.templates.HtmlFormat.Appendable = apply()

def f:(() => play.api.templates.HtmlFormat.Appendable) = () => apply()

def ref: this.type = this

}
/*
-- GENERATED --
DATE: Thu Apr 07 14:52:41 PDT 2016
SOURCE: /home/dimitris/CMU/SA&D/Project/ApacheCMDA-Frontend/app/views/flash_message.scala.html
HASH: bc76ec21b6e0d1e7a199f2dd57bbafc261199186
MATRIX: 3274->1221|3311->1223|3350->1253|3390->1255|3517->1346|3531->1351|3566->1364|3683->1450|3721->1453|3762->1485|3802->1487|3907->1556|3921->1561|3958->1576|4075->1662
LINES: 60->19|61->20|61->20|61->20|64->23|64->23|64->23|68->27|70->29|70->29|70->29|73->32|73->32|73->32|77->36
-- GENERATED --
*/
| dsarlis/SAD-Spring-2016-Project-Team4 | ApacheCMDA-Frontend/target/scala-2.10/src_managed/main/views/html/flash_message.template.scala | Scala | mit | 4,969 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.connector.catalog
import java.util
import java.util.Collections
import scala.collection.JavaConverters._
import org.apache.spark.sql.catalyst.analysis.{NamedRelation, NoSuchDatabaseException, NoSuchNamespaceException, NoSuchTableException, UnresolvedV2Relation}
import org.apache.spark.sql.catalyst.plans.logical.AlterTable
import org.apache.spark.sql.connector.catalog.TableChange._
import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation
import org.apache.spark.sql.types.{ArrayType, MapType, StructField, StructType}
import org.apache.spark.sql.util.CaseInsensitiveStringMap
import org.apache.spark.util.Utils
/**
 * Utility helpers for the DataSource V2 catalog API: applying
 * NamespaceChange/TableChange sequences to property maps and schemas,
 * loading tables from catalogs, and translating table properties.
 */
private[sql] object CatalogV2Util {
  import org.apache.spark.sql.connector.catalog.CatalogV2Implicits._

  /**
   * The list of reserved table properties, which can not be removed or changed directly by
   * the syntax:
   * {{
   *   ALTER TABLE ... SET TBLPROPERTIES ...
   * }}
   *
   * They need specific syntax to modify
   */
  val TABLE_RESERVED_PROPERTIES =
    Seq(TableCatalog.PROP_COMMENT,
      TableCatalog.PROP_LOCATION,
      TableCatalog.PROP_PROVIDER,
      TableCatalog.PROP_OWNER)

  /**
   * The list of reserved namespace properties, which can not be removed or changed directly by
   * the syntax:
   * {{
   *   ALTER NAMESPACE ... SET PROPERTIES ...
   * }}
   *
   * They need specific syntax to modify
   */
  val NAMESPACE_RESERVED_PROPERTIES =
    Seq(SupportsNamespaces.PROP_COMMENT,
      SupportsNamespaces.PROP_LOCATION,
      SupportsNamespaces.PROP_OWNER)

  /**
   * Apply properties changes to a map and return the result.
   */
  def applyNamespaceChanges(
      properties: Map[String, String],
      changes: Seq[NamespaceChange]): Map[String, String] = {
    applyNamespaceChanges(properties.asJava, changes).asScala.toMap
  }

  /**
   * Apply properties changes to a Java map and return the result.
   */
  def applyNamespaceChanges(
      properties: util.Map[String, String],
      changes: Seq[NamespaceChange]): util.Map[String, String] = {
    val newProperties = new util.HashMap[String, String](properties)

    changes.foreach {
      case set: NamespaceChange.SetProperty =>
        newProperties.put(set.property, set.value)

      case unset: NamespaceChange.RemoveProperty =>
        newProperties.remove(unset.property)

      case _ =>
      // ignore non-property changes
    }

    Collections.unmodifiableMap(newProperties)
  }

  /**
   * Apply properties changes to a map and return the result.
   */
  def applyPropertiesChanges(
      properties: Map[String, String],
      changes: Seq[TableChange]): Map[String, String] = {
    applyPropertiesChanges(properties.asJava, changes).asScala.toMap
  }

  /**
   * Apply properties changes to a Java map and return the result.
   */
  def applyPropertiesChanges(
      properties: util.Map[String, String],
      changes: Seq[TableChange]): util.Map[String, String] = {
    val newProperties = new util.HashMap[String, String](properties)

    changes.foreach {
      case set: SetProperty =>
        newProperties.put(set.property, set.value)

      case unset: RemoveProperty =>
        newProperties.remove(unset.property)

      case _ =>
      // ignore non-property changes
    }

    Collections.unmodifiableMap(newProperties)
  }

  /**
   * Apply schema changes to a schema and return the result. Non-schema
   * changes (e.g. property changes) are ignored.
   */
  def applySchemaChanges(schema: StructType, changes: Seq[TableChange]): StructType = {
    changes.foldLeft(schema) { (schema, change) =>
      change match {
        case add: AddColumn =>
          add.fieldNames match {
            case Array(name) =>
              // top-level column: add directly at the requested position
              val field = StructField(name, add.dataType, nullable = add.isNullable)
              val newField = Option(add.comment).map(field.withComment).getOrElse(field)
              addField(schema, newField, add.position())
            case names =>
              // nested column: navigate to the parent struct and add there
              replace(schema, names.init, parent => parent.dataType match {
                case parentType: StructType =>
                  val field = StructField(names.last, add.dataType, nullable = add.isNullable)
                  val newField = Option(add.comment).map(field.withComment).getOrElse(field)
                  Some(parent.copy(dataType = addField(parentType, newField, add.position())))
                case _ =>
                  throw new IllegalArgumentException(s"Not a struct: ${names.init.last}")
              })
          }

        case rename: RenameColumn =>
          replace(schema, rename.fieldNames, field =>
            Some(StructField(rename.newName, field.dataType, field.nullable, field.metadata)))

        case update: UpdateColumnType =>
          replace(schema, update.fieldNames, field => {
            Some(field.copy(dataType = update.newDataType))
          })

        case update: UpdateColumnNullability =>
          replace(schema, update.fieldNames, field => {
            Some(field.copy(nullable = update.nullable))
          })

        case update: UpdateColumnComment =>
          replace(schema, update.fieldNames, field =>
            Some(field.withComment(update.newComment)))

        case update: UpdateColumnPosition =>
          // removes the field and re-adds it at the requested position
          def updateFieldPos(struct: StructType, name: String): StructType = {
            val oldField = struct.fields.find(_.name == name).getOrElse {
              throw new IllegalArgumentException("Field not found: " + name)
            }
            val withFieldRemoved = StructType(struct.fields.filter(_ != oldField))
            addField(withFieldRemoved, oldField, update.position())
          }

          update.fieldNames() match {
            case Array(name) =>
              updateFieldPos(schema, name)
            case names =>
              replace(schema, names.init, parent => parent.dataType match {
                case parentType: StructType =>
                  Some(parent.copy(dataType = updateFieldPos(parentType, names.last)))
                case _ =>
                  throw new IllegalArgumentException(s"Not a struct: ${names.init.last}")
              })
          }

        case delete: DeleteColumn =>
          replace(schema, delete.fieldNames, _ => None)

        case _ =>
          // ignore non-schema changes
          schema
      }
    }
  }

  /**
   * Inserts `field` into `schema` at the requested position: appended when
   * `position` is null, first when FIRST, or directly after the named column
   * when AFTER. Throws IllegalArgumentException when the AFTER column does
   * not exist or the position kind is unknown.
   */
  private def addField(
      schema: StructType,
      field: StructField,
      position: ColumnPosition): StructType = position match {
    // no position given means append at the end (the default for ADD COLUMN)
    case null =>
      schema.add(field)
    case _: First =>
      StructType(field +: schema.fields)
    case after: After =>
      val afterCol = after.column()
      val fieldIndex = schema.fields.indexWhere(_.name == afterCol)
      if (fieldIndex == -1) {
        throw new IllegalArgumentException("AFTER column not found: " + afterCol)
      }
      val (before, rest) = schema.fields.splitAt(fieldIndex + 1)
      StructType(before ++ (field +: rest))
    case other =>
      // defensive: ColumnPosition currently has only First and After
      throw new IllegalArgumentException("Unknown column position: " + other)
  }

  /**
   * Replaces (or deletes, when `update` returns None) the field addressed by
   * `fieldNames` inside `struct`, descending through nested structs and the
   * special "key"/"value"/"element" path segments of maps and arrays.
   */
  private def replace(
      struct: StructType,
      fieldNames: Seq[String],
      update: StructField => Option[StructField]): StructType = {
    val pos = struct.getFieldIndex(fieldNames.head)
        .getOrElse(throw new IllegalArgumentException(s"Cannot find field: ${fieldNames.head}"))
    val field = struct.fields(pos)
    val replacement: Option[StructField] = (fieldNames.tail, field.dataType) match {
      case (Seq(), _) =>
        update(field)

      case (names, struct: StructType) =>
        val updatedType: StructType = replace(struct, names, update)
        Some(StructField(field.name, updatedType, field.nullable, field.metadata))

      case (Seq("key"), map @ MapType(keyType, _, _)) =>
        // map keys are never nullable
        val updated = update(StructField("key", keyType, nullable = false))
            .getOrElse(throw new IllegalArgumentException(s"Cannot delete map key"))
        Some(field.copy(dataType = map.copy(keyType = updated.dataType)))

      case (Seq("key", names @ _*), map @ MapType(keyStruct: StructType, _, _)) =>
        Some(field.copy(dataType = map.copy(keyType = replace(keyStruct, names, update))))

      case (Seq("value"), map @ MapType(_, mapValueType, isNullable)) =>
        val updated = update(StructField("value", mapValueType, nullable = isNullable))
            .getOrElse(throw new IllegalArgumentException(s"Cannot delete map value"))
        Some(field.copy(dataType = map.copy(
          valueType = updated.dataType,
          valueContainsNull = updated.nullable)))

      case (Seq("value", names @ _*), map @ MapType(_, valueStruct: StructType, _)) =>
        Some(field.copy(dataType = map.copy(valueType = replace(valueStruct, names, update))))

      case (Seq("element"), array @ ArrayType(elementType, isNullable)) =>
        val updated = update(StructField("element", elementType, nullable = isNullable))
            .getOrElse(throw new IllegalArgumentException(s"Cannot delete array element"))
        Some(field.copy(dataType = array.copy(
          elementType = updated.dataType,
          containsNull = updated.nullable)))

      case (Seq("element", names @ _*), array @ ArrayType(elementStruct: StructType, _)) =>
        Some(field.copy(dataType = array.copy(elementType = replace(elementStruct, names, update))))

      case (names, dataType) =>
        throw new IllegalArgumentException(
          s"Cannot find field: ${names.head} in ${dataType.simpleString}")
    }

    val newFields = struct.fields.zipWithIndex.flatMap {
      case (_, index) if pos == index =>
        replacement
      case (other, _) =>
        Some(other)
    }

    new StructType(newFields)
  }

  /**
   * Loads a table from the catalog, returning None when the table (or its
   * database/namespace) does not exist.
   */
  def loadTable(catalog: CatalogPlugin, ident: Identifier): Option[Table] =
    try {
      Option(catalog.asTableCatalog.loadTable(ident))
    } catch {
      // all three mean the identifier cannot be resolved in this catalog
      case _: NoSuchTableException | _: NoSuchDatabaseException | _: NoSuchNamespaceException =>
        None
    }

  /** Loads a table and wraps it in a DataSourceV2Relation, if it exists. */
  def loadRelation(catalog: CatalogPlugin, ident: Identifier): Option[NamedRelation] = {
    loadTable(catalog, ident).map(DataSourceV2Relation.create(_, Some(catalog), Some(ident)))
  }

  /** Returns true when `catalog` is the session catalog (by name). */
  def isSessionCatalog(catalog: CatalogPlugin): Boolean = {
    catalog.name().equalsIgnoreCase(CatalogManager.SESSION_CATALOG_NAME)
  }

  /**
   * Merges user properties, options and the reserved provider/comment/
   * location entries into a single table property map. Later entries win,
   * so reserved keys override same-named user properties/options.
   */
  def convertTableProperties(
      properties: Map[String, String],
      options: Map[String, String],
      location: Option[String],
      comment: Option[String],
      provider: Option[String]): Map[String, String] = {
    properties ++ options ++
      provider.map(TableCatalog.PROP_PROVIDER -> _) ++
      comment.map(TableCatalog.PROP_COMMENT -> _) ++
      location.map(TableCatalog.PROP_LOCATION -> _)
  }

  /** Adds the current user as the table owner property. */
  def withDefaultOwnership(properties: Map[String, String]): Map[String, String] = {
    properties ++ Map(TableCatalog.PROP_OWNER -> Utils.getCurrentUserName())
  }

  /**
   * Builds an AlterTable plan node over an unresolved relation for the
   * given table and changes.
   */
  def createAlterTable(
      originalNameParts: Seq[String],
      catalog: CatalogPlugin,
      tableName: Seq[String],
      changes: Seq[TableChange]): AlterTable = {
    val tableCatalog = catalog.asTableCatalog
    val ident = tableName.asIdentifier
    val unresolved = UnresolvedV2Relation(originalNameParts, tableCatalog, ident)
    AlterTable(tableCatalog, ident, unresolved, changes)
  }

  /**
   * Resolves the TableCatalog a SupportsCatalogOptions provider points at,
   * falling back to the V2 session catalog when none is specified.
   */
  def getTableProviderCatalog(
      provider: SupportsCatalogOptions,
      catalogManager: CatalogManager,
      options: CaseInsensitiveStringMap): TableCatalog = {
    Option(provider.extractCatalog(options))
      .map(catalogManager.catalog)
      .getOrElse(catalogManager.v2SessionCatalog)
      .asTableCatalog
  }
}
| goldmedal/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/connector/catalog/CatalogV2Util.scala | Scala | apache-2.0 | 12,301 |
/*
* Copyright (c) 2014-2016
* nonblocking.at gmbh [http://www.nonblocking.at]
*
* This file is part of Cliwix.
*
* Cliwix is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package at.nonblocking.cliwix.core.liferay62.handler
import at.nonblocking.cliwix.core.command._
import at.nonblocking.cliwix.core.handler.Handler
import at.nonblocking.cliwix.core.liferay61.util.{PortalInstanceUtil}
import at.nonblocking.cliwix.model._
import com.liferay.portal.kernel.dao.orm.SessionFactory
import com.liferay.portal.kernel.util.InfrastructureUtil
import com.liferay.portal.service.CompanyLocalService
import scala.beans.BeanProperty
import scala.collection.mutable
/**
 * Handles deletion of a Liferay company (portal instance) for a DeleteCommand.
 */
class CompanyDeleteHandler extends Handler[DeleteCommand[Company], Company] {

  @BeanProperty
  var companyService: CompanyLocalService = _

  @BeanProperty
  var sessionFactory: SessionFactory = _

  @BeanProperty
  var portalInstanceUtil: PortalInstanceUtil = _

  /** Deletes the company via the Liferay service, then removes its portal instance. */
  private[core] override def handle(command: DeleteCommand[Company]): CommandResult[Company] = {
    logger.debug("Deleting company: {}", command.entity)

    val cliwixCompany = command.entity

    this.companyService.deleteCompany(cliwixCompany.getCompanyId)
    this.portalInstanceUtil.removePortalInstance(cliwixCompany.getCompanyId)

    CommandResult(null)
  }

  /**
   * Remove all table rows which contain companyId=[companyId] in the database.
   * Necessary for Liferay < 6.2, since removeCompany() does not clean up.
   *
   * @param companyId Long
   */
  private def eraseAllCompanyData(companyId: Long) = {
    val connection = InfrastructureUtil.getDataSource.getConnection
    try {
      // NOTE(review): the hibernate session is opened but never explicitly closed here;
      // confirm whether the Liferay SessionFactory manages its lifecycle.
      val hibernateSession = this.sessionFactory.openSession

      val metaData = connection.getMetaData

      // Collect all table names first; close the ResultSet even if iteration fails
      // (it was previously leaked on exception, along with the connection below).
      val tableNames = new mutable.MutableList[String]
      val tables = metaData.getTables(null, null, "%", null)
      try {
        while (tables.next()) tableNames += tables.getString(3)
      } finally {
        tables.close()
      }

      tableNames.foreach { tableName =>
        val columnNames = new mutable.MutableList[String]
        val columns = metaData.getColumns(null, null, tableName, null)
        try {
          while (columns.next()) columnNames += columns.getString(4)
        } finally {
          columns.close()
        }

        // Only tables that actually carry a companyId column are touched.
        if (columnNames.exists(_.equalsIgnoreCase("companyId"))) {
          logger.debug(s"Removing all rows from table '$tableName' belonging to company with ID: $companyId")
          try {
            val rowsRemoved = hibernateSession.createSQLQuery(s"DELETE FROM $tableName WHERE companyId=$companyId").executeUpdate()
            logger.info(s"$rowsRemoved rows removed from table '$tableName' belonging to company with ID: $companyId")
          } catch {
            // Best effort per table: log and continue with the remaining tables.
            case e: Throwable =>
              logger.error(s"Failed to clean table $tableName", e)
          }
        }
      }
    } finally {
      // Previously leaked whenever any of the metadata calls above threw.
      connection.close()
    }
  }
}
| nonblocking/cliwix | cliwix-core-handlers-6-2/src/main/scala/at/nonblocking/cliwix/core/liferay62/handler/CompanyHandler.scala | Scala | agpl-3.0 | 3,338 |
package com.avast.cactus
import io.grpc._
package object grpc {

  // Context key under which request Metadata ("headers") is stored; see GrpcMetadata.
  private[grpc] val MetadataContextKey = ContextKeys.get[Metadata]("headers")

  // A server handler result: either a ServerError or a successful response value.
  type ServerResponse[Resp] = Either[ServerError, Resp]

  // gRPC status plus optional metadata describing a failed call.
  case class ServerError(status: Status, headers: Metadata = new Metadata())

  // Immutable pair of the gRPC Context and request Metadata, with copy-style updaters.
  case class GrpcMetadata(context: Context, headers: Metadata) {
    // Returns a copy with the context transformed by `f`.
    def withContext(f: Context => Context): GrpcMetadata = copy(context = f(context))

    // Returns a copy with the headers transformed by `f`.
    def withHeaders(f: Metadata => Metadata): GrpcMetadata = copy(headers = f(headers))
  }

}
| avast/cactus | grpc-common/src/main/scala/com/avast/cactus/grpc/grpc.scala | Scala | apache-2.0 | 528 |
package org.marxc
import scala.collection.mutable
import org.parboiled.errors.ParsingException
import org.objectweb.asm.Opcodes._
/**
 * One scope level of the compiler's symbol table. Scopes are chained through
 * `upperLevel`; lookups that miss locally are delegated upwards, and a miss at
 * the top level raises a ParsingException.
 *
 * @param upperLevel    the enclosing scope, or None for the file-level scope
 * @param currentMethod the method this scope belongs to (the file name at top level)
 */
case class SymbolTable(upperLevel: Option[SymbolTable], currentMethod: String) {

  val FirstSymbolTableAddress = 0

  // variable name -> local slot address in this scope
  private val variableTable = new mutable.HashMap[String, Integer]()
  // method name -> signature information
  private val methodTable = new mutable.HashMap[String, MethodInformation]()

  val initialNextVarAddress: Int = FirstSymbolTableAddress

  /** Number of local slots in use, including the reserved prefix. */
  def size(): Int = {
    initialNextVarAddress + variableTable.size
  }

  /** ASM stack-frame local types: every slot is INTEGER. */
  def getStackFrame: Array[AnyRef] = {
    // Strict and direct replacement for the former
    // Stream.iterate(INTEGER: AnyRef)(identity).take(size()).toArray round-trip.
    Array.fill[AnyRef](size())(INTEGER)
  }

  /**
   * Declares a new variable in this scope.
   * @throws ParsingException if the name is already declared here
   */
  def putVariable(variableName: String) = {
    if (variableTable.contains(variableName)) {
      throw new ParsingException(s"DUPLICATE VARIABLE: $variableName")
    }
    val newVarAddress = initialNextVarAddress + variableTable.size
    variableTable += (variableName -> newVarAddress)
  }

  /**
   * Looks up a variable's slot address, searching enclosing scopes.
   * @throws ParsingException if the variable is not declared anywhere
   */
  def getVariableAddress(variableName: String): Integer = {
    variableTable.getOrElse(variableName, {
      if (upperLevel.isEmpty) {
        throw new ParsingException(s"VARIABLE: $variableName NOT DECLARED!")
      }
      upperLevel.get.getVariableAddress(variableName)
    })
  }

  /** Registers a method's signature information in this scope. */
  def putMethod(methodName: String, methodInformation: MethodInformation) = {
    methodTable.put(methodName, methodInformation)
  }

  /** JVM method descriptor for `methodName`; `main` always gets the standard JVM entry signature. */
  def getMethodDescription(methodName: String): String = {
    if (methodName.equals("main")) {
      "([Ljava/lang/String;)V"
    }
    else {
      // All user-defined arguments and return values are ints ("I"); void is "V".
      val method = getMethodInformation(methodName)
      val numberOfArguments = method.numberOfArguments
      val returnValue = if (method.returnsValue) "I" else "V"
      "(" + "I" * numberOfArguments + ")" + returnValue
    }
  }

  /** Signature information for the method this scope belongs to. */
  def getCurrentMethod(): MethodInformation = {
    getMethodInformation(currentMethod)
  }

  /**
   * Looks up a method's signature, searching enclosing scopes.
   * @throws ParsingException if the method is not declared anywhere
   */
  def getMethodInformation(methodName: String): MethodInformation = {
    methodTable.getOrElse(methodName, {
      if (upperLevel.isEmpty) {
        throw new ParsingException(s"METHOD: $methodName NOT DECLARED!")
      }
      upperLevel.get.getMethodInformation(methodName)
    })
  }

  /** The top-level scope's `currentMethod`, which holds the file name. */
  def getFileName(): String = {
    if (upperLevel.isEmpty) {
      currentMethod
    }
    else {
      upperLevel.get.getFileName()
    }
  }
} | iogr/MarxC | src/main/scala/org/marxc/SymbolTable.scala | Scala | apache-2.0 | 2287
package net.benmur.riemann.client
/**
 * A small DSL for building [[EventPart]] values from single-field fragments
 * and combining them with `|`.
 */
trait EventDSL {

  /**
   * Overlays `overlay` on top of `e`: each field set in `overlay` wins over the
   * corresponding field of `e`; the tag sets of both events are unioned and sorted.
   */
  def mergeEvents(e: EventPart, overlay: EventPart) = EventPart(
    overlay.host.orElse(e.host),
    overlay.service.orElse(e.service),
    overlay.state.orElse(e.state),
    overlay.time.orElse(e.time),
    overlay.description.orElse(e.description),
    (e.tags ++ overlay.tags).distinct.sorted,
    overlay.metric.orElse(e.metric),
    overlay.ttl.orElse(e.ttl))

  /** Pipe syntax: `a | b` merges `b` over `a`. */
  implicit class EventPartCombinator(e: EventPart) {
    def |(overlay: EventPart) = mergeEvents(e, overlay)
  }

  /** An empty event fragment to start a `|` chain from. */
  def oneEvent() = EventPart()

  // Single-field fragments; combine any of these with `|`.
  def host(s: String) = EventPart(host = Some(s))
  def service(s: String) = EventPart(service = Some(s))
  def state(s: String) = EventPart(state = Some(s))
  def time(l: Long) = EventPart(time = Some(l))
  def description(s: String) = EventPart(description = Some(s))
  def tags(s: String*) = EventPart(tags = s)
  def metric(m: Long) = EventPart(metric = Some(m))
  def metric(m: Float) = EventPart(metric = Some(m))
  def metric(m: Double) = EventPart(metric = Some(m))
  def ttl(f: Float) = EventPart(ttl = Some(f))
}

object EventDSL extends EventDSL
| mallman/riemann-scala-client | src/main/scala/net/benmur/riemann/client/EventDSL.scala | Scala | mit | 1,117 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.twitter.zipkin.sampler
import com.twitter.finagle.Service
import com.twitter.finagle.httpx.{HttpMuxer, Method, Request, Response}
import com.twitter.util.{Extractable, Future, Var}
/**
* A wrapper around a Var[Double] that exposes it to an HTTP endpoint
* at "/vars/`name`". The Var can be read via a GET request and
* updated via a POST request. The body of the POST will be used
* as the new value.
*/
/**
 * A wrapper around a Var[Double] exposed over HTTP at "/vars/`name`".
 * GET returns the current value; POST sets it from the request body,
 * which must parse as a double in [0, 1].
 */
class HttpVar(name: String, default: Double = 1.0) {
  private[this] val underlying = Var(default)

  /** The wrapped variable, for in-process readers. */
  def apply(): Var[Double] with Extractable[Double] = underlying

  HttpMuxer.addRichHandler("/vars/" + name, Service.mk[Request, Response] { req =>
    req.method match {
      case Method.Get =>
        val rep = req.response
        rep.contentString = underlying().toString
        Future.value(rep)

      case Method.Post =>
        val rep = req.response
        try {
          val newRate = req.contentString.toDouble
          // Condition kept exactly as written so non-finite input behaves identically.
          if (newRate > 1 || newRate < 0) {
            rep.statusCode = 400
            rep.contentString = "invalid rate"
          } else {
            underlying.update(newRate)
            rep.contentString = newRate.toString
          }
        } catch {
          // e.g. NumberFormatException from toDouble.
          case e: Exception =>
            rep.statusCode = 500
            rep.contentString = e.toString
        }
        Future.value(rep)

      case _ =>
        val rep = req.response
        rep.statusCode = 404
        Future.value(rep)
    }
  })
}
| chang2394/zipkin | zipkin-sampler/src/main/scala/com/twitter/zipkin/sampler/HttpVar.scala | Scala | apache-2.0 | 2,011 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.util.Optional
import kafka.utils.TestUtils
import org.apache.kafka.common.Uuid
import org.apache.kafka.common.errors.UnsupportedVersionException
import org.apache.kafka.common.internals.Topic
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.requests.{MetadataRequest, MetadataResponse}
import org.apache.kafka.metadata.BrokerState
import org.apache.kafka.test.TestUtils.isValidClusterId
import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.{BeforeEach, TestInfo}
import org.junit.jupiter.params.ParameterizedTest
import org.junit.jupiter.params.provider.ValueSource
import scala.collection.Seq
import scala.jdk.CollectionConverters._
/**
 * Integration tests for broker handling of MetadataRequest across protocol
 * versions, parameterized over ZooKeeper ("zk") and KRaft ("kraft") modes.
 */
class MetadataRequestTest extends AbstractMetadataRequestTest {

  @BeforeEach
  override def setUp(testInfo: TestInfo): Unit = {
    // The offsets topic is not needed by these tests, so skip creating it eagerly.
    doSetup(testInfo, createOffsetsTopic = false)
  }

  @ParameterizedTest
  @ValueSource(strings = Array("zk", "kraft"))
  def testClusterIdWithRequestVersion1(quorum: String): Unit = {
    // Protocol v1 predates cluster ids, so none is returned.
    val v1MetadataResponse = sendMetadataRequest(MetadataRequest.Builder.allTopics.build(1.toShort))
    val v1ClusterId = v1MetadataResponse.clusterId
    assertNull(v1ClusterId, s"v1 clusterId should be null")
  }

  @ParameterizedTest
  @ValueSource(strings = Array("zk", "kraft"))
  def testClusterIdIsValid(quorum: String): Unit = {
    val metadataResponse = sendMetadataRequest(MetadataRequest.Builder.allTopics.build(2.toShort))
    isValidClusterId(metadataResponse.clusterId)
  }

  /**
   * This test only runs in ZK mode because in KRaft mode, the controller ID visible to
   * the client is randomized.
   */
  @ParameterizedTest
  @ValueSource(strings = Array("zk"))
  // Fix: a @ParameterizedTest fed by a @ValueSource must declare a matching parameter;
  // without it JUnit rejects the invocation with an argument-count mismatch before
  // the test body runs.
  def testControllerId(quorum: String): Unit = {
    val controllerServer = servers.find(_.kafkaController.isActive).get
    val controllerId = controllerServer.config.brokerId
    val metadataResponse = sendMetadataRequest(MetadataRequest.Builder.allTopics.build(1.toShort))

    assertEquals(controllerId,
      metadataResponse.controller.id, "Controller id should match the active controller")

    // Fail over the controller
    controllerServer.shutdown()
    controllerServer.startup()

    val controllerServer2 = servers.find(_.kafkaController.isActive).get
    val controllerId2 = controllerServer2.config.brokerId
    assertNotEquals(controllerId, controllerId2, "Controller id should switch to a new broker")
    TestUtils.waitUntilTrue(() => {
      val metadataResponse2 = sendMetadataRequest(MetadataRequest.Builder.allTopics.build(1.toShort))
      metadataResponse2.controller != null && controllerServer2.dataPlaneRequestProcessor.brokerId == metadataResponse2.controller.id
    }, "Controller id should match the active controller after failover", 5000)
  }

  @ParameterizedTest
  @ValueSource(strings = Array("zk", "kraft"))
  def testRack(quorum: String): Unit = {
    val metadataResponse = sendMetadataRequest(MetadataRequest.Builder.allTopics.build(1.toShort))
    // Validate rack matches what's set in generateConfigs() above
    metadataResponse.brokers.forEach { broker =>
      assertEquals(s"rack/${broker.id}", broker.rack, "Rack information should match config")
    }
  }

  @ParameterizedTest
  @ValueSource(strings = Array("zk", "kraft"))
  def testIsInternal(quorum: String): Unit = {
    val internalTopic = Topic.GROUP_METADATA_TOPIC_NAME
    val notInternalTopic = "notInternal"
    // create the topics
    createTopic(internalTopic, 3, 2)
    createTopic(notInternalTopic, 3, 2)

    val metadataResponse = sendMetadataRequest(MetadataRequest.Builder.allTopics.build(1.toShort))
    assertTrue(metadataResponse.errors.isEmpty, "Response should have no errors")

    val topicMetadata = metadataResponse.topicMetadata.asScala
    val internalTopicMetadata = topicMetadata.find(_.topic == internalTopic).get
    val notInternalTopicMetadata = topicMetadata.find(_.topic == notInternalTopic).get

    assertTrue(internalTopicMetadata.isInternal, "internalTopic should show isInternal")
    assertFalse(notInternalTopicMetadata.isInternal, "notInternalTopic topic not should show isInternal")

    assertEquals(Set(internalTopic).asJava, metadataResponse.buildCluster().internalTopics)
  }

  @ParameterizedTest
  @ValueSource(strings = Array("zk", "kraft"))
  def testNoTopicsRequest(quorum: String): Unit = {
    // create some topics
    createTopic("t1", 3, 2)
    createTopic("t2", 3, 2)

    // v0, Doesn't support a "no topics" request
    // v1, Empty list represents "no topics"
    val metadataResponse = sendMetadataRequest(new MetadataRequest.Builder(List[String]().asJava, true, 1.toShort).build)
    assertTrue(metadataResponse.errors.isEmpty, "Response should have no errors")
    assertTrue(metadataResponse.topicMetadata.isEmpty, "Response should have no topics")
  }

  @ParameterizedTest
  @ValueSource(strings = Array("zk", "kraft"))
  def testAutoTopicCreation(quorum: String): Unit = {
    val topic1 = "t1"
    val topic2 = "t2"
    val topic3 = "t3"
    val topic4 = "t4"
    val topic5 = "t5"
    createTopic(topic1)

    val response1 = sendMetadataRequest(new MetadataRequest.Builder(Seq(topic1, topic2).asJava, true).build())
    assertNull(response1.errors.get(topic1))
    checkAutoCreatedTopic(topic2, response1)

    // The default behavior in old versions of the metadata API is to allow topic creation, so
    // protocol downgrades should happen gracefully when auto-creation is explicitly requested.
    val response2 = sendMetadataRequest(new MetadataRequest.Builder(Seq(topic3).asJava, true).build(1))
    checkAutoCreatedTopic(topic3, response2)

    // V3 doesn't support a configurable allowAutoTopicCreation, so disabling auto-creation is not supported
    assertThrows(classOf[UnsupportedVersionException], () => sendMetadataRequest(new MetadataRequest(requestData(List(topic4), false), 3.toShort)))

    // V4 and higher support a configurable allowAutoTopicCreation
    val response3 = sendMetadataRequest(new MetadataRequest.Builder(Seq(topic4, topic5).asJava, false, 4.toShort).build)
    assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, response3.errors.get(topic4))
    assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, response3.errors.get(topic5))
    if (!isKRaftTest()) {
      assertEquals(None, zkClient.getTopicPartitionCount(topic5))
    }
  }

  @ParameterizedTest
  @ValueSource(strings = Array("zk", "kraft"))
  def testAutoCreateTopicWithInvalidReplicationFactor(quorum: String): Unit = {
    // Shutdown all but one broker so that the number of brokers is less than the default replication factor
    brokers.tail.foreach(_.shutdown())
    brokers.tail.foreach(_.awaitShutdown())

    val topic1 = "testAutoCreateTopic"
    val response1 = sendMetadataRequest(new MetadataRequest.Builder(Seq(topic1).asJava, true).build)
    assertEquals(1, response1.topicMetadata.size)
    val topicMetadata = response1.topicMetadata.asScala.head
    if (isKRaftTest()) {
      assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, topicMetadata.error)
    } else {
      assertEquals(Errors.INVALID_REPLICATION_FACTOR, topicMetadata.error)
    }
    assertEquals(topic1, topicMetadata.topic)
    assertEquals(0, topicMetadata.partitionMetadata.size)
  }

  @ParameterizedTest
  @ValueSource(strings = Array("zk"))
  def testAutoCreateOfCollidingTopics(quorum: String): Unit = {
    // The two names differ only in '.' vs '_', which collide in metric names.
    val topic1 = "testAutoCreate.Topic"
    val topic2 = "testAutoCreate_Topic"
    val response1 = sendMetadataRequest(new MetadataRequest.Builder(Seq(topic1, topic2).asJava, true).build)
    assertEquals(2, response1.topicMetadata.size)

    val responseMap = response1.topicMetadata.asScala.map(metadata => (metadata.topic(), metadata.error)).toMap

    assertEquals(Set(topic1, topic2), responseMap.keySet)
    // The topic creation will be delayed, and the name collision error will be swallowed.
    assertEquals(Set(Errors.LEADER_NOT_AVAILABLE, Errors.INVALID_TOPIC_EXCEPTION), responseMap.values.toSet)

    val topicCreated = responseMap.head._1
    TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, topicCreated, 0)
    TestUtils.waitForPartitionMetadata(brokers, topicCreated, 0)

    // retry the metadata for the first auto created topic
    val response2 = sendMetadataRequest(new MetadataRequest.Builder(Seq(topicCreated).asJava, true).build)
    val topicMetadata1 = response2.topicMetadata.asScala.head
    assertEquals(Errors.NONE, topicMetadata1.error)
    assertEquals(Seq(Errors.NONE), topicMetadata1.partitionMetadata.asScala.map(_.error))
    assertEquals(1, topicMetadata1.partitionMetadata.size)
    val partitionMetadata = topicMetadata1.partitionMetadata.asScala.head
    assertEquals(0, partitionMetadata.partition)
    assertEquals(2, partitionMetadata.replicaIds.size)
    assertTrue(partitionMetadata.leaderId.isPresent)
    assertTrue(partitionMetadata.leaderId.get >= 0)
  }

  @ParameterizedTest
  @ValueSource(strings = Array("zk", "kraft"))
  def testAllTopicsRequest(quorum: String): Unit = {
    // create some topics
    createTopic("t1", 3, 2)
    createTopic("t2", 3, 2)

    // v0, Empty list represents all topics
    val metadataResponseV0 = sendMetadataRequest(new MetadataRequest(requestData(List(), true), 0.toShort))
    assertTrue(metadataResponseV0.errors.isEmpty, "V0 Response should have no errors")
    assertEquals(2, metadataResponseV0.topicMetadata.size(), "V0 Response should have 2 (all) topics")

    // v1, Null represents all topics
    val metadataResponseV1 = sendMetadataRequest(MetadataRequest.Builder.allTopics.build(1.toShort))
    assertTrue(metadataResponseV1.errors.isEmpty, "V1 Response should have no errors")
    assertEquals(2, metadataResponseV1.topicMetadata.size(), "V1 Response should have 2 (all) topics")
  }

  @ParameterizedTest
  @ValueSource(strings = Array("zk", "kraft"))
  def testTopicIdsInResponse(quorum: String): Unit = {
    val replicaAssignment = Map(0 -> Seq(1, 2, 0), 1 -> Seq(2, 0, 1))
    val topic1 = "topic1"
    val topic2 = "topic2"
    createTopicWithAssignment(topic1, replicaAssignment)
    createTopicWithAssignment(topic2, replicaAssignment)

    // if version < 9, return ZERO_UUID in MetadataResponse
    val resp1 = sendMetadataRequest(new MetadataRequest.Builder(Seq(topic1, topic2).asJava, true, 0, 9).build(), Some(anySocketServer))
    assertEquals(2, resp1.topicMetadata.size)
    resp1.topicMetadata.forEach { topicMetadata =>
      assertEquals(Errors.NONE, topicMetadata.error)
      assertEquals(Uuid.ZERO_UUID, topicMetadata.topicId())
    }

    // from version 10, UUID will be included in MetadataResponse
    val resp2 = sendMetadataRequest(new MetadataRequest.Builder(Seq(topic1, topic2).asJava, true, 10, 10).build(), Some(anySocketServer))
    assertEquals(2, resp2.topicMetadata.size)
    resp2.topicMetadata.forEach { topicMetadata =>
      assertEquals(Errors.NONE, topicMetadata.error)
      assertNotEquals(Uuid.ZERO_UUID, topicMetadata.topicId())
      assertNotNull(topicMetadata.topicId())
    }
  }

  /**
   * Preferred replica should be the first item in the replicas list
   */
  @ParameterizedTest
  @ValueSource(strings = Array("zk", "kraft"))
  def testPreferredReplica(quorum: String): Unit = {
    val replicaAssignment = Map(0 -> Seq(1, 2, 0), 1 -> Seq(2, 0, 1))
    createTopicWithAssignment("t1", replicaAssignment)
    // Test metadata on two different brokers to ensure that metadata propagation works correctly
    val responses = Seq(0, 1).map(index =>
      sendMetadataRequest(new MetadataRequest.Builder(Seq("t1").asJava, true).build(),
        Some(brokers(index).socketServer)))
    responses.foreach { response =>
      assertEquals(1, response.topicMetadata.size)
      val topicMetadata = response.topicMetadata.iterator.next()
      assertEquals(Errors.NONE, topicMetadata.error)
      assertEquals("t1", topicMetadata.topic)
      assertEquals(Set(0, 1), topicMetadata.partitionMetadata.asScala.map(_.partition).toSet)
      topicMetadata.partitionMetadata.forEach { partitionMetadata =>
        val assignment = replicaAssignment(partitionMetadata.partition)
        assertEquals(assignment, partitionMetadata.replicaIds.asScala)
        assertEquals(assignment, partitionMetadata.inSyncReplicaIds.asScala)
        assertEquals(Optional.of(assignment.head), partitionMetadata.leaderId)
      }
    }
  }

  @ParameterizedTest
  @ValueSource(strings = Array("zk", "kraft"))
  def testReplicaDownResponse(quorum: String): Unit = {
    val replicaDownTopic = "replicaDown"
    val replicaCount = 3

    // create a topic with 3 replicas
    createTopic(replicaDownTopic, 1, replicaCount)

    // Kill a replica node that is not the leader
    val metadataResponse = sendMetadataRequest(new MetadataRequest.Builder(List(replicaDownTopic).asJava, true).build())
    val partitionMetadata = metadataResponse.topicMetadata.asScala.head.partitionMetadata.asScala.head
    val downNode = brokers.find { broker =>
      val serverId = broker.dataPlaneRequestProcessor.brokerId
      val leaderId = partitionMetadata.leaderId
      val replicaIds = partitionMetadata.replicaIds.asScala
      leaderId.isPresent && leaderId.get() != serverId && replicaIds.contains(serverId)
    }.get
    downNode.shutdown()

    TestUtils.waitUntilTrue(() => {
      val response = sendMetadataRequest(new MetadataRequest.Builder(List(replicaDownTopic).asJava, true).build())
      !response.brokers.asScala.exists(_.id == downNode.dataPlaneRequestProcessor.brokerId)
    }, "Replica was not found down", 50000)

    // Validate version 0 still filters unavailable replicas and contains error
    val v0MetadataResponse = sendMetadataRequest(new MetadataRequest(requestData(List(replicaDownTopic), true), 0.toShort))
    val v0BrokerIds = v0MetadataResponse.brokers().asScala.map(_.id).toSeq
    assertTrue(v0MetadataResponse.errors.isEmpty, "Response should have no errors")
    assertFalse(v0BrokerIds.contains(downNode.config.brokerId), s"The downed broker should not be in the brokers list")
    assertTrue(v0MetadataResponse.topicMetadata.size == 1, "Response should have one topic")
    val v0PartitionMetadata = v0MetadataResponse.topicMetadata.asScala.head.partitionMetadata.asScala.head
    assertTrue(v0PartitionMetadata.error == Errors.REPLICA_NOT_AVAILABLE, "PartitionMetadata should have an error")
    assertTrue(v0PartitionMetadata.replicaIds.size == replicaCount - 1, s"Response should have ${replicaCount - 1} replicas")

    // Validate version 1 returns unavailable replicas with no error
    val v1MetadataResponse = sendMetadataRequest(new MetadataRequest.Builder(List(replicaDownTopic).asJava, true).build(1))
    val v1BrokerIds = v1MetadataResponse.brokers().asScala.map(_.id).toSeq
    assertTrue(v1MetadataResponse.errors.isEmpty, "Response should have no errors")
    assertFalse(v1BrokerIds.contains(downNode.config.brokerId), s"The downed broker should not be in the brokers list")
    assertEquals(1, v1MetadataResponse.topicMetadata.size, "Response should have one topic")
    val v1PartitionMetadata = v1MetadataResponse.topicMetadata.asScala.head.partitionMetadata.asScala.head
    assertEquals(Errors.NONE, v1PartitionMetadata.error, "PartitionMetadata should have no errors")
    assertEquals(replicaCount, v1PartitionMetadata.replicaIds.size, s"Response should have $replicaCount replicas")
  }

  @ParameterizedTest
  @ValueSource(strings = Array("zk", "kraft"))
  def testIsrAfterBrokerShutDownAndJoinsBack(quorum: String): Unit = {
    // Waits until every active broker reports the expected ISR for the topic's
    // first partition.
    def checkIsr[B <: KafkaBroker](
      brokers: Seq[B],
      topic: String
    ): Unit = {
      val activeBrokers = brokers.filter(_.brokerState != BrokerState.NOT_RUNNING)
      val expectedIsr = activeBrokers.map(_.config.brokerId).toSet

      // Assert that topic metadata at new brokers is updated correctly
      activeBrokers.foreach { broker =>
        var actualIsr = Set.empty[Int]
        TestUtils.waitUntilTrue(() => {
          val metadataResponse = sendMetadataRequest(new MetadataRequest.Builder(Seq(topic).asJava, false).build,
            Some(brokerSocketServer(broker.config.brokerId)))
          val firstPartitionMetadata = metadataResponse.topicMetadata.asScala.headOption.flatMap(_.partitionMetadata.asScala.headOption)
          actualIsr = firstPartitionMetadata.map { partitionMetadata =>
            partitionMetadata.inSyncReplicaIds.asScala.map(Int.unbox).toSet
          }.getOrElse(Set.empty)
          expectedIsr == actualIsr
        }, s"Topic metadata not updated correctly in broker $broker\n" +
          s"Expected ISR: $expectedIsr \n" +
          s"Actual ISR : $actualIsr")
      }
    }

    val topic = "isr-after-broker-shutdown"
    val replicaCount = 3
    createTopic(topic, 1, replicaCount)

    brokers.last.shutdown()
    brokers.last.awaitShutdown()
    brokers.last.startup()

    checkIsr(brokers, topic)
  }

  @ParameterizedTest
  @ValueSource(strings = Array("zk", "kraft"))
  def testAliveBrokersWithNoTopics(quorum: String): Unit = {
    // Waits until the cluster reports `expectedBrokersCount` brokers, then checks
    // that every running broker serves the same broker list and topic metadata.
    def checkMetadata[B <: KafkaBroker](
      brokers: Seq[B],
      expectedBrokersCount: Int
    ): Unit = {
      var response: Option[MetadataResponse] = None
      TestUtils.waitUntilTrue(() => {
        val metadataResponse = sendMetadataRequest(MetadataRequest.Builder.allTopics.build,
          Some(anySocketServer))
        response = Some(metadataResponse)
        metadataResponse.brokers.size == expectedBrokersCount
      }, s"Expected $expectedBrokersCount brokers, but there are ${response.get.brokers.size}")

      val brokersSorted = response.get.brokers.asScala.toSeq.sortBy(_.id)
      val topicMetadataSorted = response.get.topicMetadata.asScala.toSeq.sortBy(_.topic)
      // Assert that metadata is propagated correctly
      brokers.filter(_.brokerState == BrokerState.RUNNING).foreach { broker =>
        TestUtils.waitUntilTrue(() => {
          val metadataResponse = sendMetadataRequest(MetadataRequest.Builder.allTopics.build,
            Some(brokerSocketServer(broker.config.brokerId)))
          val brokers = metadataResponse.brokers.asScala.toSeq.sortBy(_.id)
          val topicMetadata = metadataResponse.topicMetadata.asScala.toSeq.sortBy(_.topic)
          // Fix: the original second conjunct compared this response's topic metadata
          // with itself (always true); compare against the reference response instead.
          brokersSorted == brokers && topicMetadataSorted == topicMetadata
        }, s"Topic metadata not updated correctly")
      }
    }

    val brokerToShutdown = if (isKRaftTest()) {
      brokers.last
    } else {
      servers.filterNot(_.kafkaController.isActive).last
    }
    brokerToShutdown.shutdown()
    brokerToShutdown.awaitShutdown()
    checkMetadata(brokers, brokers.size - 1)

    brokerToShutdown.startup()
    checkMetadata(brokers, brokers.size)
  }
}
| TiVo/kafka | core/src/test/scala/unit/kafka/server/MetadataRequestTest.scala | Scala | apache-2.0 | 19,406 |
package com.twitter.finagle.client
import com.twitter.finagle.Stack.Module0
import com.twitter.finagle._
import com.twitter.finagle.factory.BindingFactory
import com.twitter.finagle.loadbalancer.LoadBalancerFactory
import com.twitter.finagle.service.FailFastFactory.FailFast
import com.twitter.finagle.stats.InMemoryStatsReceiver
import com.twitter.finagle.util.StackRegistry
import com.twitter.finagle.{param, Name}
import com.twitter.util._
import java.net.InetSocketAddress
import org.junit.runner.RunWith
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import org.scalatest.FunSuite
import org.scalatest.junit.{AssertionsForJUnit, JUnitRunner}
@RunWith(classOf[JUnitRunner])
class StackClientTest extends FunSuite
with StringClient
with AssertionsForJUnit
with Eventually
with IntegrationPatience {
// Shared fixture: a string client wired to an in-memory stats receiver so tests
// can assert on emitted counters and gauges.
trait Ctx {
  val sr = new InMemoryStatsReceiver
  val client = stringClient
    .configured(param.Stats(sr))
}
// Stats scope precedence: dest string < param.Label < label embedded in the dest.
test("client stats are scoped to label")(new Ctx {
  // use dest when no label is set
  client.newService("inet!localhost:8080")
  eventually {
    assert(sr.counters(Seq("inet!localhost:8080", "loadbalancer", "adds")) === 1)
  }

  // use param.Label when set
  client.configured(param.Label("myclient")).newService("localhost:8080")
  eventually {
    assert(sr.counters(Seq("myclient", "loadbalancer", "adds")) === 1)
  }

  // use evaled label when both are set
  client.configured(param.Label("myclient")).newService("othername=localhost:8080")
  eventually {
    assert(sr.counters(Seq("othername", "loadbalancer", "adds")) === 1)
  }
})
test("Client added to client registry")(new Ctx {
  ClientRegistry.clear()

  val name = "testClient"
  // Building two clients under the same label must yield a single registry entry.
  client.newClient(Name.bound(new InetSocketAddress(8080)), name)
  client.newClient(Name.bound(new InetSocketAddress(8080)), name)

  assert(ClientRegistry.registrants.count { e: StackRegistry.Entry =>
    val param.Label(actual) = e.params[param.Label]
    name == actual
  } === 1)
})
test("FailFast is respected") {
  val ctx = new Ctx { }

  val ex = new RuntimeException("lol")
  // Stack module whose factory fails every request with `ex`.
  val alwaysFail = new Module0[ServiceFactory[String, String]] {
    val role = Stack.Role("lol")
    val description = "lool"
    def make(next: ServiceFactory[String, String]) =
      ServiceFactory.apply(() => Future.exception(ex))
  }

  val alwaysFailStack = new StackBuilder(stack.nilStack[String, String])
    .push(alwaysFail)
    .result
  val stk = ctx.client.stack.concat(alwaysFailStack)

  // Builds a client over the always-failing stack, optionally forcing the
  // FailFast parameter on or off (None leaves the default).
  def newClient(name: String, failFastOn: Option[Boolean]): Service[String, String] = {
    var stack = ctx.client
      .configured(param.Label(name))
      .withStack(stk)
    failFastOn.foreach { ffOn =>
      stack = stack.configured(FailFast(ffOn))
    }
    val client = stack.newClient("/$/inet/localhost/0")
    new FactoryToService[String, String](client)
  }

  def testClient(name: String, failFastOn: Option[Boolean]): Unit = {
    val svc = newClient(name, failFastOn)
    val e = intercept[RuntimeException] { Await.result(svc("hi")) }
    assert(e === ex)
    failFastOn match {
      case Some(on) if !on =>
        // FailFast disabled: the endpoint is never marked dead, and the raw
        // failure keeps surfacing on subsequent requests.
        assert(ctx.sr.counters.get(Seq(name, "failfast", "marked_dead")) === None)
        intercept[RuntimeException] { Await.result(svc("hi2")) }
      case _ =>
        // FailFast enabled or default: the endpoint is marked dead and
        // subsequent requests fail fast.
        eventually {
          assert(ctx.sr.counters(Seq(name, "failfast", "marked_dead")) === 1)
        }
        intercept[FailedFastException] { Await.result(svc("hi2")) }
    }
  }

  testClient("ff-client-default", None)
  testClient("ff-client-enabled", Some(true))
  testClient("ff-client-disabled", Some(false))
}
test("FactoryToService close propagated to underlying service") {
  /*
   * This test ensures that the following one doesn't succeed vacuously.
   */
  var closed = false

  // A factory whose service records when it is closed.
  val underlyingFactory = new ServiceFactory[Unit, Unit] {
    def apply(conn: ClientConnection) = Future.value(new Service[Unit, Unit] {
      def apply(request: Unit): Future[Unit] = Future.Unit

      override def close(deadline: Time) = {
        closed = true
        Future.Done
      }
    })
    def close(deadline: Time) = Future.Done
  }

  val stack = StackClient.newStack[Unit, Unit]
    .concat(Stack.Leaf(Stack.Role("role"), underlyingFactory))
    // don't pool or else we don't see underlying close until service is ejected from pool
    .remove(DefaultPool.Role)

  val factory = stack.make(Stack.Params.empty +
    FactoryToService.Enabled(true) +

    // default Dest is /$/fail
    BindingFactory.Dest(Name.Path(Path.read("/$/inet/localhost/0"))))

  val service = new FactoryToService(factory)
  Await.result(service(()))

  // FactoryToService must have closed the per-request service.
  assert(closed)
}
test("prepFactory above FactoryToService") {
  /*
   * This approximates code in finagle-http which wraps services (in
   * prepFactory) so the close is delayed until the chunked response
   * has been read. We need prepFactory above FactoryToService or
   * else FactoryToService closes the underlying service too soon.
   */
  var closed = false

  // Same recording factory as the previous test.
  val underlyingFactory = new ServiceFactory[Unit, Unit] {
    def apply(conn: ClientConnection) = Future.value(new Service[Unit, Unit] {
      def apply(request: Unit): Future[Unit] = Future.Unit

      override def close(deadline: Time) = {
        closed = true
        Future.Done
      }
    })
    def close(deadline: Time) = Future.Done
  }

  val stack = StackClient.newStack[Unit, Unit]
    .concat(Stack.Leaf(Stack.Role("role"), underlyingFactory))
    // don't pool or else we don't see underlying close until service is ejected from pool
    .remove(DefaultPool.Role)

    // prepFactory wraps each service so that close never completes, simulating
    // a deferred close like finagle-http's chunked-response handling.
    .replace(StackClient.Role.prepFactory, { next: ServiceFactory[Unit, Unit] =>
      next map { service: Service[Unit, Unit] =>
        new ServiceProxy[Unit, Unit](service) {
          override def close(deadline: Time) = Future.never
        }
      }
    })

  val factory = stack.make(Stack.Params.empty +
    FactoryToService.Enabled(true) +

    // default Dest is /$/fail
    BindingFactory.Dest(Name.Path(Path.read("/$/inet/localhost/0"))))

  val service = new FactoryToService(factory)
  Await.result(service(()))

  // The wrapper swallowed the close, so the underlying service stays open.
  assert(!closed)
}
// Fixture for requeue tests: a stub load balancer whose service always fails with
// a WriteException, with hooks to observe the request count and flip availability
// mid-test.
trait RequeueCtx {
  var count = 0
  var _status: Status = Status.Open

  // When runSideEffect(count) is true, sideEffect() runs before the failure.
  var runSideEffect = (_: Int) => false
  var sideEffect = () => ()

  val stubLB = new ServiceFactory[String, String] {
    def apply(conn: ClientConnection) = Future.value(new Service[String, String] {
      def apply(request: String): Future[String] = {
        count += 1
        if (runSideEffect(count)) sideEffect()
        Future.exception(WriteException(new Exception("boom")))
      }

      override def close(deadline: Time) = Future.Done
    })

    def close(deadline: Time) = Future.Done

    // Reports whatever _status currently holds, letting tests change it live.
    override def status = _status
  }

  val sr = new InMemoryStatsReceiver
  val client = stringClient.configured(param.Stats(sr))
  // Swap the real load balancer for the stub.
  val stk = client.stack.replace(
    LoadBalancerFactory.role,
    (_: ServiceFactory[String, String]) => stubLB
  )

  val cl = client
    .withStack(stk)
    .configured(param.Label("myclient"))
    .newClient("/$/inet/localhost/0")

  def requeues = sr.counters.get(Seq("myclient", "requeue", "requeues"))
  def budget = sr.gauges(Seq("myclient", "requeue", "budget"))()
}
  // A failing request against an Open balancer is requeued until the entire
  // requeue budget is consumed.
  test("requeue failing requests when the stack is Open")(new RequeueCtx {
    val session = cl()
    val b = budget
    // failing request and Open load balancer => max requeues
    Await.ready(session.map(_("hi")))
    assert(requeues === Some(b))
    assert(budget === 0)
  })
  // A Busy or Closed balancer means the session is unusable; failures must
  // not be requeued in either state.
  for (status <- Seq(Status.Busy, Status.Closed)) {
    test(s"don't requeue failing requests when the stack is $status")(new RequeueCtx {
      // failing request and Busy | Closed load balancer => zero requeues
      _status = status
      Await.ready(cl().map(_("hi")))
      assert(!requeues.isDefined)
    })
  }
  // Requeueing stops as soon as the balancer's status changes away from Open.
  test("dynamically stop requeuing")(new RequeueCtx {
    // load balancer begins Open, becomes Busy after 10 requeues => 10 requeues
    _status = Status.Open
    runSideEffect = _ > 10
    sideEffect = () => _status = Status.Busy
    Await.ready(cl().map(_("hi")))
    assert(requeues === Some(10))
  })
  // Failures while acquiring a service draw on a fixed budget independent of
  // the per-request budget, which therefore remains positive.
  test("service acquisition requeues use a separate fixed budget")(new RequeueCtx {
    override val stubLB = new ServiceFactory[String, String] {
      def apply(conn: ClientConnection) = Future.exception(
        // rejected failures are marked restartable, hence requeueable
        Failure.rejected("unable to establish session")
      )
      def close(deadline: Time) = Future.Done
    }
    intercept[Failure] { Await.result(cl()) }
    assert(requeues.isDefined)
    assert(budget > 0)
  })
  // A non-restartable acquisition failure must not be requeued.
  test("service acquisition requeues respect Failure.Restartable")(new RequeueCtx {
    override val stubLB = new ServiceFactory[String, String] {
      def apply(conn: ClientConnection) = Future.exception(
        Failure("don't restart this!")
      )
      def close(deadline: Time) = Future.Done
    }
    intercept[Failure] { Await.result(cl()) }
    assert(!requeues.isDefined)
    assert(budget > 0)
  })
  // Acquisition against a non-Open factory is not requeued either.
  test("service acquisition requeues respect Status.Open")(new RequeueCtx {
    _status = Status.Closed
    Await.result(cl())
    assert(!requeues.isDefined)
    assert(budget > 0)
  })
  test("Requeues all go to the same cluster in a Union") {
    /*
     * Once we have distributed a request to a particular cluster (in
     * BindingFactory), retries should go to the same cluster rather
     * than being redistributed (possibly to a different cluster).
     */
    // Counts requests and always fails with a retryable write exception.
    class CountFactory extends ServiceFactory[Unit, Unit] {
      var count = 0
      val service = new Service[Unit, Unit] {
        def apply(request: Unit): Future[Unit] = {
          count = count + 1
          Future.exception(WriteException(null))
        }
      }
      def apply(conn: ClientConnection) = Future.value(service)
      def close(deadline: Time) = Future.Done
    }
    val fac1 = new CountFactory
    val fac2 = new CountFactory
    val addr1 = new InetSocketAddress(1729)
    val addr2 = new InetSocketAddress(1730)
    // override name resolution to a Union of two addresses
    val dtab = new Dtab(Dtab.base) {
      override def lookup(path: Path): Activity[NameTree[Name]] =
        Activity.value(NameTree.Union(
          NameTree.Weighted(1D, NameTree.Leaf(Name.bound(addr1))),
          NameTree.Weighted(1D, NameTree.Leaf(Name.bound(addr2)))))
    }
    val stack = StackClient.newStack[Unit, Unit]
      // direct the two addresses to the two service factories instead
      // of trying to connect to them
      .replace(LoadBalancerFactory.role,
        new Stack.Module1[LoadBalancerFactory.Dest, ServiceFactory[Unit, Unit]] {
          val role = new Stack.Role("role")
          val description = "description"
          def make(dest: LoadBalancerFactory.Dest, next: ServiceFactory[Unit, Unit]) = {
            val LoadBalancerFactory.Dest(va) = dest
            va.sample match {
              case Addr.Bound(addrs, _) if addrs == Set(addr1) => fac1
              case Addr.Bound(addrs, _) if addrs == Set(addr2) => fac2
              case _ => throw new IllegalArgumentException("wat")
            }
          }
        })
    val sr = new InMemoryStatsReceiver
    val service =
      new FactoryToService(stack.make(Stack.Params.empty +
        FactoryToService.Enabled(true) +
        param.Stats(sr) +
        BindingFactory.BaseDtab(() => dtab)))
    intercept[ChannelWriteException] {
      Await.result(service(()))
    }
    val requeues = sr.counters(Seq("requeue", "requeues"))
    // all retries go to one service
    // One factory must have served the initial request plus every requeue;
    // the other must never have been touched.
    assert(
      (fac1.count == requeues+1 && fac2.count == 0) ||
      (fac2.count == requeues+1 && fac1.count == 0))
  }
  // Pure compilation test: `configured` on a StackBasedClient must preserve
  // the StackBasedClient type (no widening), for both overloads. The unused
  // vals are intentional — compiling is the assertion.
  test("StackBasedClient.configured is a StackClient") {
    // compilation test
    val client: StackBasedClient[String, String] = stringClient
    val client2: StackBasedClient[String, String] =
      client.configured(param.Label("foo"))
    val client3: StackBasedClient[String, String] =
      client.configured[param.Label]((param.Label("foo"), param.Label.param))
  }
  // Same type-preservation property for StackClient itself.
  test("StackClient.configured is a StackClient") {
    // compilation test
    val client: StackClient[String, String] = stringClient
    val client2: StackClient[String, String] =
      client.configured(param.Label("foo"))
    val client3: StackClient[String, String] =
      client.configured[param.Label]((param.Label("foo"), param.Label.param))
  }
}
| Krasnyanskiy/finagle | finagle-core/src/test/scala/com/twitter/finagle/client/StackClientTest.scala | Scala | apache-2.0 | 12,694 |
package uk.gov.gds.ier.transaction.ordinary.openRegister
import uk.gov.gds.ier.test.ControllerTestSuite
/**
 * Controller tests for the "open register" step of the ordinary
 * register-to-vote flow: page rendering, form binding, navigation when the
 * step is incomplete, and the equivalent edit-mode endpoints.
 */
class OpenRegisterControllerTests extends ControllerTestSuite {
  behavior of "OpenRegisterController.get"
  it should "display the page" in {
    running(FakeApplication()) {
      val Some(result) = route(
        FakeRequest(GET, "/register-to-vote/open-register").withIerSession()
      )
      status(result) should be(OK)
      contentType(result) should be(Some("text/html"))
      contentAsString(result) should include("Do you want to include your name and address on the open register?")
      contentAsString(result) should include("/register-to-vote/open-register")
    }
  }
  behavior of "OpenRegisterController.post"
  // NOTE(review): the description says "Previous Name step" but the assertion
  // expects a redirect to /postal-vote — the test name appears stale.
  it should "bind successfully and redirect to the Previous Name step" in {
    running(FakeApplication()) {
      val Some(result) = route(
        FakeRequest(POST, "/register-to-vote/open-register")
          .withIerSession()
          .withFormUrlEncodedBody("openRegister.optIn" -> "true")
      )
      status(result) should be(SEE_OTHER)
      redirectLocation(result) should be(Some("/register-to-vote/postal-vote"))
    }
  }
  // With a complete application in session, the step jumps straight to the
  // confirmation page instead of the next step.
  it should "bind successfully and redirect to the confirmation step with complete Application" in {
    running(FakeApplication()) {
      val Some(result) = route(
        FakeRequest(POST, "/register-to-vote/open-register")
          .withIerSession()
          .withApplication(completeOrdinaryApplication)
          .withFormUrlEncodedBody("openRegister.optIn" -> "true")
      )
      status(result) should be(SEE_OTHER)
      redirectLocation(result) should be(Some("/register-to-vote/confirmation"))
    }
  }
  // An empty submission is accepted and moves on rather than producing
  // validation errors.
  it should "not display any errors because we are evil dark patterny" in {
    running(FakeApplication()) {
      val Some(result) = route(
        FakeRequest(POST, "/register-to-vote/open-register").withIerSession()
      )
      status(result) should be(SEE_OTHER)
      redirectLocation(result) should be(Some("/register-to-vote/postal-vote"))
    }
  }
  behavior of "Completing a prior step when this question is incomplete"
  // When the open-register answer is missing, completing an earlier step
  // routes the user back to this page.
  it should "stop on this page" in {
    running(FakeApplication()) {
      val Some(result) = route(
        FakeRequest(POST, "/register-to-vote/country-of-residence")
          .withIerSession()
          .withApplication(completeOrdinaryApplication.copy(openRegisterOptin = None))
          .withFormUrlEncodedBody(
            "country.residence" -> "England"
          )
      )
      status(result) should be(SEE_OTHER)
      redirectLocation(result) should be(Some("/register-to-vote/open-register"))
    }
  }
  behavior of "OpenRegisterController.editGet"
  it should "display the page" in {
    running(FakeApplication()) {
      val Some(result) = route(
        FakeRequest(GET, "/register-to-vote/edit/open-register").withIerSession()
      )
      status(result) should be(OK)
      contentType(result) should be(Some("text/html"))
      contentAsString(result) should include("Do you want to include your name and address on the open register?")
      contentAsString(result) should include("/register-to-vote/edit/open-register")
    }
  }
  behavior of "OpenRegisterController.editPost"
  // NOTE(review): same stale "Previous Name" description as the POST test.
  it should "bind successfully and redirect to the Previous Name step" in {
    running(FakeApplication()) {
      val Some(result) = route(
        FakeRequest(POST, "/register-to-vote/edit/open-register")
          .withIerSession()
          .withFormUrlEncodedBody("openRegister.optIn" -> "true")
      )
      status(result) should be(SEE_OTHER)
      redirectLocation(result) should be(Some("/register-to-vote/postal-vote"))
    }
  }
  it should "bind successfully and redirect to the confirmation step with complete Application" in {
    running(FakeApplication()) {
      val Some(result) = route(
        FakeRequest(POST, "/register-to-vote/edit/open-register")
          .withIerSession()
          .withApplication(completeOrdinaryApplication)
          .withFormUrlEncodedBody("openRegister.optIn" -> "true")
      )
      status(result) should be(SEE_OTHER)
      redirectLocation(result) should be(Some("/register-to-vote/confirmation"))
    }
  }
  it should "not display any errors because we are evil dark patterny" in {
    running(FakeApplication()) {
      val Some(result) = route(
        FakeRequest(POST, "/register-to-vote/edit/open-register").withIerSession()
      )
      status(result) should be(SEE_OTHER)
      redirectLocation(result) should be(Some("/register-to-vote/postal-vote"))
    }
  }
}
| alphagov/ier-frontend | test/uk/gov/gds/ier/transaction/ordinary/openRegister/OpenRegisterControllerTests.scala | Scala | mit | 4,573 |
/*
* Copyright (C) 2013 Alcatel-Lucent.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Licensed to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package molecule
/**
 * Stream channel interfaces.
 *
 * The implicit conversions below lift system-level channels into their
 * stream-level counterparts, so system channels can be used transparently
 * where stream channels are expected.
 */
package object stream {
  /**
   * Create a stream input channel from a system input channel.
   *
   * @param ch the system-level channel
   *
   * @return the stream channel
   */
  implicit def liftIChan[A: Message](ch: channel.IChan[A]): IChan[A] =
    ichan.FrontIChan(ichan.BackIChan(ch))
  /**
   * Create a stream output channel from a system output channel.
   *
   * @param ch the system-level channel
   *
   * @return the stream output channel
   */
  implicit def liftOChan[A: Message](ch: channel.OChan[A]): OChan[A] =
    ochan.BackOChan(ch)
  /**
   * Create a stream output channel factory from a system-level output
   * channel factory.
   *
   * @param factory the system-level channel factory
   *
   * @return the stream output channel factory
   */
  implicit def liftOChanFactory[A: Message](factory: channel.OChanFactory[A]): ochan.OChanFactory[A] =
    ochan.OChanFactory.lift(factory)
} | molecule-labs/molecule | molecule-core/src/main/scala/molecule/stream/package.scala | Scala | apache-2.0 | 1,478 |
// NOTE(review): this file lives under test/files/neg, so it appears to be a
// scalac regression test whose compilation is expected to FAIL — do not "fix".
case class AffineImageShape(axes: Seq[Int]) {
  // `axes` here is Array[Int], matching this secondary constructor's own
  // parameter type, so `this(axes)` resolves to the constructor itself —
  // a self-invocation the compiler must reject.
  def this(axes: Array[Int]) = this(axes)
}
class X(i: Int) {
  // Each secondary constructor delegates to one defined later in the class
  // body; scalac requires a called constructor's definition to precede the
  // caller, so this is presumably meant to be rejected (negative test).
  def this(d: Double) = this(d.toLong)
  def this(n: Long) = this(n.toInt)
}
| felixmulder/scala | test/files/neg/t9045.scala | Scala | bsd-3-clause | 186 |
// These are meant to be typed into the REPL. You can also run
// scala -Xnojline < repl-session.scala to run them all at once.
// Demonstrates the different ways of consuming the Option returned by Map.get.
val scores = Map("Alice" -> 1729, "Fred" -> 42)
// 1. Pattern match on Some/None.
scores.get("Alice") match {
  case Some(score) => println(score)
  case None => println("No score")
}
// 2. Explicit isEmpty/get check (works, but calling .get is discouraged style).
val alicesScore = scores.get("Alice")
if (alicesScore.isEmpty) { println("No score")
} else println(alicesScore.get)
// 3. getOrElse, on the Option or directly on the Map.
println(alicesScore.getOrElse("No score"))
println(scores.getOrElse("Alice", "No score"))
// 4. Treat the Option as a zero-or-one element collection.
for (score <- scores.get("Alice")) println(score)
scores.get("Alice").foreach(println _)
| yeahnoob/scala-impatient-2e-code | src/ch14/sec16/repl-session.scala | Scala | gpl-3.0 | 581 |
package main.scala
import org.apache.spark.sql.simba.SimbaSession
import org.apache.spark.sql.simba.index._
/**
* Created by and on 3/20/17.
*/
object PartitionViewer {
  /** A 2-D point parsed from one CSV row: id,x,y. */
  case class PointItem(id: Int, x: Double, y: Double)

  // Run configuration; overridable before main() is invoked.
  var master: String = "local[*]"
  var filename: String = "/opt/Datasets/Beijing/P10K.csv"
  var epsilon: Double = 10.0
  var logs: String = "ERROR"

  /**
   * Reads a CSV of points, builds an R-tree index with Simba, computes the
   * MBR (minimum bounding rectangle) of every partition and writes two
   * GeoJSON files: the raw MBRs and the MBRs shrunk by `epsilon`.
   *
   * @param args command line arguments (currently unused; see the config vars)
   */
  def main(args: Array[String]): Unit = {
    val simbaSession = SimbaSession
      .builder()
      .master(master)
      .appName("PartitionViewer")
      .config("simba.index.partitions", "256")
      .getOrCreate()
    import simbaSession.implicits._
    import simbaSession.simbaImplicits._
    val sc = simbaSession.sparkContext
    sc.setLogLevel(logs)

    val points = sc.textFile(filename, 10)
      .map(_.split(","))
      .map(p => PointItem(id = p(0).trim.toInt, x = p(1).trim.toDouble, y = p(2).trim.toDouble))
      .toDS()
    println(points.count())
    points.index(RTreeType, "rt", Array("x", "y"))

    // One (min_x, min_y, max_x, max_y, label, size) record per partition.
    val mbrs = points.rdd.mapPartitionsWithIndex { (index, iterator) =>
      var minX = Double.MaxValue
      var minY = Double.MaxValue
      var maxX = Double.MinValue
      var maxY = Double.MinValue
      var size = 0
      // Stream the partition directly; the original `iterator.toList` held
      // the entire partition in memory just to iterate it once.
      iterator.foreach { row =>
        if (row.x < minX) minX = row.x
        if (row.y < minY) minY = row.y
        if (row.x > maxX) maxX = row.x
        if (row.y > maxY) maxY = row.y
        size += 1
      }
      Iterator((minX, minY, maxX, maxY, s"$index", size))
    }

    // Collect once and reuse; collecting twice (as before) ran the job twice.
    val collected = mbrs.collect()

    val gson = new GeoGSON("4799")
    collected.foreach { row =>
      gson.makeMBR(row._1, row._2, row._3, row._4, row._5, row._6)
    }
    gson.saveGeoJSON("out/RTree_P100K.json")

    // Shrink each MBR by epsilon on every side; skip rectangles too small
    // to survive the shrinking.
    val gson2 = new GeoGSON("4799")
    collected.foreach { row =>
      if (row._3 - row._1 > epsilon && row._4 - row._2 > epsilon) {
        gson2.makeMBR(row._1 + epsilon, row._2 + epsilon, row._3 - epsilon, row._4 - epsilon, row._5, row._6)
      }
    }
    gson2.saveGeoJSON("out/RTree_P100K_buffer.json")

    // Average partition size.
    mbrs.map(r => r._6).toDF("n").agg(Map("n" -> "avg")).show()
    sc.stop()
  }
} | aocalderon/PhD | Y2Q3/PBFE4/src/main/scala/PartitionViewer.scala | Scala | lgpl-3.0 | 2,310 |
package org.ldaniels528.mahout
import java.io.File
import org.apache.mahout.cf.taste.eval.RecommenderBuilder
import org.apache.mahout.cf.taste.impl.eval.AverageAbsoluteDifferenceRecommenderEvaluator
import org.apache.mahout.cf.taste.impl.model.file.FileDataModel
import org.apache.mahout.cf.taste.impl.neighborhood.ThresholdUserNeighborhood
import org.apache.mahout.cf.taste.impl.recommender.GenericUserBasedRecommender
import org.apache.mahout.cf.taste.impl.similarity.PearsonCorrelationSimilarity
import org.apache.mahout.cf.taste.model.DataModel
import org.slf4j.LoggerFactory
/**
 * Sample Evaluation: offline hold-out evaluation of a Mahout user-based
 * recommender (90% training / 10% test split).
 * @author lawrence.daniels@gmail.com
 */
object SampleEvaluation {
  private val logger = LoggerFactory.getLogger(getClass)
  /**
   * Main application entry point
   * @param args the given command line arguments
   */
  def main(args: Array[String]) {
    /*
     * You might ask yourself, how to make sure that your recommender returns good results.
     * Unfortunately, the only way to be really sure about the quality is by doing an A/B
     * test with real users in a live system.
     *
     * We can however try to get a feel of the quality, by statistical offline evaluation.
     * Just keep in mind that this does not replace a test with real users! One way to check
     * whether the recommender returns good results is by doing a hold-out test. We partition
     * our dataset into two sets: a trainingset consisting of 90% of the data and a testset
     * consisting of 10%. Then we train our recommender using the training set and look how
     * well it predicts the unknown interactions in the testset.
     *
     * To test our recommender, we create a class called EvaluateRecommender with a main method
     * and add an inner class called MyRecommenderBuilder that implements the RecommenderBuilder
     * interface. We implement the buildRecommender method and make it setup our user-based
     * recommender:
     */
    val dataModel = new FileDataModel(new File("./src/main/resources/dataset.csv"))
    val evaluator = new AverageAbsoluteDifferenceRecommenderEvaluator()
    val builder = new MyRecommenderBuilder()
    // 0.9 = fraction of data used for training, 1.0 = fraction of users
    // evaluated. The null argument is the DataModelBuilder — presumably the
    // default model construction is used; confirm against the Mahout Javadoc.
    val result = evaluator.evaluate(builder, null, dataModel, 0.9, 1.0)
    // Average absolute difference between predicted and actual preferences;
    // lower is better.
    logger.info(s"result: $result")
  }
  /**
   * Builds the recommender under evaluation: a user-based recommender using
   * Pearson correlation similarity and a 0.1-threshold user neighborhood.
   */
  class MyRecommenderBuilder extends RecommenderBuilder {
    override def buildRecommender(dataModel: DataModel) = {
      val similarity = new PearsonCorrelationSimilarity(dataModel)
      val neighborhood = new ThresholdUserNeighborhood(0.1, similarity, dataModel)
      new GenericUserBasedRecommender(dataModel, neighborhood, similarity)
    }
  }
}
| ldaniels528/mahout_samples | src/main/scala/org/ldaniels528/mahout/SampleEvaluation.scala | Scala | apache-2.0 | 2,624 |
package lila.tv
import scala.concurrent.duration._
import akka.actor._
import akka.actor.ActorSelection
import akka.pattern.{ ask, pipe }
import chess.Color
import play.twirl.api.Html
import lila.db.api._
import lila.game.tube.gameTube
import lila.game.{ Game, GameRepo }
/**
 * Selects and tracks the game currently featured on TV.
 *
 * The current game id is owned by a private actor: `one` queries it, and
 * `Continue` / `Disrupt` messages drive (re-)election of the featured game.
 */
final class Featured(
    rendererActor: ActorSelection,
    system: ActorSystem) {
  import Featured._
  // Short ask timeout: featuring is best-effort, fall back to None quickly.
  implicit private def timeout = makeTimeout(50 millis)
  private type Fuog = Fu[Option[Game]]
  private val bus = system.lilaBus
  // The currently featured game, if any (looked up by the id held in the actor).
  def one: Fuog =
    (actor ? GetGame mapTo manifest[Option[String]]) recover {
      case _: Exception => none
    } flatMap { _ ?? GameRepo.game }
  private[tv] val actor = system.actorOf(Props(new Actor {
    // Id of the currently featured game.
    private var oneId = none[String]
    def receive = {
      case GetGame => sender ! oneId
      case SetGame(game) =>
        oneId = game.id.some
        // Render the new featured game and broadcast it to all sockets.
        rendererActor ? actorApi.RenderFeaturedJs(game) onSuccess {
          case html: Html =>
            val msg = lila.socket.Socket.makeMessage(
              "featured",
              play.api.libs.json.Json.obj(
                "html" -> html.toString,
                "color" -> game.firstColor.name,
                "id" -> game.id))
            bus.publish(lila.hub.actorApi.game.ChangeFeatured(game.id, msg), 'changeFeaturedGame)
        }
        GameRepo setTv game.id
      case Continue =>
        // Keep the current game while it is fresh; otherwise prefer a much
        // better game, then its rematch, then any candidate if it grew old.
        oneId ?? $find.byId[Game] foreach {
          case None => feature foreach elect
          case Some(game) if !fresh(game) => wayBetter(game) orElse rematch(game) orElse featureIfOld(game) foreach elect
          case _ =>
        }
      case Disrupt =>
        // A markedly better game may replace even a fresh featured game.
        oneId ?? $find.byId[Game] foreach {
          case Some(game) if fresh(game) => wayBetter(game) foreach elect
          case _ =>
        }
    }
    def elect(gameOption: Option[Game]) {
      gameOption foreach { self ! SetGame(_) }
    }
    // Fresh = still being played and not olderThan(30) (units per
    // Game.olderThan — presumably seconds; confirm).
    def fresh(game: Game) = game.isBeingPlayed && !game.olderThan(30)
    // A candidate replaces the current game only if it scores >10% higher
    // (see isWayBetter).
    def wayBetter(game: Game): Fuog = feature map {
      case Some(next) if isWayBetter(game, next) => next.some
      case _ => none
    }
    def isWayBetter(g1: Game, g2: Game) =
      score(g2.resetTurns) > (score(g1.resetTurns) * 1.1)
    def rematch(game: Game): Fuog = game.next ?? $find.byId[Game]
    def featureIfOld(game: Game): Fuog = (game olderThan 7) ?? feature
    // Best fresh, variant-acceptable candidate by heuristic score; falls back
    // to a random game when no candidates are found.
    def feature: Fuog = GameRepo.featuredCandidates map { games =>
      Featured.sort(games filter fresh filter Featured.acceptableVariant).headOption
    } orElse GameRepo.random
  }))
  actor ! Continue
}
object Featured {
  // Variants eligible to be featured on TV.
  private val variants = Set[chess.variant.Variant](
    chess.variant.Standard,
    chess.variant.Chess960,
    chess.variant.KingOfTheHill)
  // Actor protocol.
  private case object GetGame
  private case class SetGame(game: Game)
  case object Continue
  case object Disrupt
  // Highest score first.
  def sort(games: List[Game]): List[Game] = games sortBy { -score(_) }
  private def acceptableVariant(g: Game) = variants contains g.variant
  // Weighted sum of normalized heuristics, scaled to an integer
  // (coefficients sum to 3.1, so the range is roughly [0, 3100]).
  private[tv] def score(game: Game): Int = math.round {
    (heuristics map {
      case (fn, coefficient) => heuristicBox(fn(game)) * coefficient
    }).sum * 1000
  }
  private type Heuristic = Game => Float
  private val heuristicBox = box(0 to 1) _
  private val ratingBox = box(1000 to 2700) _
  private val turnBox = box(1 to 25) _
  // Higher-rated players (both colors) and earlier games score higher.
  private val heuristics: List[(Heuristic, Float)] = List(
    ratingHeuristic(Color.White) -> 1.2f,
    ratingHeuristic(Color.Black) -> 1.2f,
    progressHeuristic -> 0.7f)
  // Player rating normalized to [0, 1]; missing ratings default to 1400.
  private[tv] def ratingHeuristic(color: Color): Heuristic = game =>
    ratingBox(game.player(color).rating | 1400)
  // 1 near the start of the game, decreasing to 0 at turn >= 25.
  private[tv] def progressHeuristic: Heuristic = game =>
    1 - turnBox(game.turns)
  // boxes and reduces to 0..1 range
  private[tv] def box(in: Range.Inclusive)(v: Float): Float =
    (math.max(in.start, math.min(v, in.end)) - in.start) / (in.end - in.start).toFloat
}
| danilovsergey/i-bur | modules/tv/src/main/Featured.scala | Scala | mit | 4,002 |
package qrhl.tactic
import java.io.PrintWriter
import qrhl.isabellex.{IsabelleConsts, IsabelleX, RichTerm}
import qrhl.{AmbientSubgoal, QRHLSubgoal, State, Subgoal, Tactic, UserException}
import IsabelleX.{globalIsabelle => GIsabelle}
import GIsabelle.Ops
import de.unruh.isabelle.mlvalue.MLValue
import de.unruh.isabelle.pure.{App, Const}
import hashedcomputation.{Hash, HashTag}
// Implicits
import de.unruh.isabelle.pure.Implicits._
import de.unruh.isabelle.mlvalue.Implicits._
import GIsabelle.isabelleControl
import scala.concurrent.ExecutionContext.Implicits.global
/**
 * Expects subgoal: {A ⊓ R} c ~ d {B ⊓ R}
 * New subgoals:
 * - colocal_pred_qvars R fv(c1,d2)
 * - {A} c ~ d {B}
 *
 * Conditions:
 * - written classical variables of c,d (with index 1,2) do not occur in R
 */
case object FrameRuleTac extends Tactic {
  override val hash: Hash[FrameRuleTac.this.type] = HashTag()()
  override def apply(state: State, goal: Subgoal)(implicit output: PrintWriter): List[Subgoal] = goal match {
    case AmbientSubgoal(_) => throw UserException("Expected qRHL judgment")
    case QRHLSubgoal(left, right, pre, post, assumptions) =>
      // Split the postcondition "B ⊓ R" into B and the frame R.
      val (b, r) = post.isabelleTerm match {
        case App(App(Const(IsabelleConsts.inf, _), b2), r2) => (b2, r2)
        case _ => throw UserException(s"""Postcondition must be of the form "B ⊓ R", not $post""")
      }
      val rRich = RichTerm(GIsabelle.predicateT, r)
      // The precondition must be "A ⊓ R" with syntactically the same R.
      val a = pre.isabelleTerm match {
        case App(App(Const(IsabelleConsts.inf, _), a2), r2) =>
          if (r2!=r)
            throw UserException(s"Rhs of precondition and rhs of postcondition must be equal ($rRich vs ${RichTerm(GIsabelle.predicateT, r2)})")
          a2
        case _ => throw UserException(s"""Precondition must be of the form "A ⊓ R", not $pre""")
      }
      val env = state.environment
      val leftVarUse = left.variableUse(env)
      val rightVarUse = right.variableUse(env)
      /** Classical vars in R */
      val rCVars = rRich.variables(env).classical
      /** Written classical vars in c (indexed) */
      val leftCW1 = leftVarUse.writtenClassical.map(_.index1)
      /** Written classical vars in c occurring in R */
      val leftCWinter = rCVars.intersect(leftCW1)
      // Side condition: the frame R must not mention classical variables
      // written by either program (with the matching index).
      if (leftCWinter.nonEmpty)
        throw UserException(s"Rhs of postcondition ($rRich) and left program share written classical variable(s) ${leftCWinter.mkString(", ")}")
      val rightCW2 = rightVarUse.writtenClassical.map(_.index2)
      val rightCWinter = rCVars.intersect(rightCW2)
      if (rightCWinter.nonEmpty)
        throw UserException(s"Rhs of postcondition ($rRich) and right program share written classical variable(s) ${rightCWinter.mkString(", ")}")
      /** quantum variables in c,d (with index) */
      val qVars12 = leftVarUse.quantum.map(_.index1).union(rightVarUse.quantum.map(_.index2))
      val qVars12list = qVars12.toList.map { v => (v.variableName, v.valueTyp) }
      /** "colocal_pred_qvars R qVars12" */
      val colocality = AmbientSubgoal(RichTerm(Ops.colocalityOp(((r, qVars12list))).retrieveNow))
      // The framed judgment with R stripped from both pre- and postcondition.
      val qrhlSubgoal = QRHLSubgoal(left, right, RichTerm(GIsabelle.predicateT, a), RichTerm(GIsabelle.predicateT, b), assumptions)
      List(colocality, qrhlSubgoal)
  }
} | dominique-unruh/qrhl-tool | src/main/scala/qrhl/tactic/FrameRuleTac.scala | Scala | mit | 3,265 |
package lila.analyse
import chess.Color
import chess.format.Nag
import org.joda.time.DateTime
/**
 * A computer analysis of a game: one [[Info]] per analysed ply.
 *
 * @param id       id of the analysed game
 * @param infos    per-ply engine evaluations
 * @param startPly ply at which the analysis starts
 * @param done     whether the analysis is complete
 * @param date     when the analysis was created
 */
case class Analysis(
    id: String,
    infos: List[Info],
    startPly: Int,
    done: Boolean,
    date: DateTime) {

  // Pair every info with the advice (if any) derived from comparing it to
  // the preceding position.
  lazy val infoAdvices: InfoAdvices =
    (Info.start(startPly) :: infos)
      .sliding(2)
      .collect {
        case List(prev, info) =>
          info -> (if (info.hasVariation) Advice(prev, info) else None)
      }
      .toList

  lazy val advices: List[Advice] = infoAdvices.flatMap(_._2)

  // ply -> UCI
  def bestMoves: Map[Int, String] =
    infos.flatMap { info =>
      info.best.map(move => info.ply -> move.keys)
    }.toMap

  // A finished copy of this analysis carrying the given infos.
  def complete(infos: List[Info]) = copy(
    infos = infos,
    done = true)

  def encode: RawAnalysis =
    RawAnalysis(id, encodeInfos, Some(startPly).filterNot(_ == 0), done, date)

  private def encodeInfos = Info.encodeList(infos)

  // For each color, how many advices of each "bad" NAG kind were produced.
  def summary: List[(Color, List[(Nag, Int)])] =
    Color.all.map { color =>
      val nagCounts = Nag.badOnes.map { nag =>
        nag -> advices.count(adv => adv.color == color && adv.nag == nag)
      }
      color -> nagCounts
    }

  def valid = encodeInfos.replace(";", "").nonEmpty

  // Stalled = finished but invalid, or still pending after six hours.
  def stalled = (done && !valid) || (!done && date.isBefore(DateTime.now minusHours 6))

  def nbEmptyInfos = infos.count(_.isEmpty)

  def emptyRatio: Double = nbEmptyInfos.toDouble / infos.size
}
object Analysis {
  import lila.db.JsTube, JsTube.Helpers._
  import play.api.libs.json._
  // JSON (de)serialization: reads go through RawAnalysis and decode its
  // compact string representation; writes encode back to RawAnalysis.
  private[analyse] lazy val tube = JsTube(
    reader = (__.json update readDate('date)) andThen Reads[Analysis](js =>
      ~(for {
        obj ← js.asOpt[JsObject]
        rawAnalysis ← RawAnalysis.tube.read(obj).asOpt
        analysis ← rawAnalysis.decode
      } yield JsSuccess(analysis): JsResult[Analysis])
    ),
    writer = Writes[Analysis](analysis =>
      RawAnalysis.tube.write(analysis.encode) getOrElse JsUndefined("[db] Can't write analysis " + analysis.id)
    ) andThen (__.json update writeDate('date))
  )
}
// Compact persisted form of Analysis: infos are encoded into a single string.
private[analyse] case class RawAnalysis(
    id: String,
    data: String,
    ply: Option[Int],
    done: Boolean,
    date: DateTime) {
  def decode: Option[Analysis] = (done, data) match {
    // NOTE(review): a done analysis with empty data decodes with done=false —
    // presumably to force re-analysis of invalid records; confirm intent.
    case (true, "") => new Analysis(id, Nil, ~ply, false, date).some
    case (true, d) => Info.decodeList(d, ~ply) map { new Analysis(id, _, ~ply, done, date) }
    case (false, _) => new Analysis(id, Nil, ~ply, false, date).some
  }
}
private[analyse] object RawAnalysis {
  import lila.db.JsTube
  import JsTube.Helpers._
  import play.api.libs.json._
  // Fallback values merged in when reading legacy documents missing fields.
  private def defaults = Json.obj(
    "data" -> "",
    "done" -> false)
  private[analyse] lazy val tube = JsTube(
    (__.json update merge(defaults)) andThen Json.reads[RawAnalysis],
    Json.writes[RawAnalysis])
}
| Happy0/lila | modules/analyse/src/main/Analysis.scala | Scala | mit | 2,722 |
package com.scala.bala.ftp
import akka.actor.Actor
import com.scala.bala.ftp.util.FTPConfigurationReader
// Message protocol for the Downloaders actor.
object Download {
  // Request to download the configured files using the given FTP credentials.
  case class downloadFiles(credential:Array[String])
}
// Actor that downloads every configured file from an FTP server to a local
// folder on receipt of a Download.downloadFiles message.
class Downloaders extends Actor {
  // File names and destination folder come from external FTP configuration.
  val fileNames = FTPConfigurationReader.fileName
  val localPath = FTPConfigurationReader.getConfiguration("LOCAL_FOLDER_LOCATION")
  def receive = {
    case Download.downloadFiles(c) => downloadFiles(c)
  }
  // Fetches each configured file through a single FTP client instance.
  private def downloadFiles(credential:Array[String]) = {
    val client = new FTPClient();
    fileNames.foreach(client.initgetFile(credential, localPath, _))
  }
} | bbalajisg/scala-projects | ftp-client/src/main/scala/com/scala/bala/ftp/Downloaders.scala | Scala | gpl-2.0 | 641 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy
import java.io._
import java.lang.reflect.{InvocationTargetException, Modifier, UndeclaredThrowableException}
import java.net.URL
import java.security.PrivilegedExceptionAction
import java.text.ParseException
import scala.annotation.tailrec
import scala.collection.mutable.{ArrayBuffer, HashMap, Map}
import scala.util.{Properties, Try}
import org.apache.commons.lang3.StringUtils
import org.apache.hadoop.conf.{Configuration => HadoopConfiguration}
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.security.UserGroupInformation
import org.apache.hadoop.yarn.conf.YarnConfiguration
import org.apache.ivy.Ivy
import org.apache.ivy.core.LogOptions
import org.apache.ivy.core.module.descriptor._
import org.apache.ivy.core.module.id.{ArtifactId, ModuleId, ModuleRevisionId}
import org.apache.ivy.core.report.ResolveReport
import org.apache.ivy.core.resolve.ResolveOptions
import org.apache.ivy.core.retrieve.RetrieveOptions
import org.apache.ivy.core.settings.IvySettings
import org.apache.ivy.plugins.matcher.GlobPatternMatcher
import org.apache.ivy.plugins.repository.file.FileRepository
import org.apache.ivy.plugins.resolver.{ChainResolver, FileSystemResolver, IBiblioResolver}
import org.apache.spark._
import org.apache.spark.api.r.RUtils
import org.apache.spark.deploy.rest._
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.launcher.SparkLauncher
import org.apache.spark.util._
/**
 * Whether to submit, kill, or request the status of an application.
 * The latter two operations are currently supported only for standalone and Mesos cluster modes.
 */
private[deploy] object SparkSubmitAction extends Enumeration {
  // NOTE(review): scala.Enumeration; a sealed trait hierarchy would be more
  // idiomatic, but changing it would alter the public value types.
  type SparkSubmitAction = Value
  val SUBMIT, KILL, REQUEST_STATUS = Value
}
/**
* Main gateway of launching a Spark application.
*
* This program handles setting up the classpath with relevant Spark dependencies and provides
* a layer over the different cluster managers and deploy modes that Spark supports.
*/
object SparkSubmit extends CommandLineUtils with Logging {
import DependencyUtils._
  // Cluster managers
  // Bit flags, so a single value can declare support for several managers
  // at once (see ALL_CLUSTER_MGRS below).
  private val YARN = 1
  private val STANDALONE = 2
  private val MESOS = 4
  private val LOCAL = 8
  private val KUBERNETES = 16
  private val ALL_CLUSTER_MGRS = YARN | STANDALONE | MESOS | LOCAL | KUBERNETES
  // Deploy modes
  // Also bit flags, combinable via ALL_DEPLOY_MODES.
  private val CLIENT = 1
  private val CLUSTER = 2
  private val ALL_DEPLOY_MODES = CLIENT | CLUSTER
  // Special primary resource names that represent shells rather than application jars.
  private val SPARK_SHELL = "spark-shell"
  private val PYSPARK_SHELL = "pyspark-shell"
  private val SPARKR_SHELL = "sparkr-shell"
  private val SPARKR_PACKAGE_ARCHIVE = "sparkr.zip"
  private val R_PACKAGE_ARCHIVE = "rpkg.zip"
  // Exit status used when the user's main class cannot be found/loaded.
  private val CLASS_NOT_FOUND_EXIT_STATUS = 101
  // Following constants are visible for testing.
  // Fully-qualified names of the classes that perform the actual submission
  // for each cluster-manager / deploy-mode combination.
  private[deploy] val YARN_CLUSTER_SUBMIT_CLASS =
    "org.apache.spark.deploy.yarn.YarnClusterApplication"
  private[deploy] val REST_CLUSTER_SUBMIT_CLASS = classOf[RestSubmissionClientApp].getName()
  private[deploy] val STANDALONE_CLUSTER_SUBMIT_CLASS = classOf[ClientApp].getName()
  private[deploy] val KUBERNETES_CLUSTER_SUBMIT_CLASS =
    "org.apache.spark.deploy.k8s.submit.KubernetesClientApplication"
  // scalastyle:off println
  // Prints the Spark version banner plus build metadata, then exits with 0.
  private[spark] def printVersionAndExit(): Unit = {
    printStream.println("""Welcome to
      ____              __
     / __/__  ___ _____/ /__
    _\\ \\/ _ \\/ _ `/ __/  '_/
   /___/ .__/\\_,_/_/ /_/\\_\\   version %s
      /_/
                        """.format(SPARK_VERSION))
    printStream.println("Using Scala %s, %s, %s".format(
      Properties.versionString, Properties.javaVmName, Properties.javaVersion))
    printStream.println("Branch %s".format(SPARK_BRANCH))
    printStream.println("Compiled by user %s on %s".format(SPARK_BUILD_USER, SPARK_BUILD_DATE))
    printStream.println("Revision %s".format(SPARK_REVISION))
    printStream.println("Url %s".format(SPARK_REPO_URL))
    printStream.println("Type --help for more information.")
    exitFn(0)
  }
  // scalastyle:on println
  // Entry point: parses the arguments and dispatches to the requested action
  // (SUBMIT is the default).
  override def main(args: Array[String]): Unit = {
    // Initialize logging if it hasn't been done yet. Keep track of whether logging needs to
    // be reset before the application starts.
    val uninitLog = initializeLogIfNecessary(true, silent = true)
    val appArgs = new SparkSubmitArguments(args)
    if (appArgs.verbose) {
      // scalastyle:off println
      printStream.println(appArgs)
      // scalastyle:on println
    }
    appArgs.action match {
      case SparkSubmitAction.SUBMIT => submit(appArgs, uninitLog)
      case SparkSubmitAction.KILL => kill(appArgs)
      case SparkSubmitAction.REQUEST_STATUS => requestStatus(appArgs)
    }
  }
/**
 * Kill an existing submission using the REST protocol. Standalone and Mesos cluster mode only.
 */
private def kill(args: SparkSubmitArguments): Unit = {
  val restClient = new RestSubmissionClient(args.master)
  restClient.killSubmission(args.submissionToKill)
}
/**
 * Request the status of an existing submission using the REST protocol.
 * Standalone and Mesos cluster mode only.
 */
private def requestStatus(args: SparkSubmitArguments): Unit = {
  val restClient = new RestSubmissionClient(args.master)
  restClient.requestSubmissionStatus(args.submissionToRequestStatusFor)
}
/**
 * Submit the application using the provided parameters.
 *
 * This runs in two steps. First, we prepare the launch environment by setting up
 * the appropriate classpath, system properties, and application arguments for
 * running the child main class based on the cluster manager and the deploy mode.
 * Second, we use this launch environment to invoke the main method of the child
 * main class.
 *
 * NOTE(review): `args.useRest` is mutated on REST-connection failure and the method
 * recurses (tail call) exactly once to retry via the legacy gateway.
 */
@tailrec
private def submit(args: SparkSubmitArguments, uninitLog: Boolean): Unit = {
val (childArgs, childClasspath, sparkConf, childMainClass) = prepareSubmitEnvironment(args)
// Runs the prepared child main class, optionally impersonating --proxy-user.
def doRunMain(): Unit = {
if (args.proxyUser != null) {
val proxyUser = UserGroupInformation.createProxyUser(args.proxyUser,
UserGroupInformation.getCurrentUser())
try {
proxyUser.doAs(new PrivilegedExceptionAction[Unit]() {
override def run(): Unit = {
runMain(childArgs, childClasspath, sparkConf, childMainClass, args.verbose)
}
})
} catch {
case e: Exception =>
// Hadoop's AuthorizationException suppresses the exception's stack trace, which
// makes the message printed to the output by the JVM not very helpful. Instead,
// detect exceptions with empty stack traces here, and treat them differently.
if (e.getStackTrace().length == 0) {
// scalastyle:off println
printStream.println(s"ERROR: ${e.getClass().getName()}: ${e.getMessage()}")
// scalastyle:on println
exitFn(1)
} else {
throw e
}
}
} else {
runMain(childArgs, childClasspath, sparkConf, childMainClass, args.verbose)
}
}
// Let the main class re-initialize the logging system once it starts.
if (uninitLog) {
Logging.uninitialize()
}
// In standalone cluster mode, there are two submission gateways:
// (1) The traditional RPC gateway using o.a.s.deploy.Client as a wrapper
// (2) The new REST-based gateway introduced in Spark 1.3
// The latter is the default behavior as of Spark 1.3, but Spark submit will fail over
// to use the legacy gateway if the master endpoint turns out to be not a REST server.
if (args.isStandaloneCluster && args.useRest) {
try {
// scalastyle:off println
printStream.println("Running Spark using the REST application submission protocol.")
// scalastyle:on println
doRunMain()
} catch {
// Fail over to use the legacy submission gateway
case e: SubmitRestConnectionException =>
printWarning(s"Master endpoint ${args.master} was not a REST server. " +
"Falling back to legacy submission gateway instead.")
args.useRest = false
// uninitLog is passed as false on retry: logging was already uninitialized above.
submit(args, false)
}
// In all other modes, just run the main class as prepared
} else {
doRunMain()
}
}
/**
 * Prepare the environment for submitting an application.
 *
 * @param args the parsed SparkSubmitArguments used for environment preparation.
 * @param conf the Hadoop Configuration, this argument will only be set in unit test.
 * @return a 4-tuple:
 *        (1) the arguments for the child process,
 *        (2) a list of classpath entries for the child,
 *        (3) a SparkConf populated with all Spark properties for the child, and
 *        (4) the main class for the child
 *
 * NOTE(review): this method mutates `args` in place (master, deployMode, jars, files,
 * pyFiles, archives, mainClass, childArgs) while building the return values.
 *
 * Exposed for testing.
 */
private[deploy] def prepareSubmitEnvironment(
args: SparkSubmitArguments,
conf: Option[HadoopConfiguration] = None)
: (Seq[String], Seq[String], SparkConf, String) = {
// Return values
val childArgs = new ArrayBuffer[String]()
val childClasspath = new ArrayBuffer[String]()
val sparkConf = new SparkConf()
var childMainClass = ""
// Set the cluster manager
val clusterManager: Int = args.master match {
case "yarn" => YARN
case "yarn-client" | "yarn-cluster" =>
printWarning(s"Master ${args.master} is deprecated since 2.0." +
" Please use master \\"yarn\\" with specified deploy mode instead.")
YARN
case m if m.startsWith("spark") => STANDALONE
case m if m.startsWith("mesos") => MESOS
case m if m.startsWith("k8s") => KUBERNETES
case m if m.startsWith("local") => LOCAL
case _ =>
printErrorAndExit("Master must either be yarn or start with spark, mesos, k8s, or local")
-1
}
// Set the deploy mode; default is client mode
var deployMode: Int = args.deployMode match {
case "client" | null => CLIENT
case "cluster" => CLUSTER
case _ => printErrorAndExit("Deploy mode must be either client or cluster"); -1
}
// Because the deprecated way of specifying "yarn-cluster" and "yarn-client" encapsulate both
// the master and deploy mode, we have some logic to infer the master and deploy mode
// from each other if only one is specified, or exit early if they are at odds.
if (clusterManager == YARN) {
(args.master, args.deployMode) match {
case ("yarn-cluster", null) =>
deployMode = CLUSTER
args.master = "yarn"
case ("yarn-cluster", "client") =>
printErrorAndExit("Client deploy mode is not compatible with master \\"yarn-cluster\\"")
case ("yarn-client", "cluster") =>
printErrorAndExit("Cluster deploy mode is not compatible with master \\"yarn-client\\"")
case (_, mode) =>
args.master = "yarn"
}
// Make sure YARN is included in our build if we're trying to use it
if (!Utils.classIsLoadable(YARN_CLUSTER_SUBMIT_CLASS) && !Utils.isTesting) {
printErrorAndExit(
"Could not load YARN classes. " +
"This copy of Spark may not have been compiled with YARN support.")
}
}
if (clusterManager == KUBERNETES) {
args.master = Utils.checkAndGetK8sMasterUrl(args.master)
// Make sure KUBERNETES is included in our build if we're trying to use it
if (!Utils.classIsLoadable(KUBERNETES_CLUSTER_SUBMIT_CLASS) && !Utils.isTesting) {
printErrorAndExit(
"Could not load KUBERNETES classes. " +
"This copy of Spark may not have been compiled with KUBERNETES support.")
}
}
// Fail fast, the following modes are not supported or applicable
(clusterManager, deployMode) match {
case (STANDALONE, CLUSTER) if args.isPython =>
printErrorAndExit("Cluster deploy mode is currently not supported for python " +
"applications on standalone clusters.")
case (STANDALONE, CLUSTER) if args.isR =>
printErrorAndExit("Cluster deploy mode is currently not supported for R " +
"applications on standalone clusters.")
case (KUBERNETES, _) if args.isPython =>
printErrorAndExit("Python applications are currently not supported for Kubernetes.")
case (KUBERNETES, _) if args.isR =>
printErrorAndExit("R applications are currently not supported for Kubernetes.")
case (KUBERNETES, CLIENT) =>
printErrorAndExit("Client mode is currently not supported for Kubernetes.")
case (LOCAL, CLUSTER) =>
printErrorAndExit("Cluster deploy mode is not compatible with master \\"local\\"")
case (_, CLUSTER) if isShell(args.primaryResource) =>
printErrorAndExit("Cluster deploy mode is not applicable to Spark shells.")
case (_, CLUSTER) if isSqlShell(args.mainClass) =>
printErrorAndExit("Cluster deploy mode is not applicable to Spark SQL shell.")
case (_, CLUSTER) if isThriftServer(args.mainClass) =>
printErrorAndExit("Cluster deploy mode is not applicable to Spark Thrift server.")
case _ =>
}
// Update args.deployMode if it is null. It will be passed down as a Spark property later.
(args.deployMode, deployMode) match {
case (null, CLIENT) => args.deployMode = "client"
case (null, CLUSTER) => args.deployMode = "cluster"
case _ =>
}
val isYarnCluster = clusterManager == YARN && deployMode == CLUSTER
val isMesosCluster = clusterManager == MESOS && deployMode == CLUSTER
val isStandAloneCluster = clusterManager == STANDALONE && deployMode == CLUSTER
val isKubernetesCluster = clusterManager == KUBERNETES && deployMode == CLUSTER
// In Mesos/standalone cluster mode, dependency resolution happens on the driver side
// (the coordinates are forwarded as spark properties below), so skip it here.
if (!isMesosCluster && !isStandAloneCluster) {
// Resolve maven dependencies if there are any and add classpath to jars. Add them to py-files
// too for packages that include Python code
val resolvedMavenCoordinates = DependencyUtils.resolveMavenDependencies(
args.packagesExclusions, args.packages, args.repositories, args.ivyRepoPath)
if (!StringUtils.isBlank(resolvedMavenCoordinates)) {
args.jars = mergeFileLists(args.jars, resolvedMavenCoordinates)
if (args.isPython) {
args.pyFiles = mergeFileLists(args.pyFiles, resolvedMavenCoordinates)
}
}
// install any R packages that may have been passed through --jars or --packages.
// Spark Packages may contain R source code inside the jar.
if (args.isR && !StringUtils.isBlank(args.jars)) {
RPackageUtils.checkAndBuildRPackage(args.jars, printStream, args.verbose)
}
}
// Apply user-specified properties first so the logic below can read them from sparkConf.
args.sparkProperties.foreach { case (k, v) => sparkConf.set(k, v) }
val hadoopConf = conf.getOrElse(SparkHadoopUtil.newConfiguration(sparkConf))
val targetDir = Utils.createTempDir()
// assure a keytab is available from any place in a JVM
if (clusterManager == YARN || clusterManager == LOCAL || clusterManager == MESOS) {
if (args.principal != null) {
if (args.keytab != null) {
require(new File(args.keytab).exists(), s"Keytab file: ${args.keytab} does not exist")
// Add keytab and principal configurations in sysProps to make them available
// for later use; e.g. in spark sql, the isolated class loader used to talk
// to HiveMetastore will use these settings. They will be set as Java system
// properties and then loaded by SparkConf
sparkConf.set(KEYTAB, args.keytab)
sparkConf.set(PRINCIPAL, args.principal)
UserGroupInformation.loginUserFromKeytab(args.principal, args.keytab)
}
}
}
// Resolve glob path for different resources.
args.jars = Option(args.jars).map(resolveGlobPaths(_, hadoopConf)).orNull
args.files = Option(args.files).map(resolveGlobPaths(_, hadoopConf)).orNull
args.pyFiles = Option(args.pyFiles).map(resolveGlobPaths(_, hadoopConf)).orNull
args.archives = Option(args.archives).map(resolveGlobPaths(_, hadoopConf)).orNull
// Lazy: only constructed if a download below actually needs it.
lazy val secMgr = new SecurityManager(sparkConf)
// In client mode, download remote files.
var localPrimaryResource: String = null
var localJars: String = null
var localPyFiles: String = null
if (deployMode == CLIENT) {
localPrimaryResource = Option(args.primaryResource).map {
downloadFile(_, targetDir, sparkConf, hadoopConf, secMgr)
}.orNull
localJars = Option(args.jars).map {
downloadFileList(_, targetDir, sparkConf, hadoopConf, secMgr)
}.orNull
localPyFiles = Option(args.pyFiles).map {
downloadFileList(_, targetDir, sparkConf, hadoopConf, secMgr)
}.orNull
}
// When running in YARN, for some remote resources with scheme:
//   1. Hadoop FileSystem doesn't support them.
//   2. We explicitly bypass Hadoop FileSystem with "spark.yarn.dist.forceDownloadSchemes".
// We will download them to local disk prior to add to YARN's distributed cache.
// For yarn client mode, since we already download them with above code, so we only need to
// figure out the local path and replace the remote one.
if (clusterManager == YARN) {
val forceDownloadSchemes = sparkConf.get(FORCE_DOWNLOAD_SCHEMES)
def shouldDownload(scheme: String): Boolean = {
forceDownloadSchemes.contains(scheme) ||
Try { FileSystem.getFileSystemClass(scheme, hadoopConf) }.isFailure
}
def downloadResource(resource: String): String = {
val uri = Utils.resolveURI(resource)
uri.getScheme match {
case "local" | "file" => resource
case e if shouldDownload(e) =>
val file = new File(targetDir, new Path(uri).getName)
if (file.exists()) {
file.toURI.toString
} else {
downloadFile(resource, targetDir, sparkConf, hadoopConf, secMgr)
}
case _ => uri.toString
}
}
args.primaryResource = Option(args.primaryResource).map { downloadResource }.orNull
args.files = Option(args.files).map { files =>
Utils.stringToSeq(files).map(downloadResource).mkString(",")
}.orNull
args.pyFiles = Option(args.pyFiles).map { pyFiles =>
Utils.stringToSeq(pyFiles).map(downloadResource).mkString(",")
}.orNull
args.jars = Option(args.jars).map { jars =>
Utils.stringToSeq(jars).map(downloadResource).mkString(",")
}.orNull
args.archives = Option(args.archives).map { archives =>
Utils.stringToSeq(archives).map(downloadResource).mkString(",")
}.orNull
}
// If we're running a python app, set the main class to our specific python runner
if (args.isPython && deployMode == CLIENT) {
if (args.primaryResource == PYSPARK_SHELL) {
args.mainClass = "org.apache.spark.api.python.PythonGatewayServer"
} else {
// If a python file is provided, add it to the child arguments and list of files to deploy.
// Usage: PythonAppRunner <main python file> <extra python files> [app arguments]
args.mainClass = "org.apache.spark.deploy.PythonRunner"
args.childArgs = ArrayBuffer(localPrimaryResource, localPyFiles) ++ args.childArgs
if (clusterManager != YARN) {
// The YARN backend distributes the primary file differently, so don't merge it.
args.files = mergeFileLists(args.files, args.primaryResource)
}
}
if (clusterManager != YARN) {
// The YARN backend handles python files differently, so don't merge the lists.
args.files = mergeFileLists(args.files, args.pyFiles)
}
if (localPyFiles != null) {
sparkConf.set("spark.submit.pyFiles", localPyFiles)
}
}
// In YARN mode for an R app, add the SparkR package archive and the R package
// archive containing all of the built R libraries to archives so that they can
// be distributed with the job
if (args.isR && clusterManager == YARN) {
val sparkRPackagePath = RUtils.localSparkRPackagePath
if (sparkRPackagePath.isEmpty) {
printErrorAndExit("SPARK_HOME does not exist for R application in YARN mode.")
}
val sparkRPackageFile = new File(sparkRPackagePath.get, SPARKR_PACKAGE_ARCHIVE)
if (!sparkRPackageFile.exists()) {
printErrorAndExit(s"$SPARKR_PACKAGE_ARCHIVE does not exist for R application in YARN mode.")
}
val sparkRPackageURI = Utils.resolveURI(sparkRPackageFile.getAbsolutePath).toString
// Distribute the SparkR package.
// Assigns a symbol link name "sparkr" to the shipped package.
args.archives = mergeFileLists(args.archives, sparkRPackageURI + "#sparkr")
// Distribute the R package archive containing all the built R packages.
if (!RUtils.rPackages.isEmpty) {
val rPackageFile =
RPackageUtils.zipRLibraries(new File(RUtils.rPackages.get), R_PACKAGE_ARCHIVE)
if (!rPackageFile.exists()) {
printErrorAndExit("Failed to zip all the built R packages.")
}
val rPackageURI = Utils.resolveURI(rPackageFile.getAbsolutePath).toString
// Assigns a symbol link name "rpkg" to the shipped package.
args.archives = mergeFileLists(args.archives, rPackageURI + "#rpkg")
}
}
// TODO: Support distributing R packages with standalone cluster
if (args.isR && clusterManager == STANDALONE && !RUtils.rPackages.isEmpty) {
printErrorAndExit("Distributing R packages with standalone cluster is not supported.")
}
// TODO: Support distributing R packages with mesos cluster
if (args.isR && clusterManager == MESOS && !RUtils.rPackages.isEmpty) {
printErrorAndExit("Distributing R packages with mesos cluster is not supported.")
}
// If we're running an R app, set the main class to our specific R runner
if (args.isR && deployMode == CLIENT) {
if (args.primaryResource == SPARKR_SHELL) {
args.mainClass = "org.apache.spark.api.r.RBackend"
} else {
// If an R file is provided, add it to the child arguments and list of files to deploy.
// Usage: RRunner <main R file> [app arguments]
args.mainClass = "org.apache.spark.deploy.RRunner"
args.childArgs = ArrayBuffer(localPrimaryResource) ++ args.childArgs
args.files = mergeFileLists(args.files, args.primaryResource)
}
}
if (isYarnCluster && args.isR) {
// In yarn-cluster mode for an R app, add primary resource to files
// that can be distributed with the job
args.files = mergeFileLists(args.files, args.primaryResource)
}
// Special flag to avoid deprecation warnings at the client
sys.props("SPARK_SUBMIT") = "true"
// A list of rules to map each argument to system properties or command-line options in
// each deploy mode; we iterate through these below
val options = List[OptionAssigner](
// All cluster managers
OptionAssigner(args.master, ALL_CLUSTER_MGRS, ALL_DEPLOY_MODES, confKey = "spark.master"),
OptionAssigner(args.deployMode, ALL_CLUSTER_MGRS, ALL_DEPLOY_MODES,
confKey = "spark.submit.deployMode"),
OptionAssigner(args.name, ALL_CLUSTER_MGRS, ALL_DEPLOY_MODES, confKey = "spark.app.name"),
OptionAssigner(args.ivyRepoPath, ALL_CLUSTER_MGRS, CLIENT, confKey = "spark.jars.ivy"),
OptionAssigner(args.driverMemory, ALL_CLUSTER_MGRS, CLIENT,
confKey = "spark.driver.memory"),
OptionAssigner(args.driverExtraClassPath, ALL_CLUSTER_MGRS, ALL_DEPLOY_MODES,
confKey = "spark.driver.extraClassPath"),
OptionAssigner(args.driverExtraJavaOptions, ALL_CLUSTER_MGRS, ALL_DEPLOY_MODES,
confKey = "spark.driver.extraJavaOptions"),
OptionAssigner(args.driverExtraLibraryPath, ALL_CLUSTER_MGRS, ALL_DEPLOY_MODES,
confKey = "spark.driver.extraLibraryPath"),
// Propagate attributes for dependency resolution at the driver side
OptionAssigner(args.packages, STANDALONE | MESOS, CLUSTER, confKey = "spark.jars.packages"),
OptionAssigner(args.repositories, STANDALONE | MESOS, CLUSTER,
confKey = "spark.jars.repositories"),
OptionAssigner(args.ivyRepoPath, STANDALONE | MESOS, CLUSTER, confKey = "spark.jars.ivy"),
OptionAssigner(args.packagesExclusions, STANDALONE | MESOS,
CLUSTER, confKey = "spark.jars.excludes"),
// Yarn only
OptionAssigner(args.queue, YARN, ALL_DEPLOY_MODES, confKey = "spark.yarn.queue"),
OptionAssigner(args.numExecutors, YARN, ALL_DEPLOY_MODES,
confKey = "spark.executor.instances"),
OptionAssigner(args.pyFiles, YARN, ALL_DEPLOY_MODES, confKey = "spark.yarn.dist.pyFiles"),
OptionAssigner(args.jars, YARN, ALL_DEPLOY_MODES, confKey = "spark.yarn.dist.jars"),
OptionAssigner(args.files, YARN, ALL_DEPLOY_MODES, confKey = "spark.yarn.dist.files"),
OptionAssigner(args.archives, YARN, ALL_DEPLOY_MODES, confKey = "spark.yarn.dist.archives"),
OptionAssigner(args.principal, YARN, ALL_DEPLOY_MODES, confKey = "spark.yarn.principal"),
OptionAssigner(args.keytab, YARN, ALL_DEPLOY_MODES, confKey = "spark.yarn.keytab"),
// Other options
OptionAssigner(args.executorCores, STANDALONE | YARN | KUBERNETES, ALL_DEPLOY_MODES,
confKey = "spark.executor.cores"),
OptionAssigner(args.executorMemory, STANDALONE | MESOS | YARN | KUBERNETES, ALL_DEPLOY_MODES,
confKey = "spark.executor.memory"),
OptionAssigner(args.totalExecutorCores, STANDALONE | MESOS | KUBERNETES, ALL_DEPLOY_MODES,
confKey = "spark.cores.max"),
OptionAssigner(args.files, LOCAL | STANDALONE | MESOS, ALL_DEPLOY_MODES,
confKey = "spark.files"),
OptionAssigner(args.jars, LOCAL, CLIENT, confKey = "spark.jars"),
OptionAssigner(args.jars, STANDALONE | MESOS, ALL_DEPLOY_MODES, confKey = "spark.jars"),
OptionAssigner(args.driverMemory, STANDALONE | MESOS | YARN | KUBERNETES, CLUSTER,
confKey = "spark.driver.memory"),
OptionAssigner(args.driverCores, STANDALONE | MESOS | YARN | KUBERNETES, CLUSTER,
confKey = "spark.driver.cores"),
OptionAssigner(args.supervise.toString, STANDALONE | MESOS, CLUSTER,
confKey = "spark.driver.supervise"),
OptionAssigner(args.ivyRepoPath, STANDALONE, CLUSTER, confKey = "spark.jars.ivy"),
// An internal option used only for spark-shell to add user jars to repl's classloader,
// previously it uses "spark.jars" or "spark.yarn.dist.jars" which now may be pointed to
// remote jars, so adding a new option to only specify local jars for spark-shell internally.
OptionAssigner(localJars, ALL_CLUSTER_MGRS, CLIENT, confKey = "spark.repl.local.jars")
)
// In client mode, launch the application main class directly
// In addition, add the main application jar and any added jars (if any) to the classpath
if (deployMode == CLIENT) {
childMainClass = args.mainClass
if (localPrimaryResource != null && isUserJar(localPrimaryResource)) {
childClasspath += localPrimaryResource
}
if (localJars != null) { childClasspath ++= localJars.split(",") }
}
// Add the main application jar and any added jars to classpath in case YARN client
// requires these jars.
// This assumes both primaryResource and user jars are local jars, otherwise it will not be
// added to the classpath of YARN client.
if (isYarnCluster) {
if (isUserJar(args.primaryResource)) {
childClasspath += args.primaryResource
}
if (args.jars != null) { childClasspath ++= args.jars.split(",") }
}
if (deployMode == CLIENT) {
if (args.childArgs != null) { childArgs ++= args.childArgs }
}
// Map all arguments to command-line options or system properties for our chosen mode
for (opt <- options) {
if (opt.value != null &&
(deployMode & opt.deployMode) != 0 &&
(clusterManager & opt.clusterManager) != 0) {
if (opt.clOption != null) { childArgs += (opt.clOption, opt.value) }
if (opt.confKey != null) { sparkConf.set(opt.confKey, opt.value) }
}
}
// In case of shells, spark.ui.showConsoleProgress can be true by default or by user.
if (isShell(args.primaryResource) && !sparkConf.contains(UI_SHOW_CONSOLE_PROGRESS)) {
sparkConf.set(UI_SHOW_CONSOLE_PROGRESS, true)
}
// Add the application jar automatically so the user doesn't have to call sc.addJar
// For YARN cluster mode, the jar is already distributed on each node as "app.jar"
// For python and R files, the primary resource is already distributed as a regular file
if (!isYarnCluster && !args.isPython && !args.isR) {
var jars = sparkConf.getOption("spark.jars").map(x => x.split(",").toSeq).getOrElse(Seq.empty)
if (isUserJar(args.primaryResource)) {
jars = jars ++ Seq(args.primaryResource)
}
sparkConf.set("spark.jars", jars.mkString(","))
}
// In standalone cluster mode, use the REST client to submit the application (Spark 1.3+).
// All Spark parameters are expected to be passed to the client through system properties.
if (args.isStandaloneCluster) {
if (args.useRest) {
childMainClass = REST_CLUSTER_SUBMIT_CLASS
childArgs += (args.primaryResource, args.mainClass)
} else {
// In legacy standalone cluster mode, use Client as a wrapper around the user class
childMainClass = STANDALONE_CLUSTER_SUBMIT_CLASS
if (args.supervise) { childArgs += "--supervise" }
Option(args.driverMemory).foreach { m => childArgs += ("--memory", m) }
Option(args.driverCores).foreach { c => childArgs += ("--cores", c) }
childArgs += "launch"
childArgs += (args.master, args.primaryResource, args.mainClass)
}
if (args.childArgs != null) {
childArgs ++= args.childArgs
}
}
// Let YARN know it's a pyspark app, so it distributes needed libraries.
if (clusterManager == YARN) {
if (args.isPython) {
sparkConf.set("spark.yarn.isPython", "true")
}
}
if (clusterManager == MESOS && UserGroupInformation.isSecurityEnabled) {
setRMPrincipal(sparkConf)
}
// In yarn-cluster mode, use yarn.Client as a wrapper around the user class
if (isYarnCluster) {
childMainClass = YARN_CLUSTER_SUBMIT_CLASS
if (args.isPython) {
childArgs += ("--primary-py-file", args.primaryResource)
childArgs += ("--class", "org.apache.spark.deploy.PythonRunner")
} else if (args.isR) {
val mainFile = new Path(args.primaryResource).getName
childArgs += ("--primary-r-file", mainFile)
childArgs += ("--class", "org.apache.spark.deploy.RRunner")
} else {
if (args.primaryResource != SparkLauncher.NO_RESOURCE) {
childArgs += ("--jar", args.primaryResource)
}
childArgs += ("--class", args.mainClass)
}
if (args.childArgs != null) {
args.childArgs.foreach { arg => childArgs += ("--arg", arg) }
}
}
if (isMesosCluster) {
assert(args.useRest, "Mesos cluster mode is only supported through the REST submission API")
childMainClass = REST_CLUSTER_SUBMIT_CLASS
if (args.isPython) {
// Second argument is main class
childArgs += (args.primaryResource, "")
if (args.pyFiles != null) {
sparkConf.set("spark.submit.pyFiles", args.pyFiles)
}
} else if (args.isR) {
// Second argument is main class
childArgs += (args.primaryResource, "")
} else {
childArgs += (args.primaryResource, args.mainClass)
}
if (args.childArgs != null) {
childArgs ++= args.childArgs
}
}
if (isKubernetesCluster) {
childMainClass = KUBERNETES_CLUSTER_SUBMIT_CLASS
if (args.primaryResource != SparkLauncher.NO_RESOURCE) {
childArgs ++= Array("--primary-java-resource", args.primaryResource)
}
childArgs ++= Array("--main-class", args.mainClass)
if (args.childArgs != null) {
args.childArgs.foreach { arg =>
childArgs += ("--arg", arg)
}
}
}
// Load any properties specified through --conf and the default properties file
// (setIfMissing: explicit assignments made above take precedence).
for ((k, v) <- args.sparkProperties) {
sparkConf.setIfMissing(k, v)
}
// Ignore invalid spark.driver.host in cluster modes.
if (deployMode == CLUSTER) {
sparkConf.remove("spark.driver.host")
}
// Resolve paths in certain spark properties
val pathConfigs = Seq(
"spark.jars",
"spark.files",
"spark.yarn.dist.files",
"spark.yarn.dist.archives",
"spark.yarn.dist.jars")
pathConfigs.foreach { config =>
// Replace old URIs with resolved URIs, if they exist
sparkConf.getOption(config).foreach { oldValue =>
sparkConf.set(config, Utils.resolveURIs(oldValue))
}
}
// Resolve and format python file paths properly before adding them to the PYTHONPATH.
// The resolving part is redundant in the case of --py-files, but necessary if the user
// explicitly sets `spark.submit.pyFiles` in his/her default properties file.
sparkConf.getOption("spark.submit.pyFiles").foreach { pyFiles =>
val resolvedPyFiles = Utils.resolveURIs(pyFiles)
val formattedPyFiles = if (!isYarnCluster && !isMesosCluster) {
PythonRunner.formatPaths(resolvedPyFiles).mkString(",")
} else {
// Ignoring formatting python path in yarn and mesos cluster mode, these two modes
// support dealing with remote python files, they could distribute and add python files
// locally.
resolvedPyFiles
}
sparkConf.set("spark.submit.pyFiles", formattedPyFiles)
}
(childArgs, childClasspath, sparkConf, childMainClass)
}
// [SPARK-20328]. HadoopRDD calls into a Hadoop library that fetches delegation tokens with
// renewer set to the YARN ResourceManager. Since YARN isn't configured in Mesos mode, we
// must trick it into thinking we're YARN.
private def setRMPrincipal(sparkConf: SparkConf): Unit = {
val shortUserName = UserGroupInformation.getCurrentUser.getShortUserName
val key = s"spark.hadoop.${YarnConfiguration.RM_PRINCIPAL}"
// scalastyle:off println
printStream.println(s"Setting ${key} to ${shortUserName}")
// NOTE(review): the line below originally repeated ":off", which left the scalastyle
// println check disabled for the remainder of the file; it must re-enable the check.
// scalastyle:on println
sparkConf.set(key, shortUserName)
}
/**
 * Run the main method of the child class using the provided launch environment.
 *
 * Note that this main class will not be the one provided by the user if we're
 * running cluster deploy mode or python applications.
 *
 * NOTE(review): exits the JVM with CLASS_NOT_FOUND_EXIT_STATUS if the child main
 * class (or a Hive dependency) cannot be loaded, and with the app's exit code if
 * the application throws SparkUserAppException.
 */
private def runMain(
childArgs: Seq[String],
childClasspath: Seq[String],
sparkConf: SparkConf,
childMainClass: String,
verbose: Boolean): Unit = {
// scalastyle:off println
if (verbose) {
printStream.println(s"Main class:\\n$childMainClass")
printStream.println(s"Arguments:\\n${childArgs.mkString("\\n")}")
// sysProps may contain sensitive information, so redact before printing
printStream.println(s"Spark config:\\n${Utils.redact(sparkConf.getAll.toMap).mkString("\\n")}")
printStream.println(s"Classpath elements:\\n${childClasspath.mkString("\\n")}")
printStream.println("\\n")
}
// scalastyle:on println
// Choose the classloader: child-first if the user asked for their classpath to win.
val loader =
if (sparkConf.get(DRIVER_USER_CLASS_PATH_FIRST)) {
new ChildFirstURLClassLoader(new Array[URL](0),
Thread.currentThread.getContextClassLoader)
} else {
new MutableURLClassLoader(new Array[URL](0),
Thread.currentThread.getContextClassLoader)
}
Thread.currentThread.setContextClassLoader(loader)
for (jar <- childClasspath) {
addJarToClasspath(jar, loader)
}
var mainClass: Class[_] = null
try {
mainClass = Utils.classForName(childMainClass)
} catch {
case e: ClassNotFoundException =>
e.printStackTrace(printStream)
if (childMainClass.contains("thriftserver")) {
// scalastyle:off println
printStream.println(s"Failed to load main class $childMainClass.")
printStream.println("You need to build Spark with -Phive and -Phive-thriftserver.")
// scalastyle:on println
}
System.exit(CLASS_NOT_FOUND_EXIT_STATUS)
case e: NoClassDefFoundError =>
e.printStackTrace(printStream)
if (e.getMessage.contains("org/apache/hadoop/hive")) {
// scalastyle:off println
printStream.println(s"Failed to load hive class.")
printStream.println("You need to build Spark with -Phive and -Phive-thriftserver.")
// scalastyle:on println
}
System.exit(CLASS_NOT_FOUND_EXIT_STATUS)
}
// SparkApplication implementations are instantiated directly; plain classes with a
// static main() are wrapped in JavaMainApplication.
val app: SparkApplication = if (classOf[SparkApplication].isAssignableFrom(mainClass)) {
mainClass.newInstance().asInstanceOf[SparkApplication]
} else {
// SPARK-4170
if (classOf[scala.App].isAssignableFrom(mainClass)) {
printWarning("Subclasses of scala.App may not work correctly. Use a main() method instead.")
}
new JavaMainApplication(mainClass)
}
// Unwrap reflection/proxy wrappers so callers see the application's real failure.
@tailrec
def findCause(t: Throwable): Throwable = t match {
case e: UndeclaredThrowableException =>
if (e.getCause() != null) findCause(e.getCause()) else e
case e: InvocationTargetException =>
if (e.getCause() != null) findCause(e.getCause()) else e
case e: Throwable =>
e
}
try {
app.start(childArgs.toArray, sparkConf)
} catch {
case t: Throwable =>
findCause(t) match {
case SparkUserAppException(exitCode) =>
System.exit(exitCode)
case t: Throwable =>
throw t
}
}
}
/**
 * Add a local jar to the given mutable classloader. Remote-scheme URIs and
 * non-existent local paths are skipped with a warning instead of failing.
 */
private[deploy] def addJarToClasspath(localJar: String, loader: MutableURLClassLoader) {
  val uri = Utils.resolveURI(localJar)
  val scheme = uri.getScheme
  if (scheme == "file" || scheme == "local") {
    val file = new File(uri.getPath)
    if (file.exists()) {
      loader.addURL(file.toURI.toURL)
    } else {
      printWarning(s"Local jar $file does not exist, skipping.")
    }
  } else {
    printWarning(s"Skip remote jar $uri.")
  }
}
/**
 * Return whether the given primary resource represents a user jar, i.e. it is
 * none of the special resources (shell, python, R, internal placeholder).
 */
private[deploy] def isUserJar(res: String): Boolean = {
  !(isShell(res) || isPython(res) || isInternal(res) || isR(res))
}
/**
 * Return whether the given primary resource represents one of the interactive shells
 * (Scala, Python, or R).
 */
private[deploy] def isShell(res: String): Boolean = {
  Set(SPARK_SHELL, PYSPARK_SHELL, SPARKR_SHELL).contains(res)
}
/**
 * Return whether the given main class is the Spark SQL CLI shell.
 */
private[deploy] def isSqlShell(mainClass: String): Boolean = {
  "org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver" == mainClass
}
/**
 * Return whether the given main class is the Hive Thrift server entry point.
 */
private def isThriftServer(mainClass: String): Boolean = {
  mainClass match {
    case "org.apache.spark.sql.hive.thriftserver.HiveThriftServer2" => true
    case _ => false
  }
}
/**
 * Return whether the given primary resource requires running python:
 * either the pyspark shell sentinel or any `.py` file.
 */
private[deploy] def isPython(res: String): Boolean = {
  res == PYSPARK_SHELL || (res != null && res.endsWith(".py"))
}
/**
 * Return whether the given primary resource requires running R:
 * either the sparkr shell sentinel or any `.R` file.
 */
private[deploy] def isR(res: String): Boolean = {
  res == SPARKR_SHELL || (res != null && res.endsWith(".R"))
}
/** Return whether the given primary resource is the internal "no resource" placeholder. */
private[deploy] def isInternal(res: String): Boolean = {
  res match {
    case SparkLauncher.NO_RESOURCE => true
    case _ => false
  }
}
/**
 * Merge a sequence of comma-separated file lists, some of which may be null or blank
 * to indicate no files, into a single comma-separated string. Returns null when the
 * merged result would be empty (callers treat null as "no files").
 */
private[deploy] def mergeFileLists(lists: String*): String = {
  val entries = for {
    list <- lists if !StringUtils.isBlank(list)
    entry <- list.split(",")
  } yield entry
  val merged = entries.mkString(",")
  if (merged.isEmpty) null else merged
}
}
/** Provides utility functions to be used inside SparkSubmit. */
private[spark] object SparkSubmitUtils {
// Exposed for testing: output stream defaults to SparkSubmit's, but tests may replace it.
var printStream = SparkSubmit.printStream
// Exposed for testing.
// These components are used to make the default exclusion rules for Spark dependencies.
// We need to specify each component explicitly, otherwise we miss spark-streaming-kafka-0-8 and
// other spark-streaming utility components. Underscore is there to differentiate between
// spark-streaming_2.1x and spark-streaming-kafka-0-8-assembly_2.1x
val IVY_DEFAULT_EXCLUDES = Seq("catalyst_", "core_", "graphx_", "kvstore_", "launcher_", "mllib_",
"mllib-local_", "network-common_", "network-shuffle_", "repl_", "sketch_", "sql_", "streaming_",
"tags_", "unsafe_")
/**
 * Represents a Maven Coordinate.
 * @param groupId the groupId of the coordinate
 * @param artifactId the artifactId of the coordinate
 * @param version the version of the coordinate
 */
private[deploy] case class MavenCoordinate(groupId: String, artifactId: String, version: String) {
  // Renders in the canonical "groupId:artifactId:version" form.
  override def toString: String = Seq(groupId, artifactId, version).mkString(":")
}
/**
 * Extracts maven coordinates from a comma-delimited string. Coordinates should be provided
 * in the format `groupId:artifactId:version` or `groupId/artifactId:version`.
 * @param coordinates Comma-delimited string of maven coordinates
 * @return Sequence of Maven coordinates
 */
def extractMavenCoordinates(coordinates: String): Seq[MavenCoordinate] = {
  coordinates.split(",").map { coordinate =>
    // Accept '/' as a separator between groupId and artifactId as well.
    val parts = coordinate.replace("/", ":").split(":")
    require(parts.length == 3, s"Provided Maven Coordinates must be in the form " +
      s"'groupId:artifactId:version'. The coordinate provided is: $coordinate")
    require(parts(0) != null && parts(0).trim.nonEmpty, s"The groupId cannot be null or " +
      s"be whitespace. The groupId provided is: ${parts(0)}")
    require(parts(1) != null && parts(1).trim.nonEmpty, s"The artifactId cannot be null or " +
      s"be whitespace. The artifactId provided is: ${parts(1)}")
    require(parts(2) != null && parts(2).trim.nonEmpty, s"The version cannot be null or " +
      s"be whitespace. The version provided is: ${parts(2)}")
    MavenCoordinate(parts(0), parts(1), parts(2))
  }
}
/** Path of the local Maven cache. */
private[spark] def m2Path: File = {
  // Test builds delete the maven cache, which can cause flakiness, so point tests
  // at a throwaway base directory instead of the real user home.
  val baseDir = if (Utils.isTesting) "dummy" else System.getProperty("user.home")
  new File(baseDir, ".m2" + File.separator + "repository")
}
/**
 * Builds the resolver chain used by Ivy to search for and resolve dependencies.
 * @param defaultIvyUserDir The default user path for Ivy
 * @return A ChainResolver used by Ivy to search for and resolve dependencies.
 */
def createRepoResolvers(defaultIvyUserDir: File): ChainResolver = {
  // Configures a POM-aware, Maven-layout resolver with the given name and optional root URL
  // (no root means the resolver's built-in default is used).
  def newBiblioResolver(name: String, root: Option[String]): IBiblioResolver = {
    val resolver = new IBiblioResolver
    resolver.setM2compatible(true)
    resolver.setUsepoms(true)
    root.foreach(resolver.setRoot)
    resolver.setName(name)
    resolver
  }

  // We need a chain resolver if we want to check multiple repositories
  val chain = new ChainResolver
  chain.setName("spark-list")

  // 1) the local .m2 cache
  chain.add(newBiblioResolver("local-m2-cache", Some(m2Path.toURI.toString)))

  // 2) the local ivy repository under the default ivy user dir
  val localIvy = new FileSystemResolver
  val localIvyRoot = new File(defaultIvyUserDir, "local")
  localIvy.setLocal(true)
  localIvy.setRepository(new FileRepository(localIvyRoot))
  localIvy.addIvyPattern(
    Seq(localIvyRoot.getAbsolutePath, "[organisation]", "[module]", "[revision]",
      "ivys", "ivy.xml").mkString(File.separator))
  localIvy.addArtifactPattern(
    Seq(localIvyRoot.getAbsolutePath, "[organisation]", "[module]",
      "[revision]", "[type]s", "[artifact](-[classifier]).[ext]").mkString(File.separator))
  localIvy.setName("local-ivy-cache")
  chain.add(localIvy)

  // 3) maven central: the biblio resolver resolves POM declared dependencies
  chain.add(newBiblioResolver("central", None))

  // 4) the spark-packages repository
  chain.add(newBiblioResolver("spark-packages", Some("http://dl.bintray.com/spark-packages/maven")))

  chain
}
/**
 * Output a comma-delimited list of paths for the downloaded jars to be added to the classpath
 * (will append to jars in SparkSubmit).
 * @param artifacts Sequence of dependencies that were resolved and retrieved
 * @param cacheDirectory directory where jars are cached
 * @return a comma-delimited list of paths for the dependencies
 */
def resolveDependencyPaths(
    artifacts: Array[AnyRef],
    cacheDirectory: File): String = {
  val paths = artifacts.map { artifactInfo =>
    val id = artifactInfo.asInstanceOf[Artifact].getModuleRevisionId
    // File name matches the retrieve pattern "[organization]_[artifact]-[revision].[ext]"
    // used in resolveMavenCoordinates.
    s"${cacheDirectory.getAbsolutePath}${File.separator}${id.getOrganisation}_${id.getName}-${id.getRevision}.jar"
  }
  paths.mkString(",")
}
/** Adds the given maven coordinates to Ivy's module descriptor. */
def addDependenciesToIvy(
    md: DefaultModuleDescriptor,
    artifacts: Seq[MavenCoordinate],
    ivyConfName: String): Unit = {
  for (coordinate <- artifacts) {
    val revisionId = ModuleRevisionId.newInstance(coordinate.groupId, coordinate.artifactId, coordinate.version)
    val descriptor = new DefaultDependencyDescriptor(revisionId, false, false)
    // Map our configuration onto the dependency's runtime configuration.
    descriptor.addDependencyConfiguration(ivyConfName, ivyConfName + "(runtime)")
    // scalastyle:off println
    printStream.println(s"${descriptor.getDependencyId} added as a dependency")
    // scalastyle:on println
    md.addDependency(descriptor)
  }
}
/** Add exclusion rules for dependencies already included in the spark-assembly */
def addExclusionRules(
    ivySettings: IvySettings,
    ivyConfName: String,
    md: DefaultModuleDescriptor): Unit = {
  // Add scala exclusion rule
  md.addExcludeRule(createExclusion("*:scala-library:*", ivySettings, ivyConfName))

  // Exclude every Spark component listed in IVY_DEFAULT_EXCLUDES.
  for (component <- IVY_DEFAULT_EXCLUDES) {
    md.addExcludeRule(createExclusion(s"org.apache.spark:spark-$component*:*", ivySettings,
      ivyConfName))
  }
}
/**
 * Build Ivy Settings using options with default resolvers
 * @param remoteRepos Comma-delimited string of remote repositories other than maven central
 * @param ivyPath The path to the local ivy repository
 * @return An IvySettings object
 */
def buildIvySettings(remoteRepos: Option[String], ivyPath: Option[String]): IvySettings = {
  val ivySettings: IvySettings = new IvySettings
  // Apply the ivy path first: createRepoResolvers below reads getDefaultIvyUserDir.
  processIvyPathArg(ivySettings, ivyPath)

  // create a pattern matcher
  ivySettings.addMatcher(new GlobPatternMatcher)
  // create the dependency resolvers
  val repoResolver = createRepoResolvers(ivySettings.getDefaultIvyUserDir)
  ivySettings.addResolver(repoResolver)
  ivySettings.setDefaultResolver(repoResolver.getName)
  // User-supplied repositories (if any) are added last; processRemoteRepoArg replaces the
  // default resolver with a chain that includes them.
  processRemoteRepoArg(ivySettings, remoteRepos)
  ivySettings
}
/**
 * Load Ivy settings from a given filename, using supplied resolvers
 * @param settingsFile Path to Ivy settings file
 * @param remoteRepos Comma-delimited string of remote repositories other than maven central
 * @param ivyPath The path to the local ivy repository
 * @return An IvySettings object
 */
def loadIvySettings(
    settingsFile: String,
    remoteRepos: Option[String],
    ivyPath: Option[String]): IvySettings = {
  val file = new File(settingsFile)
  require(file.exists(), s"Ivy settings file $file does not exist")
  require(file.isFile(), s"Ivy settings file $file is not a normal file")

  val settings: IvySettings = new IvySettings
  // Wrap I/O and parse failures in a SparkException so callers get a uniform error type.
  try settings.load(file) catch {
    case e @ (_: IOException | _: ParseException) =>
      throw new SparkException(s"Failed when loading Ivy settings from $settingsFile", e)
  }
  processIvyPathArg(settings, ivyPath)
  processRemoteRepoArg(settings, remoteRepos)
  settings
}
/* Set ivy settings for location of cache, if option is supplied */
private def processIvyPathArg(ivySettings: IvySettings, ivyPath: Option[String]): Unit = {
  ivyPath match {
    case Some(alternateIvyDir) if alternateIvyDir.trim.nonEmpty =>
      ivySettings.setDefaultIvyUserDir(new File(alternateIvyDir))
      ivySettings.setDefaultCache(new File(alternateIvyDir, "cache"))
    case _ => // no alternate directory supplied; keep Ivy's defaults
  }
}
/* Add any optional additional remote repositories */
private def processRemoteRepoArg(ivySettings: IvySettings, remoteRepos: Option[String]): Unit = {
  remoteRepos match {
    case Some(repos) if repos.trim.nonEmpty =>
      val chain = new ChainResolver
      chain.setName("user-list")

      // add current default resolver, if any
      Option(ivySettings.getDefaultResolver).foreach(chain.add)

      // add additional repositories, last resolution in chain takes precedence
      for ((repo, i) <- repos.split(",").zipWithIndex) {
        val resolver: IBiblioResolver = new IBiblioResolver
        resolver.setM2compatible(true)
        resolver.setUsepoms(true)
        resolver.setRoot(repo)
        resolver.setName(s"repo-${i + 1}")
        chain.add(resolver)
        // scalastyle:off println
        printStream.println(s"$repo added as a remote repository with the name: ${resolver.getName}")
        // scalastyle:on println
      }

      ivySettings.addResolver(chain)
      ivySettings.setDefaultResolver(chain.getName)
    case _ => // no extra repositories requested
  }
}
/** A nice function to use in tests as well. Values are dummy strings. */
// Creates the synthetic "parent" module descriptor whose dependencies will be the
// user-requested maven coordinates (see resolveMavenCoordinates).
def getModuleDescriptor: DefaultModuleDescriptor = DefaultModuleDescriptor.newDefaultInstance(
  ModuleRevisionId.newInstance("org.apache.spark", "spark-submit-parent", "1.0"))
/**
 * Resolves any dependencies that were supplied through maven coordinates
 * @param coordinates Comma-delimited string of maven coordinates
 * @param ivySettings An IvySettings containing resolvers to use
 * @param exclusions Exclusions to apply when resolving transitive dependencies
 * @param isTest when true, disables downloading and quiets Ivy logging
 * @return The comma-delimited path to the jars of the given maven artifacts including their
 *         transitive dependencies
 */
def resolveMavenCoordinates(
    coordinates: String,
    ivySettings: IvySettings,
    exclusions: Seq[String] = Nil,
    isTest: Boolean = false): String = {
  if (coordinates == null || coordinates.trim.isEmpty) {
    ""
  } else {
    // Ivy prints to System.out; temporarily redirect it to printStream for the whole
    // resolution, and restore it in the finally block below.
    val sysOut = System.out
    try {
      // To prevent ivy from logging to system out
      System.setOut(printStream)
      val artifacts = extractMavenCoordinates(coordinates)
      // Directories for caching downloads through ivy and storing the jars when maven coordinates
      // are supplied to spark-submit
      val packagesDirectory: File = new File(ivySettings.getDefaultIvyUserDir, "jars")
      // scalastyle:off println
      printStream.println(
        s"Ivy Default Cache set to: ${ivySettings.getDefaultCache.getAbsolutePath}")
      printStream.println(s"The jars for the packages stored in: $packagesDirectory")
      // scalastyle:on println
      val ivy = Ivy.newInstance(ivySettings)
      // Set resolve options to download transitive dependencies as well
      val resolveOptions = new ResolveOptions
      resolveOptions.setTransitive(true)
      val retrieveOptions = new RetrieveOptions
      // Turn downloading and logging off for testing
      if (isTest) {
        resolveOptions.setDownload(false)
        resolveOptions.setLog(LogOptions.LOG_QUIET)
        retrieveOptions.setLog(LogOptions.LOG_QUIET)
      } else {
        resolveOptions.setDownload(true)
      }
      // Default configuration name for ivy
      val ivyConfName = "default"
      // A Module descriptor must be specified. Entries are dummy strings
      val md = getModuleDescriptor
      // clear ivy resolution from previous launches. The resolution file is usually at
      // ~/.ivy2/org.apache.spark-spark-submit-parent-default.xml. In between runs, this file
      // leads to confusion with Ivy when the files can no longer be found at the repository
      // declared in that file/
      val mdId = md.getModuleRevisionId
      val previousResolution = new File(ivySettings.getDefaultCache,
        s"${mdId.getOrganisation}-${mdId.getName}-$ivyConfName.xml")
      if (previousResolution.exists) previousResolution.delete
      md.setDefaultConf(ivyConfName)
      // Add exclusion rules for Spark and Scala Library
      addExclusionRules(ivySettings, ivyConfName, md)
      // add all supplied maven artifacts as dependencies
      addDependenciesToIvy(md, artifacts, ivyConfName)
      // caller-supplied exclusions are group:artifact pairs; widen each to all versions
      exclusions.foreach { e =>
        md.addExcludeRule(createExclusion(e + ":*", ivySettings, ivyConfName))
      }
      // resolve dependencies
      val rr: ResolveReport = ivy.resolve(md, resolveOptions)
      if (rr.hasError) {
        throw new RuntimeException(rr.getAllProblemMessages.toString)
      }
      // retrieve all resolved dependencies
      ivy.retrieve(rr.getModuleDescriptor.getModuleRevisionId,
        packagesDirectory.getAbsolutePath + File.separator +
          "[organization]_[artifact]-[revision].[ext]",
        retrieveOptions.setConfs(Array(ivyConfName)))
      resolveDependencyPaths(rr.getArtifacts.toArray, packagesDirectory)
    } finally {
      // Always restore the real System.out, even when resolution fails.
      System.setOut(sysOut)
    }
  }
}
/**
 * Builds a glob-matched Ivy exclude rule from a `groupId:artifactId:version` string
 * and registers it for the given configuration.
 */
private[deploy] def createExclusion(
    coords: String,
    ivySettings: IvySettings,
    ivyConfName: String): ExcludeRule = {
  val coordinate = extractMavenCoordinates(coords).head
  val excludedId = new ArtifactId(new ModuleId(coordinate.groupId, coordinate.artifactId), "*", "*", "*")
  val rule = new DefaultExcludeRule(excludedId, ivySettings.getMatcher("glob"), null)
  rule.addConfiguration(ivyConfName)
  rule
}
}
/**
 * Provides an indirection layer for passing arguments as system properties or flags to
 * the user's driver program or to downstream launcher tools.
 *
 * @param value the argument value to forward
 * @param clusterManager the cluster manager(s) this assignment applies to
 *                       (presumably a bitmask of SparkSubmit constants — TODO confirm)
 * @param deployMode the deploy mode(s) this assignment applies to
 *                   (presumably a bitmask of SparkSubmit constants — TODO confirm)
 * @param clOption command-line flag used to pass the value, if any
 * @param confKey configuration key used to pass the value, if any
 */
private case class OptionAssigner(
  value: String,
  clusterManager: Int,
  deployMode: Int,
  clOption: String = null,
  confKey: String = null)
| ericvandenbergfb/spark | core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala | Scala | apache-2.0 | 55,295 |
package kaseannot.placebo.objekt
import pkg._
// Top-level fixtures consumed by KasePlaceboObject below.
// NOTE(review): naming appears to encode declaration order relative to the consumer
// (CPre*/CPost*) and whether a @placebo companion class precedes or follows the
// @kase object (Nocomp/Precomp/Postcomp) — confirm against the macro-annot test design.
@kase object CPreToplevelNocomp
@placebo class CPreToplevelPrecomp
@kase object CPreToplevelPrecomp
@kase object CPreToplevelPostcomp
@placebo class CPreToplevelPostcomp
// Exercises @kase macro-annotation expansion for objects declared before/after their use
// site, at top level and locally, with and without @placebo companion classes. The order of
// declarations is the point of the test — do not reorder.
class KasePlaceboObject {
  // Minimal equality assertion with a readable failure message.
  def assertEquals(a: Any, b: Any): Unit = { assert(a == b, s"$a != $b") }

  // Collects each @kase object's toString; the final assertion checks that every
  // expansion produced the expected printable name, in declaration order.
  val objects = scala.collection.mutable.ListBuffer[Any]()
  objects += CPreToplevelNocomp
  objects += CPreToplevelPrecomp
  objects += CPreToplevelPostcomp
  objects += CPostToplevelNocomp
  objects += CPostToplevelPrecomp
  objects += CPostToplevelPostcomp

  // TODO: doesn't work in sbt, though does work in the command line
  // @kase object CPreMemberNocomp
  // @placebo class CPreMemberPrecomp
  // @kase object CPreMemberPrecomp
  // @kase object CPreMemberPostcomp
  // @placebo class CPreMemberPostcomp
  // objects += CPreMemberNocomp
  // objects += CPreMemberPrecomp
  // objects += CPreMemberPostcomp
  // objects += CPostMemberNocomp
  // objects += CPostMemberPrecomp
  // objects += CPostMemberPostcomp
  // @kase object CPostMemberNocomp
  // @placebo class CPostMemberPrecomp
  // @kase object CPostMemberPrecomp
  // @kase object CPostMemberPostcomp
  // @placebo class CPostMemberPostcomp

  // @Test
  def combo: Unit = {
    // Local (method-scope) variants of the same before/after declaration patterns.
    @kase object CPreLocalNocomp
    @placebo class CPreLocalPrecomp
    @kase object CPreLocalPrecomp
    @kase object CPreLocalPostcomp
    @placebo class CPreLocalPostcomp
    objects += CPreLocalNocomp
    objects += CPreLocalPrecomp
    objects += CPreLocalPostcomp
    objects += CPostLocalNocomp
    objects += CPostLocalPrecomp
    objects += CPostLocalPostcomp
    @kase object CPostLocalNocomp
    @placebo class CPostLocalPrecomp
    @kase object CPostLocalPrecomp
    @kase object CPostLocalPostcomp
    @placebo class CPostLocalPostcomp
    assertEquals(objects.mkString("\n"), """
      |CPreToplevelNocomp
      |CPreToplevelPrecomp
      |CPreToplevelPostcomp
      |CPostToplevelNocomp
      |CPostToplevelPrecomp
      |CPostToplevelPostcomp
      |CPreLocalNocomp
      |CPreLocalPrecomp
      |CPreLocalPostcomp
      |CPostLocalNocomp
      |CPostLocalPrecomp
      |CPostLocalPostcomp
    """.trim.stripMargin)
  }
}
// Fixtures declared *after* KasePlaceboObject, to verify expansion also works for
// forward references (the CPost* additions inside the class above refer to these).
@kase object CPostToplevelNocomp
@placebo class CPostToplevelPrecomp
@kase object CPostToplevelPrecomp
@kase object CPostToplevelPostcomp
@placebo class CPostToplevelPostcomp
| scala/scala | test/macro-annot/run/kase/KasePlaceboObject_2.scala | Scala | apache-2.0 | 2,383 |
package k2b6s9j.Define
import org.scaloid.common._
import scala.language.postfixOps
// Activity for the word-definition screen. The layout is built with the Scaloid DSL
// instead of inflating XML; it is currently an empty vertical layout with 20dip padding.
class DefinitionActivity extends SActivity {
  onCreate {
    contentView = new SVerticalLayout {
    }.padding(20 dip)
  }
}
| kepler0/Define | android/src/k2b6s9j/Define/DefinitionActivity.scala | Scala | mit | 215 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package system.basic
import java.io.File
import java.nio.charset.StandardCharsets
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import common._
import common.rest.WskRestOperations
import org.apache.openwhisk.core.entity.WhiskAction
import org.apache.commons.io.FileUtils
import org.apache.openwhisk.core.FeatureFlags
import spray.json._
import spray.json.DefaultJsonProtocol._
/**
 * Integration tests for basic OpenWhisk action behavior: creation, copying,
 * blocking/non-blocking invocation, parameter passing, UTF-8 handling, and large code.
 */
@RunWith(classOf[JUnitRunner])
class WskActionTests extends TestHelpers with WskTestHelpers with JsHelpers with WskActorSystem {

  implicit val wskprops = WskProps()
  // wsk must have type WskOperations so that tests using CLI (class Wsk)
  // instead of REST (WskRestOperations) still work.
  val wsk: WskOperations = new WskRestOperations

  val testString = "this is a test"
  val testResult = JsObject("count" -> testString.split(" ").length.toJson)
  val guestNamespace = wskprops.namespace

  behavior of "Whisk actions"

  it should "create an action with an empty file" in withAssetCleaner(wskprops) { (wp, assetHelper) =>
    val name = "empty"
    assetHelper.withCleaner(wsk.action, name) { (action, _) =>
      action.create(name, Some(TestUtils.getTestActionFilename("empty.js")))
    }
  }

  it should "invoke an action returning a promise" in withAssetCleaner(wskprops) { (wp, assetHelper) =>
    val name = "hello promise"
    assetHelper.withCleaner(wsk.action, name) { (action, _) =>
      action.create(name, Some(TestUtils.getTestActionFilename("helloPromise.js")))
    }

    val run = wsk.action.invoke(name)
    withActivation(wsk.activation, run) { activation =>
      activation.response.status shouldBe "success"
      activation.response.result shouldBe Some(JsObject("done" -> true.toJson))
      activation.logs.get.mkString(" ") shouldBe empty
    }
  }

  it should "invoke an action with a space in the name" in withAssetCleaner(wskprops) { (wp, assetHelper) =>
    val name = "hello Async"
    assetHelper.withCleaner(wsk.action, name) { (action, _) =>
      action.create(name, Some(TestUtils.getTestActionFilename("helloAsync.js")))
    }

    val run = wsk.action.invoke(name, Map("payload" -> testString.toJson))
    withActivation(wsk.activation, run) { activation =>
      activation.response.status shouldBe "success"
      activation.response.result shouldBe Some(testResult)
      activation.logs.get.mkString(" ") should include(testString)
    }
  }

  it should "invoke an action that throws an uncaught exception and returns correct status code" in withAssetCleaner(
    wskprops) { (wp, assetHelper) =>
    val name = "throwExceptionAction"
    assetHelper.withCleaner(wsk.action, name) { (action, _) =>
      action.create(name, Some(TestUtils.getTestActionFilename("runexception.js")))
    }

    withActivation(wsk.activation, wsk.action.invoke(name)) { activation =>
      // (Removed an unused `val response = activation.response` binding.)
      activation.response.status shouldBe "action developer error"
      activation.response.result shouldBe Some(
        JsObject("error" -> "An error has occurred: Extraordinary exception".toJson))
    }
  }

  it should "pass parameters bound on creation-time to the action" in withAssetCleaner(wskprops) { (wp, assetHelper) =>
    val name = "printParams"
    val params = Map("param1" -> "test1", "param2" -> "test2")

    assetHelper.withCleaner(wsk.action, name) { (action, _) =>
      action.create(
        name,
        Some(TestUtils.getTestActionFilename("printParams.js")),
        parameters = params.mapValues(_.toJson))
    }

    val invokeParams = Map("payload" -> testString)
    val run = wsk.action.invoke(name, invokeParams.mapValues(_.toJson))
    withActivation(wsk.activation, run) { activation =>
      val logs = activation.logs.get.mkString(" ")
      // Both creation-time and invoke-time parameters must show up in the logs.
      (params ++ invokeParams).foreach {
        case (key, value) =>
          logs should include(s"params.$key: $value")
      }
    }
  }

  it should "copy an action and invoke it successfully" in withAssetCleaner(wskprops) { (wp, assetHelper) =>
    val name = "copied"
    val packageName = "samples"
    val actionName = "wordcount"
    val fullQualifiedName = s"/$guestNamespace/$packageName/$actionName"

    assetHelper.withCleaner(wsk.pkg, packageName) { (pkg, _) =>
      pkg.create(packageName, shared = Some(true))
    }

    assetHelper.withCleaner(wsk.action, fullQualifiedName) {
      val file = Some(TestUtils.getTestActionFilename("wc.js"))
      (action, _) =>
        action.create(fullQualifiedName, file)
    }

    assetHelper.withCleaner(wsk.action, name) { (action, _) =>
      action.create(name, Some(fullQualifiedName), Some("copy"))
    }

    val run = wsk.action.invoke(name, Map("payload" -> testString.toJson))
    withActivation(wsk.activation, run) { activation =>
      activation.response.status shouldBe "success"
      activation.response.result shouldBe Some(testResult)
      activation.logs.get.mkString(" ") should include(testString)
    }
  }

  it should "copy an action and ensure exec, parameters, and annotations copied" in withAssetCleaner(wskprops) {
    (wp, assetHelper) =>
      val origActionName = "origAction"
      val copiedActionName = "copiedAction"
      val params = Map("a" -> "A".toJson)
      val annots = Map("b" -> "B".toJson)

      assetHelper.withCleaner(wsk.action, origActionName) {
        val file = Some(TestUtils.getTestActionFilename("wc.js"))
        (action, _) =>
          action.create(origActionName, file, parameters = params, annotations = annots)
      }

      assetHelper.withCleaner(wsk.action, copiedActionName) { (action, _) =>
        action.create(copiedActionName, Some(origActionName), Some("copy"))
      }

      val copiedAction = wsk.parseJsonString(wsk.action.get(copiedActionName).stdout)
      // BUG FIX: previously this fetched copiedActionName a second time, so the
      // comparisons below compared the copy against itself and were vacuous.
      val origAction = wsk.parseJsonString(wsk.action.get(origActionName).stdout)

      copiedAction.fields("annotations") shouldBe origAction.fields("annotations")
      copiedAction.fields("parameters") shouldBe origAction.fields("parameters")
      copiedAction.fields("exec") shouldBe origAction.fields("exec")
      copiedAction.fields("version") shouldBe JsString("0.0.1")
  }

  it should "add new parameters and annotations while copying an action" in withAssetCleaner(wskprops) {
    (wp, assetHelper) =>
      val origName = "origAction"
      val copiedName = "copiedAction"
      val origParams = Map("origParam1" -> "origParamValue1".toJson, "origParam2" -> 999.toJson)
      val copiedParams = Map("copiedParam1" -> "copiedParamValue1".toJson, "copiedParam2" -> 123.toJson)
      val origAnnots = Map("origAnnot1" -> "origAnnotValue1".toJson, "origAnnot2" -> true.toJson)
      val copiedAnnots = Map("copiedAnnot1" -> "copiedAnnotValue1".toJson, "copiedAnnot2" -> false.toJson)
      val resParams = Seq(
        JsObject("key" -> JsString("copiedParam1"), "value" -> JsString("copiedParamValue1")),
        JsObject("key" -> JsString("copiedParam2"), "value" -> JsNumber(123)),
        JsObject("key" -> JsString("origParam1"), "value" -> JsString("origParamValue1")),
        JsObject("key" -> JsString("origParam2"), "value" -> JsNumber(999)))
      // NOTE(review): baseAnnots already contains the provideApiKeyAnnotationName entry, so
      // the conditional append below can duplicate it when requireApiKeyAnnotation is set.
      // Harmless for the one-directional `diff` assertions used here, but confirm whether
      // the entry was meant to be conditional only.
      val baseAnnots = Seq(
        JsObject("key" -> JsString("origAnnot1"), "value" -> JsString("origAnnotValue1")),
        JsObject("key" -> JsString("copiedAnnot2"), "value" -> JsFalse),
        JsObject("key" -> JsString("copiedAnnot1"), "value" -> JsString("copiedAnnotValue1")),
        JsObject("key" -> JsString("origAnnot2"), "value" -> JsTrue),
        JsObject("key" -> JsString("exec"), "value" -> JsString("nodejs:6")),
        JsObject("key" -> WhiskAction.provideApiKeyAnnotationName.toJson, "value" -> JsFalse))
      val resAnnots: Seq[JsObject] = if (FeatureFlags.requireApiKeyAnnotation) {
        baseAnnots ++ Seq(JsObject("key" -> WhiskAction.provideApiKeyAnnotationName.toJson, "value" -> JsFalse))
      } else baseAnnots

      assetHelper.withCleaner(wsk.action, origName) {
        val file = Some(TestUtils.getTestActionFilename("echo.js"))
        (action, _) =>
          action.create(origName, file, parameters = origParams, annotations = origAnnots)
      }

      assetHelper.withCleaner(wsk.action, copiedName) { (action, _) =>
        println("created copied ")
        action.create(copiedName, Some(origName), Some("copy"), parameters = copiedParams, annotations = copiedAnnots)
      }

      val copiedAction = wsk.parseJsonString(wsk.action.get(copiedName).stdout)

      // CLI does not guarantee order of annotations and parameters so do a diff to compare the values
      copiedAction.fields("parameters").convertTo[Seq[JsObject]] diff resParams shouldBe List.empty
      copiedAction.fields("annotations").convertTo[Seq[JsObject]] diff resAnnots shouldBe List.empty
  }

  it should "recreate and invoke a new action with different code" in withAssetCleaner(wskprops) { (wp, assetHelper) =>
    val name = "recreatedAction"
    assetHelper.withCleaner(wsk.action, name, false) { (action, _) =>
      action.create(name, Some(TestUtils.getTestActionFilename("wc.js")))
    }

    val run1 = wsk.action.invoke(name, Map("payload" -> testString.toJson))
    withActivation(wsk.activation, run1) { activation =>
      activation.response.status shouldBe "success"
      activation.logs.get.mkString(" ") should include(s"The message '$testString' has")
    }

    wsk.action.delete(name)
    assetHelper.withCleaner(wsk.action, name) { (action, _) =>
      action.create(name, Some(TestUtils.getTestActionFilename("hello.js")))
    }

    val run2 = wsk.action.invoke(name, Map("payload" -> testString.toJson))
    withActivation(wsk.activation, run2) { activation =>
      activation.response.status shouldBe "success"
      activation.logs.get.mkString(" ") should include(s"hello, $testString")
    }
  }

  it should "fail to invoke an action with an empty file" in withAssetCleaner(wskprops) { (wp, assetHelper) =>
    val name = "empty"
    assetHelper.withCleaner(wsk.action, name) { (action, _) =>
      action.create(name, Some(TestUtils.getTestActionFilename("empty.js")))
    }

    val run = wsk.action.invoke(name)
    withActivation(wsk.activation, run) { activation =>
      activation.response.status shouldBe "action developer error"
      activation.response.result shouldBe Some(JsObject("error" -> "Missing main/no code to execute.".toJson))
    }
  }

  it should "blocking invoke of nested blocking actions" in withAssetCleaner(wskprops) { (wp, assetHelper) =>
    val name = "nestedBlockingAction"
    val child = "wc"

    assetHelper.withCleaner(wsk.action, name) { (action, _) =>
      val annotations =
        if (FeatureFlags.requireApiKeyAnnotation) Map(WhiskAction.provideApiKeyAnnotationName -> JsTrue)
        else Map.empty[String, JsValue]
      action.create(name, Some(TestUtils.getTestActionFilename("wcbin.js")), annotations = annotations)
    }
    assetHelper.withCleaner(wsk.action, child) { (action, _) =>
      action.create(child, Some(TestUtils.getTestActionFilename("wc.js")))
    }

    val run = wsk.action.invoke(name, Map("payload" -> testString.toJson), blocking = true)
    val activation = wsk.parseJsonString(run.stdout).convertTo[ActivationResult]

    withClue(s"check failed for activation: $activation") {
      val wordCount = testString.split(" ").length
      activation.response.result.get shouldBe JsObject("binaryCount" -> s"${wordCount.toBinaryString} (base 2)".toJson)
    }
  }

  it should "blocking invoke an asynchronous action" in withAssetCleaner(wskprops) { (wp, assetHelper) =>
    val name = "helloAsync"
    assetHelper.withCleaner(wsk.action, name) { (action, _) =>
      action.create(name, Some(TestUtils.getTestActionFilename("helloAsync.js")))
    }

    val run = wsk.action.invoke(name, Map("payload" -> testString.toJson), blocking = true)
    val activation = wsk.parseJsonString(run.stdout).convertTo[ActivationResult]

    withClue(s"check failed for activation: $activation") {
      activation.response.status shouldBe "success"
      activation.response.result shouldBe Some(testResult)
      activation.logs shouldBe Some(List.empty)
    }
  }

  it should "not be able to use 'ping' in an action" in withAssetCleaner(wskprops) { (wp, assetHelper) =>
    val name = "ping"
    assetHelper.withCleaner(wsk.action, name) { (action, _) =>
      action.create(name, Some(TestUtils.getTestActionFilename("ping.js")))
    }

    val run = wsk.action.invoke(name, Map("payload" -> "google.com".toJson))
    withActivation(wsk.activation, run) { activation =>
      val result = activation.response.result.get
      // The ping binary must not be reachable from inside the action container.
      result.getFields("stdout", "code") match {
        case Seq(JsString(stdout), JsNumber(code)) =>
          stdout should not include "bytes from"
          code.intValue() should not be 0
        case _ => fail(s"fields 'stdout' or 'code' where not of the expected format, was $result")
      }
    }
  }

  it should "support UTF-8 as input and output format" in withAssetCleaner(wskprops) { (wp, assetHelper) =>
    val name = "utf8Test"
    assetHelper.withCleaner(wsk.action, name) { (action, _) =>
      action.create(name, Some(TestUtils.getTestActionFilename("hello.js")))
    }

    val utf8 = "«ταБЬℓσö»: 1<2 & 4+1>³, now 20%€§$ off!"
    val run = wsk.action.invoke(name, Map("payload" -> utf8.toJson))
    withActivation(wsk.activation, run) { activation =>
      activation.response.status shouldBe "success"
      activation.logs.get.mkString(" ") should include(s"hello, $utf8")
    }
  }

  it should "invoke action with large code" in withAssetCleaner(wskprops) { (wp, assetHelper) =>
    val name = "big-hello"
    assetHelper.withCleaner(wsk.action, name) { (action, _) =>
      val filePath = TestUtils.getTestActionFilename("hello.js")
      val code = FileUtils.readFileToString(new File(filePath), StandardCharsets.UTF_8)
      // Pad the action code up to the maximum allowed size to exercise the size limit path.
      val largeCode = code + " " * (WhiskProperties.getMaxActionSizeMB * FileUtils.ONE_MB).toInt
      val tmpFile = File.createTempFile("whisk", ".js")
      FileUtils.write(tmpFile, largeCode, StandardCharsets.UTF_8)
      val result = action.create(name, Some(tmpFile.getAbsolutePath))
      tmpFile.delete()
      result
    }

    val hello = "hello"
    val run = wsk.action.invoke(name, Map("payload" -> hello.toJson))
    withActivation(wsk.activation, run) { activation =>
      activation.response.status shouldBe "success"
      activation.logs.get.mkString(" ") should include(s"hello, $hello")
    }
  }
}
| csantanapr/incubator-openwhisk | tests/src/test/scala/system/basic/WskActionTests.scala | Scala | apache-2.0 | 15,227 |
/* Copyright 2016-2019 UniCredit S.p.A.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package unicredit.lethe
import scala.io.StdIn
import boopickle.Default._
import client._
import data._
import oram._
import transport._
/**
 * Interactive demo of the recursive Path ORAM: stores four names in an in-memory
 * oblivious store and serves lookups from a simple read-eval loop.
 */
object Recursive extends App {
  implicit val pstring = Pointed("")
  implicit val pint = Pointed(0)

  // In-memory backing store and ORAM parameters for the demo.
  val remote = new MemoryRemote(capacity = 100000)
  val params = Params(depth = 8, bucketSize = 4)
  val oram = PathORAM.recursive[String, String, Int](remote, "Hello my friend", params, _.length)
  oram.init

  // Seed the table the loop below reads from.
  oram.write("1", "Alice")
  oram.write("2", "Bob")
  oram.write("3", "Eve")
  oram.write("4", "Mallory")

  // Inputs that terminate the loop.
  val quitCommands = Set("q", "quit", "x", "exit")

  var keepGoing = true
  while (keepGoing) {
    println("Choose a number between 1 and 4:")
    val input = StdIn.readLine.trim
    if (quitCommands.contains(input)) {
      // FIX: quit commands are now detected directly instead of being recognized only
      // after Integer parsing threw an exception.
      keepGoing = false
    } else {
      try {
        val n = input.toInt
        if ((n >= 1) && (n <= 4)) {
          println(oram.read(n.toString))
        }
      } catch {
        // FIX: only swallow recoverable failures (bad number, read error). The previous
        // `case _: Throwable` would also hide fatal VM errors such as OutOfMemoryError.
        case scala.util.control.NonFatal(_) => // ignore and prompt again
      }
    }
  }
}
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.kudu.index
import java.nio.charset.StandardCharsets
import java.util.Collections
import org.apache.kudu.client.{AlterTableOptions, CreateTableOptions, KuduTable, PartialRow}
import org.locationtech.geomesa.curve.BinnedTime
import org.locationtech.geomesa.index.api._
import org.locationtech.geomesa.index.conf.splitter.DefaultSplitter
import org.locationtech.geomesa.index.index.z3.Z3IndexKey
import org.locationtech.geomesa.kudu.schema.KuduIndexColumnAdapter.{FeatureIdAdapter, PeriodColumnAdapter, ZColumnAdapter}
import org.locationtech.geomesa.kudu.{KuduValue, Partitioning}
import org.locationtech.geomesa.utils.index.ByteArrays
object Z3ColumnMapper {
  // Key columns for the Z3 index, in the order written by createKeyValues:
  // time period (epoch bin), z-curve value, then feature id.
  private val columns = Seq(PeriodColumnAdapter, ZColumnAdapter, FeatureIdAdapter)
}
class Z3ColumnMapper(index: GeoMesaFeatureIndex[_, _]) extends KuduColumnMapper(index, Z3ColumnMapper.columns) {
/**
 * Configures hash and range partitioning for the Kudu table backing the Z3 index.
 *
 * Hash-partitions on feature id when more than one shard is configured, and
 * range-partitions on (period, z). Range bounds come from the configured z3 splits,
 * falling back to a single full-range partition for the current time bin.
 */
override def configurePartitions(): CreateTableOptions = {
  import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType

  import scala.collection.JavaConverters._

  val options = new CreateTableOptions()

  // add hash splits based on our shards, which we don't need to actually store as a separate column
  val shards = index.sft.getZShards
  if (shards > 1) {
    options.addHashPartitions(Collections.singletonList(FeatureIdAdapter.name), shards)
  }

  options.setRangePartitionColumns(Seq(PeriodColumnAdapter.name, ZColumnAdapter.name).asJava)

  // Per-bin sorted z-value boundaries; each consecutive pair becomes a range partition below.
  val splits = {
    val configured = DefaultSplitter.Parser.z3Splits(index.sft.getZ3Interval, splitters)
    if (configured.isEmpty) {
      // no configured splits: one partition covering the full z range of the current bin
      val bin = BinnedTime.timeToBinnedTime(index.sft.getZ3Interval)(System.currentTimeMillis()).bin
      Map(bin -> Seq(0, Long.MaxValue))
    } else {
      configured.groupBy(_._1).map { case (bin, times) =>
        val ts = times.flatMap(_._2)
        // add upper and lower bounds as our splits don't have endpoints
        val t = if (ts.isEmpty) { Seq(0, Long.MaxValue) } else {
          val builder = Seq.newBuilder[Long]
          builder.sizeHint(ts.size + 2)
          builder += 0L
          builder ++= ts.sorted
          builder += Long.MaxValue
          builder.result.distinct
        }
        bin -> t
      }
    }
  }

  splits.foreach { case (bin, times) =>
    // each adjacent (lo, hi) pair defines one [lo, hi) range partition within the bin
    times.sliding(2).foreach { case Seq(lo, hi) =>
      val lower = tableSchema.newPartialRow()
      val upper = tableSchema.newPartialRow()
      lower.addShort(0, bin)
      upper.addShort(0, bin)
      lower.addLong(1, lo)
      upper.addLong(1, hi)
      options.addRangePartition(lower, upper)
    }
  }

  options
}
override def createPartition(table: KuduTable, bin: Short): Option[Partitioning] = {
val alteration = new AlterTableOptions()
val bitSplits = {
val configured = DefaultSplitter.Parser.z3BitSplits(splitters)
if (configured.isEmpty) { Seq(0L, Long.MaxValue) } else {
configured :+ Long.MaxValue // add an upper bound as our splits don't have endpoints
}
}
bitSplits.sliding(2).foreach { case Seq(lo, hi) =>
val lower = tableSchema.newPartialRow()
val upper = tableSchema.newPartialRow()
lower.addShort(0, bin)
upper.addShort(0, bin)
lower.addLong(1, lo)
upper.addLong(1, hi)
alteration.addRangePartition(lower, upper)
}
Some(Partitioning(table.getName, alteration))
}
override def createKeyValues(value: SingleRowKeyValue[_]): Seq[KuduValue[_]] = {
val fid = KuduValue(new String(value.id, StandardCharsets.UTF_8), FeatureIdAdapter)
value.key match {
case Z3IndexKey(bin, z) => Seq(KuduValue(bin, PeriodColumnAdapter), KuduValue(z, ZColumnAdapter), fid)
case _ => throw new IllegalStateException(s"Expected z value but got '${value.key}'")
}
}
override def toRowRanges(ranges: Seq[ScanRange[_]],
tieredKeyRanges: Seq[ByteRange]): Seq[(Option[PartialRow], Option[PartialRow])] = {
def lower(key: Z3IndexKey): Some[PartialRow] = {
val row = tableSchema.newPartialRow()
PeriodColumnAdapter.writeToRow(row, key.bin)
ZColumnAdapter.writeToRow(row, key.z)
FeatureIdAdapter.writeToRow(row, "")
Some(row)
}
def upper(key: Z3IndexKey): Some[PartialRow] = {
val row = tableSchema.newPartialRow()
PeriodColumnAdapter.writeToRow(row, key.bin)
if (key.z == Long.MaxValue) {
// push the exclusive value to the feature ID to avoid numeric overflow
ZColumnAdapter.writeToRow(row, key.z)
FeatureIdAdapter.writeToRow(row, new String(ByteArrays.ZeroByteArray, StandardCharsets.UTF_8))
} else {
ZColumnAdapter.writeToRow(row, key.z + 1L)
FeatureIdAdapter.writeToRow(row, "")
}
Some(row)
}
ranges.asInstanceOf[Seq[ScanRange[Z3IndexKey]]].map {
case BoundedRange(lo, hi) => (lower(lo), upper(hi))
case UnboundedRange(_) => (None, None)
case r => throw new IllegalArgumentException(s"Unexpected range type $r")
}
}
}
| elahrvivaz/geomesa | geomesa-kudu/geomesa-kudu-datastore/src/main/scala/org/locationtech/geomesa/kudu/index/Z3ColumnMapper.scala | Scala | apache-2.0 | 5,581 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package model.persisted
import play.api.libs.json.{Json, OFormat}
import reactivemongo.bson.{BSONDocumentHandler, Macros}
/**
 * A user's "media" answer (how they heard about the service).
 *
 * @param userId the id of the user this answer belongs to
 * @param media the media answer text
 * @param originalUserId presumably the user id the record was originally stored
 *                       against before being reassigned - confirm with callers
 */
case class Media(userId: String, media: String, originalUserId: Option[String] = None)

object Media {
  // Explicit result types on implicit vals avoid slow/cyclic implicit
  // resolution and are required for implicit definitions in Scala 3.
  implicit val mediaFormat: OFormat[Media] = Json.format[Media]
  implicit val mediaHandler: BSONDocumentHandler[Media] = Macros.handler[Media]
}
| hmrc/fset-faststream | app/model/persisted/Media.scala | Scala | apache-2.0 | 900 |
package io.swagger.client.model
/** A single entry in a connector's update history (generated Swagger model). */
case class ConnectorInfoHistoryItem (
  /* Number of measurements */
  number_of_measurements: Integer,
  /* True if the update was successful */
  success: Boolean,
  /* Error message. */
  message: String,
  /* Date and time of the update */
  created_at: String)
| QuantiModo/QuantiModo-SDK-Scala | src/main/scala/io/swagger/client/model/ConnectorInfoHistoryItem.scala | Scala | gpl-2.0 | 306 |
package com.twitter.finatra.http.exceptions
import com.google.common.net.MediaType
import com.twitter.finagle.http.Status
/* HTTP Exceptions */
// TODO: Redesign to avoid boilerplate below (@see ResponseBuilder) */
/**
* HttpException which will be rendered as an HTTP response.
*/
/** Factory methods for [[HttpException]]. */
object HttpException {

  /** Creates an exception rendered as a plain-text response with the given body. */
  def plainText(status: Status, body: String): HttpException = {
    new HttpException(status, MediaType.PLAIN_TEXT_UTF_8, Seq(body))
  }

  /** Creates an exception rendered as a JSON response containing the given errors. */
  def apply(status: Status, errors: String*): HttpException = {
    new HttpException(status, MediaType.JSON_UTF_8, errors)
  }
}

/**
 * Exception which will be rendered as an HTTP response.
 *
 * @param statusCode HTTP status of the rendered response
 * @param mediaType  content type of the rendered response
 * @param errors     error messages to include in the response body
 */
class HttpException(
  val statusCode: Status,
  val mediaType: MediaType,
  val errors: Seq[String] = Seq())
  extends Exception {

  /* Public */

  override def getMessage: String = {
    "HttpException(" + statusCode + ":" + mediaType + ") with errors: " + errors.mkString(",")
  }

  /* Generated Equals/Hashcode */

  override def equals(other: Any): Boolean = other match {
    case that: HttpException =>
      (that canEqual this) &&
        statusCode == that.statusCode &&
        mediaType == that.mediaType &&
        errors == that.errors
    case _ => false
  }

  override def hashCode(): Int = {
    val state = Seq(statusCode, mediaType, errors)
    state.map(_.hashCode()).foldLeft(0)((a, b) => 31 * a + b)
  }

  def canEqual(other: Any): Boolean = other.isInstanceOf[HttpException]
}
/* Specific Status Exceptions */
object NotFoundException {

  /** Creates a 404 Not Found exception rendered as plain text. */
  def plainText(body: String): NotFoundException = {
    new NotFoundException(MediaType.PLAIN_TEXT_UTF_8, Seq(body))
  }

  /** Creates a 404 Not Found exception rendered as JSON. */
  def apply(errors: String*): NotFoundException = {
    new NotFoundException(MediaType.JSON_UTF_8, errors)
  }
}

/** [[HttpException]] rendered as a 404 Not Found response. */
case class NotFoundException(
  override val mediaType: MediaType,
  override val errors: Seq[String])
  extends HttpException(Status.NotFound, mediaType, errors) {

  def this(error: String) = {
    this(MediaType.JSON_UTF_8, Seq(error))
  }
}

object ConflictException {

  /** Creates a 409 Conflict exception rendered as plain text. */
  def plainText(body: String): ConflictException = {
    new ConflictException(MediaType.PLAIN_TEXT_UTF_8, Seq(body))
  }

  /** Creates a 409 Conflict exception rendered as JSON. */
  def apply(errors: String*): ConflictException = {
    new ConflictException(MediaType.JSON_UTF_8, errors)
  }
}

/** [[HttpException]] rendered as a 409 Conflict response. */
case class ConflictException(
  override val mediaType: MediaType,
  override val errors: Seq[String])
  extends HttpException(Status.Conflict, mediaType, errors) {

  def this(error: String) = {
    this(MediaType.JSON_UTF_8, Seq(error))
  }
}

object InternalServerErrorException {

  /** Creates a 500 Internal Server Error exception rendered as plain text. */
  def plainText(body: String): InternalServerErrorException = {
    new InternalServerErrorException(MediaType.PLAIN_TEXT_UTF_8, Seq(body))
  }

  /** Creates a 500 Internal Server Error exception rendered as JSON. */
  def apply(errors: String*): InternalServerErrorException = {
    new InternalServerErrorException(MediaType.JSON_UTF_8, errors)
  }
}

/** [[HttpException]] rendered as a 500 Internal Server Error response. */
case class InternalServerErrorException(
  override val mediaType: MediaType,
  override val errors: Seq[String])
  extends HttpException(Status.InternalServerError, mediaType, errors)

object ServiceUnavailableException {

  /** Creates a 503 Service Unavailable exception rendered as plain text. */
  def plainText(body: String): ServiceUnavailableException = {
    new ServiceUnavailableException(MediaType.PLAIN_TEXT_UTF_8, Seq(body))
  }

  /** Creates a 503 Service Unavailable exception rendered as JSON. */
  def apply(errors: String*): ServiceUnavailableException = {
    new ServiceUnavailableException(MediaType.JSON_UTF_8, errors)
  }
}

/** [[HttpException]] rendered as a 503 Service Unavailable response. */
case class ServiceUnavailableException(
  override val mediaType: MediaType,
  override val errors: Seq[String])
  extends HttpException(Status.ServiceUnavailable, mediaType, errors)

object BadRequestException {

  /** Creates a 400 Bad Request exception rendered as plain text. */
  def plainText(body: String): BadRequestException = {
    new BadRequestException(MediaType.PLAIN_TEXT_UTF_8, Seq(body))
  }

  /** Creates a 400 Bad Request exception rendered as JSON. */
  def apply(errors: String*): BadRequestException = {
    new BadRequestException(MediaType.JSON_UTF_8, errors)
  }
}

/** [[HttpException]] rendered as a 400 Bad Request response. */
case class BadRequestException(
  override val mediaType: MediaType,
  override val errors: Seq[String])
  extends HttpException(Status.BadRequest, mediaType, errors) {

  def this(error: String) = {
    this(MediaType.JSON_UTF_8, Seq(error))
  }
}

object ForbiddenException {

  /** Creates a 403 Forbidden exception rendered as plain text. */
  def plainText(body: String): ForbiddenException = {
    new ForbiddenException(MediaType.PLAIN_TEXT_UTF_8, Seq(body))
  }

  /** Creates a 403 Forbidden exception rendered as JSON. */
  def apply(errors: String*): ForbiddenException = {
    new ForbiddenException(MediaType.JSON_UTF_8, errors)
  }
}

/** [[HttpException]] rendered as a 403 Forbidden response. */
case class ForbiddenException(
  override val mediaType: MediaType,
  override val errors: Seq[String])
  extends HttpException(Status.Forbidden, mediaType, errors)

object NotAcceptableException {

  /** Creates a 406 Not Acceptable exception rendered as plain text. */
  def plainText(body: String): NotAcceptableException = {
    new NotAcceptableException(MediaType.PLAIN_TEXT_UTF_8, Seq(body))
  }

  /** Creates a 406 Not Acceptable exception rendered as JSON. */
  def apply(errors: String*): NotAcceptableException = {
    new NotAcceptableException(MediaType.JSON_UTF_8, errors)
  }
}

/** [[HttpException]] rendered as a 406 Not Acceptable response. */
case class NotAcceptableException(
  override val mediaType: MediaType,
  override val errors: Seq[String])
  extends HttpException(Status.NotAcceptable, mediaType, errors)
| syamantm/finatra | http/src/main/scala/com/twitter/finatra/http/exceptions/exceptions.scala | Scala | apache-2.0 | 4,521 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.events
import org.scalatest.Assertions._
/**
 * Test-support trait for verifying the `Location` attached to events fired by
 * suites whose tests are declared as methods. Implementors supply the expected
 * (name, class, method) pairs; `checkFun` is invoked per fired event and marks
 * the matching pair as checked, and `allChecked` asserts that every expected
 * event actually fired.
 */
trait TestLocationMethodServices {

  // Each pair carries a mutable `checked` flag that is flipped when the
  // corresponding event is observed by `checkFun`.
  private[events] case class TestStartingPair(testName: String, className: String, methodName: String, var checked: Boolean = false)
  import scala.language.existentials
  private[events] case class TestResultPair(clazz: Class[_], className: String, methodName: String, var checked: Boolean = false)
  private[events] case class ScopeOpenedPair(testName: String, className: String, var checked: Boolean = false)
  private[events] case class ScopeClosedPair(testName: String, className: String, var checked: Boolean = false)

  val suiteTypeName: String
  val expectedStartingList: List[TestStartingPair]
  val expectedResultList: List[TestResultPair]
  val expectedScopeOpenedList: List[ScopeOpenedPair]
  val expectedScopeClosedList: List[ScopeClosedPair]

  /** Asserts the event carries a TopOfMethod location for the given class/method; returns true on success. */
  private def checkTopOfMethod(className: String, methodName: String, event: Event): Boolean = {
    event.location match {
      case Some(location) =>
        location match {
          case topOfMethod: TopOfMethod =>
            val expectedClassName = className
            val expectedMethodId = "public void " + expectedClassName + "." + methodName
            assert(expectedClassName == topOfMethod.className, "Suite " + suiteTypeName + "'s " + event.getClass.getName + " event's TopOfMethod.className expected to be " + expectedClassName + ", but got " + topOfMethod.className)
            assert(expectedMethodId == topOfMethod.methodId, "Suite " + suiteTypeName + "'s " + event.getClass.getName + " event's TopOfMethod.methodId expected to be " + expectedMethodId + ", but got " + topOfMethod.methodId)
            true
          case _ => fail("Suite " + suiteTypeName + "'s " + event.getClass.getName + " event expect to have TopOfMethod location, but got " + location.getClass.getName)
        }
      case None => fail("Suite " + suiteTypeName + "'s " + event.getClass.getName + " does not have location (None)")
    }
  }

  /** Asserts the event carries a TopOfClass location for the given class; returns true on success. */
  private def checkTopOfClass(className: String, event: Event): Boolean = {
    event.location match {
      case Some(location) =>
        location match {
          case topOfClass: TopOfClass =>
            assert(className == topOfClass.className, "Suite " + suiteTypeName + "'s " + event.getClass.getName + " event's TopOfClass.className expected to be " + className + ", but got " + topOfClass.className)
            true
          case _ => fail("Suite " + suiteTypeName + "'s " + event.getClass.getName + " event expect to have TopOfClass location, but got " + location.getClass.getName)
        }
      case None => fail("Suite " + suiteTypeName + "'s " + event.getClass.getName + " does not have location (None)")
    }
  }

  /** Dispatches on the event type and records that the matching expected pair was seen with the right location. */
  def checkFun(event: Event): Unit = {
    event match {
      case testStarting: TestStarting =>
        val expectedStartingPairOpt = expectedStartingList.find { pair => pair.testName == testStarting.testName }
        expectedStartingPairOpt match {
          case Some(expectedStartingPair) => expectedStartingPair.checked = checkTopOfMethod(expectedStartingPair.className, expectedStartingPair.methodName, event)
          case None => fail("Unknown TestStarting for testName=" + testStarting.testName + " in " + suiteTypeName)
        }
      case scopeOpened: ScopeOpened =>
        val expectedScopeOpenedPairOpt = expectedScopeOpenedList.find { pair => pair.testName == scopeOpened.message }
        expectedScopeOpenedPairOpt match {
          case Some(expectedScopeOpenedPair) => expectedScopeOpenedPair.checked = checkTopOfClass(expectedScopeOpenedPair.className, event)
          case None => fail("Unknown ScopeOpened for testName=" + scopeOpened.message + " in " + suiteTypeName)
        }
      case scopeClosed: ScopeClosed =>
        val expectedScopeClosedPairOpt = expectedScopeClosedList.find { pair => pair.testName == scopeClosed.message }
        expectedScopeClosedPairOpt match {
          case Some(expectedScopeClosedPair) => expectedScopeClosedPair.checked = checkTopOfClass(expectedScopeClosedPair.className, event)
          case None => fail("Unknown ScopeClosed for testName=" + scopeClosed.message + " in " + suiteTypeName)
        }
      case suiteStarting: SuiteStarting => // Tested in LocationSuiteProp
      case suiteCompleted: SuiteCompleted => // Tested in LocationSuiteProp
      case _ =>
        val expectedResultPairOpt = expectedResultList.find { pair => pair.clazz == event.getClass() }
        expectedResultPairOpt match {
          case Some(expectedResultPair) => expectedResultPair.checked = checkTopOfMethod(expectedResultPair.className, expectedResultPair.methodName, event)
          case None => fail("Unexpected event:" + event.getClass.getName + " in " + suiteTypeName)
        }
    }
  }

  /** Asserts that every expected event was fired (i.e. was marked checked by `checkFun`). */
  def allChecked: Unit = {
    expectedStartingList.foreach { pair => assert(pair.checked, suiteTypeName + ": TestStarting for " + pair.testName + " not fired.") }
    expectedResultList.foreach { pair => assert(pair.checked, suiteTypeName + ": " + pair.clazz.getName() + " event not fired.") }
    // Fix: the scope pairs maintained their `checked` flags but were never
    // verified, so missing ScopeOpened/ScopeClosed events went undetected.
    expectedScopeOpenedList.foreach { pair => assert(pair.checked, suiteTypeName + ": ScopeOpened for " + pair.testName + " not fired.") }
    expectedScopeClosedList.foreach { pair => assert(pair.checked, suiteTypeName + ": ScopeClosed for " + pair.testName + " not fired.") }
  }
}
| dotty-staging/scalatest | scalatest-test/src/test/scala/org/scalatest/events/TestLocationMethodServices.scala | Scala | apache-2.0 | 5,706 |
package com.twitter.finagle.service
import com.twitter.finagle.Filter.TypeAgnostic
import com.twitter.finagle._
import com.twitter.finagle.context.{Contexts, Deadline}
import com.twitter.finagle.service.TimeoutFilterTest.TunableTimeoutHelper
import com.twitter.util.TimeConversions._
import com.twitter.util._
import com.twitter.util.tunable.Tunable
import java.util.concurrent.atomic.AtomicReference
import org.junit.runner.RunWith
import org.scalatest.{FunSuite, Matchers}
import org.scalatest.junit.JUnitRunner
import org.scalatest.mock.MockitoSugar
import scala.language.reflectiveCalls
private object TimeoutFilterTest {

  /**
   * Fixture wiring a 1-second TimeoutFilter around a service backed by a
   * promise the test controls. The promise records any interrupt raised on it
   * so tests can assert the filter cancels the underlying request on timeout.
   */
  class TimeoutFilterHelper {
    val timer = new MockTimer
    // Unsatisfied by default; tests either set a value or advance time past
    // the timeout. `interrupted` captures the cancellation exception.
    val promise = new Promise[String] {
      @volatile var interrupted: Option[Throwable] = None
      setInterruptHandler {
        case exc => interrupted = Some(exc)
      }
    }
    val service = new Service[String, String] {
      def apply(request: String) = promise
    }

    val timeout = 1.second
    val exception = new IndividualRequestTimeoutException(timeout)
    val timeoutFilter = new TimeoutFilter[String, String](timeout, exception, timer)
    val timeoutService = timeoutFilter.andThen(service)
  }

  /**
   * Fixture building a client stack whose timeout comes from a mutable Tunable
   * param, over a service that never completes - used to exercise
   * Tunable-driven timeouts (set / cleared / unset).
   */
  class TunableTimeoutHelper {
    val timer = new MockTimer()
    val tunable = Tunable.emptyMutable[Duration]("id")
    val svc = Service.mk { _: String => Future.never }
    val svcFactory = ServiceFactory.const(svc)
    val stack = TimeoutFilter.clientModule[String, String]
      .toStack(Stack.Leaf(Stack.Role("test"), svcFactory))
    val params = Stack.Params.empty + param.Timer(timer) + TimeoutFilter.Param(tunable)
    val service = stack.make(params).toService
  }
}
/** Tests for [[TimeoutFilter]]: fixed, variable and Tunable-driven timeouts. */
@RunWith(classOf[JUnitRunner])
class TimeoutFilterTest extends FunSuite
  with Matchers
  with MockitoSugar {

  import TimeoutFilterTest.TimeoutFilterHelper

  test("TimeoutFilter should request succeeds when the service succeeds") {
    val h = new TimeoutFilterHelper
    import h._

    promise.setValue("1")
    val res = timeoutService("blah")
    assert(res.isDefined)
    assert(Await.result(res) == "1")
  }

  test("TimeoutFilter should time out a request that is not successful, cancels underlying") {
    val h = new TimeoutFilterHelper
    import h._

    Time.withCurrentTimeFrozen { tc: TimeControl =>
      val res = timeoutService("blah")
      assert(!res.isDefined)
      assert(promise.interrupted == None)
      tc.advance(2.seconds)
      timer.tick()
      assert(res.isDefined)
      val t = promise.interrupted
      intercept[java.util.concurrent.TimeoutException] {
        throw t.get
      }
      intercept[IndividualRequestTimeoutException] {
        Await.result(res)
      }
    }
  }

  // Fixture exposing the broadcast Deadline the filter sets for downstream code.
  class DeadlineCtx(val timeout: Duration) {
    val service = new Service[Unit, Option[Deadline]] {
      def apply(req: Unit) = Future.value(Deadline.current)
    }

    val timer = new MockTimer
    val exception = new IndividualRequestTimeoutException(timeout)
    val timeoutFilter = new TimeoutFilter[Unit, Option[Deadline]](timeout, exception, timer)
    val timeoutService = timeoutFilter andThen service
  }

  test("deadlines, finite timeout") {
    val ctx = new DeadlineCtx(1.second)
    import ctx._

    Time.withCurrentTimeFrozen { tc =>
      assert(Await.result(timeoutService((): Unit)) == Some(Deadline(Time.now, Time.now+1.second)))

      // Adjust existing ones.
      val f = Contexts.broadcast.let(Deadline, Deadline(Time.now-1.second, Time.now+200.milliseconds)) {
        timeoutService((): Unit)
      }
      assert(Await.result(f) == Some(Deadline(Time.now, Time.now+200.milliseconds)))
    }
  }

  test("deadlines, infinite timeout") {
    val ctx = new DeadlineCtx(Duration.Top)
    import ctx._

    Time.withCurrentTimeFrozen { tc =>
      assert(Await.result(timeoutService((): Unit)) == Some(Deadline(Time.now, Time.Top)))

      // Adjust existing ones
      val f = Contexts.broadcast.let(Deadline, Deadline(Time.now-1.second, Time.now+1.second)) {
        timeoutService((): Unit)
      }
      assert(Await.result(f) == Some(Deadline(Time.now, Time.now+1.second)))
    }
  }

  test("bug verification: TimeoutFilter incorrectly sends expired deadlines") {
    val ctx = new DeadlineCtx(1.second)
    import ctx._

    Time.withCurrentTimeFrozen { tc =>
      val now = Time.now
      val f = Contexts.broadcast.let(Deadline, Deadline(now, now+1.second)) {
        tc.advance(5.seconds)
        timeoutService((): Unit)
      }
      assert(Await.result(f) == Some(Deadline(now + 5.seconds, now + 1.second)))
    }
  }

  /**
   * Verifies that the given timeout module only composes a TimeoutFilter into
   * the stack when the configured duration/Tunable is finite and positive.
   */
  private def verifyFilterAddedOrNot(
    timeoutModule: Stackable[ServiceFactory[Int, Int]]
  ): Unit = {
    val svc = Service.mk { i: Int => Future.value(i) }
    val svcFactory = ServiceFactory.const(svc)
    val stack = timeoutModule.toStack(Stack.Leaf(Stack.Role("test"), svcFactory))

    def assertNoTimeoutFilter(duration: Duration): Unit = {
      val params = Stack.Params.empty + TimeoutFilter.Param(duration)
      val made = stack.make(params)
      // this relies on the fact that we do not compose
      // with a TimeoutFilter if the duration is not appropriate.
      assert(svcFactory eq made)
    }
    assertNoTimeoutFilter(Duration.Bottom)
    assertNoTimeoutFilter(Duration.Top)
    assertNoTimeoutFilter(Duration.Undefined)
    assertNoTimeoutFilter(Duration.Zero)
    assertNoTimeoutFilter(-1.second)

    def assertNoTimeoutFilterTunable(tunable: Tunable[Duration]): Unit = {
      val params = Stack.Params.empty + TimeoutFilter.Param(tunable)
      val made = stack.make(params)
      assert(svcFactory eq made)
    }
    assertNoTimeoutFilterTunable(Tunable.const("id", Duration.Bottom))
    assertNoTimeoutFilterTunable(Tunable.const("id", Duration.Top))
    assertNoTimeoutFilterTunable(Tunable.const("id", Duration.Undefined))
    assertNoTimeoutFilterTunable(Tunable.const("id", Duration.Zero))
    assertNoTimeoutFilterTunable(Tunable.const("id", -1.second))

    def assertTimeoutFilter(duration: Duration): Unit = {
      val params = Stack.Params.empty + TimeoutFilter.Param(duration)
      val made = stack.make(params)
      // this relies on the fact that we do compose
      // with a TimeoutFilter if the duration is appropriate.
      assert(svcFactory ne made)
    }
    assertTimeoutFilter(10.seconds)

    def assertTimeoutFilterTunable(tunable: Tunable[Duration]): Unit = {
      val params = Stack.Params.empty + TimeoutFilter.Param(tunable)
      val made = stack.make(params)
      assert(svcFactory ne made)
    }
    // mutable Tunables always install the filter, since their value can change
    assertTimeoutFilterTunable(Tunable.const("id", 10.seconds))
    assertTimeoutFilterTunable(Tunable.emptyMutable[Duration]("undefined"))
    assertTimeoutFilterTunable(Tunable.mutable[Duration]("id", 10.seconds))
    assertTimeoutFilterTunable(Tunable.mutable[Duration]("id", Duration.Top))
  }

  test("filter added or not to clientModule based on duration") {
    verifyFilterAddedOrNot(TimeoutFilter.clientModule[Int, Int])
  }

  test("filter added or not to serverModule based on duration") {
    verifyFilterAddedOrNot(TimeoutFilter.serverModule[Int, Int])
  }

  /** Drives a 5-second timeout scenario through a TypeAgnostic TimeoutFilter. */
  def testTypeAgnostic(filter: TypeAgnostic, timer: MockTimer): Unit = {
    val h = new TimeoutFilterHelper()
    val svc = filter.andThen(h.service)

    Time.withCurrentTimeFrozen { tc =>
      val res = svc("hello")
      assert(!res.isDefined)

      // not yet at the timeout
      tc.advance(4.seconds)
      timer.tick()
      assert(!res.isDefined)

      // go past the timeout
      tc.advance(2.seconds)
      timer.tick()
      intercept[IndividualRequestTimeoutException] {
        Await.result(res, 1.second)
      }
    }
  }

  test("typeAgnostic using timeout") {
    val timer = new MockTimer()
    val timeout = 5.seconds
    val filter = TimeoutFilter.typeAgnostic(
      timeout,
      new IndividualRequestTimeoutException(timeout),
      timer)
    testTypeAgnostic(filter, timer)
  }

  test("typeAgnostic using tunable timeout") {
    val timer = new MockTimer()
    val timeoutTunable = Tunable.const("myTimeout", 5.seconds)
    val filter = TimeoutFilter.typeAgnostic(
      timeoutTunable,
      timeout => new IndividualRequestTimeoutException(timeout),
      timer)
    testTypeAgnostic(filter, timer)
  }

  test("variable timeouts") {
    val timer = new MockTimer()
    val atomicTimeout = new AtomicReference[Duration](Duration.Top)
    val filter = new TimeoutFilter[String, String](
      () => atomicTimeout.get,
      timeout => new IndividualRequestTimeoutException(timeout),
      timer)

    val h = new TimeoutFilterHelper()
    val svc = filter.andThen(h.service)

    Time.withCurrentTimeFrozen { tc =>
      atomicTimeout.set(5.seconds)
      val res = svc("hello")
      assert(!res.isDefined)

      // not yet at the timeout
      tc.advance(4.seconds)
      timer.tick()
      assert(!res.isDefined)

      // go past the timeout
      tc.advance(2.seconds)
      timer.tick()
      val ex = intercept[IndividualRequestTimeoutException] {
        Await.result(res, 1.second)
      }
      ex.getMessage should include(atomicTimeout.get.toString)

      // change the timeout
      atomicTimeout.set(3.seconds)
      val res2 = svc("hello")
      assert(!res2.isDefined)

      // this time, 4 seconds pushes us past
      tc.advance(4.seconds)
      timer.tick()
      val ex2 = intercept[IndividualRequestTimeoutException] {
        Await.result(res2, 1.second)
      }
      ex2.getMessage should include(atomicTimeout.get.toString)
    }
  }

  test("variable timeout when timeout is not finite") {
    val timer = new MockTimer()
    val atomicTimeout = new AtomicReference[Duration]()
    val filter = new TimeoutFilter[String, String](
      () => atomicTimeout.get,
      timeout => new IndividualRequestTimeoutException(timeout),
      timer)

    val h = new TimeoutFilterHelper()
    val svc = filter.andThen(h.service)

    Time.withCurrentTimeFrozen { tc =>
      atomicTimeout.set(Duration.Undefined)
      val res = svc("hello")
      assert(!res.isDefined)
      tc.advance(200.seconds)
      timer.tick()
      assert(!res.isDefined)
    }
  }

  test("Tunable timeout: No timeout if Tunable not set") {
    val h = new TunableTimeoutHelper
    import h._

    Time.withCurrentTimeFrozen { tc =>
      // No timeout because Tunable uses Duration.Top if applying it produces `None`
      val res = service("hello")
      assert(!res.isDefined)
      tc.advance(1.hour)
      timer.tick()
      intercept[com.twitter.util.TimeoutException] {
        Await.result(res, 1.second)
      }
    }
  }

  test("Tunable timeouts: Timeout if tunable is set") {
    val h = new TunableTimeoutHelper
    import h._

    Time.withCurrentTimeFrozen { tc =>
      // set a timeout
      tunable.set(5.seconds)
      val res = service("hello")
      assert(!res.isDefined)

      // not yet at the timeout
      tc.advance(4.seconds)
      timer.tick()
      assert(!res.isDefined)

      // go past the timeout
      tc.advance(2.seconds)
      timer.tick()
      val ex = intercept[IndividualRequestTimeoutException] {
        Await.result(res, 1.second)
      }
      ex.getMessage should include(tunable().get.toString)
    }
  }

  test("Tunable timeouts: No timeout if Tunable cleared") {
    val h = new TunableTimeoutHelper
    import h._

    Time.withCurrentTimeFrozen { tc =>
      // set the timeout
      tunable.set(3.seconds)
      val res = service("hello")
      assert(!res.isDefined)

      // 4 seconds pushes us past
      tc.advance(4.seconds)
      timer.tick()
      intercept[IndividualRequestTimeoutException] {
        Await.result(res, 1.second)
      }

      // clear the tunable; should use Duration.Top again
      tunable.clear()
      val res2 = service("hello")
      assert(!res2.isDefined)
      tc.advance(1.hour)
      timer.tick()
      intercept[com.twitter.util.TimeoutException] {
        Await.result(res2, 1.second)
      }
    }
  }
}
| koshelev/finagle | finagle-core/src/test/scala/com/twitter/finagle/service/TimeoutFilterTest.scala | Scala | apache-2.0 | 11,932 |
package scala.meta
package transversers
private[meta] trait Api {

  /** Tree-walking conveniences pimped onto any [[Tree]]. */
  implicit class XtensionCollectionLikeUI(tree: Tree) {

    /**
     * Returns a copy of the tree where every node for which `fn` is defined is
     * replaced by `fn`'s result before recursing into its children.
     */
    def transform(fn: PartialFunction[Tree, Tree]): Tree = {
      object transformer extends Transformer {
        override def apply(tree: Tree): Tree = {
          // applyOrElse evaluates the partial function's pattern once per node
          // instead of twice (isDefinedAt + apply).
          super.apply(fn.applyOrElse(tree, identity[Tree]))
        }
      }
      transformer(tree)
    }

    /** Applies `fn` to every node for which it is defined, visiting the whole tree. */
    def traverse(fn: PartialFunction[Tree, Unit]): Unit = {
      object traverser extends Traverser {
        override def apply(tree: Tree): Unit = {
          // single pattern evaluation per node; no-op when fn is undefined
          fn.applyOrElse(tree, (_: Tree) => ())
          super.apply(tree)
        }
      }
      traverser(tree)
    }

    /** Collects `fn`'s results for every node where it is defined, in traversal order. */
    def collect[T](fn: PartialFunction[Tree, T]): List[T] = {
      val buf = scala.collection.mutable.ListBuffer[T]()
      object traverser extends Traverser {
        override def apply(tree: Tree): Unit = {
          // runWith evaluates the pattern once and feeds matches to the buffer
          fn.runWith(buf += _)(tree)
          super.apply(tree)
        }
      }
      traverser(tree)
      buf.toList
    }
  }
}
/** Re-exports the transverser types into the `scala.meta` package namespace. */
private[meta] trait Aliases {
  type Transformer = scala.meta.transversers.Transformer
  // there's no term Transformer, so we don't have a term alias here
  type Traverser = scala.meta.transversers.Traverser
  // there's no term Traverser, so we don't have a term alias here
}
| Dveim/scalameta | scalameta/transversers/src/main/scala/scala/meta/transversers/Api.scala | Scala | bsd-3-clause | 1,332 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters the dataset down to code snippets matching specific criteria and returns a sample of them, giving a quick overview of the dataset's contents without deeper analysis.