code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package shapeless
import scala.language.experimental.macros
import scala.reflect.macros.whitebox
/**
* Wraps a cached implicit `T`.
*
* Looking for an implicit `Cached[T]` first triggers a look for an implicit `T`, caches the resulting
* tree, and returns it immediately and in subsequent look ups for an implicit `Cached[T]`. Thus,
* subsequent look ups do not trigger looking for an implicit `T`, only returning the instance kept in
* cache.
*
* Beware that if the contexts in which two subsequent look ups are different, so that looking for a
* `T` in each of them doesn't return the same result, this change would be ignored by caching. Looking
* for a `Cached[T]` in the first context would put the implicit `T` of this context in cache, and then
* looking for a `Cached[T]` in the second context would return the former instance from the first
* context. E.g.
*
* {{{
* trait TC[T] {
* def msg: String
* }
*
* object First {
* implicit val tc: TC[Int] = new TC[Int] {
* val msg = "first"
* }
*
* def print() = println(implicitly[TC[Int]].msg)
* def printCached() = println(cached[TC[Int]].msg)
* }
*
* object Second {
* implicit val tc: TC[Int] = new TC[Int] {
* val msg = "second"
* }
*
* def print() = println(implicitly[TC[Int]].msg)
* def printCached() = println(cached[TC[Int]].msg)
* }
*
* First.print()
* Second.print()
* First.printCached()
* Second.printCached()
* }}}
*
* would print "first" then "second" (non cached `TC[Int]` instances), then "first" twice (first instance, returned
* the second time too through the cache).
*
* @author Alexandre Archambault
*/
/** Value-class wrapper around the cached implicit instance (see scaladoc above). */
case class Cached[+T](value: T) extends AnyVal
object Cached {
  // Whitebox macro entry point: the first expansion infers an implicit `I`,
  // stores its tree in CachedMacros.cache, and later expansions replay that tree.
  implicit def materialize[I]: Cached[I] = macro CachedMacros.materializeCached[I]
  // Summons the cached instance directly (intentionally shadows Predef.implicitly).
  def implicitly[T](implicit cached: Cached[T]): T = cached.value
}
object CachedMacros {
  // Compiler-run-wide mutable state shared by all macro expansions:
  // `deriving` guards against re-entrant caching while a Cached derivation is
  // in flight; `cache` maps a Type to its untypechecked implicit tree.
  // Typed (Any, Any) so this object does not commit to a particular universe.
  var deriving = false
  var cache = List.empty[(Any, Any)]
}
@macrocompat.bundle
class CachedMacros(override val c: whitebox.Context) extends LazyMacros(c) with OpenImplicitMacros {
  import c.universe._

  /** Deep structural copy of `t`, so a cached tree can be spliced into a new
   *  expansion context without sharing (mutable) tree nodes with earlier uses.
   */
  def deepCopyTree(t: Tree): Tree = {
    val treeDuplicator = new Transformer {
      // by default Transformers don't copy trees which haven't been modified,
      // so we need to use the strictTreeCopier
      override val treeCopy =
        c.asInstanceOf[reflect.macros.runtime.Context].global.newStrictTreeCopier.asInstanceOf[TreeCopier]
    }
    treeDuplicator.transform(t)
  }

  /** Materializes `Cached[T]`: resolves the implicit `T`, caches its
   *  (untypechecked) tree on first use and replays a deep copy afterwards.
   *  Caching is skipped inside a Lazy/Strict derivation or a nested Cached.
   */
  def materializeCached[T: WeakTypeTag]: Tree = {
    // Getting the actual type parameter T, using the same trick as Lazy/Strict
    val tpe = openImplicitTpeParam.getOrElse(weakTypeOf[T])
    val concurrentLazy = !CachedMacros.deriving && LazyMacros.dcRef(this).nonEmpty
    // Ensuring we are not caching parts of trees derived during a Lazy/Strict lookup
    // (but caching the full tree of a Lazy/Strict is fine), as these can reference values
    // (other entries of the Lazy/Strict derivation) that should not be accessible if
    // re-using the tree in other contexts, after caching.
    if (concurrentLazy)
      c.warning(c.enclosingPosition,
        s"Cached[$tpe] called from a Lazy/Strict, you might want to consider caching " +
        "an implicit earlier, so that the whole Lazy/Strict itself gets cached. Caching " +
        "is disabled here."
      )
    if (CachedMacros.deriving || concurrentLazy) {
      // Caching only the first (root) Cached, not subsequent ones as here
      val tree0 = c.inferImplicitValue(tpe)
      if (tree0 == EmptyTree)
        c.abort(c.enclosingPosition, s"Implicit $tpe not found")
      q"_root_.shapeless.Cached($tree0)"
    } else {
      CachedMacros.deriving = true
      try {
        // Replay a previously cached tree for an equivalent (=:=) type, if any.
        val treeOpt = CachedMacros.cache.asInstanceOf[List[(Type, Tree)]].collectFirst {
          case (eTpe, eTree) if eTpe =:= tpe => eTree
        }
        deepCopyTree(treeOpt.getOrElse {
          // Cached instances are derived like Lazy or Strict instances.
          // Trying to derive them in a standalone way raised
          // https://github.com/fommil/spray-json-shapeless/issues/14.
          val tree0 = mkImpl[T](
            (tree, actualType) => q"_root_.shapeless.Cached[$actualType]($tree)",
            q"null.asInstanceOf[_root_.shapeless.Cached[_root_.scala.Nothing]]"
          )
          // Untypecheck before caching so the tree can be re-typechecked later
          // in a different context.
          val tree = c.untypecheck(tree0)
          CachedMacros.cache = (tpe -> tree) :: CachedMacros.cache
          tree
        })
      } finally {
        CachedMacros.deriving = false
      }
    }
  }
}
| rorygraves/perf_tester | corpus/shapeless/src/main/scala/shapeless/cached.scala | Scala | apache-2.0 | 4,600 |
package foo
// Negative (compile-error) test fixture: the `// error` marker below is checked
// by the test harness and must stay on its line.
trait Foo { def g(x: Any): Any }
// Matching `???` against the abstract type T inside an inline given is expected
// to be rejected by the compiler.
inline given f[T <: Foo]: T = ??? match {
  case x: T => x.g(10) // error
}
@main def Test = f
| lampepfl/dotty | tests/neg/i7294-b.scala | Scala | apache-2.0 | 143 |
package im.actor.server.push
import scala.concurrent.ExecutionContext
import akka.actor.ActorSystem
import com.relayrides.pushy.apns.util.{ ApnsPayloadBuilder, SimpleApnsPushNotification }
import slick.driver.PostgresDriver.api._
import im.actor.api.rpc.peers.{ Peer, PeerType }
import im.actor.server.{ models, persist }
private[push] class ApplePusher(pushManager: ApplePushManager, db: Database)(implicit system: ActorSystem) extends VendorPush {
private implicit val ec: ExecutionContext = system.dispatcher
  /**
   * Builds an APNs payload for the given credentials and enqueues it for delivery.
   * Honours the user's per-chat / sound / vibration / text-visibility settings,
   * read through Slick DBIOs. Fire-and-forget: returns Unit.
   */
  def deliverApplePush(creds: models.push.ApplePushCredentials, authId: Long, seq: Int, textOpt: Option[String], originPeerOpt: Option[Peer]): Unit = {
    // Root key of the per-user notification preference entries queried below.
    val paramBase = "category.mobile.notification"
    system.log.debug("Delivering apple push, authId: {}, seq: {}, text: {}, originPeer: {}", authId, seq, textOpt, originPeerOpt)
    val builder = new ApnsPayloadBuilder
    // DBIO that mutates `builder` according to the user's settings.
    // Every branch yields the same `builder` instance.
    val action = (textOpt, originPeerOpt) match {
      case (Some(text), Some(originPeer)) ⇒
        persist.AuthId.findUserId(authId) flatMap {
          case Some(userId) ⇒
            // Per-chat settings key suffix, e.g. PRIVATE_42 or GROUP_7.
            val peerStr = originPeer.`type` match {
              case PeerType.Private ⇒ s"PRIVATE_${originPeer.id}"
              case PeerType.Group   ⇒ s"GROUP_${originPeer.id}"
            }
            system.log.debug(s"Loading params ${paramBase}")
            persist.configs.Parameter.findValue(userId, s"${paramBase}.chat.${peerStr}.enabled") flatMap {
              case Some("false") ⇒
                // Notifications explicitly disabled for this chat: deliver a bare payload.
                system.log.debug("Notifications disabled")
                DBIO.successful(builder)
              case _ ⇒
                system.log.debug("Notifications enabled")
                for {
                  soundEnabled ← persist.configs.Parameter.findValue(userId, s"${paramBase}.sound.enabled") map (_.getOrElse("true"))
                  vibrationEnabled ← persist.configs.Parameter.findValue(userId, s"${paramBase}.vibration.enabled") map (_.getOrElse("true"))
                  showText ← getShowText(userId, paramBase)
                } yield {
                  // Sound takes precedence; "silence.caf" triggers vibration only.
                  if (soundEnabled == "true") {
                    system.log.debug("Sound enabled")
                    builder.setSoundFileName("iapetus.caf")
                  } else if (vibrationEnabled == "true") {
                    system.log.debug("Sound disabled, vibration enabled")
                    builder.setSoundFileName("silence.caf")
                  }
                  if (showText) {
                    system.log.debug("Text enabled")
                    builder.setAlertBody(text)
                  }
                  builder
                }
            }
          case None ⇒ DBIO.successful(builder) // TODO: fail?
        }
      case (Some(text), None) ⇒
        // No origin peer: show the text unconditionally.
        builder.setAlertBody(text)
        DBIO.successful(builder)
      case _ ⇒ DBIO.successful(builder)
    }
    db.run(action) foreach { b ⇒
      // NOTE(review): `b` is the same `builder` instance returned by every branch,
      // yet the outer `builder` is referenced here — works, but using `b` would be clearer.
      // NOTE(review): a failed Future from db.run is silently dropped (no recover/log).
      builder.addCustomProperty("seq", seq)
      builder.setContentAvailable(true)
      val payload = builder.buildWithDefaultMaximumLength()
      pushManager.getInstance(creds.apnsKey) foreach { mgr ⇒
        mgr.getQueue.put(new SimpleApnsPushNotification(creds.token, payload))
      }
    }
  }
} | suxinde2009/actor-platform | actor-server/actor-push/src/main/scala/im/actor/server/push/ApplePusher.scala | Scala | mit | 3,213 |
object IntelliJBug {
  // IDE type-inference test fixture: the /*start*/.../*end*/ markers delimit the
  // expression whose inferred type the test checks (expected result is in the
  // trailing comment of this file). Markers and code must stay as-is.
  def trav[T[_] <: Traversable[_], A](t : T[A]): T[A] = exit()
  val m : Map[Int, Int] = null
  /*start*/trav(m)/*end*/
}
//Iterable[(Int, Int)] | LPTK/intellij-scala | testdata/typeInference/bugs5/SCL4150D.scala | Scala | apache-2.0 | 166 |
/*
* Copyright (c) 2014 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.iglu.client
package repositories
// Java
import java.io.FileNotFoundException
import java.net.{
URL,
UnknownHostException,
MalformedURLException
}
// Apache Commons
import org.apache.commons.lang3.exception.ExceptionUtils
// Jackson
import com.fasterxml.jackson.core.JsonParseException
import com.fasterxml.jackson.databind.JsonNode
// Json Schema
import com.github.fge.jackson.JsonNodeReader
// Scala
import scala.util.control.NonFatal
// Scalaz
import scalaz._
import Scalaz._
// json4s
import org.json4s._
import org.json4s.jackson.JsonMethods._
// This project
import validation.ProcessingMessageMethods
import ProcessingMessageMethods._
import utils.{ValidationExceptions => VE}
/**
* Helpers for constructing an HttpRepository.
* See below for the definition.
*/
object HttpRepositoryRef {

  // json4s formats used by `extract[HttpConnection]` below.
  implicit val formats = DefaultFormats

  // Shared JSON reader; lazy so it is only built on first fetch.
  private lazy val reader = new JsonNodeReader()

  /**
   * Helper class to extract HTTP URI and api key from config JSON
   */
  private[client] case class HttpConnection(uri: String, apikey: Option[String])

  /**
   * Read a JsonNode from an URL using optional apikey
   * This method is just a copy of [[com.github.fge.jackson.JsonLoader.fromURL]]
   * with added optional header, so it is unsafe as well and throws same exceptions
   *
   * NOTE(review): the connection's input stream is not explicitly closed here —
   * confirm that JsonNodeReader.fromInputStream closes it.
   *
   * @param url the URL to fetch the JSON document from
   * @param apikey optional apikey UUID to authenticate in Iglu HTTP repo
   * @return The document at that URL
   */
  private def getFromUrl(url: URL, apikey: Option[String]): JsonNode = {
    val connection = url.openConnection()
    apikey match {
      case Some(key) => connection.setRequestProperty("apikey", key)
      case None => ()
    }
    reader.fromInputStream(connection.getInputStream)
  }

  /**
   * Sniffs a config JSON to determine if this is
   * an HTTP-based repository ref or not.
   *
   * NOTE(review): `\\` is json4s' recursive lookup, so "connection" and "http"
   * keys are found at any depth — confirm this is intended vs. a `\` path.
   *
   * @param config The configuration JSON to sniff
   * @return true if this is the configuration for
   *         an HttpRepositoryRef, else false
   */
  def isHttp(config: JValue): Boolean =
    (config \\ "connection" \\ "http").toSome.isDefined

  /**
   * Constructs an HttpRepositoryRef from a JsonNode
   * (delegates to the JValue overload).
   *
   * @param config The JSON containing the configuration
   *        for this repository reference
   * @return a configured reference to this HTTP repository
   */
  def parse(config: JsonNode): ValidatedNel[HttpRepositoryRef] =
    parse(fromJsonNode(config))

  /**
   * Constructs an HttpRepositoryRef from a JValue, accumulating
   * errors from both the generic ref config and the HTTP connection details.
   *
   * @param config The JSON containing the configuration
   *        for this repository reference
   * @return a configured reference to this HTTP repository
   */
  def parse(config: JValue): ValidatedNel[HttpRepositoryRef] = {
    val conf = RepositoryRefConfig.parse(config)
    val http = extractUrl(config)
    (conf |@| http.toValidationNel) { (c, h) => HttpRepositoryRef(c, h.uri, h.apikey) }
  }

  /**
   * Extracts the HTTP connection details (uri plus optional apikey)
   * from the repository configuration.
   *
   * NOTE(review): only MappingException is caught; any other Throwable from
   * extraction propagates to the caller.
   *
   * @param config The JSON containing the configuration
   *        for this repository reference
   * @return the connection details on Success, or a
   *         ProcessingMessage on Failure
   */
  private def extractUrl(config: JValue): Validated[HttpConnection] =
    try {
      (config \\ "connection" \\ "http").extract[HttpConnection].success
    } catch {
      case me: MappingException => s"Could not extract connection.http from ${compact(render(config))}".fail.toProcessingMessage
    }

  /**
   * A wrapper around Java's URL.
   *
   * Exceptions thrown by
   * URI.create():
   * 1. NullPointerException
   *    if uri is null
   * 2. IllegalArgumentException
   *    if uri violates RFC 2396
   *
   * @param url The String to
   *        convert to a URL
   * @return a URL object, or an
   *         error message, all
   *         wrapped in a Validation
   */
  private def stringToUrl(url: String): Validated[URL] =
    (try {
      (new URL(url)).success
    } catch {
      case npe: NullPointerException => "Provided URL was null".fail
      case mue: MalformedURLException => "Provided URL string [%s] is malformed: [%s]".format(url, mue.getMessage).fail
      case iae: IllegalArgumentException => "Provided URL string [%s] violates RFC 2396: [%s]".format(url, ExceptionUtils.getRootCause(iae).getMessage).fail
      case e: Throwable => "Unexpected error creating URL from string [%s]: [%s]".format(url, e.getMessage).fail
    }).toProcessingMessage
}
/**
 * An HTTP repository is one which is accessible over
 * HTTP. Lookups are network calls performed synchronously.
 */
case class HttpRepositoryRef(
  override val config: RepositoryRefConfig,
  uri: String, apikey: Option[String] = None) extends RepositoryRef {

  /**
   * De-prioritize searching this class of repository because
   * it is high cost.
   */
  override val classPriority: Int = 100

  /**
   * Human-readable descriptor for this
   * type of repository ref.
   */
  val descriptor = "HTTP"

  /**
   * Retrieves an IgluSchema from the Iglu Repo as
   * a JsonNode.
   *
   * @param schemaKey The SchemaKey uniquely identifies
   *        the schema in Iglu
   * @return a Validation boxing either the Schema's
   *         JsonNode on Success, or an error String
   *         on Failure
   */
  // TODO: this is only intermittently working when there is a network outage (e.g. running test suite on Tube)
  def lookupSchema(schemaKey: SchemaKey): Validated[Option[JsonNode]] = {
    try {
      for {
        url <- HttpRepositoryRef.stringToUrl(s"$uri/schemas/${schemaKey.toPath}")
        // The `=` binding is evaluated eagerly inside the Validation's map, so
        // exceptions thrown by the HTTP fetch reach the surrounding catch below.
        sch = HttpRepositoryRef.getFromUrl(url, apikey).some
      } yield sch
    } catch {
      // The most common failure case: the schema is not found in the repo
      case fnf: FileNotFoundException => None.success
      case jpe: JsonParseException =>
        s"Problem parsing ${schemaKey} as JSON in ${descriptor} Iglu repository ${config.name}: %s".format(VE.stripInstanceEtc(jpe.getMessage)).fail.toProcessingMessage
      case uhe: UnknownHostException =>
        s"Unknown host issue fetching ${schemaKey} in ${descriptor} Iglu repository ${config.name}: ${uhe.getMessage}".fail.toProcessingMessage
      case NonFatal(nfe) =>
        s"Unexpected exception fetching $schemaKey in ${descriptor} Iglu repository ${config.name}: $nfe".fail.toProcessingMessage
    }
  }
}
| jramos/iglu-scala-client | src/main/scala/com.snowplowanalytics.iglu/client/repositories/HttpRepositoryRef.scala | Scala | apache-2.0 | 7,099 |
package com.ovoenergy.comms.composer
import com.ovoenergy.comms.model.{Arbitraries => CoreArbitraries}
import org.scalacheck.rng.Seed
import org.scalacheck.{Arbitrary, Gen}
import org.scalacheck.Arbitrary._
import org.scalacheck.Gen._
import org.http4s.Uri
import model._
object Arbitraries extends CoreArbitraries {
  // Uri built from a UUID path segment, so it is always parseable.
  implicit val arbHttp4sUri: Arbitrary[org.http4s.Uri] = Arbitrary(
    for {
      uuid <- Gen.uuid
    } yield Uri.unsafeFromString(s"/${uuid.toString}")
  )
  // Non-empty opaque identifier.
  implicit val arbTemplateFragmentId: Arbitrary[TemplateFragmentId] = Arbitrary(
    for {
      string <- genNonEmptyString
    } yield TemplateFragmentId(string)
  )
  // TODO: Maybe we need to generate a valid template here ?
  implicit val arbTemplateFragment: Arbitrary[TemplateFragment] = Arbitrary(
    for {
      string <- genNonEmptyString
    } yield TemplateFragment(string)
  )
  implicit val arbRenderedFragment: Arbitrary[RenderedFragment] = Arbitrary(
    for {
      string <- genNonEmptyString
    } yield RenderedFragment(string)
  )
  // Arbitrary byte payload standing in for rendered PDF content.
  implicit val arbRenderedPdfFragment: Arbitrary[RenderedPdfFragment] = Arbitrary(
    for {
      bytes <- arbitrary[Array[Byte]]
    } yield RenderedPdfFragment(bytes)
  )
  // Uniform choice over every known fragment type.
  implicit val arbTemplateFragmentType: Arbitrary[TemplateFragmentType] = Arbitrary(
    oneOf(
      TemplateFragmentType.Email.Sender,
      TemplateFragmentType.Email.Subject,
      TemplateFragmentType.Email.HtmlBody,
      TemplateFragmentType.Email.TextBody,
      TemplateFragmentType.Sms.Body,
      TemplateFragmentType.Print.Body,
    )
  )
def generate[A: Arbitrary]: A = {
implicitly[Arbitrary[A]].arbitrary.apply(Gen.Parameters.default.withSize(3), Seed.random()).get
}
}
| ovotech/comms-composer | src/test/scala/com/ovoenergy/comms/composer/Arbitraries.scala | Scala | mit | 1,707 |
package com.github.diegopacheco.sandbox.scala.twitter.finagle.fun
import com.twitter.finagle.{Httpx, Service}
import com.twitter.finagle.httpx
import com.twitter.util.{Await, Future}
object Client extends App {
  // Finagle HTTP client bound to scala-lang.org:80; `newService` returns a
  // Service that can be applied to requests like a function.
  val client: Service[httpx.Request, httpx.Response] = Httpx.newService("www.scala-lang.org:80")
  val request = httpx.Request(httpx.Method.Get, "/")
  // Host header is required for HTTP/1.1 virtual hosting.
  request.host = "www.scala-lang.org"
  val response: Future[httpx.Response] = client(request)
  // Print on success; failures are not handled in this demo.
  response.onSuccess { resp: httpx.Response =>
    println("GET success: " + resp)
  }
  // Block until the response arrives — acceptable at the edge of a demo app.
  Await.ready(response)
} | diegopacheco/scala-playground | twitter-finagle-playground-fun/src/main/scala/com/github/diegopacheco/sandbox/scala/twitter/finagle/fun/Client.scala | Scala | unlicense | 570 |
package org.bitcoins.core.protocol.ln
import org.bitcoins.core.crypto.ECPrivateKey
import org.bitcoins.testkit.core.gen.CryptoGenerators
import org.bitcoins.testkit.core.gen.ln.LnInvoiceGen
import org.bitcoins.testkit.util.BitcoinSUnitTest
/** Property tests for LnInvoiceSignature: serialization round-trips and
 *  signature generation/verification over generated invoices.
 */
class LnInvoiceSignatureTest extends BitcoinSUnitTest {
  implicit override val generatorDrivenConfig: PropertyCheckConfiguration =
    generatorDrivenConfigNewCode
  behavior of "LnInvoiceSignature"
  it must "have serialization symmetry for raw r,s,recovId" in {
    // Build from raw (r, s, recovery id), serialize to hex, parse back, and
    // check all three components survive the round trip.
    forAll(CryptoGenerators.digitalSignature, LnInvoiceGen.signatureVersion) {
      case (ecSig, recovId) =>
        val lnSig = LnInvoiceSignature.fromRS(r = ecSig.r.bigInteger,
                                              s = ecSig.s.bigInteger,
                                              recovId = recovId)
        val serialized = lnSig.hex
        val deserialized = LnInvoiceSignature.fromHex(serialized)
        assert(deserialized.signature.r == ecSig.r)
        assert(deserialized.signature.s == ecSig.s)
        assert(deserialized.recoverId == recovId)
    }
  }
  it must "have serialization symmetry" in {
    // Whole-value hex round trip.
    forAll(LnInvoiceGen.lnInvoiceSignature) {
      case sig =>
        assert(LnInvoiceSignature.fromHex(sig.hex) == sig)
    }
  }
  it must "be able to generate signatures, and then verify those signatures" in {
    // Sign the invoice sig-hash data with a fresh key, then verify against
    // the same data rebuilt independently.
    val gen = LnInvoiceGen
    forAll(gen.lnHrp, gen.taggedFields(None), gen.invoiceTimestamp) {
      case (hrp, tags, timestamp) =>
        val key = ECPrivateKey.freshPrivateKey
        val signature = LnInvoice.buildLnInvoiceSignature(
          hrp = hrp,
          timestamp = timestamp,
          lnTags = tags,
          privateKey = key
        )
        val hash = LnInvoice.buildSigHashData(
          hrp = hrp,
          timestamp = timestamp,
          lnTags = tags
        )
        assert(key.publicKey.verify(hash, signature.signature))
    }
  }
}
| bitcoin-s/bitcoin-s-core | core-test/src/test/scala/org/bitcoins/core/protocol/ln/LnInvoiceSignatureTest.scala | Scala | mit | 1,925 |
package edu.rice.habanero.benchmarks.banking
import edu.rice.habanero.actors.{JumiActor, JumiActorState, JumiPool}
import edu.rice.habanero.benchmarks.banking.BankingConfig._
import edu.rice.habanero.benchmarks.{Benchmark, BenchmarkRunner, PseudoRandom}
import scala.collection.mutable.ListBuffer
/**
*
* @author <a href="http://shams.web.rice.edu/">Shams Imam</a> (shams@rice.edu)
*/
object BankingJumiManualStashActorBenchmark {
  def main(args: Array[String]) {
    // Delegates to the shared harness which parses args and times iterations.
    BenchmarkRunner.runBenchmark(args, new BankingJumiManualStashActorBenchmark)
  }

  /** Harness adapter: one iteration spawns a Teller over A accounts issuing
   *  N transfers and waits for all actors to terminate.
   */
  private final class BankingJumiManualStashActorBenchmark extends Benchmark {
    def initialize(args: Array[String]) {
      BankingConfig.parseArgs(args)
    }
    def printArgInfo() {
      BankingConfig.printArgs()
    }
    def runIteration() {
      val master = new Teller(BankingConfig.A, BankingConfig.N)
      master.start()
      master.send(StartMessage.ONLY)
      // Blocks until every actor in the system has exited.
      JumiActorState.awaitTermination()
    }
    def cleanupIteration(lastIteration: Boolean, execTimeMillis: Double) {
      if (lastIteration) {
        JumiPool.shutdown()
      }
    }
  }
  /** Issues `numBankings` random transfers between `numAccounts` accounts and
   *  shuts everything down once every transfer has been acknowledged.
   */
  protected class Teller(numAccounts: Int, numBankings: Int) extends JumiActor[AnyRef] {
    private val self = this
    private val accounts = Array.tabulate[Account](numAccounts)((i) => {
      new Account(i, BankingConfig.INITIAL_BALANCE)
    })
    // Count of ReplyMessages received; completion condition below.
    private var numCompletedBankings = 0
    // Fixed seed keeps benchmark runs deterministic.
    private val randomGen = new PseudoRandom(123456)
    protected override def onPostStart() {
      accounts.foreach(loopAccount => loopAccount.start())
    }
    override def process(theMsg: AnyRef) {
      theMsg match {
        case sm: BankingConfig.StartMessage =>
          // Kick off all transfers up front; accounts serialize them via stashing.
          var m = 0
          while (m < numBankings) {
            generateWork()
            m += 1
          }
        case sm: BankingConfig.ReplyMessage =>
          numCompletedBankings += 1
          if (numCompletedBankings == numBankings) {
            // All transfers done: stop the accounts and ourselves.
            accounts.foreach(loopAccount => loopAccount.send(StopMessage.ONLY))
            exit()
          }
        case message =>
          val ex = new IllegalArgumentException("Unsupported message: " + message)
          ex.printStackTrace(System.err)
      }
    }
    def generateWork(): Unit = {
      // src is lower than dest id to ensure there is never a deadlock
      // (source drawn from the first ~80% of ids, destination strictly above it).
      val srcAccountId = randomGen.nextInt((accounts.length / 10) * 8)
      var loopId = randomGen.nextInt(accounts.length - srcAccountId)
      if (loopId == 0) {
        loopId += 1
      }
      val destAccountId = srcAccountId + loopId
      val srcAccount = accounts(srcAccountId)
      val destAccount = accounts(destAccountId)
      val amount = Math.abs(randomGen.nextDouble()) * 1000
      val sender = self
      val cm = new CreditMessage(sender, amount, destAccount)
      srcAccount.send(cm)
    }
  }
  /** Bank account actor implementing a manual stash: while a transfer is in
   *  flight (inReplyMode) all other messages are buffered and replayed one at
   *  a time by re-sending them to self.
   */
  protected class Account(id: Int, var balance: Double) extends JumiActor[AnyRef] {
    // True while waiting for the ReplyMessage of an outstanding transfer.
    private var inReplyMode = false
    // Teller to acknowledge once the in-flight transfer completes.
    private var replyTeller: JumiActor[AnyRef] = null
    // Manual mailbox stash, drained one message per turn.
    private val stashedMessages = new ListBuffer[AnyRef]()
    override def process(theMsg: AnyRef) {
      if (inReplyMode) {
        theMsg match {
          case _: ReplyMessage =>
            // Transfer finished: ack the teller and release one stashed message.
            inReplyMode = false
            replyTeller.send(ReplyMessage.ONLY)
            if (!stashedMessages.isEmpty) {
              val newMsg = stashedMessages.remove(0)
              this.send(newMsg)
            }
          case message =>
            message stashedMessages.append(message)
        }
      } else {
        // process the message
        theMsg match {
          case dm: DebitMessage =>
            // NOTE(review): debit adds and credit subtracts here — inverted w.r.t.
            // accounting convention, but matches the sender/recipient protocol below.
            balance += dm.amount
            val creditor = dm.sender.asInstanceOf[JumiActor[AnyRef]]
            creditor.send(ReplyMessage.ONLY)
          case cm: CreditMessage =>
            balance -= cm.amount
            replyTeller = cm.sender.asInstanceOf[JumiActor[AnyRef]]
            val sender = this
            val destAccount = cm.recipient.asInstanceOf[Account]
            destAccount.send(new DebitMessage(sender, cm.amount))
            inReplyMode = true
          case _: StopMessage =>
            exit()
          case message =>
            val ex = new IllegalArgumentException("Unsupported message: " + message)
            ex.printStackTrace(System.err)
        }
        // recycle stashed messages
        if (!inReplyMode && !stashedMessages.isEmpty) {
          val newMsg = stashedMessages.remove(0)
          this.send(newMsg)
        }
      }
    }
  }
}
| shamsmahmood/savina | src/main/scala/edu/rice/habanero/benchmarks/banking/BankingJumiManualStashActorBenchmark.scala | Scala | gpl-2.0 | 4,526 |
package TAPLcomp2.fullsub
import scala.text.Document
import scala.text.Document._
// outer means that the term is the top-level term
object FullSubPrinter {
import TAPLcomp2.Print._
  // Type pretty-printers follow the grammar's precedence levels:
  // Type -> ArrowType -> AType; parens are added when a type occurs at a
  // tighter position than its own level.
  def ptyType(outer: Boolean, ty: Ty): Document = ty match {
    case ty => ptyArrowType(outer, ty)
  }
  def ptyArrowType(outer: Boolean, tyT: Ty): Document = tyT match {
    // `->` is right-associative: the codomain stays at arrow level, the
    // domain drops to atomic level.
    case TyArr(tyT1, tyT2) =>
      g2(ptyAType(false, tyT1) :: " ->" :/: ptyArrowType(outer, tyT2))
    case tyT =>
      ptyAType(outer, tyT)
  }
  def ptyAType(outer: Boolean, tyT: Ty): Document = tyT match {
    case TyVar(x) =>
      x
    case TyBool =>
      "Bool"
    case TyString =>
      "String"
    case TyUnit =>
      "Unit"
    case TyTop =>
      "Top"
    case TyRecord(fields) =>
      // A field whose label equals its 1-based position prints tuple-style
      // (value only); otherwise as `label:Type`.
      def pf(i: Int, li: String, tyTi: Ty): Document =
        if (i.toString() == li) {
          ptyType(false, tyTi)
        } else {
          g0(li :: ":" :/: ptyType(false, tyTi))
        }
      g2("{" :: fields.zipWithIndex.map { case ((li, tyTi), i) => pf(i + 1, li, tyTi) }.reduceLeftOption(_ :: "," :/:
        _).getOrElse(empty) :: "}")
    case TyNat =>
      "Nat"
    case TyFloat =>
      "Float"
    case tyT =>
      // Non-atomic type in atomic position: parenthesize.
      "(" :: ptyType(outer, tyT) :: ")"
  }
def ptyTy(ty: Ty) = ptyType(true, ty)
  // Term pretty-printers, one per precedence level:
  // Term -> AppTerm -> PathTerm -> AscribeTerm -> ATerm.
  def ptmTerm(outer: Boolean, t: Term): Document = t match {
    case TmIf(t1, t2, t3) =>
      val ifB = g2("if" :/: ptmTerm(outer, t1))
      val thenB = g2("then" :/: ptmTerm(outer, t2))
      val elseB = g2("else" :/: ptmTerm(outer, t3))
      g0(ifB :/: thenB :/: elseB)
    case TmAbs(x, tyT1, t2) =>
      val abs = g0("lambda" :/: x :: ":" :/: ptyType(false, tyT1) :: ".")
      val body = ptmTerm(outer, t2)
      g2(abs :/: body)
    case TmLet(x, t1, t2) =>
      g0("let " :: x :: " = " :: ptmTerm(false, t1) :/: "in" :/: ptmTerm(false, t2))
    case TmFix(t1) =>
      g2("fix " :: ptmTerm(false, t1))
    case t => ptmAppTerm(outer, t)
  }
  def ptmAppTerm(outer: Boolean, t: Term): Document = t match {
    // Application is left-associative: function stays at app level,
    // argument drops to atomic level.
    case TmApp(t1, t2) =>
      g2(ptmAppTerm(false, t1) :/: ptmATerm(false, t2))
    case TmPred(t1) =>
      "pred " :: ptmATerm(false, t1)
    case TmIsZero(t1) =>
      "iszero " :: ptmATerm(false, t1)
    case t =>
      ptmPathTerm(outer, t)
  }
  def ptmPathTerm(outer: Boolean, t: Term): Document = t match {
    // Record projection, e.g. `r.l`.
    case TmProj(t1, l) =>
      ptmATerm(false, t1) :: "." :: l
    case t1 =>
      ptmAscribeTerm(outer, t1)
  }
  def ptmAscribeTerm(outer: Boolean, t: Term): Document = t match {
    // Type ascription, e.g. `t as T`.
    case TmAscribe(t1, tyT1) =>
      g0(ptmAppTerm(false, t1) :/: "as " :: ptyType(false, tyT1))
    case t1 =>
      ptmATerm(outer, t1)
  }
  /** Atomic terms; anything non-atomic reaching this level is parenthesized. */
  def ptmATerm(outer: Boolean, t: Term): Document = t match {
    case TmInert(tyT) =>
      "inert[" :: ptyType(false, tyT) :: "]"
    case TmTrue =>
      "true"
    case TmFalse =>
      "false"
    case TmVar(x) =>
      x
    case TmString(s) =>
      "\"" :: s :: "\""
    case TmUnit =>
      "unit"
    case TmRecord(fields) =>
      // Same tuple-vs-record convention as ptyAType: a field whose label equals
      // its 1-based position prints value-only.
      def pf(i: Int, li: String, t: Term): Document =
        if (i.toString() == li) {
          ptmTerm(false, t)
        } else {
          li :: "=" :: ptmTerm(false, t)
        }
      "{" :: fields.zipWithIndex.map { case ((li, tyTi), i) => pf(i + 1, li, tyTi) }.
        reduceLeftOption(_ :: "," :/: _).getOrElse(empty) :: "}"
    case TmZero =>
      "0"
    case TmSucc(t1) =>
      // Print a chain of succ applications ending in zero as a numeral;
      // otherwise fall back to explicit `(succ t)` form.
      def pf(i: Int, t: Term): Document = t match {
        case TmZero =>
          i.toString()
        case TmSucc(s) =>
          pf(i + 1, s)
        case _ =>
          "(succ " :: ptmATerm(false, t1) :: ")"
      }
      pf(1, t1)
    case t =>
      "(" :: ptmTerm(outer, t) :: ")"
  }
def ptm(t: Term) = ptmTerm(true, t)
} | hy-zhang/parser | Scala/Parser/src/TAPLcomp2/fullsub/syntax.scala | Scala | bsd-3-clause | 3,703 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.connector.datasource
import quasar.api.resource.ResourcePath
import quasar.connector.{Datasource, MonadResourceErr}
import quasar.qscript.InterpretedRead
/** A Datasource capable of returning the contents of resources.
  *
  * @tparam F effect in which resource errors can be raised (via MonadResourceErr)
  * @tparam G effect parameter forwarded to Datasource — role defined there
  * @tparam R representation of loaded resource contents
  */
abstract class LightweightDatasource[F[_]: MonadResourceErr, G[_], R]
  extends Datasource[F, G, InterpretedRead[ResourcePath], R]
| slamdata/slamengine | connector/src/main/scala/quasar/connector/datasource/LightweightDatasource.scala | Scala | apache-2.0 | 977 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ui.storage
import javax.servlet.http.HttpServletRequest
import scala.xml.Node
import org.apache.spark.storage._
import org.apache.spark.ui.{UIUtils, WebUIPage}
import org.apache.spark.util.Utils
/** Page showing list of RDD's currently stored in the cluster */
private[ui] class StoragePage(parent: StorageTab) extends WebUIPage("") {
private val listener = parent.listener
  /** Builds the Storage page: the persisted-RDD table plus receiver-block tables. */
  def render(request: HttpServletRequest): Seq[Node] = {
    val content = rddTable(listener.rddInfoList) ++
      receiverBlockTables(listener.allExecutorStreamBlockStatus.sortBy(_.executorId))
    UIUtils.headerSparkPage("Storage", content, parent)
  }

  /** Renders the "RDDs" table, or nothing when no RDD is persisted. */
  private[storage] def rddTable(rdds: Seq[RDDInfo]): Seq[Node] = {
    if (rdds.isEmpty) {
      // Don't show the rdd table if there is no RDD persisted.
      Nil
    } else {
      <div>
        <h4>RDDs</h4>
        {UIUtils.listingTable(rddHeader, rddRow, rdds, id = Some("storage-by-rdd-table"))}
      </div>
    }
  }

  /** Header fields for the RDD table */
  private val rddHeader = Seq(
    "RDD Name",
    "Storage Level",
    "Cached Partitions",
    "Fraction Cached",
    "Size in Memory",
    "Size in ExternalBlockStore",
    "Size on Disk")
  /** Render an HTML row representing an RDD.
   *  sorttable_customkey carries the raw byte counts so the table sorts
   *  numerically while displaying human-readable sizes.
   */
  private def rddRow(rdd: RDDInfo): Seq[Node] = {
    // scalastyle:off
    <tr>
      <td>
        <a href={"%s/storage/rdd?id=%s".format(UIUtils.prependBaseUri(parent.basePath), rdd.id)}>
          {rdd.name}
        </a>
      </td>
      <td>{rdd.storageLevel.description}
      </td>
      <td>{rdd.numCachedPartitions.toString}</td>
      <td>{"%.0f%%".format(rdd.numCachedPartitions * 100.0 / rdd.numPartitions)}</td>
      <td sorttable_customkey={rdd.memSize.toString}>{Utils.bytesToString(rdd.memSize)}</td>
      <td sorttable_customkey={rdd.externalBlockStoreSize.toString}>{Utils.bytesToString(rdd.externalBlockStoreSize)}</td>
      <td sorttable_customkey={rdd.diskSize.toString} >{Utils.bytesToString(rdd.diskSize)}</td>
    </tr>
    // scalastyle:on
  }

  /** Renders the receiver-block section (per-executor aggregates plus the
   *  per-block table), or nothing when no stream blocks exist anywhere.
   */
  private[storage] def receiverBlockTables(statuses: Seq[ExecutorStreamBlockStatus]): Seq[Node] = {
    if (statuses.map(_.numStreamBlocks).sum == 0) {
      // Don't show the tables if there is no stream block
      Nil
    } else {
      // Group replicas of the same block together, ordered by block id.
      val blocks = statuses.flatMap(_.blocks).groupBy(_.blockId).toSeq.sortBy(_._1.toString)
      <div>
        <h4>Receiver Blocks</h4>
        {executorMetricsTable(statuses)}
        {streamBlockTable(blocks)}
      </div>
    }
  }
  /** Table of per-executor aggregate stream-block metrics. */
  private def executorMetricsTable(statuses: Seq[ExecutorStreamBlockStatus]): Seq[Node] = {
    <div>
      <h5>Aggregated Block Metrics by Executor</h5>
      {UIUtils.listingTable(executorMetricsTableHeader, executorMetricsTableRow, statuses,
        id = Some("storage-by-executor-stream-blocks"))}
    </div>
  }

  /** Header fields for the per-executor aggregate table. */
  private val executorMetricsTableHeader = Seq(
    "Executor ID",
    "Address",
    "Total Size in Memory",
    "Total Size in ExternalBlockStore",
    "Total Size on Disk",
    "Stream Blocks")

  /** One row of the per-executor aggregate table; raw byte counts go in
   *  sorttable_customkey for numeric sorting.
   */
  private def executorMetricsTableRow(status: ExecutorStreamBlockStatus): Seq[Node] = {
    <tr>
      <td>
        {status.executorId}
      </td>
      <td>
        {status.location}
      </td>
      <td sorttable_customkey={status.totalMemSize.toString}>
        {Utils.bytesToString(status.totalMemSize)}
      </td>
      <td sorttable_customkey={status.totalExternalBlockStoreSize.toString}>
        {Utils.bytesToString(status.totalExternalBlockStoreSize)}
      </td>
      <td sorttable_customkey={status.totalDiskSize.toString}>
        {Utils.bytesToString(status.totalDiskSize)}
      </td>
      <td>
        {status.numStreamBlocks.toString}
      </td>
    </tr>
  }
  /** Table of individual stream blocks (one row group per block, one sub-row
   *  per replica); hidden when there are no blocks. Not sortable because the
   *  rowspan sub-rows would break under re-sorting.
   */
  private def streamBlockTable(blocks: Seq[(BlockId, Seq[BlockUIData])]): Seq[Node] = {
    if (blocks.isEmpty) {
      Nil
    } else {
      <div>
        <h5>Blocks</h5>
        {UIUtils.listingTable(
          streamBlockTableHeader,
          streamBlockTableRow,
          blocks,
          id = Some("storage-by-block-table"),
          sortable = false)}
      </div>
    }
  }

  /** Header fields for the stream-block table. */
  private val streamBlockTableHeader = Seq(
    "Block ID",
    "Replication Level",
    "Location",
    "Storage Level",
    "Size")
/** Render a stream block */
private def streamBlockTableRow(block: (BlockId, Seq[BlockUIData])): Seq[Node] = {
val replications = block._2
assert(replications.size > 0) // This must be true because it's the result of "groupBy"
if (replications.size == 1) {
streamBlockTableSubrow(block._1, replications.head, replications.size, true)
} else {
streamBlockTableSubrow(block._1, replications.head, replications.size, true) ++
replications.tail.map(streamBlockTableSubrow(block._1, _, replications.size, false)).flatten
}
}
  /** One replica sub-row; only the first sub-row of a block emits the
   *  rowspan-ed block-id and replication cells.
   */
  private def streamBlockTableSubrow(
      blockId: BlockId, block: BlockUIData, replication: Int, firstSubrow: Boolean): Seq[Node] = {
    val (storageLevel, size) = streamBlockStorageLevelDescriptionAndSize(block)
    <tr>
      {
        if (firstSubrow) {
          <td rowspan={replication.toString}>
            {block.blockId.toString}
          </td>
          <td rowspan={replication.toString}>
            {replication.toString}
          </td>
        }
      }
      <td>{block.location}</td>
      <td>{storageLevel}</td>
      <td>{Utils.bytesToString(size)}</td>
    </tr>
  }

  /** Maps a block's storage level to a display label and the matching size
   *  figure. Disk takes precedence over memory, memory over off-heap;
   *  anything else is a programming error.
   */
  private[storage] def streamBlockStorageLevelDescriptionAndSize(
      block: BlockUIData): (String, Long) = {
    if (block.storageLevel.useDisk) {
      ("Disk", block.diskSize)
    } else if (block.storageLevel.useMemory && block.storageLevel.deserialized) {
      ("Memory", block.memSize)
    } else if (block.storageLevel.useMemory && !block.storageLevel.deserialized) {
      ("Memory Serialized", block.memSize)
    } else if (block.storageLevel.useOffHeap) {
      ("External", block.externalBlockStoreSize)
    } else {
      throw new IllegalStateException(s"Invalid Storage Level: ${block.storageLevel}")
    }
  }
}
| ArvinDevel/onlineAggregationOnSparkV2 | core/src/main/scala/org/apache/spark/ui/storage/StoragePage.scala | Scala | apache-2.0 | 6,836 |
package myspark.mllib
import java.util.Date
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import myspark.mllib.DepthStudy._
import scala.collection.mutable._
/**
 * Groups party UUIDs into connected components ("groups") from a tab-separated
 * from→to relation file, and labels each UUID with a role.
 *
 * NOTE(review): `fromToRelList` collects the entire relation RDD to the driver,
 * and `getGroupIds` repeatedly scans that list — this only scales to inputs
 * that comfortably fit in driver memory.
 */
class DepthStudy(val sc: SparkContext) extends scala.Serializable {
  // Earlier input path: /data/party_rel_data/lianxiRen2.txt
  // Each line is split on the tab pattern; column 0 is the "from" uuid and
  // column 3 the "to" uuid. Rows with a different arity or blank keys are dropped.
  val fromToRelRDD = sc.textFile("/data/party_rel_data/input/lxr.txt").map(_.split("\\t"))
    /*.filter(a => a.length == 18 && a(2) != "null" && a(5) != "null")
    .map(a => (a(2), a(5)))*/
    .filter(a => a.length == 7 && a(0) != "" && a(3) != "")
    .map(a => (a(0), a(3)))
  // Materialized on the driver; all group computation below is local.
  val fromToRelList = fromToRelRDD.collect().toList
  //val fromToRel = List(("A", "a"), ("A", "b"), ("A", "c"), ("a", "m"), ("a", "H"), ("b", "f"), ("U", "K"), ("U", "jsdf"))

  // Assign a distinct provisional 1-based group id to every distinct "from" uuid.
  def getTempGroupIds = {
    var index = 0L
    fromToRelList.map(_._1).distinct.map {
      t =>
        index += 1L
        (t, index)
    }
  }

  // Merge provisional ids until every connected component shares a single id.
  // Returns distinct (uuid, groupId) pairs.
  def getGroupIds = {
    val tempGroupIds = getTempGroupIds
    // Propagate each "from" uuid's id to the uuids it points at.
    var arrayBuffer = ArrayBuffer[(String, Long)]()
    tempGroupIds.foreach {
      case (key, id) =>
        fromToRelList.filter(t => t._1 == key).foreach { t => arrayBuffer.+=((t._2, id)) }
    }
    arrayBuffer ++= (tempGroupIds)
    // Pair each uuid with (originalId, currentId); currentId is rewritten below.
    var list = arrayBuffer.toList.map(t => (t._1, (t._2, t._2)))
    val ids = list.map(t => t._2._2).distinct
    var needChangedIds = scala.collection.mutable.ArrayBuffer[Long]()
    ids.foreach {
      id =>
        // `list` mutates inside the loop, so this id may already have been
        // merged away; recheck against the current state before processing it.
        if (list.map(t => t._2._2).distinct.contains(id)) {
          // Fixed-point merge: pull every uuid reachable from this group into it,
          // repeating until no entry changes group id.
          do {
            val keys = list.filter(t => t._2._2 == id).map(t => t._1).distinct
            needChangedIds.clear()
            list.foreach { t => if (keys.contains(t._1) && t._2._2 != id) needChangedIds += t._2._1 }
            list = list.map(t => if (needChangedIds.contains(t._2._1)) (t._1, (t._2._1, id)) else (t._1, t._2))
          } while (needChangedIds.nonEmpty)
        }
    }
    list.map(t => (t._1, t._2._2)).distinct
  }

  // Classify every uuid: only-"from" => applicant, only-"to" => contact,
  // appearing on both sides => applicant-and-contact.
  def getRoles = {
    var fromUUIDs = fromToRelList.map(_._1).distinct
    var toUUIDs = fromToRelList.map(t => t._2).distinct
    val bothFromAndToUUIDs = fromUUIDs.intersect(toUUIDs).distinct
    fromUUIDs = fromUUIDs.filter(x => !bothFromAndToUUIDs.contains(x))
    toUUIDs = toUUIDs.filter(x => !bothFromAndToUUIDs.contains(x))
    val fromUUIDsRoles = fromUUIDs.map((_, applyCode))
    val toUUIDsRoles = toUUIDs.map((_, relativeCode))
    val bothFromAndToUUIDsRoles = bothFromAndToUUIDs.map((_, applyAndRelativeCode))
    fromUUIDsRoles.union(toUUIDsRoles).union(bothFromAndToUUIDsRoles)
  }

  // Join group ids with roles; uuids without a role fall back to "null".
  // Output tuples are (groupId, uuid, role).
  def getGroupIdAndRolesRDD(groupIdList: List[(String, Long)], rolesList: List[(String, String)]) = {
    val groupIdRDD = sc.makeRDD(groupIdList)
    val rolesListRDD = sc.makeRDD(rolesList)
    groupIdRDD.leftOuterJoin(rolesListRDD).map(t => (t._2._1, t._1, t._2._2.getOrElse("null")))
  }

  // For every applicant-side uuid, emit its (groupId, fromUuid, toUuid) edges.
  // NOTE(review): `.get` on the join's Option assumes every applicant uuid has
  // at least one outgoing relation — throws otherwise; confirm this invariant.
  def getGroupIdFromUUIDsAndToUUIDsRDD(groupIdAndRolesRDD: RDD[(Long, String, String)]) = {
    // e.g. List((1,a,申请人和联系人), (1,A,申请人), (4,U,申请人), (1,b,申请人和联系人))
    val fromUUIDsAndGroupIdRDD = groupIdAndRolesRDD.filter(t => t._3 == applyCode || t._3 == applyAndRelativeCode)
      .map(t => (t._2, t._1))
    fromUUIDsAndGroupIdRDD.leftOuterJoin(fromToRelRDD).map(t => (t._2._1, t._1, t._2._2.get))
  }
}
object DepthStudy {
  // Role labels (Chinese): applicant, contact, applicant-and-contact.
  // These exact strings are written to the output files and matched on in
  // getGroupIdFromUUIDsAndToUUIDsRDD, so they must not be translated.
  val applyCode = "申请人"
  val relativeCode = "联系人"
  val applyAndRelativeCode = "申请人和联系人"

  // Entry point: computes groups and roles, then writes three outputs
  // (per-uuid groups, per-group counts, per-group edges) under timestamped dirs.
  def main(args: Array[String]) {
    println("运行!!!!")
    val beginTime = System.currentTimeMillis()
    val conf = new SparkConf().setAppName("xx").setMaster("local[4]")
    val sc = new SparkContext(conf)
    val ds = new DepthStudy(sc)
    val groupId = ds.getGroupIds
    val role = ds.getRoles
    val groupIdAndRolesRDD = ds.getGroupIdAndRolesRDD(groupId, role)
    val timestamp = new Date().getTime
    // (groupId, uuid, role) triples, one tab-separated line each.
    groupIdAndRolesRDD.map(_.productIterator.mkString("\\t")).coalesce(1).saveAsTextFile(s"/data/party_rel_data/output/result_group/${timestamp}")
    // Member count per group, sorted descending.
    groupIdAndRolesRDD.map(t =>(t._1, 1)).reduceByKey(_ + _).sortBy(_._2, false)
      .map(_.productIterator.mkString("\\t")).coalesce(1).saveAsTextFile(s"/data/party_rel_data/output/result_sum/${timestamp}")
    // (groupId, fromUuid, toUuid) relation edges.
    val groupIdFromUUIDsAndToUUIDsRDD = ds.getGroupIdFromUUIDsAndToUUIDsRDD(groupIdAndRolesRDD)
    groupIdFromUUIDsAndToUUIDsRDD.map(_.productIterator.mkString("\\t")).coalesce(1).saveAsTextFile(s"/data/party_rel_data/output/result_rel/${timestamp}")
    val endTime = System.currentTimeMillis()
    println(s"共耗时:${(endTime - beginTime) / 1000}秒")
    sc.stop()
  }
}
| luciuschina/ScalaAndSparkStudy | src/main/scala/myspark/mllib/DepthStudy.scala | Scala | mit | 4,564 |
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.spark
import java.io.{BufferedWriter, StringWriter}
import org.apache.spark.geomesa.GeoMesaSparkKryoRegistratorEndpoint
import org.apache.spark.rdd.RDD
import org.apache.spark.{Partition, TaskContext}
import org.locationtech.geomesa.features.serialization.GeoJsonSerializer
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
/**
 * RDD with a known schema
 *
 * @param rdd simple feature RDD
 * @param sft simple feature type schema
 */
class SpatialRDD(rdd: RDD[SimpleFeature], sft: SimpleFeatureType) extends RDD[SimpleFeature](rdd) with Schema {

  GeoMesaSparkKryoRegistrator.register(sft)

  // The feature type object itself is not shipped with the RDD; capture its
  // name and encoded spec string so the schema can be rebuilt anywhere.
  private val typeName = sft.getTypeName
  private val spec = SimpleFeatureTypes.encodeType(sft, includeUserData = true)

  // Marked @transient and lazily re-created from (typeName, spec) after
  // deserialization on executors.
  @transient
  override lazy val schema: SimpleFeatureType = SimpleFeatureTypes.createType(typeName, spec)

  // Pure wrapper: computation and partitioning delegate to the parent RDD.
  override def compute(split: Partition, context: TaskContext): Iterator[SimpleFeature] =
    firstParent.compute(split, context)

  override def getPartitions: Array[Partition] = firstParent.partitions
}
/**
 * Companion: factory plus implicit converters from a schema-bearing
 * SimpleFeature RDD to plain Scala value representations and GeoJSON.
 */
object SpatialRDD {

  import scala.collection.JavaConverters._

  GeoMesaSparkKryoRegistratorEndpoint.init()

  def apply(rdd: RDD[SimpleFeature], schema: SimpleFeatureType) = new SpatialRDD(rdd, schema)

  /** Attribute values only, in schema order. */
  implicit def toValueSeq(in: RDD[SimpleFeature] with Schema): RDD[Seq[AnyRef]] =
    in.map(_.getAttributes.asScala)

  /** (attributeName, value) pairs, in property order. */
  implicit def toKeyValueSeq(in: RDD[SimpleFeature] with Schema): RDD[Seq[(String, AnyRef)]] =
    in.map(_.getProperties.asScala.map(p => (p.getName.getLocalPart, p.getValue)).toSeq)

  /** attributeName -> value map per feature. */
  implicit def toKeyValueMap(in: RDD[SimpleFeature] with Schema): RDD[Map[String, AnyRef]] =
    in.map(_.getProperties.asScala.map(p => (p.getName.getLocalPart, p.getValue)).toMap)

  /** One GeoJSON document string per feature. */
  implicit def toGeoJSONString(in: RDD[SimpleFeature] with Schema): RDD[String] = {
    // Capture the schema on the driver; the serializer is built once per partition.
    val sft = in.schema
    in.mapPartitions { features =>
      val json = new GeoJsonSerializer(sft)
      val sw = new StringWriter
      // note: we don't need to close this since we're writing to a string
      val jw = GeoJsonSerializer.writer(sw)

      features.map { f =>
        // Reuse the same buffer for every feature: reset, write, snapshot.
        sw.getBuffer.setLength(0)
        json.write(jw, f)
        jw.flush()
        sw.toString
      }
    }
  }

  /** Method-call syntax for the implicit conversions above. */
  implicit class SpatialRDDConversions(in: RDD[SimpleFeature] with Schema) {
    def asGeoJSONString: RDD[String] = toGeoJSONString(in)
    def asKeyValueMap: RDD[Map[String, AnyRef]] = toKeyValueMap(in)
    def asKeyValueSeq: RDD[Seq[(String, AnyRef)]] = toKeyValueSeq(in)
    def asValueSeq: RDD[Seq[AnyRef]] = toValueSeq(in)
  }
}
| aheyne/geomesa | geomesa-spark/geomesa-spark-core/src/main/scala/org/locationtech/geomesa/spark/SpatialRDD.scala | Scala | apache-2.0 | 3,130 |
package com.ereisman.esurient.hadoop.io
import org.apache.hadoop.mapreduce.{JobContext,RecordReader,InputFormat,InputSplit,TaskAttemptContext}
import org.apache.hadoop.io.NullWritable
import com.ereisman.esurient.EsurientConstants._
/**
 * Synthetic input format: its only job is to make Hadoop schedule exactly
 * `numTasks` mappers for our ETL code. It produces dummy splits and a reader
 * that never yields records.
 */
class EsurientInputFormat extends InputFormat[NullWritable, NullWritable] {

  /** Build `numTasks` dummy splits (task ids are 1-based). */
  override def getSplits(jc: JobContext): java.util.List[InputSplit] = {
    val numTasks = jc.getConfiguration.getInt(ES_TASK_COUNT, ES_TASK_COUNT_DEFAULT)
    val splits = new java.util.LinkedList[InputSplit]
    var taskId = 1
    while (taskId <= numTasks) {
      splits.add(new EsurientInputSplit(taskId, numTasks))
      taskId += 1
    }
    splits
  }

  /** There is no real input, so hand Hadoop a no-op record reader. */
  override def createRecordReader(split: InputSplit, tac: TaskAttemptContext): RecordReader[NullWritable, NullWritable] = {
    new EsurientNoOpRecordReader
  }
}
| initialcontext/esurient | src/main/scala/com/ereisman/esurient/hadoop/io/EsurientInputFormat.scala | Scala | apache-2.0 | 1,010 |
package example
import org.scalatest.FunSuite
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
/**
* This class implements a ScalaTest test suite for the methods in object
* `Lists` that need to be implemented as part of this assignment. A test
* suite is simply a collection of individual tests for some specific
* component of a program.
*
* A test suite is created by defining a class which extends the type
* `org.scalatest.FunSuite`. When running ScalaTest, it will automatically
* find this class and execute all of its tests.
*
* Adding the `@RunWith` annotation enables the test suite to be executed
* inside eclipse using the built-in JUnit test runner.
*
* You have two options for running this test suite:
*
* - Start the sbt console and run the "test" command
* - Right-click this file in eclipse and chose "Run As" - "JUnit Test"
*/
@RunWith(classOf[JUnitRunner])
class ListsSuite extends FunSuite {

  /**
   * Tests are written using the `test` operator which takes two arguments:
   *
   * - A description of the test. This description has to be unique, no two
   *   tests can have the same description.
   * - The test body, a piece of Scala code that implements the test
   *
   * The most common way to implement a test body is using the method `assert`
   * which tests that its argument evaluates to `true`. So one of the simplest
   * successful tests is the following:
   */
  test("one plus one is two")(assert(1 + 1 == 2))

  /**
   * In Scala, it is allowed to pass an argument to a method using the block
   * syntax, i.e. `{ argument }` instead of parentheses `(argument)`.
   *
   * This allows tests to be written in a more readable manner:
   */
  test("one plus one is three?") {
    assert(1 + 1 != 3) // Originally a failing assertion; already corrected.
  }

  /**
   * One problem with the previous (failing) test is that ScalaTest will
   * only tell you that a test failed, but it will not tell you what was
   * the reason for the failure. The output looks like this:
   *
   * {{{
   *    [info] - one plus one is three? *** FAILED ***
   * }}}
   *
   * This situation can be improved by using a special equality operator
   * `===` instead of `==` (this is only possible in ScalaTest). So if you
   * run the next test, ScalaTest will show the following output:
   *
   * {{{
   *    [info] - details why one plus one is not three *** FAILED ***
   *    [info]   2 did not equal 3 (ListsSuite.scala:67)
   * }}}
   *
   * We recommend to always use the `===` equality operator when writing tests.
   */
  test("details why one plus one is not three") {
    assert(1 + 1 != 3) // Already fixed from the original failing `1 + 1 === 3`.
  }

  /**
   * In order to test the exceptional behavior of a methods, ScalaTest offers
   * the `intercept` operation.
   *
   * In the following example, we test the fact that the method `intNotZero`
   * throws an `IllegalArgumentException` if its argument is `0`.
   */
  test("intNotZero throws an exception if its argument is 0") {
    intercept[IllegalArgumentException] {
      intNotZero(0)
    }
  }

  /** Returns `x` unchanged; rejects zero with IllegalArgumentException. */
  def intNotZero(x: Int): Int = {
    if (x == 0) throw new IllegalArgumentException("zero is not allowed")
    else x
  }

  /**
   * Now we finally write some tests for the list functions that have to be
   * implemented for this assignment. We fist import all members of the
   * `List` object.
   */
  import Lists._

  /**
   * We only provide two very basic tests for you. Write more tests to make
   * sure your `sum` and `max` methods work as expected.
   *
   * In particular, write tests for corner cases: negative numbers, zeros,
   * empty lists, lists with repeated elements, etc.
   *
   * It is allowed to have multiple `assert` statements inside one test,
   * however it is recommended to write an individual `test` statement for
   * every tested aspect of a method.
   */
  test("sum of a few numbers") {
    assert(sum(List(1,2,0)) === 3)
  }

  test("sum of an empty list is zero") {
    assert(sum(List()) === 0)
  }

  test("max doesn't work for empty lists") {
    intercept[NoSuchElementException] {
      max(List())
    }
  }

  test("max of a list with an element is that element") {
    assert(max(List(3)) === 3)
  }

  test("max of a few numbers") {
    assert(max(List(3, 7, 2)) === 7)
  }
}
| faloi/progfun-scala | example/src/test/scala/example/ListsSuite.scala | Scala | mit | 4,313 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.mxnet.util
import org.apache.mxnet.NDArray
import org.scalatest.{BeforeAndAfterAll, FunSuite}
/** Round-trip test: two NDArrays survive serialize → deserialize intact. */
class SerializerUtilsSuite extends FunSuite with BeforeAndAfterAll {
  test("serialize & deserialize NDArrays") {
    val a = NDArray.zeros(2, 3)
    val b = NDArray.ones(3, 1)
    val bytes = SerializerUtils.serializeNDArrays(a, b)
    val ndArrays = SerializerUtils.deserializeNDArrays(bytes)
    // Order and contents must both be preserved.
    assert(ndArrays.size === 2)
    assert(ndArrays(0) === a)
    assert(ndArrays(1) === b)
  }
}
| indhub/mxnet | scala-package/core/src/test/scala/org/apache/mxnet/util/SerializerUtilsSuite.scala | Scala | apache-2.0 | 1,310 |
import scala.actors._
// Minimal scala.actors actor that waits for a single message.
object test extends Actor {
  def act(): Unit = {
    // NOTE(review): plain `receive` takes no timeout, so TIMEOUT only matches
    // if another actor explicitly sends that message; with no sender this call
    // blocks forever — confirm whether `receiveWithin(ms)` was intended.
    receive {
      case TIMEOUT => Console.println("TIMEOUT")
      //case _ => Console.println("_")
    }
  }
}
| yusuke2255/dotty | tests/pending/pos/t533.scala | Scala | bsd-3-clause | 194 |
/*
* Copyright (c) 2016 dawid.melewski
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package actyxpoweruseralert.services
import com.typesafe.config.Config
import com.typesafe.scalalogging.LazyLogging
import actyxpoweruseralert.model._
import play.api.libs.ws.WSClient
import scala.concurrent.{ ExecutionContext, Future }
/** Read-only client interface for the machine-park REST API. */
trait MachineParkApi {
  /** Endpoints of all known machines. */
  def machines(): Future[List[MachineEndpoint]]
  /** Details for one machine by id; None when absent or unparsable. */
  def machineById(id: MachineId): Future[Option[MachineInfo]]
  /** Details for one machine by its endpoint URL; None when absent or unparsable. */
  def machineByEndpoint(url: MachineEndpoint): Future[Option[MachineInfo]]
}
/**
 * Play-WS implementation of [[MachineParkApi]]. The API root is read from
 * configuration key `actyx.api.url`.
 */
class WsMachineParkApi(wSClient: WSClient, config: Config)(
    implicit ex: ExecutionContext)
    extends MachineParkApi
    with LazyLogging {

  import net.ceedubs.ficus.Ficus._
  import actyxpoweruseralert.model.JsonProtocol._

  private val apiUrl = config.as[String]("actyx.api.url")

  // NOTE(review): unlike machineByUrl below, this does not check the HTTP
  // status — a non-200 body would fail JSON validation with `.get` throwing.
  override def machines(): Future[List[MachineEndpoint]] = {
    wSClient
      .url(apiUrl + "/machines")
      .withHeaders("Accept" -> "application/json")
      .get()
      .map(resp => {
        resp.json.validate[List[String]].get.map(MachineEndpoint)
      })
  }

  override def machineById(id: MachineId): Future[Option[MachineInfo]] =
    machineByUrl(apiUrl + s"/machine/${id.id}")

  // Endpoint URLs may embed the "$API_ROOT" placeholder; substitute it first.
  override def machineByEndpoint(
      url: MachineEndpoint): Future[Option[MachineInfo]] =
    machineByUrl(url.baseUrl.replace("$API_ROOT", apiUrl))

  // GET the machine resource; 200 + valid JSON => Some, anything else => None
  // (JSON errors and unexpected statuses are logged, not propagated).
  private def machineByUrl(url: String) =
    wSClient
      .url(url)
      .withHeaders("Accept" -> "application/json")
      .get()
      .map(resp =>
        resp.status match {
          case 200 =>
            resp.json.validateOpt[MachineInfo].recoverTotal {
              case jsonError =>
                logger.error(
                  "Occurred json parsing error " + jsonError.toString)
                None
            }
          case _ =>
            logger.warn(
              s"Unexpected status when retireving machine info, ${resp.status}, ${resp.body}")
            None
        })
}
| meloniasty/ActyxPowerUserAlert | src/main/scala/actyxpoweruseralert/services/MachineParkApi.scala | Scala | mit | 3,009 |
/*
* Licensed to The Apereo Foundation under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
*
* The Apereo Foundation licenses this file to you under the Apache License,
* Version 2.0, (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.tle.core.db
import java.time.Instant
import com.tle.core.db.tables._
import com.tle.core.db.types.{DbUUID, InstId, String20, String255, UserId}
import fs2.Stream
import io.doolse.simpledba.{WriteOp, WriteQueries}
import io.doolse.simpledba.jdbc.{JDBCColumn, JDBCIO, JDBCSQLConfig}
import org.slf4j.LoggerFactory
/** Prepared audit-log queries: insertion, per-user and per-institution access/cleanup. */
case class AuditLogQueries(
    insertNew: (Long => AuditLogEntry) => Stream[JDBCIO, AuditLogEntry],
    deleteForUser: ((UserId, InstId)) => Stream[JDBCIO, WriteOp],
    listForUser: ((UserId, InstId)) => Stream[JDBCIO, AuditLogEntry],
    deleteForInst: InstId => Stream[JDBCIO, WriteOp],
    deleteBefore: Instant => Stream[JDBCIO, WriteOp],
    countForInst: InstId => Stream[JDBCIO, Int],
    listForInst: InstId => Stream[JDBCIO, AuditLogEntry]
)
/** Prepared queries for item and attachment view counters. */
case class ViewCountQueries(
    writeItemCounts: WriteQueries[JDBCIO, ItemViewCount],
    writeAttachmentCounts: WriteQueries[JDBCIO, AttachmentViewCount],
    itemCount: ((InstId, DbUUID, Int)) => Stream[JDBCIO, ItemViewCount],
    allItemCount: InstId => Stream[JDBCIO, ItemViewCount],
    attachmentCount: (
        (InstId, DbUUID, Int, DbUUID)
    ) => Stream[JDBCIO, AttachmentViewCount],
    allAttachmentCount: (
        (InstId, DbUUID, Int)
    ) => Stream[JDBCIO, AttachmentViewCount],
    countForCollectionId: Long => Stream[JDBCIO, Int],
    attachmentCountForCollectionId: Long => Stream[JDBCIO, Int],
    deleteForItemId: ((InstId, DbUUID, Int)) => Stream[JDBCIO, WriteOp]
)
/** Prepared queries for institution settings, keyed by (institution, name/prefix). */
case class SettingsQueries(
    write: WriteQueries[JDBCIO, Setting],
    query: ((InstId, String)) => Stream[JDBCIO, Setting],
    prefixQuery: ((InstId, String)) => Stream[JDBCIO, Setting],
    prefixAnyInst: String => Stream[JDBCIO, Setting]
)
/** Prepared queries over oEQ entities, by type, id, or whole institution. */
case class EntityQueries(
    write: WriteQueries[JDBCIO, OEQEntity],
    allByType: ((InstId, String20)) => Stream[JDBCIO, OEQEntity],
    byId: ((InstId, DbUUID)) => Stream[JDBCIO, OEQEntity],
    allByInst: InstId => Stream[JDBCIO, OEQEntity]
)
/** Prepared queries for cached values, looked up either by key or by value. */
case class CachedValueQueries(
    insertNew: (Long => CachedValue) => Stream[JDBCIO, CachedValue],
    writes: WriteQueries[JDBCIO, CachedValue],
    getForKey: ((String255, String255, InstId)) => Stream[JDBCIO, CachedValue],
    getForValue: ((String255, String, InstId)) => Stream[JDBCIO, CachedValue])
object DBQueries {
  // Reuses Hibernate's logger name so existing "org.hibernate.SQL" log config applies.
  val logSQL = LoggerFactory.getLogger("org.hibernate.SQL")
}
/** Bundle of all prepared query groups for one JDBC column family `C`. */
trait DBQueries {
  // Concrete dialect fixes the JDBC column representation.
  type C[A] <: JDBCColumn

  /** Attach the shared SQL debug logger to a dialect config. */
  def setupLogging(config: JDBCSQLConfig[C]): JDBCSQLConfig[C] =
    config.withPrepareLogger(sql => DBQueries.logSQL.debug(sql))

  def auditLogQueries: AuditLogQueries
  def viewCountQueries: ViewCountQueries
  def settingsQueries: SettingsQueries
  def entityQueries: EntityQueries
  def cachedValueQueries: CachedValueQueries
}
| equella/Equella | Source/Plugins/Core/com.equella.serverbase/scalasrc/com/tle/core/db/DBQueries.scala | Scala | apache-2.0 | 3,547 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.history
import scala.annotation.tailrec
import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging
import org.apache.spark.util.Utils
/**
 * Command-line parser for the [[HistoryServer]].
 *
 * Only `--properties-file` and `--help` are accepted; everything else prints
 * usage and exits. Constructing an instance mutates the passed-in [[SparkConf]]
 * by loading defaults from the chosen properties file.
 */
private[history] class HistoryServerArguments(conf: SparkConf, args: Array[String])
  extends Logging {
  // Path given via --properties-file; null means "use the default location".
  private var propertiesFile: String = null

  parse(args.toList)

  // Consume the argument list recursively; exits the JVM on --help or bad input.
  @tailrec
  private def parse(args: List[String]): Unit = {
    args match {
      case ("--help" | "-h") :: tail =>
        printUsageAndExit(0)

      case ("--properties-file") :: value :: tail =>
        propertiesFile = value
        parse(tail)

      case Nil =>

      case _ =>
        printUsageAndExit(1)
    }
  }

  // This mutates the SparkConf, so all accesses to it must be made after this line
  Utils.loadDefaultSparkProperties(conf, propertiesFile)

  // Print usage to stderr and terminate the JVM with the given exit code.
  private def printUsageAndExit(exitCode: Int) {
    // scalastyle:off println
    System.err.println(
      """
      |Usage: HistoryServer [options]
      |
      |Options:
      |  --properties-file FILE      Path to a custom Spark properties file.
      |                              Default is conf/spark-defaults.conf.
      |
      |Configuration options can be set by setting the corresponding JVM system property.
      |History Server options are always available; additional options depend on the provider.
      |
      |History Server options:
      |
      |  spark.history.ui.port              Port where server will listen for connections
      |                                     (default 18080)
      |  spark.history.acls.enable          Whether to enable view acls for all applications
      |                                     (default false)
      |  spark.history.provider             Name of history provider class (defaults to
      |                                     file system-based provider)
      |  spark.history.retainedApplications Max number of application UIs to keep loaded in memory
      |                                     (default 50)
      |FsHistoryProvider options:
      |
      |  spark.history.fs.logDirectory      Directory where app logs are stored
      |                                     (default: file:/tmp/spark-events)
      |  spark.history.fs.update.interval   How often to reload log data from storage
      |                                     (in seconds, default: 10)
      |""".stripMargin)
    // scalastyle:on println
    System.exit(exitCode)
  }

}
| pgandhi999/spark | core/src/main/scala/org/apache/spark/deploy/history/HistoryServerArguments.scala | Scala | apache-2.0 | 3,316 |
import scala.tools.partest.ReplTest
object Test extends ReplTest {
  // Two REPL input lines: a string literal, then a leading-dot `.size`
  // (presumably applied to the previous line's result — see test name).
  override def code = Seq("\"foobar\"", ".size").mkString("\n")
}
| scala/scala | test/files/run/repl-previous-result.scala | Scala | apache-2.0 | 144 |
package com.twitter.finatra.http.tests.conversions
import com.twitter.finatra.http.conversions.optionHttp._
import com.twitter.finatra.http.exceptions._
import com.twitter.inject.Test
import com.twitter.util.{Future, Throw, Try}
/**
 * Exercises the Option-to-HTTP conversions: each `toFutureOrX` / `valueOrX`
 * helper yields the wrapped value for Some and the matching HTTP exception
 * for None.
 */
class OptionHttpConversionsTest extends Test {

  test("Option[T]#valueOrNotFound when Some") {
    Some(1).valueOrNotFound("foo") should equal(1)
  }

  test("Option[T]#valueOrNotFound when None") {
    val e = intercept[NotFoundException] {
      None.valueOrNotFound("foo")
    }
    // The supplied message becomes the exception's error list.
    e.errors should equal(Seq("foo"))
  }

  test("Option[T]#toTryOrServerError when Some") {
    Some(1).toTryOrServerError("foo") should equal(Try(1))
  }

  test("Option[T]#toTryOrServerError when None") {
    None.toTryOrServerError("foo") should equal(Throw(InternalServerErrorException("foo")))
  }

  test("Option[T]#toFutureOrNotFound when Some") {
    assertFuture(Some(1).toFutureOrNotFound(), Future(1))
  }

  test("Option[T]#toFutureOrBadRequest when Some") {
    assertFuture(Some(1).toFutureOrBadRequest(), Future(1))
  }

  test("Option[T]#toFutureOrServiceError when Some") {
    assertFuture(Some(1).toFutureOrServerError(), Future(1))
  }

  test("Option[T]#toFutureOrForbidden when Some") {
    assertFuture(Some(1).toFutureOrForbidden(), Future(1))
  }

  test("Option[T]#toFutureOrNotFound when None") {
    assertFailedFuture[NotFoundException](None.toFutureOrNotFound())
  }

  test("Option[T]#toFutureOrBadRequest when None") {
    assertFailedFuture[BadRequestException](None.toFutureOrBadRequest())
  }

  test("Option[T]#toFutureOrServiceError when None") {
    assertFailedFuture[InternalServerErrorException](None.toFutureOrServerError())
  }

  test("Option[T]#toFutureOrForbidden when None") {
    assertFailedFuture[ForbiddenException](None.toFutureOrForbidden())
  }
}
| twitter/finatra | http-server/src/test/scala/com/twitter/finatra/http/tests/conversions/OptionHttpConversionsTest.scala | Scala | apache-2.0 | 1,817 |
/*
* Copyright 2000-2014 JetBrains s.r.o.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jetbrains.plugins.scala.lang.macros.evaluator
import com.intellij.openapi.components._
import com.intellij.openapi.project.Project
import com.intellij.openapi.util.Key
import com.intellij.psi.PsiNamedElement
import org.jetbrains.plugins.scala.components.libextensions.LibraryExtensionsManager
import org.jetbrains.plugins.scala.lang.macros.MacroDef
import org.jetbrains.plugins.scala.lang.macros.evaluator.impl._
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScExpression
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScFunction
import org.jetbrains.plugins.scala.lang.psi.types.ScType
import org.jetbrains.plugins.scala.macroAnnotations.Cached
import scala.reflect.ClassTag
/**
 * Project-level service that resolves macro typing/expansion rules and applies
 * them to macro definitions encountered during type inference.
 *
 * @author Mikhail.Mutcianko
 *         date 19.12.14
 */
class ScalaMacroEvaluator(project: Project) extends ProjectComponent {
  import ScalaMacroEvaluator._

  override def getComponentName = "ScalaMacroEvaluator"

  // Rule maps are cached until library extensions change (MOD_TRACKER).
  @Cached(LibraryExtensionsManager.MOD_TRACKER, null)
  private def typingRules: Map[MacroImpl, ScalaMacroTypeable] = loadRules(defaultTypeProviders)

  @Cached(LibraryExtensionsManager.MOD_TRACKER, null)
  private def expansionRules: Map[MacroImpl, ScalaMacroExpandable] = loadRules(defaultExprProviders)

  // Merge built-in providers with externally contributed ones; external
  // entries win on key collision because their map is added last.
  private def loadRules[T <: ScalaMacroBound](defaults: Seq[T])(implicit tag: ClassTag[T]) : Map[MacroImpl, T] = {
    val external = LibraryExtensionsManager.getInstance(project)
      .getExtensions(tag.runtimeClass).asInstanceOf[Seq[T]]
    defaults.flatMap(p => p.boundMacro.map(_ -> p)).toMap ++
      external.flatMap(p => p.boundMacro.map(_ -> p)).toMap
  }

  /** Compute the result type of a supported macro call, if a typing rule applies. */
  def checkMacro(named: PsiNamedElement, context: MacroContext): Option[ScType] = {
    macroSupport(named, typingRules).flatMap {
      case (m, x) => x.checkMacro(m, context)
    }
  }

  /** Expand a supported macro call; expansions are marked to block re-expansion. */
  def expandMacro(named: PsiNamedElement, context: MacroInvocationContext): Option[ScExpression] = {
    if (isMacroExpansion(context.call)) return None //avoid recursive macro expansions
    macroSupport(named, expansionRules).flatMap {
      case (m, x) =>
        val expanded = x.expandMacro(m, context)
        expanded.foreach(markMacroExpansion)
        expanded
    }
  }

  // Look up the rule registered for this (non-local) macro definition, keyed
  // by macro name plus containing class FQN.
  private def macroSupport[T](named: PsiNamedElement, map: Map[MacroImpl, T]): Option[(ScFunction, T)] = {
    named match {
      case MacroDef(m) if !m.isLocal =>
        val macroImpl = MacroImpl(m.name, m.containingClass.qualifiedName)
        map.get(macroImpl).map((m, _))
      case _ => None
    }
  }
}
object ScalaMacroEvaluator {
  def getInstance(project: Project): ScalaMacroEvaluator = ServiceManager.getService(project, classOf[ScalaMacroEvaluator])

  // Expressions produced by expandMacro are tagged with this user-data key so
  // they are never expanded again (guards against infinite recursion).
  private val isMacroExpansionKey: Key[AnyRef] = Key.create("macro.original.expression")
  private def isMacroExpansion(expr: ScExpression): Boolean = expr.getUserData(isMacroExpansionKey) != null
  private def markMacroExpansion(expr: ScExpression): Unit = expr.putUserData(isMacroExpansionKey, this)

  // Built-in shapeless support shipped with the plugin.
  val defaultTypeProviders = Seq(
    ShapelessForProduct,
    ShapelessMaterializeGeneric,
    ShapelessDefaultSymbolicLabelling,
    ShapelessMkSelector,
    ShapelessWitnessSelectDynamic
  )
  val defaultExprProviders = Seq(
    ShapelessProductArgs
  )
}
| jastice/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/macros/evaluator/ScalaMacroEvaluator.scala | Scala | apache-2.0 | 3,808 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.h2o.sparkling.ml.params
import ai.h2o.sparkling.H2OFrame
import org.apache.spark.ml.linalg.DenseVector
/** Adds the Sparkling Water `initialBiases` parameter, mapped to H2O's `initial_biases`. */
trait HasInitialBiases extends H2OAlgoParamsBase {
  private val initialBiases = new NullableVectorArrayParam(
    this,
    "initialBiases",
    "A array of weight vectors to be used for bias initialization of every network layer." +
      "If this parameter is set, the parameter 'initialWeights' has to be set as well.")

  // null (the default) means: let H2O initialize biases itself.
  setDefault(initialBiases -> null)

  def getInitialBiases(): Array[DenseVector] = $(initialBiases)

  def setInitialBiases(value: Array[DenseVector]): this.type = set(initialBiases, value)

  // Vectors are uploaded as H2O frames and passed to the backend by frame key.
  private[sparkling] def getInitialBiasesParam(trainingFrame: H2OFrame): Map[String, Any] = {
    Map("initial_biases" -> convertVectorArrayToH2OFrameKeyArray(getInitialBiases()))
  }

  override private[sparkling] def getSWtoH2OParamNameMap(): Map[String, String] = {
    super.getSWtoH2OParamNameMap() ++ Map("initialBiases" -> "initial_biases")
  }
}
| h2oai/sparkling-water | ml/src/main/scala/ai/h2o/sparkling/ml/params/HasInitialBiases.scala | Scala | apache-2.0 | 1,789 |
package com.datastax.spark.connector.util
import java.util.UUID
import scala.util.parsing.combinator.RegexParsers
import org.apache.spark.Logging
object CqlWhereParser extends RegexParsers with Logging {
  /** Relational operators recognized in CQL WHERE predicates. */
  sealed trait RelationalOperator
  case object EqualTo extends RelationalOperator
  case object LowerThan extends RelationalOperator
  case object LowerEqual extends RelationalOperator
  case object GreaterThan extends RelationalOperator
  case object GreaterEqual extends RelationalOperator
  case object In extends RelationalOperator
  /** Right-hand-side values: either a bind-marker `?` or a typed literal. */
  sealed trait Value
  case object Placeholder extends Value
  sealed trait Literal extends Value { def value: Any }
  case class StringLiteral(value: String) extends Literal
  // Numbers keep their source text rather than being parsed into a numeric type.
  case class NumberLiteral(value: String) extends Literal
  case class BooleanLiteral(value: Boolean) extends Literal
  case class UUIDLiteral(value: UUID) extends Literal
  /** Parenthesized value list, as used on the right of IN. */
  case class ValueList(values: Value*)
  /** Column identifier; unquoted names are lower-cased, quoted names kept verbatim. */
  case class Identifier(name: String)
  /** Parsed WHERE-clause predicate. */
  sealed trait Predicate
  sealed trait SingleColumnPredicate extends Predicate {
    def columnName: String
  }
  /** `col IN ?` — the list is a bind marker. */
  case class InPredicate(columnName: String) extends SingleColumnPredicate
  /** `col IN (v1, v2, ...)` with an explicit value list. */
  case class InListPredicate(columnName: String, values: ValueList) extends SingleColumnPredicate
  /** `col = value`. */
  case class EqPredicate(columnName: String, value: Value) extends SingleColumnPredicate
  /** `col <op> value` for the inequality operators. */
  case class RangePredicate(columnName: String, operator: RelationalOperator, value: Value) extends SingleColumnPredicate
  /** Fallback for text the grammar could not interpret. */
  case class UnknownPredicate(text: String) extends Predicate
private def unquotedIdentifier = """[_\\p{L}][_\\p{L}\\p{Nd}]*""".r ^^ {
id => Identifier(id.toLowerCase)
}
private def quotedIdentifier = "\\"" ~> "(\\"\\"|[^\\"])*".r <~ "\\"" ^^ {
def unEscapeQuotes(s: String) = s.replace("\\"\\"", "\\"")
id => Identifier(unEscapeQuotes(id.toString))
}
private def identifier = unquotedIdentifier | quotedIdentifier
private def num = """-?\\d+(\\.\\d*)?([eE][-\\+]?\\d+)?""".r ^^ NumberLiteral.apply
private def bool = ("true" | "false") ^^ {
s => BooleanLiteral(s.toBoolean)
}
private def str = "'" ~> """(''|[^'])*""".r <~ "'" ^^ {
def unEscapeQuotes(s: String) = s.replace("''", "'")
s => StringLiteral(unEscapeQuotes(s))
}
private def uuid = """[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{12}""".r ^^ {
s => UUIDLiteral(UUID.fromString(s))
}
private def placeholder = "?" ^^ (_ => Placeholder)
private def value: Parser[Value] = placeholder | uuid | num | bool | str
private def relationalOperator: Parser[RelationalOperator] =
"<=" ^^ (_ => LowerEqual) |
">=" ^^ (_ => GreaterEqual) |
"<" ^^ (_ => LowerThan) |
">" ^^ (_ => GreaterThan) |
"=" ^^ (_ => EqualTo) |
"(?i)in".r ^^ (_ => In)
private def valueList: Parser[ValueList] = "(" ~> value ~ rep("," ~> value) <~ ")" ^^ {
case literal ~ list => ValueList(literal :: list :_*)
}
private def and = "(?i)and".r ^^ {_ => "and" }
private def predicate: Parser[Predicate] = ((identifier ~ relationalOperator ~ (value | valueList)) | ".*".r) ^^ {
case Identifier(name) ~ In ~ Placeholder => InPredicate(name)
case Identifier(name) ~ In ~ (list : ValueList) => InListPredicate(name, list)
case Identifier(name) ~ EqualTo ~ (v: Value) => EqPredicate(name, v)
case Identifier(name) ~ (op: RelationalOperator) ~ (param: Value) => RangePredicate(name, op, param)
case other => UnknownPredicate(other.toString)
}
private def whereClause = predicate ~ rep(and ~> predicate) ^^ {
case expr ~ list => expr :: list
}
def parse(cqlWhere: String): Seq[Predicate] = {
parseAll(whereClause, cqlWhere) match {
case Success(columns, _) => columns
case x => logError("Parse error when parsing CQL WHERE clause:" + x.toString); List()
}
}
}
| Stratio/spark-cassandra-connector | spark-cassandra-connector/src/main/scala/com/datastax/spark/connector/util/CqlWhereParser.scala | Scala | apache-2.0 | 3,972 |
/*
* StillmanSubsumptionAlgorithmTest.scala
*
*/
package at.logic.gapt.expr
import at.logic.gapt.proofs.HOLSequent
import org.specs2.mutable._
// Tests for first-order clause subsumption: `clauseSubsumption(c1, c2)` yields
// Some(substitution) exactly when some instance of c1 is contained in c2.
class StillmanSubsumptionAlgorithmFOLTest extends Specification {
  "StillmanSubsumptionAlgorithmFOL" should {
    val P = "P"
    val Q = "Q"
    val R = "R"
    val f = "f"
    val a = FOLConst( "a" )
    val b = FOLConst( "b" )
    val x = FOLVar( "x" )
    val y = FOLVar( "y" )
    val z = FOLVar( "z" )
    val Px = FOLAtom( P, x :: Nil )
    val Py = FOLAtom( P, y :: Nil )
    val Pz = FOLAtom( P, z :: Nil )
    val fxy = FOLFunction( f, x :: y :: Nil )
    val Pfxy = FOLAtom( P, fxy :: Nil )
    val Pa = FOLAtom( P, a :: Nil )
    val Pb = FOLAtom( P, b :: Nil )
    val fba = FOLFunction( f, b :: a :: Nil )
    val Pfba = FOLAtom( P, fba :: Nil )
    val Pxx = FOLAtom( P, x :: x :: Nil )
    val Pxa = FOLAtom( P, x :: a :: Nil )
    val Paa = FOLAtom( P, a :: a :: Nil )
    val Pxb = FOLAtom( P, x :: b :: Nil )
    val Pab = FOLAtom( P, a :: b :: Nil )
    val Pba = FOLAtom( P, b :: a :: Nil )
    val fx = FOLFunction( f, x :: Nil )
    val fa = FOLFunction( f, a :: Nil )
    val fb = FOLFunction( f, b :: Nil )
    val Pfx = FOLAtom( P, fx :: Nil )
    val Pfa = FOLAtom( P, fa :: Nil )
    val Pfb = FOLAtom( P, fb :: Nil )
    val Qxy = FOLAtom( Q, x :: y :: Nil )
    val Qay = FOLAtom( Q, a :: y :: Nil )
    val Rx = FOLAtom( R, x :: Nil )
    "return true on the following clauses" in {
      "P(x) | P(f(x,y)) and P(a) | P(b) | P(f(b,a))" in {
        val c1 = HOLSequent( Nil, Px :: Pfxy :: Nil )
        val c2 = HOLSequent( Nil, Pa :: Pb :: Pfba :: Nil )
        clauseSubsumption( c1, c2 ) must beSome
      }
      "Nil and P(a) | P(b) | P(f(b,a))" in {
        val c1 = HOLSequent( Nil, Nil )
        val c2 = HOLSequent( Nil, Pa :: Pb :: Pfba :: Nil )
        clauseSubsumption( c1, c2 ) must beSome
      }
      "P(x) and P(x) | P(f(x,y))" in {
        val c1 = HOLSequent( Nil, Px :: Nil )
        val c2 = HOLSequent( Nil, Px :: Pfxy :: Nil )
        clauseSubsumption( c1, c2 ) must beSome
      }
      "P(x) and P(x)" in {
        val c1 = HOLSequent( Nil, Px :: Nil )
        val c2 = HOLSequent( Nil, Px :: Nil )
        clauseSubsumption( c1, c2 ) must beSome
      }
      "P(x) and P(y)" in {
        val c1 = HOLSequent( Nil, Px :: Nil )
        val c2 = HOLSequent( Nil, Py :: Nil )
        clauseSubsumption( c1, c2 ) must beSome
      }
      "P(x,x) | P(x,a) and P(a,a)" in {
        val c1 = HOLSequent( Nil, Pxx :: Pxa :: Nil )
        val c2 = HOLSequent( Nil, Paa :: Nil )
        clauseSubsumption( c1, c2 ) must beSome
      }
      "P(x) | Q(x,y) and P(a) | Q(a,y) | R(x)" in {
        val c1 = HOLSequent( Nil, Px :: Qxy :: Nil )
        val c2 = HOLSequent( Nil, Pa :: Qay :: Rx :: Nil )
        clauseSubsumption( c1, c2 ) must beSome
      }
      // Moved here from the "return false" section: it asserts `beSome`, and indeed
      // P(x){x -> a} = P(a) is contained in P(a) | P(y), so subsumption succeeds.
      "P(x) and P(a) | P(y)" in {
        val c1 = HOLSequent( Nil, Px :: Nil )
        val c2 = HOLSequent( Nil, Pa :: Py :: Nil )
        clauseSubsumption( c1, c2 ) must beSome
      }
    }
    "return false on the following clauses" in {
      "P(x) | P(f(x)) and P(f(a)) | P(f(b))" in {
        val c1 = HOLSequent( Nil, Px :: Pfx :: Nil )
        val c2 = HOLSequent( Nil, Pfa :: Pfb :: Nil )
        clauseSubsumption( c1, c2 ) must beNone
      }
      "P(a,a) and P(x,x) | P(x,a)" in {
        val c1 = HOLSequent( Nil, Paa :: Nil )
        val c2 = HOLSequent( Nil, Pxx :: Pxa :: Nil )
        clauseSubsumption( c1, c2 ) must beNone
      }
      "P(x,x) | P(x,b) and P(b,a) | P(a,b)" in {
        val c1 = HOLSequent( Nil, Pxx :: Pxb :: Nil )
        val c2 = HOLSequent( Nil, Pba :: Pab :: Nil )
        clauseSubsumption( c1, c2 ) must beNone
      }
      "P(x) | -P(x) and P(a) | -P(b)" in {
        val c1 = HOLSequent( Px :: Nil, Px :: Nil )
        val c2 = HOLSequent( Pb :: Nil, Pa :: Nil )
        clauseSubsumption( c1, c2 ) must beNone
      }
      "P(x) | -P(x) and P(y) | -P(z)" in {
        val c1 = HOLSequent( Px :: Nil, Px :: Nil )
        val c2 = HOLSequent( Pz :: Nil, Py :: Nil )
        clauseSubsumption( c1, c2 ) must beNone
      }
    }
  }
}
// Higher-order subsumption tests: same contract as the FOL suite, but with
// typed HOL terms (including a function constant overloaded at three types).
class StillmanSubsumptionAlgorithmHOLTest extends Specification {
  "StillmanSubsumptionAlgorithmHOL" should {
    "return true on the following clauses" in {
      val P = Const( "P", Ti -> ( Ti -> To ) )
      val P1 = Const( "P", Ti -> To )
      val Q = Const( "Q", Ti -> To )
      val x = Var( "x", Ti )
      val y = Var( "y", Ti )
      val z = Var( "z", Ti )
      // Higher-order variable of predicate type.
      val q = Var( "q", Ti -> To )
      val a = Const( "a", Ti )
      val b = Const( "b", Ti )
      // Three differently-typed constants named "f".
      val f = Const( "f", Ti -> ( Ti -> ( Ti -> Ti ) ) )
      val f1 = Const( "f", Ti -> Ti )
      val f2 = Const( "f", ( Ti -> To ) -> ( Ti -> ( Ti -> Ti ) ) )
      val f2qza = HOLFunction( f2, q :: z :: a :: Nil )
      val f1b = HOLFunction( f1, b :: Nil )
      val fyza = HOLFunction( f, y :: z :: a :: Nil )
      val Pxx = Atom( P, x :: x :: Nil )
      val Pxa = Atom( P, x :: a :: Nil )
      val Paa = Atom( P, a :: a :: Nil )
      val Pfyza_fyza = Atom( P, fyza :: fyza :: Nil )
      val Qf1b = Atom( Q, f1b :: Nil )
      val Pf2qza = Atom( P, f2qza :: f2qza :: Nil )
      val Px = Atom( P1, x :: Nil )
      val Pa = Atom( P1, a :: Nil )
      val Qx = Atom( Q, x :: Nil )
      "P(x:i,x:i) | P(x:i,a:i) and P(a:i,a:i)" in {
        val c1 = HOLSequent( Nil, Pxx :: Pxa :: Nil )
        val c2 = HOLSequent( Nil, Paa :: Nil )
        clauseSubsumption( c1, c2 ) must beSome
      }
      "P(x:i,x:i) and P(f(y:i,z:i,a:i):i,f(y:i,z:i,a:i):i)" in {
        val c1 = HOLSequent( Nil, Pxx :: Nil )
        val c2 = HOLSequent( Nil, Pfyza_fyza :: Nil )
        clauseSubsumption( c1, c2 ) must beSome
      }
      "P(x:i,x:i) and P(f(q:(i->o),z:i,a:i):i,f(q:(i->o),z:i,a:i):i) | -Q(f(b:i):(i->i))" in {
        val c1 = HOLSequent( Nil, Pxx :: Nil )
        val c2 = HOLSequent( Qf1b :: Nil, Pf2qza :: Nil )
        clauseSubsumption( c1, c2 ) must beSome
      }
      "P(x:i) and P(a:i) | Q(x:i)" in {
        val c1 = HOLSequent( Nil, Px :: Nil )
        val c2 = HOLSequent( Nil, Pa :: Qx :: Nil )
        clauseSubsumption( c1, c2 ) must beSome
      }
    }
  }
}
| gebner/gapt | tests/src/test/scala/at/logic/gapt/expr/StillmanSubsumptionAlgorithmTest.scala | Scala | gpl-3.0 | 6,192 |
/**
* Copyright (C) 2007 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms.control
import org.orbeon.oxf.xforms.XFormsUtils
import org.orbeon.oxf.xforms.analysis.controls.{LHHA, LHHAAnalysis, StaticLHHASupport}
import org.orbeon.oxf.xforms.control.LHHASupport.LHHAProperty
import org.orbeon.oxf.xforms.control.XFormsControl.MutableControlProperty
import org.orbeon.oxf.xforms.control.controls.XFormsLHHAControl
import org.orbeon.oxf.xml.XMLUtils
import org.orbeon.xforms.XFormsId
// Label/Help/Hint property: evaluates the single statically-attached LHH element
// of the control and pairs its value with the static containsHTML flag.
class MutableLHHProperty(control: XFormsControl, lhhaType: LHHA, supportsHTML: Boolean)
  extends MutableLHHAProperty(control, lhhaType, supportsHTML) {

  protected def evaluateValueImpl =
    for {
      lhh ← control.staticControl.asInstanceOf[StaticLHHASupport].lhh(lhhaType)
      value ← evaluateOne(lhh)
    } yield
      value → lhh.containsHTML
}
// Alert property: a control may have several active alerts at once; their values
// are evaluated individually and, when more than one, merged into one HTML list.
class MutableAlertProperty(control: XFormsSingleNodeControl, lhhaType: LHHA, supportsHTML: Boolean)
  extends MutableLHHAProperty(control, lhhaType, supportsHTML) {

  protected def evaluateValueImpl = {

    val activeAlertsOpt = LHHASupport.gatherActiveAlerts(control)

    val valuesWithIsHtml =
      for {
        (_, activeAlerts) ← activeAlertsOpt.toList
        activeAlert ← activeAlerts
        valueWithIsHTML ← evaluateOne(activeAlert)
      } yield
        valueWithIsHTML → activeAlert.containsHTML

    // Zero or one alert: return it as is (with its own isHTML flag).
    if (valuesWithIsHtml.size < 2)
      valuesWithIsHtml.headOption
    else {
      // Combine multiple values as a single HTML value using ul/li;
      // non-HTML values are escaped first so the result is safe HTML.
      val combined = (
        valuesWithIsHtml
        map { case (value, isHTML) ⇒ if (! isHTML) XMLUtils.escapeXMLMinimal(value) else value }
        mkString ("<ul><li>", "</li><li>", "</li></ul>")
      )

      Some(combined → true)
    }
  }
}
// Mutable LHHA property: caches the evaluated Label/Help/Hint/Alert value of a
// control together with a flag saying whether that value is HTML.
abstract class MutableLHHAProperty(control: XFormsControl, lhhaType: LHHA, supportsHTML: Boolean)
  extends MutableControlProperty[String]
  with LHHAProperty {

  // Whether the most recently evaluated value is HTML; only meaningful after value() has run.
  private var _isHTML = false

  protected def isRelevant = control.isRelevant
  protected def wasRelevant = control.wasRelevant

  // TODO: `isHTML` now uses the static `containsHTML` except for multiple alerts. Do this more statically?
  // Unwraps the (value, isHTML) pair from the subclass and records the HTML flag.
  // Returns null (not None) when there is no value, per MutableControlProperty's contract.
  protected def evaluateValue() =
    evaluateValueImpl match {
      case Some((value: String, isHTML)) ⇒
        _isHTML = isHTML
        value
      case _ ⇒
        _isHTML = false
        null
    }

  // Value safe to embed in markup: HTML values get URL rewriting, plain values get XML-escaped.
  def escapedValue() = {
    val rawValue = value()
    if (_isHTML)
      XFormsControl.getEscapedHTMLValue(control.getLocationData, rawValue)
    else
      XMLUtils.escapeXMLMinimal(rawValue)
  }

  def isHTML = {
    value() // force evaluation so _isHTML reflects the current value
    _isHTML
  }

  protected override def markDirty(): Unit = {
    super.markDirty()
    _isHTML = false
  }

  protected def requireUpdate =
    control.containingDocument.getXPathDependencies.requireLHHAUpdate(control.staticControl, lhhaType, control.effectiveId)

  protected def notifyCompute() =
    control.containingDocument.getXPathDependencies.notifyComputeLHHA()

  protected def notifyOptimized() =
    control.containingDocument.getXPathDependencies.notifyOptimizeLHHA()

  override def copy: MutableLHHAProperty =
    super.copy.asInstanceOf[MutableLHHAProperty]

  // Subclasses produce the (value, isHTML) pair, or None when there is no value.
  protected def evaluateValueImpl: Option[(String, Boolean)]

  // Evaluate the value of a LHHA related to this control
  // Can return null
  protected def evaluateOne(lhhaAnalysis: LHHAAnalysis): Option[String] = {

    val contextStack = control.getContextStack
    val lhhaElement = lhhaAnalysis.element

    val result =
      if (lhhaAnalysis.isLocal) {
        // LHHA is direct child of control, evaluate within context
        contextStack.setBinding(control.bindingContext)
        contextStack.pushBinding(lhhaElement, control.effectiveId, lhhaAnalysis.scope)
        val result = Option(
          XFormsUtils.getElementValue(
            control.lhhaContainer,
            contextStack,
            control.effectiveId,
            lhhaElement,
            supportsHTML,
            lhhaAnalysis.defaultToHTML,
            Array[Boolean](false)
          )
        )
        contextStack.popBinding()
        result
      } else {
        // LHHA is somewhere else. We resolve the control and ask for its value.
        control.containingDocument.getControls.resolveObjectByIdOpt(control.effectiveId, lhhaAnalysis.staticId) collect {
          case control: XFormsLHHAControl ⇒ control.getValue
        }
      }

    result
  }

  // NOTE(review): this private helper has no caller within this class — it looks like
  // dead code; verify there is no reflective or planned use before removing it.
  private def findAncestorContextControl(contextStaticId: String, lhhaStaticId: String): XFormsControl = {

    // NOTE: LHHA element must be in the same resolution scope as the current control (since @for refers to @id)
    val lhhaScope = control.getResolutionScope
    val lhhaPrefixedId = lhhaScope.prefixedIdForStaticId(lhhaStaticId)

    // Assume that LHHA element is within same repeat iteration as its related control
    val contextPrefixedId = XFormsId.getRelatedEffectiveId(lhhaPrefixedId, contextStaticId)
    val contextEffectiveId = contextPrefixedId + XFormsId.getEffectiveIdSuffixWithSeparator(control.effectiveId)

    var ancestorObject = control.container.getContainingDocument.getObjectByEffectiveId(contextEffectiveId)
    while (ancestorObject.isInstanceOf[XFormsControl]) {
      val ancestorControl = ancestorObject.asInstanceOf[XFormsControl]
      if (ancestorControl.getResolutionScope == lhhaScope) {
        // Found ancestor in right scope
        return ancestorControl
      }
      ancestorObject = ancestorControl.parent
    }

    null
  }
}
| brunobuzzi/orbeon-forms | xforms/jvm/src/main/scala/org/orbeon/oxf/xforms/control/MutableLHHAProperty.scala | Scala | lgpl-2.1 | 6,147 |
package pages
import forms.ApplicationReferenceForm
import helpers.A11ySpec
import views.html.ApplicationReference
// Accessibility check for the Application Reference page: render the view with an
// empty form and run the axe checks over the resulting markup.
class ApplicationReferenceA11ySpec extends A11ySpec {

  val applicationReferenceView = app.injector.instanceOf[ApplicationReference]
  val applicationReferenceForm = app.injector.instanceOf[ApplicationReferenceForm]

  "the Application Reference page" when {
    "the page is rendered without errors" must {
      "pass all accessibility tests" in {
        applicationReferenceView(applicationReferenceForm()).toString must passAccessibilityChecks
      }
    }
  }
}
| hmrc/vat-registration-frontend | a11y/pages/ApplicationReferenceA11ySpec.scala | Scala | apache-2.0 | 509 |
// Compiler test fixture (pattern-match exhaustivity on GADTs) — do not "fix" the
// match below; the missing (VN, VC)/(VC, VN) cases are the point of the test.
sealed trait Nat[+T]
case class Zero() extends Nat[Nothing]
case class Succ[T]() extends Nat[T]

// +N is incorrect, as in `foo` we can have `N = Zero | Succ[Zero]`,
// then it's correct for exhaustivity check to produce two warnings.
sealed trait Vect[+N <: Nat[_], +T]
case class VN[T]() extends Vect[Zero, T]
case class VC[T, N <: Nat[_]](x: T, xs: Vect[N, T]) extends Vect[Succ[N], T]

object Test {
  // Because N may be a union of lengths (see comment above), the mixed-length
  // combinations are not statically excluded and warnings are expected here.
  def foo[N <: Nat[_], A, B](v1: Vect[N, A], v2: Vect[N, B]) = (v1, v2) match {
    case (VN(), VN()) => 1
    case (VC(x, xs), VC(y, ys)) => 2
  }
}
| som-snytt/dotty | tests/patmat/gadt4.scala | Scala | apache-2.0 | 554 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.evaluation
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.ml.param.{ParamMap, Params}
import org.apache.spark.sql.DataFrame
/**
 * :: DeveloperApi ::
 * Abstract class for evaluators that compute metrics from predictions.
 */
@DeveloperApi
abstract class Evaluator extends Params {

  /**
   * Evaluates model output and returns a scalar metric (larger is better).
   *
   * @param dataset a dataset that contains labels/observations and predictions.
   * @param paramMap parameter map that specifies the input columns and output metrics
   * @return metric
   */
  def evaluate(dataset: DataFrame, paramMap: ParamMap): Double = {
    // Copy this evaluator with the extra params applied, then delegate.
    this.copy(paramMap).evaluate(dataset)
  }

  /**
   * Evaluates the output.
   * @param dataset a dataset that contains labels/observations and predictions.
   * @return metric
   */
  def evaluate(dataset: DataFrame): Double

  /**
   * Indicates whether the metric returned by [[evaluate()]] should be maximized (true, default)
   * or minimized (false).
   * A given evaluator may support multiple metrics which may be maximized or minimized.
   */
  def isLargerBetter: Boolean = true

  override def copy(extra: ParamMap): Evaluator
}
| tophua/spark1.52 | mllib/src/main/scala/org/apache/spark/ml/evaluation/Evaluator.scala | Scala | apache-2.0 | 2,410 |
package com.olvind
object ParseComponent {
  // React lifecycle methods that must never be exposed as wrapper methods.
  val ignoredMembers: Set[String] =
    Set(
      "render",
      "componentDidMount",
      "componentWillMount",
      "componentWillReceiveProps",
      "componentDidUpdate",
      "componentWillUnmount",
      "shouldComponentUpdate"
    )

  /**
   * Builds a ParsedComponent for `comp`: merges its own and inherited propTypes,
   * parses each prop, prepends the synthetic `key`/`ref` fields, appends DOM props,
   * and optionally derives a method class from the component's remaining JS methods.
   * Panics (fails the generation run) if no propTypes were found for the component.
   */
  def apply(
      scope: Map[CompName, requiresjs.FoundComponent],
      library: Library,
      comp: ComponentDef
  ): ParsedComponent = {

    val propTypes: Map[PropName, PropUnparsed] =
      scope
        .get(comp.name)
        .map(_.props)
        .getOrElse(
          panic(s"No Proptypes found for ${comp.name}")
        )

    // Props inherited from the shared (base) component, if one was declared.
    val inheritedProps: Map[PropName, PropUnparsed] =
      comp.shared match {
        case None => Map.empty
        case Some(shared) =>
          scope
            .get(shared.name)
            .map(_.props)
            .getOrElse(
              panic(s"$comp: No Proptypes found for $shared")
            )
      }

    // Public methods worth wrapping: drop lifecycle methods, "handle*" callbacks
    // and "_"-prefixed (private by convention) members.
    val methodClassOpt: Option[ParsedMethodClass] =
      scope
        .get(comp.name)
        .flatMap(_.methods)
        .map(_.filterNot(m ⇒
          ignoredMembers(m.name) || m.name.startsWith("handle") || m.name.startsWith("_")))
        .filter(_.nonEmpty)
        .map(
          members ⇒
            ParsedMethodClass(
              library.prefixOpt.getOrElse("") + comp.name + "M",
              members.toSeq.sortBy(_.name).map(library.memberMapper(comp.name))
            ))

    // Synthetic React fields; `ref` is typed as a callback when a method class exists.
    val basicFields: Seq[ParsedProp] =
      Seq(
        ParsedProp(PropName("key"), isRequired = false, Normal("String"), None, None, None),
        ParsedProp(PropName("ref"),
                   isRequired = false,
                   Normal(methodClassOpt.fold("String")(c => c.className + " => Unit")),
                   None,
                   None,
                   None)
      )

    // Own props win over inherited ones (map merge order); own props sort first.
    val parsedProps: Seq[ParsedProp] =
      (inheritedProps ++ propTypes)
        .filterNot(t => basicFields.exists(_.name == t._1))
        .toSeq
        .sortBy(p => (p._2.fromComp != comp.name, p._1.clean.value))
        .map {
          case (propName, PropUnparsed(origComp, tpe, commentOpt)) =>
            ParseProp(
              library,
              comp.name,
              origComp,
              propName,
              tpe,
              commentOpt
            )
        }

    // DOM handlers/props not already covered by the component's own props.
    val domProps: Seq[ParsedProp] =
      comp.domeTypeOpt
        .map(domType => domType.handlers ++ domType.props)
        .toSeq
        .flatten
        .filterNot(p ⇒ parsedProps.exists(_.name == p.name))

    ParsedComponent(comp, basicFields ++ parsedProps ++ domProps, methodClassOpt)
  }
}
object ParseProp {
  // "Deprecated(string, 'Instead, use a custom `actions` property.')"
  // Captures the wrapped type (group 1) and the deprecation message (group 2).
  val Pattern = "Deprecated\\\\(([^,]+), '(.+)'\\\\)".r

  /**
   * Parses one raw propType string into a ParsedProp: strips the various
   * Babel-compiled PropTypes prefixes, detects `.isRequired` and deprecation
   * wrappers, and maps the remaining type name through the library's type mapper.
   */
  def apply(
      library: Library,
      compName: CompName,
      origCompName: CompName,
      propName: PropName,
      propString: PropTypeUnparsed,
      commentOpt: Option[PropComment]
  ): ParsedProp = {

    // Normalize the compiled JS propType expression down to a bare type name.
    val _clean: String =
      propString.value
        .replace("PropTypes.", "")
        .replace(".isRequired", "")
        /* old style */
        .replace("_react2['default'].", "")
        //TODO: this is fairly mui specific, and actually breaks Sui
        .replace("_utilsPropTypes2['default'].", "Mui.")
        .replace("(0, _utilsDeprecatedPropType2['default'])", "Deprecated")
        /* new style */
        .replace("_react2.default.", "")
        .replace("_propTypes2.default.", "Mui.")
        .replace("(0, _deprecatedPropType2.default)", "Deprecated")
        /* even another style*/
        .replace("_react.", "")

    // Split off the deprecation message if the prop was wrapped in Deprecated(...).
    val (typeStr: String, deprecatedOpt: Option[String]) = _clean match {
      case Pattern(tpe, depMsg) => (tpe, Some(depMsg))
      case tpe => (tpe, None)
    }

    val mappedType: Type =
      library.typeMapper(origCompName, propName, typeStr)

    val isRequired: Boolean =
      propString.value.contains(".isRequired")

    // Track where the prop was originally declared when it is inherited.
    val inheritedFrom: Option[CompName] =
      if (compName == origCompName) None else Some(origCompName)

    ParsedProp(
      propName,
      // Inherited props are never marked required on the subcomponent.
      isRequired && inheritedFrom.isEmpty,
      mappedType,
      commentOpt,
      deprecatedOpt,
      inheritedFrom
    )
  }
}
| chandu0101/scalajs-react-components | gen/src/main/scala/com/olvind/componentParsers.scala | Scala | apache-2.0 | 4,239 |
/*
* Copyright 2011-2018 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.http.resolver
import java.util.function.{ Function => JFunction }
import java.net.{ InetAddress, UnknownHostException }
import java.util.{ Arrays => JArrays, Collections => JCollections, List => JList }
import java.util.concurrent.{ ConcurrentHashMap, ThreadLocalRandom }
import io.netty.resolver.InetNameResolver
import io.netty.util.concurrent.{ ImmediateEventExecutor, Promise }
object ShuffleJdkNameResolver {

  // Resolves a host via the JDK; multi-address results are shuffled so load is
  // spread across A records, while single results skip the shuffle entirely.
  private val computer: JFunction[String, JList[InetAddress]] =
    inetHost => InetAddress.getAllByName(inetHost) match {
      case Array(single) => JCollections.singletonList(single)
      case array =>
        val list = JArrays.asList(array: _*)
        JCollections.shuffle(list, ThreadLocalRandom.current)
        list
    }
}
// Blocking JDK-based name resolver (runs on the caller thread via the immediate executor).
class ShuffleJdkNameResolver extends InetNameResolver(ImmediateEventExecutor.INSTANCE) {

  // NOTE(review): entries never expire (no TTL, no negative caching), so DNS changes
  // are not observed for the lifetime of this resolver instance — confirm intended.
  private[this] val cache = new ConcurrentHashMap[String, JList[InetAddress]]

  // Single-address resolution is deliberately unsupported; callers must resolve all.
  override def doResolve(inetHost: String, promise: Promise[InetAddress]): Unit =
    throw new UnsupportedOperationException

  override def doResolveAll(inetHost: String, promise: Promise[JList[InetAddress]]): Unit =
    try {
      val addresses = cache.computeIfAbsent(inetHost, ShuffleJdkNameResolver.computer)
      promise.setSuccess(addresses)
    } catch {
      case e: UnknownHostException => promise.setFailure(e)
    }
}
| wiacekm/gatling | gatling-http/src/main/scala/io/gatling/http/resolver/ShuffleJdkNameResolver.scala | Scala | apache-2.0 | 1,993 |
package main.scala
/*
case class PolarExample(label: Int, features: Array[Float]) extends breeze.data.Example[Int, Array[Float]] {
val id: String = label.toString + features map (x => x.toString) reduce(_+_)
override def toString : String = label.toString + {features map (x => " " + x.toString) reduce(_+_)}
}
*/
// A raw labeled sample: its text plus the labels attached to it.
case class Sample(text: String, label: List[String])

// A tokenized sample. NOTE(review): not referenced in this file — verify callers before removing.
case class Feature(tokens: List[String], labels: List[String])

// Token -> polarity score map with thin accessors delegating to the underlying Map.
case class Polarity(polarity: Map[String, Float], serializationId :Long = 542805478259L) {
  def get(x: String) : Option[Float] = polarity.get(x)
  def getOrElse(x: String, y: Float) : Float = polarity.getOrElse(x, y)
  def foreach(f: ((String,Float)) => Unit) : Unit = polarity.foreach(f)
  // Scores ordered by token (sorted by key) for a stable vector representation.
  def toOrderedArray = polarity.toSeq.sortBy(_._1).map(_._2).toArray
}
trait Base extends Serializable {

  /** Print an error message to stderr and terminate the JVM with `errorCode`. */
  def goodbye(message : String, errorCode: Int = -1) {
    System.err.println("\\n\\t" + message + "\\n")
    System.exit(errorCode)
  }

  /**
   * Read a file of separator-delimited lines of the form "Text<sep>Label1<sep>Label2,..."
   * into Samples (first field is the text, remaining fields are labels).
   *
   * Fix: the Source is now closed via try/finally — the original leaked the file handle.
   * NOTE(review): an empty/blank line would make fields(0) throw; assuming input files
   * contain no blank lines, as before.
   */
  def readLabeledFile(fileName: String, sep: String = "\\t") : List[Sample] = {
    import io.Source
    val source = Source.fromFile(fileName)
    try {
      // Materialize with toList before closing, since getLines() is lazy.
      source.getLines().toList map (line => {
        val fields = line.split(sep).toList
        new Sample(fields(0), fields.tail)
      })
    } finally {
      source.close()
    }
  }
}
| AustinBGibbons/emoticat | polarity/src/main/scala/Common.scala | Scala | bsd-3-clause | 1,305 |
package org.akkamon.core.instruments
import java.io.File
import akka.actor.ActorRef
import org.akkamon.core.ActorStack
import scala.concurrent.duration._
object VMStatsInstrument {

  /**
   * Actor that periodically (every `interval` milliseconds) samples JVM and
   * filesystem statistics and forwards them to the exporter as counters.
   */
  class VMStatsActor(interval: Long, exporterActor: ActorRef) extends ActorStack {

    import context._

    // First sample is delayed for two intervals to let the system warm up.
    override def preStart() =
      system.scheduler.scheduleOnce((2 * interval).millis, self, "tick")

    // override postRestart so we don't call preStart and schedule a new message
    override def postRestart(reason: Throwable) = {}

    def receive = {
      case "tick" =>
        // send another periodic tick after the specified delay
        system.scheduler.scheduleOnce(interval.millis, self, "tick")
        // NOTE(review): `exporterActor` is never used; stats go through `exporter`
        // (inherited from ActorStack). Confirm whether the constructor arg is obsolete.
        exporter.processCounterMap(getStats())
    }
  }

  /** Snapshot of processor count, heap figures and per-filesystem-root disk space. */
  def getStats(): Map[String, Long] = {
    // Hoisted: avoid calling Runtime.getRuntime() four times.
    val runtime = Runtime.getRuntime()

    val baseStats = Map[String, Long](
      "count.procs" -> runtime.availableProcessors(),
      "count.mem.free" -> runtime.freeMemory(),
      "count.mem.maxMemory" -> runtime.maxMemory(),
      "count.mem.totalMemory" -> runtime.totalMemory()
    )

    val roots = File.listRoots()
    // Dot notation instead of postfix operators (avoids the postfixOps feature warning).
    val totalSpaceMap = roots.map(root => s"count.fs.total.${root.getAbsolutePath}" -> root.getTotalSpace).toMap
    val freeSpaceMap = roots.map(root => s"count.fs.free.${root.getAbsolutePath}" -> root.getFreeSpace).toMap
    val usuableSpaceMap = roots.map(root => s"count.fs.usuable.${root.getAbsolutePath}" -> root.getUsableSpace).toMap

    baseStats ++ totalSpaceMap ++ freeSpaceMap ++ usuableSpaceMap
  }
}
| josdirksen/akka-mon | src/main/scala/org/akkamon/core/instruments/VMStatsInstrument.scala | Scala | mit | 1,553 |
package org.jetbrains.plugins.scala
package lang
package psi
package impl
package statements
package params
import org.jetbrains.plugins.scala.lang.parser.ScalaElementTypes
import com.intellij.lang.ASTNode
import stubs.ScParamClausesStub;
import org.jetbrains.plugins.scala.lang.psi.api.statements.params._
import com.intellij.psi._
import scope.PsiScopeProcessor
import api.ScalaElementVisitor
import lexer.ScalaTokenTypes
/**
* @author Alexander Podkhalyuzin
* Date: 22.02.2008
*/
// PSI element for a full parameter-clause list; works from stubs (index) or AST (source).
class ScParametersImpl extends ScalaStubBasedElementImpl[ScParameters] with ScParameters {
  // Dual constructors: AST-backed (parsed source) and stub-backed (indexed).
  def this(node: ASTNode) = {this(); setNode(node)}
  def this(stub: ScParamClausesStub) = {this(); setStub(stub); setNode(null)}

  override def toString: String = "Parameters"

  /** All parameter clauses, read from stubs when available. */
  def clauses: Seq[ScParameterClause] = {
    getStubOrPsiChildren(ScalaElementTypes.PARAM_CLAUSE, JavaArrayFactoryUtil.ScParameterClauseFactory).toSeq
  }

  // Exposes to the scope processor the parameters of every clause preceding
  // `lastParent`; processing stops as soon as `lastParent`'s own clause is reached.
  override def processDeclarations(processor: PsiScopeProcessor, state: ResolveState,
                                  lastParent: PsiElement, place: PsiElement): Boolean = {
    if (lastParent != null) {
      val clausesIterator = clauses.iterator
      var break = false
      while (clausesIterator.hasNext && !break) {
        val clause = clausesIterator.next()
        if (clause == lastParent) break = true
        else {
          val paramsIterator = clause.parameters.iterator
          while (paramsIterator.hasNext) {
            val param = paramsIterator.next()
            // A processor returning false means "stop": a declaration was found.
            if (!processor.execute(param, state)) return false
          }
        }
      }
    }
    true
  }

  override def accept(visitor: ScalaElementVisitor) {
    visitor.visitParameters(this)
  }

  override def accept(visitor: PsiElementVisitor) {
    visitor match {
      case s: ScalaElementVisitor => s.visitParameters(this)
      case _ => super.accept(visitor)
    }
  }

  // Adds a parameter to the last clause, creating an empty "()" clause when none exists.
  override def add(element: PsiElement): PsiElement = {
    element match {
      case param: ScParameter =>
        clauses.lastOption match {
          case Some(clause) =>
            clause.addParameter(param).parameters.last
          case _ =>
            val clause = ScalaPsiElementFactory.createClauseFromText("()", getManager)
            val newClause = clause.addParameter(param)
            super.add(clause)
            newClause.parameters.last
        }
      case _ => super.add(element)
    }
  }
} | consulo/consulo-scala | src/org/jetbrains/plugins/scala/lang/psi/impl/statements/params/ScParametersImpl.scala | Scala | apache-2.0 | 2,397 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.utils
import kafka.cluster.{Broker, Cluster}
import kafka.consumer.TopicCount
import org.I0Itec.zkclient.{IZkDataListener, ZkClient}
import org.I0Itec.zkclient.exception.{ZkNodeExistsException, ZkNoNodeException, ZkMarshallingError, ZkBadVersionException}
import org.I0Itec.zkclient.serialize.ZkSerializer
import collection._
import kafka.api.LeaderAndIsr
import mutable.ListBuffer
import org.apache.zookeeper.data.Stat
import java.util.concurrent.locks.{ReentrantLock, Condition}
import kafka.admin._
import kafka.common.{KafkaException, NoEpochForPartitionException}
import kafka.controller.ReassignedPartitionsContext
import kafka.controller.PartitionAndReplica
import kafka.controller.KafkaController
import scala.{collection, Some}
import kafka.controller.LeaderIsrAndControllerEpoch
import kafka.common.TopicAndPartition
import kafka.utils.Utils.inLock
import scala.collection
object ZkUtils extends Logging {
  // Well-known ZooKeeper paths shared by brokers, consumers and the controller.
  val ConsumersPath = "/consumers"
  val BrokerIdsPath = "/brokers/ids"
  val BrokerTopicsPath = "/brokers/topics"
  val TopicConfigPath = "/config/topics"
  val TopicConfigChangesPath = "/config/changes"
  val ControllerPath = "/controller"
  val ControllerEpochPath = "/controller_epoch"
  val ReassignPartitionsPath = "/admin/reassign_partitions"
  val DeleteTopicsPath = "/admin/delete_topics"
  val PreferredReplicaLeaderElectionPath = "/admin/preferred_replica_election"
  /** Znode holding the partition assignment for `topic`. */
  def getTopicPath(topic: String): String = {
    BrokerTopicsPath + "/" + topic
  }

  def getTopicPartitionsPath(topic: String): String = {
    getTopicPath(topic) + "/partitions"
  }

  def getTopicConfigPath(topic: String): String =
    TopicConfigPath + "/" + topic

  def getDeleteTopicPath(topic: String): String =
    DeleteTopicsPath + "/" + topic

  /** Current controller broker id from /controller; throws if no controller is elected. */
  def getController(zkClient: ZkClient): Int = {
    readDataMaybeNull(zkClient, ControllerPath)._1 match {
      case Some(controller) => KafkaController.parseControllerId(controller)
      case None => throw new KafkaException("Controller doesn't exist")
    }
  }

  def getTopicPartitionPath(topic: String, partitionId: Int): String =
    getTopicPartitionsPath(topic) + "/" + partitionId

  /** Partition "state" znode containing leader, leader epoch and ISR. */
  def getTopicPartitionLeaderAndIsrPath(topic: String, partitionId: Int): String =
    getTopicPartitionPath(topic, partitionId) + "/" + "state"

  /** Numerically sorted ids of all registered brokers. */
  def getSortedBrokerList(zkClient: ZkClient): Seq[Int] =
    ZkUtils.getChildren(zkClient, BrokerIdsPath).map(_.toInt).sorted
  /** All live brokers under /brokers/ids; brokers whose data vanished in between are skipped. */
  def getAllBrokersInCluster(zkClient: ZkClient): Seq[Broker] = {
    val brokerIds = ZkUtils.getChildrenParentMayNotExist(zkClient, ZkUtils.BrokerIdsPath).sorted
    brokerIds.map(_.toInt).map(getBrokerInfo(zkClient, _)).filter(_.isDefined).map(_.get)
  }

  /** Read and parse a partition's leader/ISR state; None if the state znode is absent. */
  def getLeaderIsrAndEpochForPartition(zkClient: ZkClient, topic: String, partition: Int):Option[LeaderIsrAndControllerEpoch] = {
    val leaderAndIsrPath = getTopicPartitionLeaderAndIsrPath(topic, partition)
    val leaderAndIsrInfo = readDataMaybeNull(zkClient, leaderAndIsrPath)
    val leaderAndIsrOpt = leaderAndIsrInfo._1
    val stat = leaderAndIsrInfo._2
    leaderAndIsrOpt match {
      case Some(leaderAndIsrStr) => parseLeaderAndIsr(leaderAndIsrStr, topic, partition, stat)
      case None => None
    }
  }

  def getLeaderAndIsrForPartition(zkClient: ZkClient, topic: String, partition: Int):Option[LeaderAndIsr] = {
    getLeaderIsrAndEpochForPartition(zkClient, topic, partition).map(_.leaderAndIsr)
  }

  /** Create the persistent base paths a broker expects to exist (safe to call repeatedly). */
  def setupCommonPaths(zkClient: ZkClient) {
    for(path <- Seq(ConsumersPath, BrokerIdsPath, BrokerTopicsPath, TopicConfigChangesPath, TopicConfigPath, DeleteTopicsPath))
      makeSurePersistentPathExists(zkClient, path)
  }
/**
 * Parses the JSON stored in a partition's state znode into a LeaderIsrAndControllerEpoch.
 * Returns None when the JSON is unparseable; throws NoSuchElementException if a required
 * field (leader, leader_epoch, isr, controller_epoch) is missing.
 */
def parseLeaderAndIsr(leaderAndIsrStr: String, topic: String, partition: Int, stat: Stat)
: Option[LeaderIsrAndControllerEpoch] = {
Json.parseFull(leaderAndIsrStr) match {
case Some(m) =>
val leaderIsrAndEpochInfo = m.asInstanceOf[Map[String, Any]]
val leader = leaderIsrAndEpochInfo.get("leader").get.asInstanceOf[Int]
val epoch = leaderIsrAndEpochInfo.get("leader_epoch").get.asInstanceOf[Int]
val isr = leaderIsrAndEpochInfo.get("isr").get.asInstanceOf[List[Int]]
val controllerEpoch = leaderIsrAndEpochInfo.get("controller_epoch").get.asInstanceOf[Int]
// zkVersion comes from the znode Stat, not from the JSON payload.
val zkPathVersion = stat.getVersion
debug("Leader %d, Epoch %d, Isr %s, Zk path version %d for partition [%s,%d]".format(leader, epoch,
isr.toString(), zkPathVersion, topic, partition))
Some(LeaderIsrAndControllerEpoch(LeaderAndIsr(leader, epoch, isr, zkPathVersion), controllerEpoch))
case None => None
}
}
/**
 * Returns the current leader broker id of the partition, or None when the state
 * znode is missing or its JSON cannot be parsed. Throws if the JSON parses but
 * lacks a "leader" field.
 */
def getLeaderForPartition(zkClient: ZkClient, topic: String, partition: Int): Option[Int] = {
val leaderAndIsrOpt = readDataMaybeNull(zkClient, getTopicPartitionLeaderAndIsrPath(topic, partition))._1
leaderAndIsrOpt match {
case Some(leaderAndIsr) =>
Json.parseFull(leaderAndIsr) match {
case Some(m) =>
Some(m.asInstanceOf[Map[String, Any]].get("leader").get.asInstanceOf[Int])
case None => None
}
case None => None
}
}
/**
 * This API should read the epoch in the ISR path. It is sufficient to read the epoch in the ISR path, since if the
 * leader fails after updating epoch in the leader path and before updating epoch in the ISR path, effectively some
 * other broker will retry becoming leader with the same new epoch value.
 * @throws NoEpochForPartitionException if the state znode is absent or its data is unparseable
 */
def getEpochForPartition(zkClient: ZkClient, topic: String, partition: Int): Int = {
val leaderAndIsrOpt = readDataMaybeNull(zkClient, getTopicPartitionLeaderAndIsrPath(topic, partition))._1
leaderAndIsrOpt match {
case Some(leaderAndIsr) =>
Json.parseFull(leaderAndIsr) match {
case None => throw new NoEpochForPartitionException("No epoch, leaderAndISR data for partition [%s,%d] is invalid".format(topic, partition))
case Some(m) => m.asInstanceOf[Map[String, Any]].get("leader_epoch").get.asInstanceOf[Int]
}
case None => throw new NoEpochForPartitionException("No epoch, ISR path for partition [%s,%d] is empty"
.format(topic, partition))
}
}
/**
 * Gets the in-sync replicas (ISR) for a specific topic and partition.
 * Returns an empty sequence when the state znode is missing or its JSON unparseable;
 * throws if the JSON parses but lacks an "isr" field.
 */
def getInSyncReplicasForPartition(zkClient: ZkClient, topic: String, partition: Int): Seq[Int] = {
  val stateOpt = readDataMaybeNull(zkClient, getTopicPartitionLeaderAndIsrPath(topic, partition))._1
  val isrOpt = for {
    leaderAndIsr <- stateOpt
    parsed <- Json.parseFull(leaderAndIsr)
  } yield parsed.asInstanceOf[Map[String, Any]].get("isr").get.asInstanceOf[Seq[Int]]
  isrOpt.getOrElse(Seq.empty[Int])
}
/**
 * Gets the assigned replicas (AR) for a specific topic and partition.
 * Returns an empty sequence when the topic znode, the "partitions" field,
 * or the partition entry is absent, or the JSON is unparseable.
 */
def getReplicasForPartition(zkClient: ZkClient, topic: String, partition: Int): Seq[Int] = {
  val assignedReplicas = for {
    jsonPartitionMap <- readDataMaybeNull(zkClient, getTopicPath(topic))._1
    parsed <- Json.parseFull(jsonPartitionMap)
    partitions <- parsed.asInstanceOf[Map[String, Any]].get("partitions")
    replicas <- partitions.asInstanceOf[Map[String, Seq[Int]]].get(partition.toString)
  } yield replicas
  assignedReplicas.getOrElse(Seq.empty[Int])
}
/**
 * Registers this broker as an ephemeral node under /brokers/ids/<id> with a JSON
 * payload containing host, port, JMX port and a registration timestamp.
 * Goes through createEphemeralPathExpectConflictHandleZKBug so a stale ephemeral
 * node from this broker's previous session is waited out (`timeout` is the backoff).
 * @throws RuntimeException if a different broker is already registered under this id
 */
def registerBrokerInZk(zkClient: ZkClient, id: Int, host: String, port: Int, timeout: Int, jmxPort: Int) {
val brokerIdPath = ZkUtils.BrokerIdsPath + "/" + id
val timestamp = SystemTime.milliseconds.toString
val brokerInfo = Json.encode(Map("version" -> 1, "host" -> host, "port" -> port, "jmx_port" -> jmxPort, "timestamp" -> timestamp))
val expectedBroker = new Broker(id, host, port)
try {
// checker: the stale registration counts as "ours" iff it decodes to the same Broker.
createEphemeralPathExpectConflictHandleZKBug(zkClient, brokerIdPath, brokerInfo, expectedBroker,
(brokerString: String, broker: Any) => Broker.createBroker(broker.asInstanceOf[Broker].id, brokerString).equals(broker.asInstanceOf[Broker]),
timeout)
} catch {
case e: ZkNodeExistsException =>
throw new RuntimeException("A broker is already registered on the path " + brokerIdPath
+ ". This probably " + "indicates that you either have configured a brokerid that is already in use, or "
+ "else you have shutdown this broker and restarted it faster than the zookeeper "
+ "timeout so it appears to be re-registering.")
}
info("Registered broker %d at path %s with address %s:%d.".format(id, brokerIdPath, host, port))
}
/** Znode under which a consumer records ownership of the given partition. */
def getConsumerPartitionOwnerPath(group: String, topic: String, partition: Int): String = {
val topicDirs = new ZKGroupTopicDirs(group, topic)
topicDirs.consumerOwnerDir + "/" + partition
}
/** Serializes leader, epochs and ISR into the JSON stored in the partition state znode. */
def leaderAndIsrZkData(leaderAndIsr: LeaderAndIsr, controllerEpoch: Int): String = {
Json.encode(Map("version" -> 1, "leader" -> leaderAndIsr.leader, "leader_epoch" -> leaderAndIsr.leaderEpoch,
"controller_epoch" -> controllerEpoch, "isr" -> leaderAndIsr.isr))
}
/**
 * Serializes a partition -> replica-list assignment into the JSON stored in the topic znode.
 */
def replicaAssignmentZkData(map: Map[String, Seq[Int]]): String = {
Json.encode(Map("version" -> 1, "partitions" -> map))
}
/**
 * make sure a persistent path exists in ZK. Create the path if not exist.
 */
def makeSurePersistentPathExists(client: ZkClient, path: String) {
if (!client.exists(path))
client.createPersistent(path, true) // won't throw NoNodeException or NodeExistsException
}
/**
 * create the parent path
 * NOTE(review): assumes `path` contains at least one '/'; a path without one would
 * make substring(0, -1) throw — confirm callers always pass absolute znode paths.
 */
private def createParentPath(client: ZkClient, path: String): Unit = {
val parentDir = path.substring(0, path.lastIndexOf('/'))
if (parentDir.length != 0)
client.createPersistent(parentDir, true)
}
/**
 * Create an ephemeral node with the given path and data. Create parents if necessary.
 */
private def createEphemeralPath(client: ZkClient, path: String, data: String): Unit = {
try {
client.createEphemeral(path, data)
} catch {
// First attempt may fail because intermediate znodes are missing; create them and retry once.
case e: ZkNoNodeException => {
createParentPath(client, path)
client.createEphemeral(path, data)
}
}
}
/**
 * Create an ephemeral node with the given path and data.
 * Throw NodeExistsException if node already exists with different data; a node that
 * already holds exactly `data` (e.g. our own write that succeeded despite a
 * connection loss) is treated as success.
 */
def createEphemeralPathExpectConflict(client: ZkClient, path: String, data: String): Unit = {
try {
createEphemeralPath(client, path, data)
} catch {
case e: ZkNodeExistsException => {
// this can happen when there is connection loss; make sure the data is what we intend to write
var storedData: String = null
try {
storedData = readData(client, path)._1
} catch {
case e1: ZkNoNodeException => // the node disappeared; treat as if node existed and let caller handles this
case e2: Throwable => throw e2
}
if (storedData == null || storedData != data) {
info("conflict in " + path + " data: " + data + " stored data: " + storedData)
throw e
} else {
// otherwise, the creation succeeded, return normally
info(path + " exists with value " + data + " during connection loss; this is ok")
}
}
case e2: Throwable => throw e2
}
}
/**
 * Create an ephemeral node with the given path and data.
 * Throw NodeExistsException if node already exists.
 * Handles the following ZK session timeout bug:
 *
 * https://issues.apache.org/jira/browse/ZOOKEEPER-1740
 *
 * Upon receiving a NodeExistsException, read the data from the conflicted path and
 * trigger the checker function comparing the read data and the expected data,
 * If the checker function returns true then the above bug might be encountered, back off and retry;
 * otherwise re-throw the exception
 *
 * @param backoffTime milliseconds slept between retries while waiting for the stale node to expire
 */
def createEphemeralPathExpectConflictHandleZKBug(zkClient: ZkClient, path: String, data: String, expectedCallerData: Any, checker: (String, Any) => Boolean, backoffTime: Int): Unit = {
// Loops until the create succeeds (return) or a genuine conflict is rethrown.
while (true) {
try {
createEphemeralPathExpectConflict(zkClient, path, data)
return
} catch {
case e: ZkNodeExistsException => {
// An ephemeral node may still exist even after its corresponding session has expired
// due to a Zookeeper bug, in this case we need to retry writing until the previous node is deleted
// and hence the write succeeds without ZkNodeExistsException
ZkUtils.readDataMaybeNull(zkClient, path)._1 match {
case Some(writtenData) => {
if (checker(writtenData, expectedCallerData)) {
info("I wrote this conflicted ephemeral node [%s] at %s a while back in a different session, ".format(data, path)
+ "hence I will backoff for this node to be deleted by Zookeeper and retry")
Thread.sleep(backoffTime)
} else {
throw e
}
}
case None => // the node disappeared; retry creating the ephemeral node immediately
}
}
case e2: Throwable => throw e2
}
}
}
/**
 * Create a persistent node with the given path and data. Create parents if necessary.
 */
def createPersistentPath(client: ZkClient, path: String, data: String = ""): Unit = {
try {
client.createPersistent(path, data)
} catch {
// Missing intermediate znodes: create them, then retry the node itself once.
case e: ZkNoNodeException => {
createParentPath(client, path)
client.createPersistent(path, data)
}
}
}
/** Creates a persistent sequential node and returns the actual (suffixed) path created. */
def createSequentialPersistentPath(client: ZkClient, path: String, data: String = ""): String = {
client.createPersistentSequential(path, data)
}
/**
 * Update the value of a persistent node with the given path and data.
 * Create parent directory if necessary. Never throw NodeExistsException.
 */
def updatePersistentPath(client: ZkClient, path: String, data: String) = {
try {
client.writeData(path, data)
} catch {
case e: ZkNoNodeException => {
createParentPath(client, path)
try {
client.createPersistent(path, data)
} catch {
// Node appeared concurrently between the failed write and the create: just write.
case e: ZkNodeExistsException =>
client.writeData(path, data)
case e2: Throwable => throw e2
}
}
case e2: Throwable => throw e2
}
}
/**
 * Conditional update the persistent path data, return (true, newVersion) if it succeeds, otherwise (the path doesn't
 * exist, the current version is not the expected version, etc.) return (false, -1)
 */
def conditionalUpdatePersistentPath(client: ZkClient, path: String, data: String, expectVersion: Int): (Boolean, Int) = {
try {
val stat = client.writeDataReturnStat(path, data, expectVersion)
debug("Conditional update of path %s with value %s and expected version %d succeeded, returning the new version: %d"
.format(path, data, expectVersion, stat.getVersion))
(true, stat.getVersion)
} catch {
case b: ZkBadVersionException => {
// in case of zkClient session timesout and when it reconnects to zookeeeper
// it might be using the older zkVersion for the conditionalUpdate.
// checking to see if the new leaderandisr data matches the one in zookeeper
// except the version. If it matches returns true and version from zookeeper.
// NOTE(review): this recovery path assumes `data` is leader/ISR JSON — confirm
// all callers of this method write leaderAndIsr payloads.
try {
val newLeader = getLeaderIsrAndControllerEpochFromJson(data)
val writtenLeaderAndIsrInfo = ZkUtils.readDataMaybeNull(client,path)
val writtenLeaderOpt = writtenLeaderAndIsrInfo._1
val writtenStat = writtenLeaderAndIsrInfo._2
writtenLeaderOpt match {
case Some(writtenData) =>
val writtenLeader = getLeaderIsrAndControllerEpochFromJson(writtenData)
if (newLeader.equals(writtenLeader))
return (true,writtenStat.getVersion)
else
throw b
case None => throw b
}
} catch {
case e1: Exception =>
error("Conditional update of path %s with data %s and expected version %d failed due to %s".format(path, data,
expectVersion, e1.getMessage))
(false, -1)
}
}
case e2: Exception =>
error("Conditional update of path %s with data %s and expected version %d failed due to %s".format(path, data,
expectVersion, e2.getMessage))
(false, -1)
}
}
/**
 * Parses leader/ISR JSON into a LeaderIsrAndControllerEpoch. Unlike parseLeaderAndIsr,
 * the zkVersion here is taken from the JSON "version" field. Returns null when the
 * JSON is unparseable; throws NoSuchElementException if a required field is missing.
 * NOTE(review): the null return is an anti-pattern — callers must guard against it.
 */
def getLeaderIsrAndControllerEpochFromJson(data: String): LeaderIsrAndControllerEpoch = {
Json.parseFull(data) match {
case Some(m) =>
val leaderIsrAndEpochInfo = m.asInstanceOf[Map[String, Any]]
val leader = leaderIsrAndEpochInfo.get("leader").get.asInstanceOf[Int]
val epoch = leaderIsrAndEpochInfo.get("leader_epoch").get.asInstanceOf[Int]
val isr = leaderIsrAndEpochInfo.get("isr").get.asInstanceOf[List[Int]].sorted
val controllerEpoch = leaderIsrAndEpochInfo.get("controller_epoch").get.asInstanceOf[Int]
val zkPathVersion = leaderIsrAndEpochInfo.get("version").get.asInstanceOf[Int]
new LeaderIsrAndControllerEpoch(new LeaderAndIsr(leader, epoch, isr, zkPathVersion), controllerEpoch)
case None => null
}
}
/**
 * Conditional update the persistent path data, return (true, newVersion) if it succeeds, otherwise (the current
 * version is not the expected version, etc.) return (false, -1). If path doesn't exist, throws ZkNoNodeException
 */
def conditionalUpdatePersistentPathIfExists(client: ZkClient, path: String, data: String, expectVersion: Int): (Boolean, Int) = {
try {
val stat = client.writeDataReturnStat(path, data, expectVersion)
debug("Conditional update of path %s with value %s and expected version %d succeeded, returning the new version: %d"
.format(path, data, expectVersion, stat.getVersion))
(true, stat.getVersion)
} catch {
// Missing node is the one failure the caller must see; everything else maps to (false, -1).
case nne: ZkNoNodeException => throw nne
case e: Exception =>
error("Conditional update of path %s with data %s and expected version %d failed due to %s".format(path, data,
expectVersion, e.getMessage))
(false, -1)
}
}
/**
 * Update the value of an ephemeral node with the given path and data.
 * Create the parent directory and the ephemeral node itself if the node does not exist.
 */
def updateEphemeralPath(client: ZkClient, path: String, data: String): Unit = {
try {
client.writeData(path, data)
} catch {
case e: ZkNoNodeException => {
createParentPath(client, path)
client.createEphemeral(path, data)
}
case e2: Throwable => throw e2
}
}
/**
 * Deletes the znode at `path`. Returns the result of the delete, or false when the
 * node was already gone (e.g. removed during a connection loss).
 */
def deletePath(client: ZkClient, path: String): Boolean = {
try {
client.delete(path)
} catch {
case e: ZkNoNodeException =>
// this can happen during a connection loss event, return normally
info(path + " deleted during connection loss; this is ok")
false
case e2: Throwable => throw e2
}
}
/** Recursively deletes the subtree at `path`; a missing node is treated as success. */
def deletePathRecursive(client: ZkClient, path: String) {
try {
client.deleteRecursive(path)
} catch {
case e: ZkNoNodeException =>
// this can happen during a connection loss event, return normally
info(path + " deleted during connection loss; this is ok")
case e2: Throwable => throw e2
}
}
/**
 * Best-effort recursive delete of `dir` on the ZK ensemble at `zkUrl`, using a
 * short-lived client. All errors are deliberately swallowed.
 */
def maybeDeletePath(zkUrl: String, dir: String) {
  try {
    val zk = new ZkClient(zkUrl, 30*1000, 30*1000, ZKStringSerializer)
    // Close the client even if the delete fails, so the ZK connection is never leaked.
    try zk.deleteRecursive(dir)
    finally zk.close()
  } catch {
    case _: Throwable => // swallow
  }
}
/**
 * Reads the data and Stat of a znode. Throws ZkNoNodeException when the node is missing.
 */
def readData(client: ZkClient, path: String): (String, Stat) = {
val stat: Stat = new Stat()
val dataStr: String = client.readData(path, stat)
(dataStr, stat)
}
/**
 * Reads the data and Stat of a znode; a missing node yields (None, stat) instead
 * of an exception. Any other error is propagated.
 */
def readDataMaybeNull(client: ZkClient, path: String): (Option[String], Stat) = {
val stat: Stat = new Stat()
val dataAndStat = try {
(Some(client.readData(path, stat)), stat)
} catch {
case e: ZkNoNodeException =>
(None, stat)
case e2: Throwable => throw e2
}
dataAndStat
}
/** Lists the children of `path`; throws if the node does not exist. */
def getChildren(client: ZkClient, path: String): Seq[String] = {
import scala.collection.JavaConversions._
// triggers implicit conversion from java list to scala Seq
client.getChildren(path)
}
/**
 * Lists the children of `path`, returning Nil when the node itself does not exist.
 * Any other ZK error propagates to the caller.
 */
def getChildrenParentMayNotExist(client: ZkClient, path: String): Seq[String] = {
  import scala.collection.JavaConversions._
  // triggers implicit conversion from java list to scala Seq
  try {
    client.getChildren(path)
  } catch {
    // Missing node: no children. (Expression value used instead of a non-local return.)
    case e: ZkNoNodeException => Nil
  }
}
/**
 * Check if the given path exists
 */
def pathExists(client: ZkClient, path: String): Boolean = {
client.exists(path)
}
/**
 * Builds a Cluster from every broker registration found under /brokers/ids.
 * Throws if a broker znode disappears between listing and reading it.
 */
def getCluster(zkClient: ZkClient) : Cluster = {
val cluster = new Cluster
val nodes = getChildrenParentMayNotExist(zkClient, BrokerIdsPath)
for (node <- nodes) {
val brokerZKString = readData(zkClient, BrokerIdsPath + "/" + node)._1
cluster.add(Broker.createBroker(node.toInt, brokerZKString))
}
cluster
}
/**
 * Reads the leader/ISR state of every given partition, skipping partitions whose
 * state znode is missing.
 */
def getPartitionLeaderAndIsrForTopics(zkClient: ZkClient, topicAndPartitions: Set[TopicAndPartition])
: mutable.Map[TopicAndPartition, LeaderIsrAndControllerEpoch] = {
  val ret = new mutable.HashMap[TopicAndPartition, LeaderIsrAndControllerEpoch]
  topicAndPartitions.foreach { topicAndPartition =>
    val stateOpt = ZkUtils.getLeaderIsrAndEpochForPartition(zkClient, topicAndPartition.topic, topicAndPartition.partition)
    stateOpt.foreach(state => ret.put(topicAndPartition, state))
  }
  ret
}
/**
 * Reads each topic's assignment znode and returns the replica list per
 * (topic, partition). Topics with missing or unparseable assignment data
 * contribute no entries.
 */
def getReplicaAssignmentForTopics(zkClient: ZkClient, topics: Seq[String]): mutable.Map[TopicAndPartition, Seq[Int]] = {
val ret = new mutable.HashMap[TopicAndPartition, Seq[Int]]
topics.foreach { topic =>
val jsonPartitionMapOpt = readDataMaybeNull(zkClient, getTopicPath(topic))._1
jsonPartitionMapOpt match {
case Some(jsonPartitionMap) =>
Json.parseFull(jsonPartitionMap) match {
case Some(m) => m.asInstanceOf[Map[String, Any]].get("partitions") match {
case Some(repl) =>
val replicaMap = repl.asInstanceOf[Map[String, Seq[Int]]]
for((partition, replicas) <- replicaMap){
ret.put(TopicAndPartition(topic, partition.toInt), replicas)
debug("Replicas assigned to topic [%s], partition [%s] are [%s]".format(topic, partition, replicas))
}
case None =>
}
case None =>
}
case None =>
}
}
ret
}
/**
 * Returns, per topic, the partition-id -> replica-list map read from the topic's
 * assignment znode. Topics with missing or unparseable data map to an empty map.
 */
def getPartitionAssignmentForTopics(zkClient: ZkClient, topics: Seq[String]): mutable.Map[String, collection.Map[Int, Seq[Int]]] = {
val ret = new mutable.HashMap[String, Map[Int, Seq[Int]]]()
topics.foreach{ topic =>
val jsonPartitionMapOpt = readDataMaybeNull(zkClient, getTopicPath(topic))._1
val partitionMap = jsonPartitionMapOpt match {
case Some(jsonPartitionMap) =>
Json.parseFull(jsonPartitionMap) match {
case Some(m) => m.asInstanceOf[Map[String, Any]].get("partitions") match {
case Some(replicaMap) =>
val m1 = replicaMap.asInstanceOf[Map[String, Seq[Int]]]
// JSON keys are strings; convert partition ids back to Int.
m1.map(p => (p._1.toInt, p._2))
case None => Map[Int, Seq[Int]]()
}
case None => Map[Int, Seq[Int]]()
}
case None => Map[Int, Seq[Int]]()
}
debug("Partition map for /brokers/topics/%s is %s".format(topic, partitionMap))
ret += (topic -> partitionMap)
}
ret
}
/**
 * Returns, per topic, the sorted list of partition ids found in its assignment znode.
 */
def getPartitionsForTopics(zkClient: ZkClient, topics: Seq[String]): mutable.Map[String, Seq[Int]] = {
  getPartitionAssignmentForTopics(zkClient, topics).map { case (topic, partitionMap) =>
    debug("partition assignment of /brokers/topics/%s is %s".format(topic, partitionMap))
    topic -> partitionMap.keys.toSeq.sorted
  }
}
/**
 * Reads /admin/reassign_partitions and returns a reassignment context per partition;
 * empty when no reassignment is in progress.
 */
def getPartitionsBeingReassigned(zkClient: ZkClient): Map[TopicAndPartition, ReassignedPartitionsContext] = {
  // read the partitions and their new replica list
  val jsonPartitionMapOpt = readDataMaybeNull(zkClient, ReassignPartitionsPath)._1
  jsonPartitionMapOpt.map { jsonPartitionMap =>
    parsePartitionReassignmentData(jsonPartitionMap).map { case (topicAndPartition, newReplicas) =>
      topicAndPartition -> new ReassignedPartitionsContext(newReplicas)
    }
  }.getOrElse(Map.empty[TopicAndPartition, ReassignedPartitionsContext])
}
/**
 * Parses the /admin/reassign_partitions JSON ({"partitions": [{topic, partition,
 * replicas}, ...]}) into a partition -> new-replica-list map. Unparseable JSON or a
 * missing "partitions" field yields an empty map; a malformed entry throws.
 */
def parsePartitionReassignmentData(jsonData: String): Map[TopicAndPartition, Seq[Int]] = {
val reassignedPartitions: mutable.Map[TopicAndPartition, Seq[Int]] = mutable.Map()
Json.parseFull(jsonData) match {
case Some(m) =>
m.asInstanceOf[Map[String, Any]].get("partitions") match {
case Some(partitionsSeq) =>
partitionsSeq.asInstanceOf[Seq[Map[String, Any]]].foreach(p => {
val topic = p.get("topic").get.asInstanceOf[String]
val partition = p.get("partition").get.asInstanceOf[Int]
val newReplicas = p.get("replicas").get.asInstanceOf[Seq[Int]]
reassignedPartitions += TopicAndPartition(topic, partition) -> newReplicas
})
case None =>
}
case None =>
}
reassignedPartitions
}
/**
 * Parses admin-path JSON of the form {"topics": [{"topic": t1}, ...]} and returns the
 * topic names in order. Unparseable JSON or a missing "topics" field yields an empty
 * list; an entry without a "topic" key throws NoSuchElementException.
 */
def parseTopicsData(jsonData: String): Seq[String] = {
  Json.parseFull(jsonData) match {
    case Some(m) =>
      m.asInstanceOf[Map[String, Any]].get("topics") match {
        case Some(partitionsSeq) =>
          // Build the result with a single map instead of repeatedly appending
          // to a var List (each List ++= append was O(n)).
          partitionsSeq.asInstanceOf[Seq[Map[String, Any]]]
            .map(_.get("topic").get.asInstanceOf[String])
            .toList
        case None => List.empty[String]
      }
    case None => List.empty[String]
  }
}
/** Serializes a reassignment plan into the JSON stored at /admin/reassign_partitions. */
def getPartitionReassignmentZkData(partitionsToBeReassigned: Map[TopicAndPartition, Seq[Int]]): String = {
Json.encode(Map("version" -> 1, "partitions" -> partitionsToBeReassigned.map(e => Map("topic" -> e._1.topic, "partition" -> e._1.partition,
"replicas" -> e._2))))
}
/**
 * Writes the remaining reassignment plan to /admin/reassign_partitions, or deletes
 * the znode when the plan is empty. Wraps non-ZkNoNodeException failures in
 * AdminOperationException.
 */
def updatePartitionReassignmentData(zkClient: ZkClient, partitionsToBeReassigned: Map[TopicAndPartition, Seq[Int]]) {
val zkPath = ZkUtils.ReassignPartitionsPath
partitionsToBeReassigned.size match {
case 0 => // need to delete the /admin/reassign_partitions path
deletePath(zkClient, zkPath)
info("No more partitions need to be reassigned. Deleting zk path %s".format(zkPath))
case _ =>
val jsonData = getPartitionReassignmentZkData(partitionsToBeReassigned)
try {
updatePersistentPath(zkClient, zkPath, jsonData)
info("Updated partition reassignment path with %s".format(jsonData))
} catch {
case nne: ZkNoNodeException =>
ZkUtils.createPersistentPath(zkClient, zkPath, jsonData)
debug("Created path %s with %s for partition reassignment".format(zkPath, jsonData))
case e2: Throwable => throw new AdminOperationException(e2.toString)
}
}
}
/**
 * Reads the preferred-replica-election admin znode and returns the partitions
 * awaiting election; empty when the znode is absent.
 */
def getPartitionsUndergoingPreferredReplicaElection(zkClient: ZkClient): Set[TopicAndPartition] = {
  // read the partitions and their new replica list
  readDataMaybeNull(zkClient, PreferredReplicaLeaderElectionPath)._1
    .map(jsonPartitionList => PreferredReplicaLeaderElectionCommand.parsePreferredReplicaElectionData(jsonPartitionList))
    .getOrElse(Set.empty[TopicAndPartition])
}
/**
 * Deletes the broker's registration znode and its registration under the given topic.
 * NOTE(review): despite the name, this removes broker-level znodes, not a topic
 * partition — confirm intended usage at call sites.
 */
def deletePartition(zkClient : ZkClient, brokerId: Int, topic: String) {
val brokerIdPath = BrokerIdsPath + "/" + brokerId
zkClient.delete(brokerIdPath)
val brokerPartTopicPath = BrokerTopicsPath + "/" + topic + "/" + brokerId
zkClient.delete(brokerPartTopicPath)
}
/** Lists the consumer ids registered under the group's /ids directory. */
def getConsumersInGroup(zkClient: ZkClient, group: String): Seq[String] = {
val dirs = new ZKGroupDirs(group)
getChildren(zkClient, dirs.consumerRegistryDir)
}
/**
 * For each topic consumed by the group, collects the consumer thread ids subscribed
 * to it (from each consumer's registered topic count), sorted lexicographically.
 */
def getConsumersPerTopic(zkClient: ZkClient, group: String) : mutable.Map[String, List[String]] = {
val dirs = new ZKGroupDirs(group)
val consumers = getChildrenParentMayNotExist(zkClient, dirs.consumerRegistryDir)
val consumersPerTopicMap = new mutable.HashMap[String, List[String]]
for (consumer <- consumers) {
val topicCount = TopicCount.constructTopicCount(group, consumer, zkClient)
for ((topic, consumerThreadIdSet) <- topicCount.getConsumerThreadIdsPerTopic) {
for (consumerThreadId <- consumerThreadIdSet)
// Prepend each thread id to the topic's list (order fixed by the sort below).
consumersPerTopicMap.get(topic) match {
case Some(curConsumers) => consumersPerTopicMap.put(topic, consumerThreadId :: curConsumers)
case _ => consumersPerTopicMap.put(topic, List(consumerThreadId))
}
}
}
for ( (topic, consumerList) <- consumersPerTopicMap )
consumersPerTopicMap.put(topic, consumerList.sortWith((s,t) => s < t))
consumersPerTopicMap
}
/**
 * This API takes in a broker id, queries zookeeper for the broker metadata and returns the metadata for that broker
 * or throws an exception if the broker dies before the query to zookeeper finishes
 * @param brokerId The broker id
 * @param zkClient The zookeeper client connection
 * @return An optional Broker object encapsulating the broker metadata
 */
def getBrokerInfo(zkClient: ZkClient, brokerId: Int): Option[Broker] = {
  val brokerInfoOpt = ZkUtils.readDataMaybeNull(zkClient, ZkUtils.BrokerIdsPath + "/" + brokerId)._1
  brokerInfoOpt.map(brokerInfo => Broker.createBroker(brokerId, brokerInfo))
}
/** All topic names registered under /brokers/topics (empty when none). */
def getAllTopics(zkClient: ZkClient): Seq[String] = {
  val topics = ZkUtils.getChildrenParentMayNotExist(zkClient, BrokerTopicsPath)
  if (topics == null) Seq.empty[String] else topics
}

/** Every (topic, partition) pair currently registered in ZK. */
def getAllPartitions(zkClient: ZkClient): Set[TopicAndPartition] = {
  val topics = ZkUtils.getChildrenParentMayNotExist(zkClient, BrokerTopicsPath)
  if (topics == null) Set.empty[TopicAndPartition]
  else {
    val allPartitions = topics.flatMap { topic =>
      getChildren(zkClient, getTopicPartitionsPath(topic)).map(p => TopicAndPartition(topic, p.toInt))
    }
    allPartitions.toSet
  }
}
}
/**
 * IZkDataListener that signals `leaderExistsOrChanged` (under `leaderLock`) when the
 * watched partition's leader znode is created, changed to a different leader than
 * `oldLeaderOpt`, or deleted. The topic and partition are recovered from the znode
 * path (.../ topic / partitions / partition / state).
 */
class LeaderExistsOrChangedListener(topic: String,
partition: Int,
leaderLock: ReentrantLock,
leaderExistsOrChanged: Condition,
oldLeaderOpt: Option[Int] = None,
zkClient: ZkClient = null) extends IZkDataListener with Logging {
@throws(classOf[Exception])
def handleDataChange(dataPath: String, data: Object) {
// Path layout: .../<topic>/partitions/<partition>/state
val t = dataPath.split("/").takeRight(3).head
val p = dataPath.split("/").takeRight(2).head.toInt
inLock(leaderLock) {
if(t == topic && p == partition){
if(oldLeaderOpt == None){
trace("In leader existence listener on partition [%s, %d], leader has been created".format(topic, partition))
leaderExistsOrChanged.signal()
}
else {
val newLeaderOpt = ZkUtils.getLeaderForPartition(zkClient, t, p)
if(newLeaderOpt.isDefined && newLeaderOpt.get != oldLeaderOpt.get){
trace("In leader change listener on partition [%s, %d], leader has been moved from %d to %d".format(topic, partition, oldLeaderOpt.get, newLeaderOpt.get))
leaderExistsOrChanged.signal()
}
}
}
}
}
@throws(classOf[Exception])
def handleDataDeleted(dataPath: String) {
// A deleted leader znode also counts as a leader change.
inLock(leaderLock) {
leaderExistsOrChanged.signal()
}
}
}
/** ZkSerializer that stores plain strings as UTF-8 bytes (null-safe on read). */
object ZKStringSerializer extends ZkSerializer {

  @throws(classOf[ZkMarshallingError])
  def serialize(data: Object): Array[Byte] =
    data.asInstanceOf[String].getBytes("UTF-8")

  @throws(classOf[ZkMarshallingError])
  def deserialize(bytes: Array[Byte]): Object =
    if (bytes == null) null
    else new String(bytes, "UTF-8")
}
/** ZK directory layout for a consumer group (registry of consumer ids). */
class ZKGroupDirs(val group: String) {
def consumerDir = ZkUtils.ConsumersPath
def consumerGroupDir = consumerDir + "/" + group
def consumerRegistryDir = consumerGroupDir + "/ids"
}
/** Extends the group layout with the per-topic offsets and owners directories. */
class ZKGroupTopicDirs(group: String, topic: String) extends ZKGroupDirs(group) {
def consumerOffsetDir = consumerGroupDir + "/offsets/" + topic
def consumerOwnerDir = consumerGroupDir + "/owners/" + topic
}
/** ZooKeeper connection settings parsed from the supplied properties. */
class ZKConfig(props: VerifiableProperties) {
/** ZK host string */
val zkConnect = props.getString("zookeeper.connect")
/** zookeeper session timeout */
val zkSessionTimeoutMs = props.getInt("zookeeper.session.timeout.ms", 6000)
/** the max time that the client waits to establish a connection to zookeeper */
val zkConnectionTimeoutMs = props.getInt("zookeeper.connection.timeout.ms",zkSessionTimeoutMs)
/** how far a ZK follower can be behind a ZK leader */
val zkSyncTimeMs = props.getInt("zookeeper.sync.time.ms", 2000)
}
| jhooda/kafka | core/src/main/scala/kafka/utils/ZkUtils.scala | Scala | apache-2.0 | 34,711 |
package margn.compiler
/** Signals a general compilation failure; `msg` describes the cause. */
class CompileError(msg: String) extends Error(msg)
/** Signals a type-checking failure; `msg` describes the cause. */
class TypeError (msg: String) extends Error(msg)
| 193s/margn | src/main/scala/margn/compiler/CompileError.scala | Scala | gpl-2.0 | 126 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature
import org.apache.spark.annotation.Since
import org.apache.spark.internal.Logging
import org.apache.spark.ml._
import org.apache.spark.ml.attribute.NominalAttribute
import org.apache.spark.ml.param._
import org.apache.spark.ml.param.shared._
import org.apache.spark.ml.util._
import org.apache.spark.sql.Dataset
import org.apache.spark.sql.types.StructType
/**
* Params for [[QuantileDiscretizer]].
*/
private[feature] trait QuantileDiscretizerBase extends Params
with HasHandleInvalid with HasInputCol with HasOutputCol with HasInputCols with HasOutputCols
with HasRelativeError {
/**
* Number of buckets (quantiles, or categories) into which data points are grouped. Must
* be greater than or equal to 2.
*
* See also [[handleInvalid]], which can optionally create an additional bucket for NaN values.
*
* default: 2
* @group param
*/
val numBuckets = new IntParam(this, "numBuckets", "Number of buckets (quantiles, or " +
"categories) into which data points are grouped. Must be >= 2.",
ParamValidators.gtEq(2))
setDefault(numBuckets -> 2)
/** @group getParam */
def getNumBuckets: Int = getOrDefault(numBuckets)
/**
* Array of number of buckets (quantiles, or categories) into which data points are grouped.
* Each value must be greater than or equal to 2
*
* See also [[handleInvalid]], which can optionally create an additional bucket for NaN values.
*
* @group param
*/
val numBucketsArray = new IntArrayParam(this, "numBucketsArray", "Array of number of buckets " +
"(quantiles, or categories) into which data points are grouped. This is for multiple " +
"columns input. If transforming multiple columns and numBucketsArray is not set, but " +
"numBuckets is set, then numBuckets will be applied across all columns.",
(arrayOfNumBuckets: Array[Int]) => arrayOfNumBuckets.forall(ParamValidators.gtEq(2)))
/** @group getParam */
def getNumBucketsArray: Array[Int] = $(numBucketsArray)
/**
* Param for how to handle invalid entries. Options are 'skip' (filter out rows with
* invalid values), 'error' (throw an error), or 'keep' (keep invalid values in a special
* additional bucket). Note that in the multiple columns case, the invalid handling is applied
* to all columns. That said for 'error' it will throw an error if any invalids are found in
* any column, for 'skip' it will skip rows with any invalids in any columns, etc.
* Default: "error"
* @group param
*/
@Since("2.1.0")
override val handleInvalid: Param[String] = new Param[String](this, "handleInvalid",
"how to handle invalid entries. Options are skip (filter out rows with invalid values), " +
"error (throw an error), or keep (keep invalid values in a special additional bucket).",
ParamValidators.inArray(Bucketizer.supportedHandleInvalids))
// Reuse Bucketizer's default so fit-time and transform-time handling agree.
setDefault(handleInvalid, Bucketizer.ERROR_INVALID)
}
/**
* `QuantileDiscretizer` takes a column with continuous features and outputs a column with binned
* categorical features. The number of bins can be set using the `numBuckets` parameter. It is
* possible that the number of buckets used will be smaller than this value, for example, if there
* are too few distinct values of the input to create enough distinct quantiles.
* Since 2.3.0, `QuantileDiscretizer` can map multiple columns at once by setting the `inputCols`
* parameter. If both of the `inputCol` and `inputCols` parameters are set, an Exception will be
* thrown. To specify the number of buckets for each column, the `numBucketsArray` parameter can
* be set, or if the number of buckets should be the same across columns, `numBuckets` can be
* set as a convenience. Note that in multiple columns case, relative error is applied to all
* columns.
*
* NaN handling:
* null and NaN values will be ignored from the column during `QuantileDiscretizer` fitting. This
* will produce a `Bucketizer` model for making predictions. During the transformation,
* `Bucketizer` will raise an error when it finds NaN values in the dataset, but the user can
* also choose to either keep or remove NaN values within the dataset by setting `handleInvalid`.
* If the user chooses to keep NaN values, they will be handled specially and placed into their own
* bucket, for example, if 4 buckets are used, then non-NaN data will be put into buckets[0-3],
* but NaNs will be counted in a special bucket[4].
*
* Algorithm: The bin ranges are chosen using an approximate algorithm (see the documentation for
* `org.apache.spark.sql.DataFrameStatFunctions.approxQuantile`
* for a detailed description). The precision of the approximation can be controlled with the
* `relativeError` parameter. The lower and upper bin bounds will be `-Infinity` and `+Infinity`,
* covering all real values.
*/
@Since("1.6.0")
final class QuantileDiscretizer @Since("1.6.0") (@Since("1.6.0") override val uid: String)
extends Estimator[Bucketizer] with QuantileDiscretizerBase with DefaultParamsWritable {
@Since("1.6.0")
def this() = this(Identifiable.randomUID("quantileDiscretizer"))
/** @group expertSetParam */
@Since("2.0.0")
def setRelativeError(value: Double): this.type = set(relativeError, value)
/** @group setParam */
@Since("1.6.0")
def setNumBuckets(value: Int): this.type = set(numBuckets, value)
/** @group setParam */
@Since("1.6.0")
def setInputCol(value: String): this.type = set(inputCol, value)
/** @group setParam */
@Since("1.6.0")
def setOutputCol(value: String): this.type = set(outputCol, value)
/** @group setParam */
@Since("2.1.0")
def setHandleInvalid(value: String): this.type = set(handleInvalid, value)
/** Sets per-column bucket counts for multi-column use (see transformSchema checks). @group setParam */
@Since("2.3.0")
def setNumBucketsArray(value: Array[Int]): this.type = set(numBucketsArray, value)
/** @group setParam */
@Since("2.3.0")
def setInputCols(value: Array[String]): this.type = set(inputCols, value)
/** @group setParam */
@Since("2.3.0")
def setOutputCols(value: Array[String]): this.type = set(outputCols, value)
@Since("1.6.0")
override def transformSchema(schema: StructType): StructType = {
ParamValidators.checkSingleVsMultiColumnParams(this, Seq(outputCol),
Seq(outputCols))
if (isSet(inputCol)) {
require(!isSet(numBucketsArray),
s"numBucketsArray can't be set for single-column QuantileDiscretizer.")
}
if (isSet(inputCols)) {
require(getInputCols.length == getOutputCols.length,
s"QuantileDiscretizer $this has mismatched Params " +
s"for multi-column transform. Params (inputCols, outputCols) should have " +
s"equal lengths, but they have different lengths: " +
s"(${getInputCols.length}, ${getOutputCols.length}).")
if (isSet(numBucketsArray)) {
require(getInputCols.length == getNumBucketsArray.length,
s"QuantileDiscretizer $this has mismatched Params " +
s"for multi-column transform. Params (inputCols, outputCols, numBucketsArray) " +
s"should have equal lengths, but they have different lengths: " +
s"(${getInputCols.length}, ${getOutputCols.length}, ${getNumBucketsArray.length}).")
require(!isSet(numBuckets),
s"exactly one of numBuckets, numBucketsArray Params to be set, but both are set." )
}
}
val (inputColNames, outputColNames) = if (isSet(inputCols)) {
($(inputCols).toSeq, $(outputCols).toSeq)
} else {
(Seq($(inputCol)), Seq($(outputCol)))
}
var outputFields = schema.fields
inputColNames.zip(outputColNames).foreach { case (inputColName, outputColName) =>
SchemaUtils.checkNumericType(schema, inputColName)
require(!schema.fieldNames.contains(outputColName),
s"Output column $outputColName already exists.")
val attr = NominalAttribute.defaultAttr.withName(outputColName)
outputFields :+= attr.toStructField()
}
StructType(outputFields)
}
@Since("2.0.0")
override def fit(dataset: Dataset[_]): Bucketizer = {
transformSchema(dataset.schema, logging = true)
val bucketizer = new Bucketizer(uid).setHandleInvalid($(handleInvalid))
if (isSet(inputCols)) {
val splitsArray = if (isSet(numBucketsArray)) {
val probArrayPerCol = $(numBucketsArray).map { numOfBuckets =>
(0 to numOfBuckets).map(_.toDouble / numOfBuckets).toArray
}
val probabilityArray = probArrayPerCol.flatten.sorted.distinct
val splitsArrayRaw = dataset.stat.approxQuantile($(inputCols),
probabilityArray, $(relativeError))
splitsArrayRaw.zip(probArrayPerCol).map { case (splits, probs) =>
val probSet = probs.toSet
val idxSet = probabilityArray.zipWithIndex.collect {
case (p, idx) if probSet(p) =>
idx
}.toSet
splits.zipWithIndex.collect {
case (s, idx) if idxSet(idx) =>
s
}
}
} else {
dataset.stat.approxQuantile($(inputCols),
(0 to $(numBuckets)).map(_.toDouble / $(numBuckets)).toArray, $(relativeError))
}
bucketizer.setSplitsArray(splitsArray.map(getDistinctSplits))
} else {
val splits = dataset.stat.approxQuantile($(inputCol),
(0 to $(numBuckets)).map(_.toDouble / $(numBuckets)).toArray, $(relativeError))
bucketizer.setSplits(getDistinctSplits(splits))
}
copyValues(bucketizer.setParent(this))
}
private def getDistinctSplits(splits: Array[Double]): Array[Double] = {
splits(0) = Double.NegativeInfinity
splits(splits.length - 1) = Double.PositiveInfinity
val distinctSplits = splits.distinct
if (splits.length != distinctSplits.length) {
log.warn(s"Some quantiles were identical. Bucketing to ${distinctSplits.length - 1}" +
s" buckets as a result.")
}
distinctSplits.sorted
}
@Since("1.6.0")
override def copy(extra: ParamMap): QuantileDiscretizer = defaultCopy(extra)
}
@Since("1.6.0")
object QuantileDiscretizer extends DefaultParamsReadable[QuantileDiscretizer] with Logging {

  /** Loads a previously persisted [[QuantileDiscretizer]] from the given path. */
  @Since("1.6.0")
  override def load(path: String): QuantileDiscretizer = super.load(path)
}
| goldmedal/spark | mllib/src/main/scala/org/apache/spark/ml/feature/QuantileDiscretizer.scala | Scala | apache-2.0 | 11,031 |
package org.labrad.data
import io.netty.buffer.{ByteBuf, Unpooled}
import java.io.{ByteArrayInputStream, InputStream, IOException}
import java.nio.ByteOrder
import java.nio.ByteOrder.BIG_ENDIAN
import java.nio.charset.StandardCharsets.UTF_8
import org.labrad.types.Type
/**
 * One labrad wire packet: a header (context, request id, target) followed by a
 * length-prefixed sequence of records.
 */
case class Packet(id: Int, target: Long, context: Context, records: Seq[Record]) {

  /** Serializes this packet into a standalone byte array (big-endian by default). */
  def toBytes(implicit bo: ByteOrder = BIG_ENDIAN): Array[Byte] = {
    val scratch = Unpooled.buffer()
    writeTo(scratch)
    scratch.toByteArray
  }

  /** Writes the 16-byte header, then all records wrapped in a length prefix. */
  def writeTo(buf: ByteBuf)(implicit bo: ByteOrder): Unit = {
    // Header fields, each written as a 4-byte int in the implicit byte order.
    buf.writeIntOrdered(context.high.toInt)
    buf.writeIntOrdered(context.low.toInt)
    buf.writeIntOrdered(id)
    buf.writeIntOrdered(target.toInt)
    // Payload: every record, preceded by its total byte length.
    buf.writeLen {
      records.foreach(_.writeTo(buf))
    }
  }
}
object Packet {

  /** Builds an outgoing packet for a request, stamping it with the given request number. */
  def forRequest(request: Request, requestNum: Int) = request match {
    case Request(target, context, records) => Packet(requestNum, target, context, records)
  }

  /** Messages are requests with request number 0 (no reply expected). */
  def forMessage(request: Request) = forRequest(request, 0)

  // Wire type of the 20-byte header: ((context high, context low), request, target, data length).
  val HEADER = Type("(ww)iww")

  def fromBytes(bytes: Array[Byte])(implicit bo: ByteOrder = BIG_ENDIAN): Packet =
    fromBytes(new ByteArrayInputStream(bytes))

  /** Reads one packet from the stream: fixed-size header, then `len` bytes of record data. */
  def fromBytes(in: InputStream)(implicit bo: ByteOrder): Packet = {
    // Reads exactly n bytes, looping until the stream has delivered them all;
    // fails fast if the stream ends early.
    def readBytes(n: Int): Array[Byte] = {
      val buf = Array.ofDim[Byte](n)
      var tot = 0
      while (tot < n) {
        in.read(buf, tot, n - tot) match {
          case i if i < 0 => throw new IOException("read failed")
          case i => tot += i
        }
      }
      buf
    }

    val headerBytes = readBytes(20)
    val hdr = FlatData.fromBytes(HEADER, headerBytes)
    val ((high, low), req, src, len) = hdr.get[((Long, Long), Int, Long, Long)]

    val data = readBytes(len.toInt)
    val recordBuf = Unpooled.wrappedBuffer(data)
    val records = extractRecords(recordBuf)
    Packet(req, src, Context(high, low), records)
  }

  /**
   * Parses records until the buffer is exhausted. Each record is laid out as:
   * id, tag length, type tag (utf-8), data length, flattened data.
   */
  def extractRecords(buf: ByteBuf)(implicit bo: ByteOrder): Seq[Record] = {
    val records = Seq.newBuilder[Record]
    while (buf.readableBytes > 0) {
      val id = buf.readUnsignedIntOrdered()
      val tagLen = buf.readIntOrdered()
      // Peek at the tag without consuming it, then skip past it once parsed.
      val tag = buf.toString(buf.readerIndex, tagLen, UTF_8)
      val t = Type(tag)
      buf.skipBytes(tagLen)
      val dataLen = buf.readIntOrdered()
      val dataBytes = Array.ofDim[Byte](dataLen)
      buf.readBytes(dataBytes)
      val data = FlatData.fromBytes(t, dataBytes)
      records += Record(id, data)
    }
    records.result
  }
}
/** A single (setting id, data) pair carried inside a packet. */
case class Record(id: Long, data: Data) {

  def toBytes(implicit order: ByteOrder): Array[Byte] = {
    val buf = Unpooled.buffer()
    writeTo(buf)
    buf.toByteArray
  }

  /** Writes the id, then the length-prefixed type tag, then the length-prefixed flat data. */
  def writeTo(buf: ByteBuf)(implicit bo: ByteOrder): Unit = {
    buf.writeIntOrdered(id.toInt)
    buf.writeLen { buf.writeUtf8String(data.t.toString) }
    buf.writeLen { buf.writeData(data) }
  }
}
// A request addressed to a server by numeric id.
case class Request(server: Long, context: Context = Context(0, 0), records: Seq[Record] = Nil)

// A record addressed by setting name rather than numeric id.
case class NameRecord(name: String, data: Data = Data.NONE)

// A request addressed to a server by name rather than numeric id.
case class NameRequest(server: String, context: Context = Context(0, 0), records: Seq[NameRecord] = Seq.empty)

// An incoming message from `source`, delivered outside the request/response cycle.
case class Message(source: Long, context: Context, msg: Long, data: Data)
| labrad/scalabrad | core/src/main/scala/org/labrad/data/Packet.scala | Scala | mit | 3,247 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.utils.stats
import java.util.Date
import org.locationtech.jts.geom.Geometry
import org.junit.runner.RunWith
import org.locationtech.geomesa.curve.TimePeriod
import org.locationtech.geomesa.utils.geotools.GeoToolsDateFormat
import org.locationtech.geomesa.utils.text.WKTUtils
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class Z3HistogramTest extends Specification with StatTestHelper {

  /**
   * Builds a weekly-period Z3 histogram stat over (geom, dtg) with the given number of bins,
   * optionally observing the shared test features from StatTestHelper.
   */
  def createStat(length: Int, observe: Boolean): Z3Histogram = {
    val s = Stat(sft, Stat.Z3Histogram("geom", "dtg", TimePeriod.Week, length))
    if (observe) {
      features.foreach { s.observe }
    }
    s.asInstanceOf[Z3Histogram]
  }

  // Default stat: 1024 bins, observed unless told otherwise.
  def createStat(observe: Boolean = true): Z3Histogram = createStat(1024, observe)

  /** Parses an ISO-8601 date string, interpreted as UTC. */
  def toDate(string: String): Date = Date.from(java.time.LocalDateTime.parse(string, GeoToolsDateFormat).toInstant(java.time.ZoneOffset.UTC))

  /** Parses WKT into a JTS geometry. */
  def toGeom(string: String): Geometry = WKTUtils.read(string)

  "HistogramZ3 stat" should {
    "work with geometries and dates" >> {
      "be empty initially" >> {
        val stat = createStat(observe = false)
        stat.isEmpty must beTrue
      }
      "correctly bin values" >> {
        val stat = createStat()
        stat.isEmpty must beFalse
        // Each observed feature should land in a bin with a plausible (non-zero) count.
        forall(0 until 100) { i =>
          val (w, idx) = stat.indexOf(toGeom(s"POINT(-$i ${i / 2})"), toDate(f"2012-01-01T${i%24}%02d:00:00.000Z"))
          stat.count(w, idx) must beBetween(1L, 21L)
        }
      }
      "serialize and deserialize" >> {
        val stat = createStat()
        val packed = StatSerializer(sft).serialize(stat)
        val unpacked = StatSerializer(sft).deserialize(packed)
        // Round trip must preserve the attribute bindings, bin count, and contents.
        unpacked must beAnInstanceOf[Z3Histogram]
        unpacked.asInstanceOf[Z3Histogram].geom mustEqual stat.geom
        unpacked.asInstanceOf[Z3Histogram].dtg mustEqual stat.dtg
        unpacked.asInstanceOf[Z3Histogram].length mustEqual stat.length
        unpacked.asInstanceOf[Z3Histogram].toJson mustEqual stat.toJson
      }
      "serialize and deserialize empty stats" >> {
        val stat = createStat(observe = false)
        val packed = StatSerializer(sft).serialize(stat)
        val unpacked = StatSerializer(sft).deserialize(packed)
        unpacked must beAnInstanceOf[Z3Histogram]
        unpacked.asInstanceOf[Z3Histogram].geom mustEqual stat.geom
        unpacked.asInstanceOf[Z3Histogram].dtg mustEqual stat.dtg
        unpacked.asInstanceOf[Z3Histogram].length mustEqual stat.length
        unpacked.asInstanceOf[Z3Histogram].toJson mustEqual stat.toJson
      }
      "deserialize as immutable value" >> {
        val stat = createStat()
        val packed = StatSerializer(sft).serialize(stat)
        val unpacked = StatSerializer(sft).deserialize(packed, immutable = true)
        unpacked must beAnInstanceOf[Z3Histogram]
        unpacked.asInstanceOf[Z3Histogram].geom mustEqual stat.geom
        unpacked.asInstanceOf[Z3Histogram].dtg mustEqual stat.dtg
        unpacked.asInstanceOf[Z3Histogram].length mustEqual stat.length
        unpacked.asInstanceOf[Z3Histogram].toJson mustEqual stat.toJson
        // An immutable stat must reject every mutating operation.
        unpacked.clear must throwAn[Exception]
        unpacked.+=(stat) must throwAn[Exception]
        unpacked.observe(features.head) must throwAn[Exception]
        unpacked.unobserve(features.head) must throwAn[Exception]
      }
      "clear" >> {
        val stat = createStat()
        stat.clear()
        // After clear, every previously-observed bin (and an arbitrary other bin) is zero.
        stat.isEmpty must beTrue
        forall(0 until 100) { i =>
          val (w, idx) = stat.indexOf(toGeom(s"POINT(-$i ${i / 2})"), toDate(f"2012-01-01T${i%24}%02d:00:00.000Z"))
          stat.count(w, idx) mustEqual 0
        }
        val (w, idx) = stat.indexOf(toGeom("POINT(-180 -90)"), toDate("2012-01-01T00:00:00.000Z"))
        stat.count(w, idx) mustEqual 0
      }
    }
  }
}
| locationtech/geomesa | geomesa-utils/src/test/scala/org/locationtech/geomesa/utils/stats/Z3HistogramTest.scala | Scala | apache-2.0 | 4,340 |
package truerss.db
import truerss.dto.SetupKey
// I save setting in json representation
// real Setup[T] Keys/Values will be defined on service-level
/**
 * A typed setting value. Concrete Setup[T] keys/values are defined at the service level;
 * settings themselves are persisted in a json representation.
 */
sealed trait SettingValue {
  type T              // the underlying value type
  val defaultValue: T // value used when nothing has been persisted
  val name: String    // discriminator used in the json representation
}
/** A setting chosen from a fixed list of predefined integer options. */
case class SelectableValue(predefined: Iterable[Int], defaultValue: Int) extends SettingValue {
  override type T = Int
  override val name: String = SelectableValue.fName
}

object SelectableValue {
  val fName = "selectable"
  // A selectable with no options and a zero default.
  val empty = SelectableValue(Nil, 0)
}
/** A boolean (on/off) setting. */
case class RadioValue(defaultValue: Boolean) extends SettingValue {
  override type T = Boolean
  override val name: String = RadioValue.fName

  /** True when the radio is switched on. */
  def isYes: Boolean = defaultValue

  /** True when the radio is switched off. */
  def isNo: Boolean = !defaultValue
}

object RadioValue {
  val fName = "radio"
}
/** A setting definition: storage key, human-readable description, and typed default value. */
case class PredefinedSettings(key: String, description: String, value: SettingValue) {
  def toKey: SetupKey = {
    SetupKey(key, description)
  }

  // NOTE(review): unchecked cast — callers must request the T matching `value`'s actual
  // underlying type, otherwise this fails at use time with a ClassCastException.
  def default[T]: T = {
    value.defaultValue.asInstanceOf[T]
  }
}
/** The built-in settings known to the application. */
object Predefined {

  val fFeedParallelism = "parallelism"
  val fReadContent = "read_content"
  val fFeedsPerPage = "feeds_per_page"
  val fShortView = "short_view"

  /** How many feeds may be fetched concurrently. */
  val parallelism = PredefinedSettings(
    key = fFeedParallelism,
    description = "Number of simultaneous requests",
    value = SelectableValue(Iterable(10, 25, 45, 100), 10)
  )

  // todo: need to use
  val read = PredefinedSettings(
    key = fReadContent,
    description = "Skip content",
    value = RadioValue(true)
  )

  /** Page size for the feed list. */
  val feedsPerPage = PredefinedSettings(
    key = fFeedsPerPage,
    description = "Feeds per page",
    value = SelectableValue(Iterable(10, 20, 30, 50, 100), 10)
  )

  /** Whether to display only feed titles. */
  val shortView = PredefinedSettings(
    key = fShortView,
    description = "Display only feeds titles",
    value = RadioValue(false)
  )

  // `read` is intentionally excluded until it is wired up (see todo above).
  val predefined: List[PredefinedSettings] = List(parallelism, feedsPerPage, shortView)
}
} | truerss/truerss | src/main/scala/truerss/db/PredefinedSettings.scala | Scala | mit | 1,856 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.jdbc
import java.sql.{Connection, DriverManager, ResultSet, ResultSetMetaData, SQLException}
import java.util.Properties
import org.apache.commons.lang3.StringUtils
import org.apache.spark.{Logging, Partition, SparkContext, TaskContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.expressions.{Row, SpecificMutableRow}
import org.apache.spark.sql.catalyst.util.DateUtils
import org.apache.spark.sql.types._
import org.apache.spark.sql.sources._
/**
* Data corresponding to one partition of a JDBCRDD.
*/
// One RDD partition = one WHERE clause restricting the rows it scans (may be null).
private[sql] case class JDBCPartition(whereClause: String, idx: Int) extends Partition {
  override def index: Int = idx
}
private[sql] object JDBCRDD extends Logging {

  /**
   * Maps a JDBC type to a Catalyst type. This function is called only when
   * the JdbcDialect class corresponding to your database driver returns null.
   *
   * @param sqlType - A field of java.sql.Types
   * @param precision - Precision reported by JDBC metadata (0 when not applicable).
   * @param scale - Scale reported by JDBC metadata (0 when not applicable).
   * @param signed - Whether the column is signed; unsigned integral types are widened.
   * @return The Catalyst type corresponding to sqlType.
   * @throws SQLException if the JDBC type has no Catalyst mapping.
   */
  private def getCatalystType(
      sqlType: Int,
      precision: Int,
      scale: Int,
      signed: Boolean): DataType = {
    val answer = sqlType match {
      // scalastyle:off
      case java.sql.Types.ARRAY => null
      case java.sql.Types.BIGINT => if (signed) { LongType } else { DecimalType.Unlimited }
      case java.sql.Types.BINARY => BinaryType
      case java.sql.Types.BIT => BooleanType // @see JdbcDialect for quirks
      case java.sql.Types.BLOB => BinaryType
      case java.sql.Types.BOOLEAN => BooleanType
      case java.sql.Types.CHAR => StringType
      case java.sql.Types.CLOB => StringType
      case java.sql.Types.DATALINK => null
      case java.sql.Types.DATE => DateType
      case java.sql.Types.DECIMAL
        if precision != 0 || scale != 0 => DecimalType(precision, scale)
      case java.sql.Types.DECIMAL => DecimalType.Unlimited
      case java.sql.Types.DISTINCT => null
      case java.sql.Types.DOUBLE => DoubleType
      case java.sql.Types.FLOAT => FloatType
      case java.sql.Types.INTEGER => if (signed) { IntegerType } else { LongType }
      case java.sql.Types.JAVA_OBJECT => null
      case java.sql.Types.LONGNVARCHAR => StringType
      case java.sql.Types.LONGVARBINARY => BinaryType
      case java.sql.Types.LONGVARCHAR => StringType
      case java.sql.Types.NCHAR => StringType
      case java.sql.Types.NCLOB => StringType
      case java.sql.Types.NULL => null
      case java.sql.Types.NUMERIC
        if precision != 0 || scale != 0 => DecimalType(precision, scale)
      case java.sql.Types.NUMERIC => DecimalType.Unlimited
      case java.sql.Types.NVARCHAR => StringType
      case java.sql.Types.OTHER => null
      case java.sql.Types.REAL => DoubleType
      case java.sql.Types.REF => StringType
      case java.sql.Types.ROWID => LongType
      case java.sql.Types.SMALLINT => IntegerType
      case java.sql.Types.SQLXML => StringType
      case java.sql.Types.STRUCT => StringType
      case java.sql.Types.TIME => TimestampType
      case java.sql.Types.TIMESTAMP => TimestampType
      case java.sql.Types.TINYINT => IntegerType
      case java.sql.Types.VARBINARY => BinaryType
      case java.sql.Types.VARCHAR => StringType
      case _ => null
      // scalastyle:on
    }

    if (answer == null) throw new SQLException("Unsupported type " + sqlType)
    answer
  }

  /**
   * Takes a (schema, table) specification and returns the table's Catalyst
   * schema.
   *
   * @param url - The JDBC url to fetch information from.
   * @param table - The table name of the desired table. This may also be a
   *   SQL query wrapped in parentheses.
   *
   * @return A StructType giving the table's Catalyst schema.
   * @throws SQLException if the table specification is garbage.
   * @throws SQLException if the table contains an unsupported type.
   */
  def resolveTable(url: String, table: String, properties: Properties): StructType = {
    val dialect = JdbcDialects.get(url)
    val conn: Connection = DriverManager.getConnection(url, properties)
    try {
      // "WHERE 1=0" fetches no rows but still exposes the result set metadata.
      val rs = conn.prepareStatement(s"SELECT * FROM $table WHERE 1=0").executeQuery()
      try {
        val rsmd = rs.getMetaData
        val ncols = rsmd.getColumnCount
        val fields = new Array[StructField](ncols)
        var i = 0
        while (i < ncols) {
          val columnName = rsmd.getColumnLabel(i + 1)
          val dataType = rsmd.getColumnType(i + 1)
          val typeName = rsmd.getColumnTypeName(i + 1)
          val fieldSize = rsmd.getPrecision(i + 1)
          val fieldScale = rsmd.getScale(i + 1)
          val isSigned = rsmd.isSigned(i + 1)
          val nullable = rsmd.isNullable(i + 1) != ResultSetMetaData.columnNoNulls
          val metadata = new MetadataBuilder().putString("name", columnName)
          // Prefer the dialect-specific mapping; fall back to the generic JDBC mapping.
          val columnType =
            dialect.getCatalystType(dataType, typeName, fieldSize, metadata).getOrElse(
              getCatalystType(dataType, fieldSize, fieldScale, isSigned))
          fields(i) = StructField(columnName, columnType, nullable, metadata.build())
          i = i + 1
        }
        // Value of the enclosing try expression. The previous code used an explicit
        // `return` here followed by an unreachable `throw` after the finally block;
        // both have been removed with no behavior change.
        new StructType(fields)
      } finally {
        rs.close()
      }
    } finally {
      conn.close()
    }
  }

  /**
   * Prune all but the specified columns from the specified Catalyst schema.
   *
   * @param schema - The Catalyst schema of the master table
   * @param columns - The list of desired columns
   *
   * @return A Catalyst schema corresponding to columns in the given order.
   */
  private def pruneSchema(schema: StructType, columns: Array[String]): StructType = {
    // Look fields up by the original column name stashed in metadata by resolveTable.
    val fieldMap = Map(schema.fields map { x => x.metadata.getString("name") -> x }: _*)
    new StructType(columns map { name => fieldMap(name) })
  }

  /**
   * Given a driver string and an url, return a function that loads the
   * specified driver string then returns a connection to the JDBC url.
   * getConnector is run on the driver code, while the function it returns
   * is run on the executor.
   *
   * @param driver - The class name of the JDBC driver for the given url.
   * @param url - The JDBC url to connect to.
   *
   * @return A function that loads the driver and connects to the url.
   */
  def getConnector(driver: String, url: String, properties: Properties): () => Connection = {
    () => {
      try {
        if (driver != null) DriverRegistry.register(driver)
      } catch {
        // Best effort: the driver may already be registered, or registration may be
        // handled elsewhere; log and let DriverManager try anyway.
        case e: ClassNotFoundException =>
          logWarning(s"Couldn't find class $driver", e)
      }
      DriverManager.getConnection(url, properties)
    }
  }

  /**
   * Build and return JDBCRDD from the given information.
   *
   * @param sc - Your SparkContext.
   * @param schema - The Catalyst schema of the underlying database table.
   * @param driver - The class name of the JDBC driver for the given url.
   * @param url - The JDBC url to connect to.
   * @param fqTable - The fully-qualified table name (or paren'd SQL query) to use.
   * @param requiredColumns - The names of the columns to SELECT.
   * @param filters - The filters to include in all WHERE clauses.
   * @param parts - An array of JDBCPartitions specifying partition ids and
   *   per-partition WHERE clauses.
   *
   * @return An RDD representing "SELECT requiredColumns FROM fqTable".
   */
  def scanTable(
      sc: SparkContext,
      schema: StructType,
      driver: String,
      url: String,
      properties: Properties,
      fqTable: String,
      requiredColumns: Array[String],
      filters: Array[Filter],
      parts: Array[Partition]): RDD[Row] = {
    val dialect = JdbcDialects.get(url)
    // Quote column names per-dialect so reserved words and odd identifiers survive.
    val quotedColumns = requiredColumns.map(colName => dialect.quoteIdentifier(colName))
    new JDBCRDD(
      sc,
      getConnector(driver, url, properties),
      pruneSchema(schema, requiredColumns),
      fqTable,
      quotedColumns,
      filters,
      parts,
      properties)
  }
}
/**
* An RDD representing a table in a database accessed via JDBC. Both the
* driver code and the workers must be able to access the database; the driver
* needs to fetch the schema while the workers need to fetch the data.
*/
private[sql] class JDBCRDD(
    sc: SparkContext,
    getConnection: () => Connection,
    schema: StructType,
    fqTable: String,
    columns: Array[String],
    filters: Array[Filter],
    partitions: Array[Partition],
    properties: Properties)
  extends RDD[Row](sc, Nil) {

  /**
   * Retrieve the list of partitions corresponding to this RDD.
   */
  override def getPartitions: Array[Partition] = partitions

  /**
   * `columns`, but as a String suitable for injection into a SQL query.
   */
  private val columnList: String = {
    val sb = new StringBuilder()
    columns.foreach(x => sb.append(",").append(x))
    // With no requested columns, SELECT a constant so the query stays valid.
    if (sb.length == 0) "1" else sb.substring(1)
  }

  /**
   * Converts value to SQL expression.
   */
  private def compileValue(value: Any): Any = value match {
    // Strings are single-quoted with embedded quotes doubled; other values rely on toString.
    case stringValue: UTF8String => s"'${escapeSql(stringValue.toString)}'"
    case _ => value
  }

  private def escapeSql(value: String): String =
    if (value == null) null else StringUtils.replace(value, "'", "''")

  /**
   * Turns a single Filter into a String representing a SQL expression.
   * Returns null for an unhandled filter.
   *
   * NOTE(review): attribute names are interpolated unquoted here, unlike the column list
   * which is dialect-quoted in JDBCRDD.scanTable — confirm this is intentional for the
   * supported dialects.
   */
  private def compileFilter(f: Filter): String = f match {
    case EqualTo(attr, value) => s"$attr = ${compileValue(value)}"
    case LessThan(attr, value) => s"$attr < ${compileValue(value)}"
    case GreaterThan(attr, value) => s"$attr > ${compileValue(value)}"
    case LessThanOrEqual(attr, value) => s"$attr <= ${compileValue(value)}"
    case GreaterThanOrEqual(attr, value) => s"$attr >= ${compileValue(value)}"
    case _ => null
  }

  /**
   * `filters`, but as a WHERE clause suitable for injection into a SQL query.
   * Unhandled filters are dropped (they are re-evaluated by Spark after the scan).
   */
  private val filterWhereClause: String = {
    val filterStrings = filters map compileFilter filter (_ != null)
    if (filterStrings.size > 0) {
      val sb = new StringBuilder("WHERE ")
      filterStrings.foreach(x => sb.append(x).append(" AND "))
      // Trim the trailing " AND ".
      sb.substring(0, sb.length - 5)
    } else ""
  }

  /**
   * A WHERE clause representing both `filters`, if any, and the current partition.
   */
  private def getWhereClause(part: JDBCPartition): String = {
    if (part.whereClause != null && filterWhereClause.length > 0) {
      // filterWhereClause already starts with "WHERE ".
      filterWhereClause + " AND " + part.whereClause
    } else if (part.whereClause != null) {
      "WHERE " + part.whereClause
    } else {
      filterWhereClause
    }
  }

  // Each JDBC-to-Catalyst conversion corresponds to a tag defined here so that
  // we don't have to potentially poke around in the Metadata once for every
  // row.
  // Is there a better way to do this? I'd rather be using a type that
  // contains only the tags I define.
  abstract class JDBCConversion
  case object BooleanConversion extends JDBCConversion
  case object DateConversion extends JDBCConversion
  case class DecimalConversion(precisionInfo: Option[(Int, Int)]) extends JDBCConversion
  case object DoubleConversion extends JDBCConversion
  case object FloatConversion extends JDBCConversion
  case object IntegerConversion extends JDBCConversion
  case object LongConversion extends JDBCConversion
  case object BinaryLongConversion extends JDBCConversion
  case object StringConversion extends JDBCConversion
  case object TimestampConversion extends JDBCConversion
  case object BinaryConversion extends JDBCConversion

  /**
   * Maps a StructType to a type tag list.
   */
  def getConversions(schema: StructType): Array[JDBCConversion] = {
    schema.fields.map(sf => sf.dataType match {
      case BooleanType => BooleanConversion
      case DateType => DateConversion
      case DecimalType.Unlimited => DecimalConversion(None)
      case DecimalType.Fixed(d) => DecimalConversion(Some(d))
      case DoubleType => DoubleConversion
      case FloatType => FloatConversion
      case IntegerType => IntegerConversion
      case LongType =>
        // "binarylong" marks long values stored as raw big-endian bytes.
        if (sf.metadata.contains("binarylong")) BinaryLongConversion else LongConversion
      case StringType => StringConversion
      case TimestampType => TimestampConversion
      case BinaryType => BinaryConversion
      case _ => throw new IllegalArgumentException(s"Unsupported field $sf")
    }).toArray
  }

  /**
   * Runs the SQL query against the JDBC driver.
   * Returns a lazy iterator that streams rows from the result set and closes the
   * statement/connection on exhaustion or task completion.
   */
  override def compute(thePart: Partition, context: TaskContext): Iterator[Row] = new Iterator[Row]
  {
    var closed = false
    var finished = false
    var gotNext = false
    var nextValue: Row = null

    // Ensure JDBC resources are released even if the task is interrupted or fails.
    context.addTaskCompletionListener{ context => close() }
    val part = thePart.asInstanceOf[JDBCPartition]
    val conn = getConnection()

    // H2's JDBC driver does not support the setSchema() method. We pass a
    // fully-qualified table name in the SELECT statement. I don't know how to
    // talk about a table in a completely portable way.

    val myWhereClause = getWhereClause(part)

    val sqlText = s"SELECT $columnList FROM $fqTable $myWhereClause"
    val stmt = conn.prepareStatement(sqlText,
        ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY)
    // Optional user-supplied fetch size hint; 0 leaves the driver default.
    val fetchSize = properties.getProperty("fetchSize", "0").toInt
    stmt.setFetchSize(fetchSize)
    val rs = stmt.executeQuery()

    val conversions = getConversions(schema)
    // One mutable row reused for every result-set row.
    val mutableRow = new SpecificMutableRow(schema.fields.map(x => x.dataType))

    def getNext(): Row = {
      if (rs.next()) {
        var i = 0
        while (i < conversions.length) {
          // JDBC columns are 1-indexed.
          val pos = i + 1
          conversions(i) match {
            case BooleanConversion => mutableRow.setBoolean(i, rs.getBoolean(pos))
            case DateConversion =>
              // DateUtils.fromJavaDate does not handle null value, so we need to check it.
              val dateVal = rs.getDate(pos)
              if (dateVal != null) {
                mutableRow.update(i, DateUtils.fromJavaDate(dateVal))
              } else {
                mutableRow.update(i, null)
              }
            // When connecting with Oracle DB through JDBC, the precision and scale of BigDecimal
            // object returned by ResultSet.getBigDecimal is not correctly matched to the table
            // schema reported by ResultSetMetaData.getPrecision and ResultSetMetaData.getScale.
            // If inserting values like 19999 into a column with NUMBER(12, 2) type, you get through
            // a BigDecimal object with scale as 0. But the dataframe schema has correct type as
            // DecimalType(12, 2). Thus, after saving the dataframe into parquet file and then
            // retrieve it, you will get wrong result 199.99.
            // So it is needed to set precision and scale for Decimal based on JDBC metadata.
            case DecimalConversion(Some((p, s))) =>
              val decimalVal = rs.getBigDecimal(pos)
              if (decimalVal == null) {
                mutableRow.update(i, null)
              } else {
                mutableRow.update(i, Decimal(decimalVal, p, s))
              }
            case DecimalConversion(None) =>
              val decimalVal = rs.getBigDecimal(pos)
              if (decimalVal == null) {
                mutableRow.update(i, null)
              } else {
                mutableRow.update(i, Decimal(decimalVal))
              }
            case DoubleConversion => mutableRow.setDouble(i, rs.getDouble(pos))
            case FloatConversion => mutableRow.setFloat(i, rs.getFloat(pos))
            case IntegerConversion => mutableRow.setInt(i, rs.getInt(pos))
            case LongConversion => mutableRow.setLong(i, rs.getLong(pos))
            // TODO(davies): use getBytes for better performance, if the encoding is UTF-8
            case StringConversion => mutableRow.setString(i, rs.getString(pos))
            case TimestampConversion => mutableRow.update(i, rs.getTimestamp(pos))
            case BinaryConversion => mutableRow.update(i, rs.getBytes(pos))
            case BinaryLongConversion => {
              // Decode a big-endian byte array into a long.
              val bytes = rs.getBytes(pos)
              var ans = 0L
              var j = 0
              while (j < bytes.size) {
                ans = 256 * ans + (255 & bytes(j))
                j = j + 1;
              }
              mutableRow.setLong(i, ans)
            }
          }
          // Normalize SQL NULL regardless of the getter's sentinel value.
          if (rs.wasNull) mutableRow.setNullAt(i)
          i = i + 1
        }
        mutableRow
      } else {
        finished = true
        null.asInstanceOf[Row]
      }
    }

    // Closes result set, statement and connection; safe to call more than once.
    def close() {
      if (closed) return
      try {
        if (null != rs) {
          rs.close()
        }
      } catch {
        case e: Exception => logWarning("Exception closing resultset", e)
      }
      try {
        if (null != stmt) {
          stmt.close()
        }
      } catch {
        case e: Exception => logWarning("Exception closing statement", e)
      }
      try {
        if (null != conn) {
          conn.close()
        }
        logInfo("closed connection")
      } catch {
        case e: Exception => logWarning("Exception closing connection", e)
      }
    }

    override def hasNext: Boolean = {
      if (!finished) {
        if (!gotNext) {
          nextValue = getNext()
          if (finished) {
            close()
          }
          gotNext = true
        }
      }
      !finished
    }

    override def next(): Row = {
      if (!hasNext) {
        throw new NoSuchElementException("End of stream")
      }
      gotNext = false
      nextValue
    }
  }
}
| andrewor14/iolap | sql/core/src/main/scala/org/apache/spark/sql/jdbc/JDBCRDD.scala | Scala | apache-2.0 | 18,637 |
package com.github.mdr.folderclasspathcontainer
import java.io.File
import org.eclipse.core.runtime.IPath
import org.eclipse.core.runtime.Path
import org.eclipse.jdt.core.IClasspathEntry
import org.eclipse.jdt.core.IJavaProject
import org.eclipse.jdt.core.JavaCore
object FolderInfo {

  /**
   * Decodes a classpath-container path into a FolderInfo.
   * Returns None when the path does not belong to this container id; throws
   * IllegalArgumentException when an expected segment is missing.
   */
  def decode(path: IPath): Option[FolderInfo] = {
    val id = Option(path.segment(0)).getOrElse(
      throw new IllegalArgumentException("No id segment"))
    if (id != FolderClasspathContainer.ID) {
      None
    } else {
      val filePath = path.removeFirstSegments(1)
      val firstFileSegment = Option(filePath.segment(0)).getOrElse(
        throw new IllegalArgumentException("No file segment"))
      // ROOT_MARKER flags a location that was absolute when encoded.
      val info =
        if (firstFileSegment == ROOT_MARKER)
          new FolderInfo("/" + filePath.removeFirstSegments(1))
        else
          new FolderInfo(filePath.toString)
      Some(info)
    }
  }

  def fromLocation(location: String) = new FolderInfo(location)

  // Placeholder segment marking an absolute location in an encoded path.
  final val ROOT_MARKER = "-"

  /** Rewrites an absolute location inside the project as a project-relative one. */
  def maybeMakeRelative(location: String, project: IJavaProject): String = {
    val projectRoot = project.getProject.getLocation.toString
    if (location startsWith projectRoot) {
      val suffix = location.substring(projectRoot.length)
      if (suffix startsWith "/") suffix.tail else suffix
    } else
      location
  }
}
/** A folder location, either absolute or relative to the project root. */
class FolderInfo private (val location: String) {

  import FolderInfo._

  // Encodes this folder as a container path: ID [/ ROOT_MARKER] / location.
  // Absolute locations get the ROOT_MARKER prefix so the leading '/' survives a
  // decode round trip (see FolderInfo.decode).
  def asEncodedPath = {
    val idPath = new Path(FolderClasspathContainer.ID)
    if (isAbsolute)
      idPath.append("/" + ROOT_MARKER + "/" + location)
    else
      idPath.append("/" + location)
  }

  private def isAbsolute = location startsWith "/"

  // Resolves relative locations against the project root.
  def asPath(project: IJavaProject): IPath =
    if (isAbsolute)
      new Path(location)
    else
      project.getProject.getLocation.append(location)

  def asFile(project: IJavaProject): File = asPath(project).toFile

  def asClasspathEntry: IClasspathEntry = JavaCore.newContainerEntry(asEncodedPath)
}
} | mdr/folderclasspathcontainer | folder-classpath-container/src/com/github/mdr/folderclasspathcontainer/FolderInfo.scala | Scala | mit | 1,963 |
package com.twitter.finatra.json.tests.internal
import com.fasterxml.jackson.annotation.{JsonValue, JsonIgnore, JsonIgnoreProperties, JsonProperty}
import com.fasterxml.jackson.core.JsonParser
import com.fasterxml.jackson.databind._
import com.fasterxml.jackson.databind.annotation.JsonDeserialize
import com.fasterxml.jackson.databind.node.ValueNode
import com.twitter.finatra.domain.WrappedValue
import com.twitter.finatra.request._
import com.twitter.finatra.response.JsonCamelCase
import com.twitter.finatra.validation.{InvalidValidationInternal, Min, NotEmpty, ValidationResult}
import com.twitter.inject.Logging
import javax.inject.{Inject, Named}
import org.joda.time.DateTime
import scala.annotation.meta.param
import scala.math.BigDecimal.RoundingMode
// Minimal sealed hierarchy whose instances serialize via @JsonValue to a plain string.
sealed trait CarType {
@JsonValue
def toJson: String
}
object Volvo extends CarType {
override def toJson: String = "volvo"
}
object Audi extends CarType {
override def toJson: String = "audi"
}
// Note: serializes as "vw", not as the object name.
object Volkswagen extends CarType {
override def toJson: String = "vw"
}
// Fixtures below exercise specific Jackson/Finatra (de)serialization behaviours;
// names describe the feature under test.
case class Vehicle(vin: String, `type`: CarType)
case class CaseClass(id: Long, name: String)
// lazy vals / @JsonIgnore / @transient members: should be excluded from serialization.
case class CaseClassWithLazyVal(id: Long) {
lazy val woo = "yeah"
}
case class CaseClassWithIgnoredField(id: Long) {
@JsonIgnore
val ignoreMe = "Foo"
}
// Ignore-list given in snake_case while the member is camelCase.
@JsonIgnoreProperties(Array("ignore_me", "feh"))
case class CaseClassWithIgnoredFieldsMatchAfterToSnakeCase(id: Long) {
val ignoreMe = "Foo"
val feh = "blah"
}
@JsonIgnoreProperties(Array("ignore_me", "feh"))
case class CaseClassWithIgnoredFieldsExactMatch(id: Long) {
val ignore_me = "Foo"
val feh = "blah"
}
case class CaseClassWithTransientField(id: Long) {
@transient
val lol = "asdf"
}
case class CaseClassWithLazyField(id: Long) {
lazy val lol = "asdf"
}
// Overload shares the constructor field's name; serializer must pick the field.
case class CaseClassWithOverloadedField(id: Long) {
def id(prefix: String): String = prefix + id
}
case class CaseClassWithOption(value: Option[String] = None)
case class CaseClassWithJsonNode(value: JsonNode)
// Kitchen-sink fixture covering every commonly mapped Scala type.
case class CaseClassWithAllTypes(
map: Map[String, String],
set: Set[Int],
string: String,
list: List[Int],
seq: Seq[Int],
indexedSeq: IndexedSeq[Int],
vector: Vector[Int],
bigDecimal: BigDecimal,
bigInt: Int, //TODO: BigInt,
int: Int,
long: Long,
char: Char,
bool: Boolean,
short: Short,
byte: Byte,
float: Float,
double: Double,
any: Any,
anyRef: AnyRef,
intMap: Map[Int, Int] = Map(),
longMap: Map[Long, Long] = Map())
// Deliberately throws during construction to test deserialization error handling.
case class CaseClassWithException() {
throw new NullPointerException("Oops!!!")
}
// Nesting fixtures: case classes declared inside (nested) objects.
object OuterObject {
case class NestedCaseClass(id: Long)
object InnerObject {
case class SuperNestedCaseClass(id: Long)
}
}
// Secondary constructor: deserializer must target the primary one.
case class CaseClassWithTwoConstructors(id: Long, name: String) {
def this(id: Long) = this(id, "New User")
}
case class CaseClassWithSnakeCase(oneThing: String, twoThing: String)
// Array fixtures for every primitive element type.
case class CaseClassWithArrays(
one: String,
two: Array[String],
three: Array[Int],
four: Array[Long],
five: Array[Char],
bools: Array[Boolean],
bytes: Array[Byte],
doubles: Array[Double],
floats: Array[Float])
case class CaseClassWithArrayLong(array: Array[Long])
case class CaseClassWithArrayListOfIntegers(arraylist: java.util.ArrayList[java.lang.Integer])
case class CaseClassWithArrayBoolean(array: Array[Boolean])
case class CaseClassWithArrayWrappedValueLong(array: Array[WrappedValueLong])
case class CaseClassWithSeqLong(seq: Seq[Long])
case class CaseClassWithSeqWrappedValueLong(
seq: Seq[WrappedValueLong])
// Validation-annotation fixtures (@Min on fields, also nested in collections).
case class CaseClassWithValidation(
@Min(1) value: Long)
case class CaseClassWithSeqOfCaseClassWithValidation(
seq: Seq[CaseClassWithValidation])
case class WrappedValueLongWithValidation(
@Min(1) value: Long)
extends WrappedValue[Long]
case class CaseClassWithSeqWrappedValueLongWithValidation(
seq: Seq[WrappedValueLongWithValidation])
case class Foo(name: String)
// Method-based validation: validateId is discovered and run by the validator.
case class Car(
id: Long,
make: CarMake,
model: String,
passengers: Seq[Person]) {
def validateId = {
ValidationResult.validate(
id > 0,
"id must be > 0")
}
}
case class Person(
id: Int,
name: String,
age: Option[Int],
age_with_default: Option[Int] = None,
nickname: String = "unknown")
// @JsonProperty with a dotted JSON name mapped to a plain Scala field.
case class PersonWithDottedName(
id: Int,
@JsonProperty("name.last") lastName: String)
case class SimplePerson(name: String)
@JsonCamelCase
case class CamelCaseSimplePerson(
myName: String)
case class CaseClassWithMap(map: Map[String, String])
case class CaseClassWithSetOfLongs(set: Set[Long])
case class CaseClassWithSeqOfLongs(seq: Seq[Long])
case class CaseClassWithNestedSeqLong(
seqClass: CaseClassWithSeqLong,
setClass: CaseClassWithSetOfLongs)
case class Blah(foo: String)
case class TestIdStringWrapper(id: String)
extends WrappedValue[String]
case class ObjWithTestId(id: TestIdStringWrapper)
object Obj {
case class NestedCaseClassInObject(id: String)
}
// WrappedValue newtypes: should (de)serialize as their underlying value.
case class WrappedValueInt(value: Int)
extends WrappedValue[Int]
case class WrappedValueLong(value: Long)
extends WrappedValue[Long]
case class WrappedValueString(value: String)
extends WrappedValue[String]
case class WrappedValueIntInObj(
foo: WrappedValueInt)
case class WrappedValueStringInObj(
foo: WrappedValueString)
case class WrappedValueLongInObj(
foo: WrappedValueLong)
// Non-constructor val (backtick-escaped keyword name) alongside a constructor field.
case class CaseClassWithVal(
name: String) {
val `type`: String = "person"
}
// Enum fixtures: plain, optional, and collection-valued enum fields.
case class CaseClassWithEnum(
name: String,
make: CarMakeEnum)
case class CaseClassWithComplexEnums(
name: String,
make: CarMakeEnum,
makeOpt: Option[CarMakeEnum],
makeSeq: Seq[CarMakeEnum],
makeSet: Set[CarMakeEnum])
case class CaseClassWithSeqEnum(
enumSeq: Seq[CarMakeEnum])
case class CaseClassWithOptionEnum(
enumOpt: Option[CarMakeEnum])
// Joda DateTime fixtures, including @NotEmpty on an Option[DateTime]
// (exercises validation of optional fields).
case class CaseClassWithDateTime(
dateTime: DateTime)
case class CaseClassWithIntAndDateTime(
@NotEmpty name: String,
age: Int,
age2: Int,
age3: Int,
dateTime: DateTime,
dateTime2: DateTime,
dateTime3: DateTime,
dateTime4: DateTime,
@NotEmpty dateTime5: Option[DateTime])
// Injection fixtures: fields populated from the injector / request rather than JSON.
case class ClassWithFooClassInject(
@Inject fooClass: FooClass)
case class ClassWithQueryParamDateTimeInject(
@QueryParam dateTime: DateTime)
// Backtick-escaped field names.
case class CaseClassWithEscapedLong(
`1-5`: Long)
case class CaseClassWithEscapedString(
`1-5`: String)
case class CaseClassWithEscapedNormalString(
`a`: String)
case class UnicodeNameCaseClass(`winning-id`: Int, name: String)
case class TestEntityIdsResponse(
entityIds: Seq[Long],
previousCursor: String,
nextCursor: String)
// Same shape as above but with a companion object, to verify the companion
// does not interfere with (de)serialization.
object TestEntityIdsResponseWithCompanion {
val msg = "im the companion"
}
case class TestEntityIdsResponseWithCompanion(
entityIds: Seq[Long],
previousCursor: String,
nextCursor: String)
case class WrappedValueStringMapObject(
map: Map[WrappedValueString, String])
case class FooClass(id: String)
case class Group3(id: String)
extends Logging
case class CaseClassWithInvalidValidation(
@(InvalidValidationInternal@param) name: String,
make: CarMakeEnum)
case class NoConstructorArgs()
case class CaseClassWithBoolean(foo: Boolean)
case class CaseClassWithSeqBooleans(foos: Seq[Boolean])
case class CaseClassInjectStringWithDefault(
@Inject string: String = "DefaultHello")
case class CaseClassInjectInt(
@Inject age: Int)
case class CaseClassInjectOptionInt(
@Inject age: Option[Int])
case class CaseClassInjectOptionString(
@Inject string: Option[String])
case class CaseClassInjectString(
@Inject string: String)
// Error-path fixtures: conflicting/duplicated annotations should be rejected.
case class CaseClassTooManyInjectableAnnotations(
@Inject @QueryParam string: String)
case class CaseClassTooManyBindingAnnotations(
@Inject @Named("foo") @Named("bar") string: String)
// Custom deserializer fixtures (see MyBigDecimalDeserializer below).
case class CaseClassWithCustomDecimalFormat(
@JsonDeserialize(using = classOf[MyBigDecimalDeserializer])
myBigDecimal: BigDecimal,
@JsonDeserialize(using = classOf[MyBigDecimalDeserializer])
optMyBigDecimal: Option[BigDecimal])
case class CaseClassWithLongAndDeserializer(
@JsonDeserialize(contentAs = classOf[java.lang.Long])
long: Long)
case class CaseClassWithOptionLongAndDeserializer(
@JsonDeserialize(contentAs = classOf[java.lang.Long])
optLong: Option[Long])
/** Test deserializer: parses a BigDecimal and normalises it to two decimal places (HALF_UP). */
class MyBigDecimalDeserializer extends JsonDeserializer[BigDecimal] {
  override def deserialize(jp: JsonParser, ctxt: DeserializationContext): BigDecimal = {
    val node: ValueNode = jp.getCodec.readTree(jp)
    val parsed = BigDecimal(node.asText)
    // Round half-up to exactly two decimal places.
    parsed.setScale(2, RoundingMode.HALF_UP)
  }

  /** Value Jackson substitutes when the input is absent/empty. */
  override def getEmptyValue: BigDecimal = BigDecimal(0)
}
// Fixtures declared inside a package object, to verify deserialization of such classes.
package object internal {
case class SimplePersonInPackageObject( // not recommended but used here for testing use case
name: String = "default-name")
case class SimplePersonInPackageObjectWithoutConstructorParams() // not recommended but used here for testing use case
}
| syamantm/finatra | jackson/src/test/scala/com/twitter/finatra/json/tests/internal/ExampleCaseClasses.scala | Scala | apache-2.0 | 8,810 |
package com.github.dcapwell.netty.examples.block.v2
import com.google.common.primitives.{Ints, Longs}
/** Header common to every request: protocol version plus the request-type discriminator. */
case class RequestHeader(version: Version, tpe: RequestType.RequestType)
// NOTE(review): scala.Enumeration is generally discouraged in favour of a sealed ADT,
// but replacing it would change the wire/API surface; left as-is.
object RequestType extends Enumeration {
type RequestType = Value
val GetBlock, PutBlock = Value
}
sealed trait Request extends Any
object Request {
// Wire sizes in bytes; presumably version is a Long and type an Int ordinal — TODO confirm against the codec.
val HeaderSize = Longs.BYTES + Ints.BYTES
val PutSize = Longs.BYTES
// blockOffset (Long) + sequenceNum (Long) + length (Int) + last flag (1 byte).
val PacketHeaderSize = 2 * Longs.BYTES + Ints.BYTES + 1 // boolean
val GetBlockSize = 2 * Ints.BYTES
}
// offset/length of None means "read the whole block" — TODO confirm with the handler.
case class GetBlock(blockId: BlockId, offset: Option[Int], length: Option[Int]) extends Request
case class PutBlock(blockId: BlockId) extends Request
case class PacketHeader(blockOffset: Long, sequenceNum: Long, length: Int, last: Boolean)
case class PutPacket(header: PacketHeader, data: Array[Byte]) extends Request
object ResponseType extends Enumeration {
type ResponseType = Value
val GetBlockResponse, PutBlockSuccess, BlockNotFound = Value
}
case class ResponseHeader(tpe: ResponseType.ResponseType)
sealed trait Response extends Any
case class GetBlockResponse(blockId: BlockId, length: Int, data: Array[Byte]) extends Response
case class PutBlockSuccess(blockId: BlockId) extends Response
// failure cases
case class BlockNotFound(blockId: BlockId) extends Response
case class OutOfOrder(blockId: BlockId, msg: String) extends Response
| dcapwell/netty-examples | src/main/scala/com/github/dcapwell/netty/examples/block/v2/Protocol.scala | Scala | mit | 1,372 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package whisk.core.controller.test
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import akka.http.scaladsl.model.StatusCodes.NotFound
import akka.http.scaladsl.model.StatusCodes.OK
import akka.http.scaladsl.server.Route
import whisk.core.controller.RespondWithHeaders
/**
* Tests the API in general
*
* Unit tests of the controller service as a standalone component.
* These tests exercise a fresh instance of the service object in memory -- these
* tests do NOT communication with a whisk deployment.
*
* These tests differ from the more specific tests in that they make calls to the
* outermost routes of the controller.
*
* @Idioglossia
* "using Specification DSL to write unit tests, as in should, must, not, be"
* "using Specs2RouteTest DSL to chain HTTP requests for unit testing, as in ~>"
*/
@RunWith(classOf[JUnitRunner])
class RespondWithHeadersTests extends ControllerTestCommon with RespondWithHeaders {
behavior of "General API"
// Test routes: everything under /api/v1 is wrapped in sendCorsHeaders; /other is not.
// The trailing `reject` keeps unmatched /api/v1 paths inside the CORS wrapper, which
// is what the "bogus routes" test below relies on.
val routes = {
pathPrefix("api" / "v1") {
sendCorsHeaders {
path("one") {
complete(OK)
} ~ path("two") {
complete(OK)
} ~ options {
complete(OK)
} ~ reject
}
} ~ pathPrefix("other") {
complete(OK)
}
}
it should "respond to options" in {
Options("/api/v1") ~> Route.seal(routes) ~> check {
headers should contain allOf (allowOrigin, allowHeaders)
}
}
it should "respond to options on every route under /api/v1" in {
Options("/api/v1/one") ~> Route.seal(routes) ~> check {
headers should contain allOf (allowOrigin, allowHeaders)
}
Options("/api/v1/two") ~> Route.seal(routes) ~> check {
headers should contain allOf (allowOrigin, allowHeaders)
}
}
it should "respond to options even on bogus routes under /api/v1" in {
Options("/api/v1/bogus") ~> Route.seal(routes) ~> check {
headers should contain allOf (allowOrigin, allowHeaders)
}
}
// Outside the sendCorsHeaders wrapper the sealed route has no OPTIONS handler.
it should "not respond to options on routes before /api/v1" in {
Options("/api") ~> Route.seal(routes) ~> check {
status shouldBe NotFound
}
}
}
| duynguyen/incubator-openwhisk | tests/src/test/scala/whisk/core/controller/test/RespondWithHeadersTests.scala | Scala | apache-2.0 | 2,960 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import org.slf4j.Logger
import org.slf4j.LoggerFactory
/**
* Utility trait for classes that want to log data. Creates a SLF4J logger for the class and allows
* logging messages at different levels using methods that only evaluate parameters lazily if the
* log level is enabled.
*/
/**
 * Utility trait for classes that want to log data. Creates a SLF4J logger for the class
 * and allows logging messages at different levels using methods that only evaluate
 * parameters lazily if the log level is enabled.
 */
trait Logging {
  // Make the log field transient so that objects with Logging can
  // be serialized and used on another machine
  @transient private var log_ : Logger = null

  /**
   * Returns the SLF4J logger for this object, creating and caching it on first use.
   * The trailing '$' that the compiler appends to Scala object class names is stripped
   * so objects log under their source name.
   */
  protected def log: Logger = {
    if (log_ == null) {
      log_ = LoggerFactory.getLogger(this.getClass.getName.stripSuffix("$"))
    }
    log_
  }

  // Log methods that take only a String. Messages are by-name parameters, so the
  // message expression is only evaluated when the corresponding level is enabled.
  protected def logInfo(msg: => String) {
    if (log.isInfoEnabled) log.info(msg)
  }

  protected def logDebug(msg: => String) {
    if (log.isDebugEnabled) log.debug(msg)
  }

  protected def logTrace(msg: => String) {
    if (log.isTraceEnabled) log.trace(msg)
  }

  protected def logWarning(msg: => String) {
    if (log.isWarnEnabled) log.warn(msg)
  }

  protected def logError(msg: => String) {
    if (log.isErrorEnabled) log.error(msg)
  }

  // Log methods that take Throwables (Exceptions/Errors) too
  protected def logInfo(msg: => String, throwable: Throwable) {
    if (log.isInfoEnabled) log.info(msg, throwable)
  }

  protected def logDebug(msg: => String, throwable: Throwable) {
    if (log.isDebugEnabled) log.debug(msg, throwable)
  }

  protected def logTrace(msg: => String, throwable: Throwable) {
    if (log.isTraceEnabled) log.trace(msg, throwable)
  }

  protected def logWarning(msg: => String, throwable: Throwable) {
    if (log.isWarnEnabled) log.warn(msg, throwable)
  }

  protected def logError(msg: => String, throwable: Throwable) {
    if (log.isErrorEnabled) log.error(msg, throwable)
  }

  protected def isTraceEnabled(): Boolean = {
    log.isTraceEnabled
  }

  // Method for ensuring that logging is initialized, to avoid having multiple
  // threads do it concurrently (as SLF4J initialization is not thread safe).
  protected def initLogging() { log }
}
| mkolod/incubator-spark | core/src/main/scala/org/apache/spark/Logging.scala | Scala | apache-2.0 | 3,144 |
/*
* This file is part of SmartDiet.
*
* Copyright (C) 2011, Aki Saarinen.
*
* SmartDiet was developed in affiliation with Aalto University School
* of Science, Department of Computer Science and Engineering. For
* more information about the department, see <http://cse.aalto.fi/>.
*
* SmartDiet is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* SmartDiet is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with SmartDiet. If not, see <http://www.gnu.org/licenses/>.
*/
package fi.akisaarinen.smartdiet.measurement.networkpacket
import org.scalatest.FunSuite
import io.Source
import fi.akisaarinen.smartdiet.measurement.networkpacket.TcpdumpReader.{TcpPacket, UdpPacket}
class TcpdumpReaderTest extends FunSuite {
  test("example tcp dump") {
    // Fix: the Source was never closed, leaking the underlying file handle.
    val source = Source.fromFile("test-data/tcp.dump")
    try {
      val data = source.getLines().mkString("\\n")
      val lines = TcpdumpReader.parse(data)
      // 12 packets total: 2 UDP + 10 TCP.
      expect(12) { lines.size }
      expect(2) { lines.count(_.isInstanceOf[UdpPacket]) }
      expect(10) { lines.count(_.isInstanceOf[TcpPacket]) }
    } finally source.close()
  }
}
} | akisaarinen/smartdiet | src/test/scala/fi/akisaarinen/smartdiet/measurement/networkpacket/TcpdumpReaderTest.scala | Scala | gpl-3.0 | 1,493 |
/*
* Copyright (c) 2014-2021 All Rights Reserved by the RWS Group for and on behalf of its affiliates and subsidiaries.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.sdl.odata.api.processor.query
import org.scalatest.FunSuite
// NOTE(review): these tests only exercise query *construction* (i.e. that the DSL
// composes and compiles); they make no assertions on the resulting structure.
// Consider asserting on the built ODataQuery values.
class QueryTest extends FunSuite {
test("/Components?$filter=customMeta.Key eq 'Test' and customMeta.Value eq 'MyTestValue'") {
// JPA query: SELECT c FROM Components c JOIN c.customMeta cm WHERE cm.Key = ? AND cm.Value = ?
ODataQuery(
JoinOperation(
SelectOperation("Components"),
SelectOperation("CustomMeta")
.where(PropertyCriteriaValue("Key").eq(LiteralCriteriaValue("Test"))
.and(PropertyCriteriaValue("Value").eq(LiteralCriteriaValue("MyTestValue")))),
"customMeta", JoinSelectRight
)
)
}
test("/Customers(123)/address") {
// JPA query: SELECT c.Address FROM Customers c where c.id = ?
ODataQuery(
SelectPropertiesOperation(
SelectByKeyOperation(
SelectOperation("Customers"),
Map("id" -> "123")),
List("address")
)
)
}
test("/Persons?$expand=details"){
ODataQuery(
ExpandOperation(
SelectOperation("Persons"), List("details")
)
)
}
test("/Components(ItemId=100,PublicationId=10)/Comments"){
ODataQuery(
JoinOperation(
SelectOperation("Components")
.where(
PropertyCriteriaValue("ItemId")
.eq(LiteralCriteriaValue(100))
.and(PropertyCriteriaValue("PublicationId").eq(LiteralCriteriaValue(10)))),
SelectOperation("Comments"),
"comments", JoinSelectRight
)
)
}
test("/Promotion('MyColaCampaign')?$filter=hits mul 2 gt 100"){
ODataQuery(
SelectByKeyOperation(
SelectOperation("Promotions"), Map("title" -> "MyColaCampaign")
)
.where(
PropertyCriteriaValue("hits").mul(LiteralCriteriaValue(2)).gt(LiteralCriteriaValue(100))
)
)
}
}
| sdl/odata | odata_api/src/test/scala/com/sdl/odata/api/processor/query/QueryTest.scala | Scala | apache-2.0 | 2,521 |
package org.casualmiracles.finance.models
// Term Structure Lattice Models
// 2010 Martin Haugh
// http://www.columbia.edu/~mh2078/LatticeModelsFE1.pdf
// Concrete model assembling the generic model/computations with a geometric interest-rate lattice.
object LatticeFE1Model extends GenericModel with GenericComputations with GeometricInterestRateModel{
}
| yuriylesyuk/scala-contracts | src/main/scala/org/casualmiracles/finance/models/LatticeModelsFE1Model.scala | Scala | mit | 260 |
package scalan
package it.lms
import scalan.compilation.lms._
import scalan.compilation.lms.scalac.LmsCompilerScala
import scalan.compilation.lms.uni.LmsCompilerUni
import scalan.it.smoke.{SmokeItTests, SmokeProg}
// Integration smoke tests comparing staged (LMS-compiled) program output against the
// standard (unstaged) interpretation. Several tests are restricted to the Scala
// backend only (progStagedOnly) because of gaps in the C++/JNI backend — see TODOs.
class LmsSmokeItTests extends SmokeItTests {
class ProgExp extends ScalanDslExp with SmokeProg with JNIExtractorOpsExp
val progStaged = new LmsCompilerScala(new ProgExp)
val progStagedU = new LmsCompilerUni(new ProgExp)
// Uni compiler disabled by default (see commented-out progStagedU).
val defaultCompilers = compilers(progStaged/*, progStagedU*/)
val progStagedOnly = compilers(progStaged)
test("simpleSum") {
//TODO: lack of Either[A,B] support in JNIExtractorOps
compareOutputWithStd((_: SmokeProg).simpleSum, progStagedOnly)(7)
}
test("sumOps") {
//TODO: lack of Either[A,B] support in JNIExtractorOps
compareOutputWithStd(_.sumOps, progStagedOnly)(Right[Unit, Int](7))
}
test("lambdaApply") {
val x = 7
val f = (_: Int) * 2
compareOutputWithStd(_.lambdaApply, progStagedOnly)((x, f))
//TODO: can not pass lambda via JNI
}
test("lambdaConst") {
assert(getStagedOutput((_: SmokeProg).lambdaConst, progStagedOnly)(7).head.head.isInstanceOf[Right[_, _]])
//TODO: lack of Either[A,B] support in JNIExtractorOps
}
test("aggregates") {
compareOutputWithStd(_.aggregates)(Array(1, 2, 3, 4, 5))
}
test("sortBy") {
val in = Array((2, 1), (3, 2), (1, 3), (5, 4), (4, 5))
compareOutputWithStd(_.sortBy, progStagedOnly)(in)
//TODO: ArraySortBy is unsupported in CxxShptrCodegen
}
test("fillArrayBuffer") {
val in = Array(1, 2, 3, 4, 5, 6, 7, 8, 9)
compareOutputWithStd(_.fillArrayBuffer, progStagedOnly)(in)
//fixme error http://10.122.85.33:81/scalan-lite/scalan-lite-public/issues/49
}
test("makeArrayBuffer") {
val in = Array(1, 2, 3, 4, 5, 6, 7, 8, 9)
compareOutputWithStd(_.makeArrayBuffer, progStagedOnly)(in)
}
test("emptyNestedUnitArray") {
// Wrong type is generated in SBT/TeamCity, right in IDEA
pending
val in = 3
val seqOut = progStd.emptyNestedUnitArray(in)
println(seqOut)
val Seq(Seq(stgOut)) = getStagedOutput(_.emptyNestedUnitArray, progStagedOnly)(in)
println(stgOut)
}
test("pairIf") {
compareOutputWithStd(_.pairIf, progStagedOnly)(Tuple2(1, Array(1,2,3)))
}
test("arrayUpdateMany") {
val arr = Array(1,2,3)
val idx = Array(0,2)
val vls = Array(11, 33)
compareOutputWithStd(_.arrayUpdateMany, progStagedOnly)((arr,(idx,vls)))
}
test("applyLambda2Array") {
//FIXME: applying lambda to Array don't compile in Uni compiler (see issue #50)
compareOutputWithStd(_.applyLambda2Array, progStagedOnly)(Array(1, 2, 3, 4))
}
test("listRangeFrom0") {
compareOutputWithStd(_.listRangeFrom0, progStagedOnly)(3)
}
test("stringCompare") {
val in = ("abc", "abc")
compareOutputWithStd(_.stringCompare, progStagedOnly)(in)
}
test("stringMax") {
val in = ("abc", "abc")
compareOutputWithStd(_.stringMax, progStagedOnly)(in)
}
test("randoms") {
val (i, d) = (3, 3.14)
val in = (i, d)
val Seq(Seq((ri, rd))) = getStagedOutput(_.randoms, progStagedOnly)(in)
// Randomness: only range properties can be asserted, not exact values.
assert(ri >= 0 && ri < i)
assert(rd >= 0.0 && rd <= d)
}
}
| scalan/scalan | lms-backend/core/src/it/scala/scalan/it/lms/LmsSmokeItTests.scala | Scala | apache-2.0 | 3,204 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.aliyun.odps
import java.text.SimpleDateFormat
import com.aliyun.odps.account.AliyunAccount
import com.aliyun.odps.data.Record
import com.aliyun.odps.tunnel.TableTunnel
import com.aliyun.odps.tunnel.io.TunnelRecordWriter
import com.aliyun.odps.{Column, Odps, OdpsException, OdpsType, Partition, PartitionSpec, TableSchema}
import org.apache.spark.aliyun.utils.OdpsUtils
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.api.java.function.{Function2 => JFunction2, Function3 => JFunction3}
import org.apache.spark.executor.{DataWriteMethod, OutputMetrics}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{StructField, _}
import org.apache.spark.sql.{DataFrame, Row, SQLContext}
import org.apache.spark.{Logging, SparkContext, TaskContext}
import scala.reflect.ClassTag
class OdpsOps(@transient sc: SparkContext, accessKeyId: String,
accessKeySecret: String, odpsUrl: String, tunnelUrl: String)
extends Logging with Serializable {
// Transient: the ODPS client objects are rebuilt where needed rather than serialized
// with this (Serializable) class.
@transient val account = new AliyunAccount(accessKeyId, accessKeySecret)
@transient val odps = new Odps(account)
odps.setEndpoint(odpsUrl)
@transient val tunnel = new TableTunnel(odps)
tunnel.setEndpoint(tunnelUrl)
@transient val odpsUtils = new OdpsUtils(odps)
// NOTE(review): SimpleDateFormat is not thread-safe — confirm this instance is never
// shared across concurrent tasks.
val dateFormat = new SimpleDateFormat("yyyy-MM-dd")
// Pretends T is AnyRef to satisfy ClassTag bounds for Java callers, which cannot
// supply a real ClassTag.
def fakeClassTag[T]: ClassTag[T] = ClassTag.AnyRef.asInstanceOf[ClassTag[T]]
/**
* Read table from ODPS.
* {{{
* OdpsOps odpsOps = ...
* static class RecordToLongs implements Function2<Record, TableSchema, List<Long>> {
* @Override
* public List<Long> call(Record record, TableSchema schema) throws Exception {
* List<Long> ret = new ArrayList<Long>();
* for (int i = 0; i < schema.getColumns().size(); i++) {
* ret.add(Long.valueOf(record.getString(i)));
* }
* return ret;
* }
* }
*
* JavaRDD<List<Long>> readData = odpsOps.readTableWithJava("odps-project",
* "odps-table", "odps-partition", new RecordToLongs(), 2);
* }}}
* @param project The name of ODPS project.
* @param table The name of table, which job are reading.
* @param partition The name of partition, when job is reading a
* `Partitioned Table`, like pt='xxx',ds='yyy'.
* @param transfer A function for transferring ODPS table to
* [[org.apache.spark.api.java.JavaRDD]]. We apply the function to all
* [[com.aliyun.odps.data.Record]] of table.
* @param numPartition The number of RDD partition, implying the concurrency to
* read ODPS table.
* @return A JavaRDD which contains all records of ODPS table.
*/
def readTableWithJava[R](
project: String,
table: String,
partition: String,
transfer: JFunction2[Record, TableSchema, R],
numPartition: Int): JavaRDD[R] = {
// Adapt the Java Function2 to a Scala function and delegate to the Scala readTable;
// fakeClassTag supplies the ClassTag that Java callers cannot provide.
new JavaRDD(
readTable(project, table, partition,
(record: Record, schema: TableSchema) => transfer.call(record, schema),
numPartition)(fakeClassTag))(fakeClassTag)
}
/**
* Read table from ODPS.
* {{{
* OdpsOps odpsOps = ...
* static class RecordToLongs implements Function2<Record, TableSchema, List<Long>> {
* @Override
* public List<Long> call(Record record, TableSchema schema) throws Exception {
* List<Long> ret = new ArrayList<Long>();
* for (int i = 0; i < schema.getColumns().size(); i++) {
* ret.add(Long.valueOf(record.getString(i)));
* }
* return ret;
* }
* }
*
* JavaRDD<List<Long>> readData = odpsOps.readTableWithJava("odps-project",
* "odps-table", new RecordToLongs(), 2);
* }}}
* @param project The name of ODPS project.
* @param table The name of table from which the job is reading
* @param transfer A function for transferring ODPS table to
* [[org.apache.spark.api.java.JavaRDD]]. We apply the function to all
* [[com.aliyun.odps.data.Record]] of table.
* @param numPartition The number of RDD partition, implying the concurrency
* to read ODPS table.
* @return A JavaRDD which contains all records of ODPS table.
*/
def readTableWithJava[R](
project: String,
table: String,
transfer: JFunction2[Record, TableSchema, R],
numPartition: Int): JavaRDD[R] = {
// Non-partitioned variant: adapts the Java Function2 and delegates to the Scala API.
new JavaRDD(
readTable(project, table,
(record: Record, schema: TableSchema) => transfer.call(record, schema),
numPartition)(fakeClassTag))(fakeClassTag)
}
/**
* Save a RDD to ODPS table.
* {{{
* OdpsOps odpsOps = ...
* JavaRDD<List<Long>> data = ...
* static class SaveRecord implements Function3<List<Long>,
* Record, TableSchema, BoxedUnit> {
* @Override
* public BoxedUnit call(List<Long> data, Record record, TableSchema schema)
* throws Exception {
* for (int i = 0; i < schema.getColumns().size(); i++) {
* record.setString(i, data.get(i).toString());
* }
* return null;
* }
* }
*
* odpsOps.saveToTableWithJava("odps-project", "odps-table",
* "odps-partition", data, new SaveRecord());
* }}}
* @param project The name of ODPS project.
* @param table The name of table to which the job is writing.
* @param partition The name of partition, when job is writing a
* `Partitioned Table`, like pt='xxx',ds='yyy'.
* @param javaRdd A [[org.apache.spark.api.java.JavaRDD]] which will be
* written into a ODPS table.
* @param transfer A function for transferring
* [[org.apache.spark.api.java.JavaRDD]] to ODPS table. We apply the
* function to all elements of JavaRDD.
*/
def saveToTableWithJava[T](
project: String,
table: String,
partition: String,
javaRdd: JavaRDD[T],
transfer: JFunction3[T, Record, TableSchema, Unit]) {
// Delegates with defaultCreate = false and overwrite = false: the partition must
// already exist and existing data is not replaced.
saveToTable(project, table, partition, javaRdd.rdd,
(t: T, record: Record, schema: TableSchema) => transfer.call(t, record,
schema),
false, false)(fakeClassTag)
}
/**
* Save a RDD to ODPS table.
* {{{
* OdpsOps odpsOps = ...
* JavaRDD<List<Long>> data = ...
* static class SaveRecord implements Function3<List<Long>, Record,
* TableSchema, BoxedUnit> {
* @Override
* public BoxedUnit call(List<Long> data, Record record, TableSchema schema)
* throws Exception {
* for (int i = 0; i < schema.getColumns().size(); i++) {
* record.setString(i, data.get(i).toString());
* }
* return null;
* }
* }
*
* odpsOps.saveToTableWithJava("odps-project", "odps-table",
* "odps-partition", data, new SaveRecord(), false);
* }}}
* @param project The name of ODPS project.
* @param table The name of table to which the job is writing.
* @param partition The name of partition, when job is writing a
* `Partitioned Table`, like pt='xxx',ds='yyy'.
* @param javaRdd A [[org.apache.spark.api.java.JavaRDD]] which will be
* written into a ODPS table.
* @param transfer A function for transferring
* [[org.apache.spark.api.java.JavaRDD]] to ODPS table.We apply the
* function to all elements of JavaRDD.
* @param defaultCreate Implying whether to create a table partition, if
* specific partition does not exist.
*/
def saveToTableWithJava[T](
project: String,
table: String,
partition: String,
javaRdd: JavaRDD[T],
transfer: JFunction3[T, Record, TableSchema, Unit],
defaultCreate: Boolean) {
// Caller controls partition auto-creation; overwrite is fixed to false.
saveToTable(project, table, partition, javaRdd.rdd,
(t: T, record: Record, schema: TableSchema) => transfer.call(t, record, schema),
defaultCreate, false)(fakeClassTag)
}
/**
 * Java-friendly writer: saves a JavaRDD into a partitioned ODPS table,
 * optionally creating the partition and/or overwriting it first.
 *
 * @param project The name of the ODPS project.
 * @param table The name of the table being written.
 * @param partition Partition spec, e.g. pt='xxx',ds='yyy'.
 * @param javaRdd The JavaRDD whose elements are written into the table.
 * @param transfer Callback copying one RDD element into an ODPS Record;
 *                 applied to every element of the RDD.
 * @param defaultCreate Whether to create the partition if it does not exist.
 * @param overwrite Whether to overwrite the partition if it exists.
 *                  NOTE: only partition overwrite is supported, not table.
 */
def saveToTableWithJava[T](
    project: String,
    table: String,
    partition: String,
    javaRdd: JavaRDD[T],
    transfer: JFunction3[T, Record, TableSchema, Unit],
    defaultCreate: Boolean,
    overwrite: Boolean) {
  // Adapt the Java Function3 into a Scala closure before delegating.
  val scalaTransfer = (t: T, record: Record, schema: TableSchema) =>
    transfer.call(t, record, schema)
  saveToTable(project, table, partition, javaRdd.rdd, scalaTransfer,
    defaultCreate, overwrite)(fakeClassTag)
}
/**
 * Java-friendly writer: saves a JavaRDD into a non-partitioned ODPS table.
 *
 * @param project The name of the ODPS project.
 * @param table The name of the table being written.
 * @param javaRdd The JavaRDD whose elements are written into the table.
 * @param transfer Callback copying one RDD element into an ODPS Record;
 *                 applied to every element of the RDD.
 */
def saveToTableWithJava[T](
    project: String,
    table: String,
    javaRdd: JavaRDD[T],
    transfer: JFunction3[T, Record, TableSchema, Unit]) {
  // Adapt the Java Function3 into a Scala closure before delegating.
  val scalaTransfer = (t: T, record: Record, schema: TableSchema) =>
    transfer.call(t, record, schema)
  saveToTable(project, table, javaRdd.rdd, scalaTransfer)(fakeClassTag)
}
/**
 * Read an ODPS table (or one of its partitions) into an RDD.
 *
 * The special partition value "all" reads every partition of the table and
 * unions the per-partition RDDs together.
 *
 * @param project The name of the ODPS project.
 * @param table The name of the table being read.
 * @param partition Partition spec, e.g. pt='xxx',ds='yyy', or "all".
 * @param transfer Converts each [[com.aliyun.odps.data.Record]] to a `T`.
 * @param numPartition Number of RDD partitions, i.e. the read concurrency.
 * @return An RDD containing the requested records of the ODPS table.
 */
@unchecked
def readTable[T: ClassTag](
    project: String,
    table: String,
    partition: String,
    transfer: (Record, TableSchema) => T,
    numPartition: Int): RDD[T] = {
  val cleanedTransfer = sc.clean(transfer)
  if (partition == "all") {
    // Build one OdpsRDD per physical partition and union them.
    val perPartition = odpsUtils.getAllPartitionSpecs(table, project).map { ptSpec =>
      new OdpsRDD[T](sc, accessKeyId, accessKeySecret, odpsUrl, tunnelUrl,
        project, table, ptSpec.toString, numPartition, cleanedTransfer): RDD[T]
    }
    perPartition.reduce(_ union _)
  } else {
    new OdpsRDD[T](sc, accessKeyId, accessKeySecret, odpsUrl, tunnelUrl,
      project, table, partition, numPartition, cleanedTransfer)
  }
}
/**
 * Read a non-partitioned ODPS table into an RDD.
 *
 * @param project The name of the ODPS project.
 * @param table The name of the table being read.
 * @param transfer Converts each [[com.aliyun.odps.data.Record]] to a `T`.
 * @param numPartition Number of RDD partitions, i.e. the read concurrency.
 * @return An RDD containing all records of the ODPS table.
 */
@unchecked
def readTable[T: ClassTag](
    project: String,
    table: String,
    transfer: (Record, TableSchema) => T,
    numPartition: Int): RDD[T] = {
  // Clean the closure so it is serializable to executors.
  val cleanedTransfer = sc.clean(transfer)
  new OdpsRDD[T](sc, accessKeyId, accessKeySecret, odpsUrl, tunnelUrl,
    project, table, numPartition, cleanedTransfer)
}
/**
 * Load a partitioned ODPS table into a [[org.apache.spark.sql.DataFrame]].
 *
 * @param sqlContext A Spark SQL context.
 * @param project The name of the ODPS project.
 * @param table The name of the table being read.
 * @param partition Partition spec, e.g. pt='xxx',ds='yyy'.
 * @param cols Column indexes to load; an empty array loads every column.
 * @param numPartition Number of RDD partitions, i.e. the read concurrency.
 * @return A DataFrame containing the selected columns of the ODPS table.
 */
def loadOdpsTable(
    sqlContext: SQLContext,
    project: String,
    table: String,
    partition: String,
    cols: Array[Int],
    numPartition: Int): DataFrame = {
  val totalCols = odpsUtils.getTableSchema(project, table, false).length
  val schema = prepareSchema(cols, totalCols, project, table, false)
  val selectedCols = prepareCols(cols, totalCols)
  val rowRdd = readTable(project, table, partition, readTransfer(selectedCols),
    numPartition).map(values => Row.fromSeq(values.toSeq))
  sqlContext.createDataFrame(rowRdd, schema)
}
/**
 * Load a non-partitioned ODPS table into a [[org.apache.spark.sql.DataFrame]].
 *
 * @param sqlContext A Spark SQL context.
 * @param project The name of the ODPS project.
 * @param table The name of the table being read.
 * @param cols Column indexes to load, e.g. Array(0, 1, 3); empty loads all.
 * @param numPartition Number of RDD partitions, i.e. the read concurrency.
 * @return A DataFrame containing the selected columns of the ODPS table.
 */
def loadOdpsTable(
    sqlContext: SQLContext,
    project: String,
    table: String,
    cols: Array[Int],
    numPartition: Int): DataFrame = {
  val totalCols = odpsUtils.getTableSchema(project, table, false).length
  val schema = prepareSchema(cols, totalCols, project, table, false)
  val selectedCols = prepareCols(cols, totalCols)
  val rowRdd = readTable(project, table, readTransfer(selectedCols), numPartition)
    .map(values => Row.fromSeq(values.toSeq))
  sqlContext.createDataFrame(rowRdd, schema)
}
/**
 * Save an RDD into a partitioned ODPS table without creating the partition
 * and without overwriting it (both flags default to false).
 *
 * @param project The name of the ODPS project.
 * @param table The name of the table being written.
 * @param partition Partition spec, e.g. pt='xxx',ds='yyy'.
 * @param rdd The RDD whose elements are written into the table.
 * @param transfer Copies one RDD element into an ODPS Record; applied to
 *                 every element of the RDD.
 */
def saveToTable[T: ClassTag](
    project: String,
    table: String,
    partition: String,
    rdd: RDD[T],
    transfer: (T, Record, TableSchema) => Unit) {
  saveToTable(project, table, partition, rdd, transfer,
    defaultCreate = false, overwrite = false)
}
/**
 * Save an RDD into a partitioned ODPS table, optionally creating the
 * partition when it does not exist. Never overwrites an existing partition.
 *
 * @param project The name of the ODPS project.
 * @param table The name of the table being written.
 * @param partition Partition spec, e.g. pt='xxx',ds='yyy'.
 * @param rdd The RDD whose elements are written into the table.
 * @param transfer Copies one RDD element into an ODPS Record; applied to
 *                 every element of the RDD.
 * @param defaultCreate Whether to create the partition if it does not exist.
 */
def saveToTable[T: ClassTag](
    project: String,
    table: String,
    partition: String,
    rdd: RDD[T],
    transfer: (T, Record, TableSchema) => Unit,
    defaultCreate: Boolean) {
  saveToTable(project, table, partition, rdd, transfer, defaultCreate,
    overwrite = false)
}
/**
 * Save an RDD into a partitioned ODPS table through the tunnel upload API.
 *
 * Each Spark task opens its own tunnel connection, writes its RDD partition
 * under a shared upload-session id, and the driver commits all block ids at
 * the end.
 *
 * @param project The name of the ODPS project.
 * @param table The name of the table being written.
 * @param partition Partition spec, e.g. pt='xxx',ds='yyy'.
 * @param rdd The RDD whose elements are written into the table.
 * @param transfer Copies one RDD element into an ODPS Record; applied to
 *                 every element of the RDD.
 * @param defaultCreate Whether to create the partition if it does not exist.
 * @param overwrite Whether to overwrite the partition if it exists
 *                  (implemented as drop + recreate). NOTE: only partition
 *                  overwrite is supported, not whole tables.
 */
@unchecked
def saveToTable[T: ClassTag](
    project: String,
    table: String,
    partition: String,
    rdd: RDD[T],
    transfer: (T, Record, TableSchema) => Unit,
    defaultCreate: Boolean,
    overwrite: Boolean) {
  // Adapt the caller's side-effecting transfer into one that returns the
  // record, which is the shape the tunnel writer consumes.
  def transfer0(t: T, record: Record, scheme: TableSchema): Record = {
    transfer(t, record, scheme)
    record
  }
  val func = sc.clean(transfer0 _)
  odps.setDefaultProject(project)
  val partitionSpec = new PartitionSpec(partition)
  if (overwrite) {
    val success = dropPartition(project, table, partition)
    if (!success) {
      // Fix: a failed delete was previously logged at info level, which made
      // overwrite failures easy to miss; warn instead.
      logWarning("delete partition failed.")
    }
    createPartition(project, table, partition)
  }
  if (defaultCreate) {
    createPartition(project, table, partition)
  }
  val uploadSession = tunnel.createUploadSession(project, table, partitionSpec)
  logInfo("Odps upload session status is: " + uploadSession.getStatus.toString)
  val uploadId = uploadSession.getId
  // Executed on executors: rebuilds an ODPS client locally because the
  // driver-side client objects are not serializable.
  def writeToFile(context: TaskContext, iter: Iterator[T]) {
    val account_ = new AliyunAccount(accessKeyId, accessKeySecret)
    val odps_ = new Odps(account_)
    odps_.setDefaultProject(project)
    odps_.setEndpoint(odpsUrl)
    val tunnel_ = new TableTunnel(odps_)
    tunnel_.setEndpoint(tunnelUrl)
    val partitionSpec_ = new PartitionSpec(partition)
    val uploadSession_ = tunnel_.getUploadSession(project, table,
      partitionSpec_, uploadId)
    // Use the Spark partition id as the tunnel block id.
    val writer = uploadSession_.openRecordWriter(context.partitionId)
    // for odps metrics monitor
    var recordsWritten = 0L
    val outputMetrics = new OutputMetrics(DataWriteMethod.Hadoop)
    context.taskMetrics.outputMetrics = Some(outputMetrics)
    while (iter.hasNext) {
      val value = iter.next()
      logDebug("context id: " + context.partitionId + " write: " + value)
      writer.write(func(value, uploadSession_.newRecord(),
        uploadSession_.getSchema))
      recordsWritten += 1
    }
    logDebug("ready context id: " + context.partitionId)
    writer.close()
    logDebug("finish context id: " + context.partitionId)
    outputMetrics.setRecordsWritten(recordsWritten)
    val totalBytes = writer.asInstanceOf[TunnelRecordWriter].getTotalBytes
    outputMetrics.setBytesWritten(totalBytes)
  }
  sc.runJob(rdd, writeToFile _)
  // Commit one block id per RDD partition to finalize the upload session.
  val arr = Array.tabulate(rdd.partitions.length)(l => Long.box(l))
  uploadSession.commit(arr)
}
/**
 * Save a RDD to ODPS table.
 * {{{
 *   val odpsOps = ...
 *   val data: RDD[Array[Long]] = ...
 *   odps.saveToTable("odps-project", "odps-table", data, writeFunc)
 *
 *   def writeFunc(kv: Array[Long], record: Record, schema: TableSchema) {
 *     for (i <- 0 until schema.getColumns.size()) {
 *       record.setString(i, kv(i).toString)
 *     }
 *   }
 * }}}
 * @param project The name of ODPS project.
 * @param table The name of table, which job is writing.
 * @param rdd A org.apache.spark.rdd.RDD which will be written into a ODPS table.
 * @param transfer A function for transferring org.apache.spark.rdd.RDD to
 *                 ODPS table. We apply the function to all elements of RDD.
 */
@unchecked
def saveToTable[T: ClassTag](
project: String,
table: String,
rdd: RDD[T],
transfer: (T, Record, TableSchema) => Unit) {
// Adapt the side-effecting transfer into one returning the record, which is
// the shape the tunnel writer consumes.
def transfer0(t: T, record: Record, scheme: TableSchema): Record = {
transfer(t, record, scheme)
record
}
val func = sc.clean(transfer0 _)
odps.setDefaultProject(project)
// A single driver-side upload session is shared by all tasks via its id.
val uploadSession = tunnel.createUploadSession(project, table)
logInfo("Odps upload session status is: " + uploadSession.getStatus.toString)
val uploadId = uploadSession.getId
// Runs on executors: rebuilds an ODPS client locally (the driver's client is
// not serializable) and writes this task's partition as one tunnel block.
def writeToFile(context: TaskContext, iter: Iterator[T]) {
val account_ = new AliyunAccount(accessKeyId, accessKeySecret)
val odps_ = new Odps(account_)
odps_.setDefaultProject(project)
odps_.setEndpoint(odpsUrl)
val tunnel_ = new TableTunnel(odps_)
tunnel_.setEndpoint(tunnelUrl)
val uploadSession_ = tunnel_.getUploadSession(project, table, uploadId)
// Spark partition id doubles as the tunnel block id.
val writer = uploadSession_.openRecordWriter(context.partitionId)
// for odps metrics monitor
var recordsWritten = 0L
val outputMetrics = new OutputMetrics(DataWriteMethod.Hadoop)
context.taskMetrics.outputMetrics = Some(outputMetrics)
while (iter.hasNext) {
val value = iter.next()
logDebug("context id: " + context.partitionId + " write: " + value)
writer.write(func(value, uploadSession_.newRecord(),
uploadSession_.getSchema))
recordsWritten += 1
}
logDebug("ready context id: " + context.partitionId)
writer.close()
logDebug("finish context id: " + context.partitionId)
outputMetrics.setRecordsWritten(recordsWritten)
val totalBytes = writer.asInstanceOf[TunnelRecordWriter].getTotalBytes
outputMetrics.setBytesWritten(totalBytes)
}
sc.runJob(rdd, writeToFile _)
// Commit one block id per RDD partition to finalize the upload.
val arr = Array.tabulate(rdd.partitions.length)(l => Long.box(l))
uploadSession.commit(arr)
}
/**
 * Returns the column indexes to read: the caller's selection, or every
 * index in [0, columnsLen) when the selection is empty.
 */
private def prepareCols(cols: Array[Int], columnsLen: Int): Array[Int] =
  if (cols.isEmpty) Array.range(0, columnsLen) else cols
/**
 * Builds the Spark SQL schema for the selected (sorted) columns by mapping
 * ODPS type names onto Spark SQL types. All fields are nullable.
 * NOTE(review): the match covers only the five types produced by
 * `getTableSchema`; any other type name would throw a MatchError.
 */
private def prepareSchema(
    cols: Array[Int],
    columnsLen: Int,
    project: String,
    table: String,
    isPartition: Boolean): StructType = {
  val tableSchema = odpsUtils.getTableSchema(project, table, isPartition)
  val selected = (if (cols.isEmpty) Array.range(0, columnsLen) else cols).sorted
  val fields = selected.map { idx =>
    val (name, odpsTypeName) = tableSchema(idx)
    val sparkType = odpsTypeName match {
      case "BIGINT" => LongType
      case "STRING" => StringType
      case "DOUBLE" => DoubleType
      case "BOOLEAN" => BooleanType
      case "DATETIME" => TimestampType
    }
    StructField(name, sparkType, true)
  }
  StructType(fields)
}
/**
 * Curried record reader: for the sorted column indexes, extracts each value
 * from the ODPS record using the column's declared type. DATETIME values are
 * converted to java.sql.Timestamp; a null datetime stays null.
 */
private def readTransfer(cols: Array[Int])(record: Record, schema: TableSchema):
    Array[_] = {
  cols.sorted.map { idx =>
    schema.getColumn(idx).getType match {
      case OdpsType.BIGINT => record.getBigint(idx)
      case OdpsType.DOUBLE => record.getDouble(idx)
      case OdpsType.BOOLEAN => record.getBoolean(idx)
      case OdpsType.STRING => record.getString(idx)
      case OdpsType.DATETIME =>
        // Preserve null; otherwise convert to a SQL timestamp.
        Option(record.getDatetime(idx))
          .map(dt => new java.sql.Timestamp(dt.getTime))
          .orNull
    }
  }
}
/**
 * Returns (columnName, typeName) pairs for the table's data columns, or its
 * partition columns when `isPartition` is true.
 */
def getTableSchema(project: String, table: String, isPartition: Boolean):
    Array[(String, String)] = {
  // Map each supported ODPS column type onto its string name.
  def typeName(t: OdpsType): String = t match {
    case OdpsType.BIGINT => "BIGINT"
    case OdpsType.DOUBLE => "DOUBLE"
    case OdpsType.BOOLEAN => "BOOLEAN"
    case OdpsType.DATETIME => "DATETIME"
    case OdpsType.STRING => "STRING"
  }
  odps.setDefaultProject(project)
  val schema = odps.tables().get(table).getSchema
  val columns = if (isPartition) schema.getPartitionColumns else schema.getColumns
  columns.toArray(new Array[Column](0)).map(col => (col.getName, typeName(col.getType)))
}
/**
 * Looks up a column by name and returns (columnIndex.toString, typeName).
 */
def getColumnByName(project: String, table: String, name: String):
    (String, String) = {
  odps.setDefaultProject(project)
  val schema = odps.tables().get(table).getSchema
  val colType = schema.getColumn(name).getType match {
    case OdpsType.BIGINT => "BIGINT"
    case OdpsType.DOUBLE => "DOUBLE"
    case OdpsType.BOOLEAN => "BOOLEAN"
    case OdpsType.DATETIME => "DATETIME"
    case OdpsType.STRING => "STRING"
  }
  (schema.getColumnIndex(name).toString, colType)
}
/**
 * Looks up a column by index and returns (columnName, typeName).
 */
def getColumnByIdx(project: String, table: String, idx: Int):
    (String, String) = {
  odps.setDefaultProject(project)
  val column = odps.tables().get(table).getSchema.getColumn(idx)
  val colType = column.getType match {
    case OdpsType.BIGINT => "BIGINT"
    case OdpsType.DOUBLE => "DOUBLE"
    case OdpsType.BOOLEAN => "BOOLEAN"
    case OdpsType.DATETIME => "DATETIME"
    case OdpsType.STRING => "STRING"
  }
  (column.getName, colType)
}
/**
 * Checks whether the table and the given partition exist.
 *
 * @return (tableExists, partitionExists); (false, false) when the table is
 *         missing.
 */
private def checkTableAndPartition(
    project: String,
    table: String,
    pname: String): (Boolean, Boolean) = {
  val wantedSpec = new PartitionSpec(pname).toString
  odps.setDefaultProject(project)
  val tables = odps.tables()
  if (!tables.exists(table)) {
    logWarning("table " + table + " do not exist!")
    (false, false)
  } else {
    // Compare normalized spec strings; stop at the first match.
    val partitionExists = tables.get(table).getPartitions
      .toArray(new Array[Partition](0))
      .exists(p => p.getPartitionSpec.toString.equals(wantedSpec))
    (true, partitionExists)
  }
}
/**
 * Deletes a partition if it exists.
 *
 * @return true when the partition is gone afterwards (including the case it
 *         never existed); false when the delete raised an OdpsException.
 */
private def dropPartition(
    project: String,
    table: String,
    pname: String): Boolean = {
  try {
    val (_, partitionExists) = checkTableAndPartition(project, table, pname)
    if (!partitionExists) {
      true
    } else {
      odps.setDefaultProject(project)
      odps.tables().get(table).deletePartition(new PartitionSpec(pname))
      true
    }
  } catch {
    case e: OdpsException =>
      logError("somethings wrong happens when delete partition " +
        pname + " of " + table + ".")
      logError(e.getMessage)
      false
  }
}
/**
 * Deletes a table if it exists.
 *
 * @return true when the table is gone afterwards (including the case it
 *         never existed); false when the delete raised an OdpsException.
 */
private def dropTable(
    project: String,
    table: String): Boolean = {
  try {
    // The partition name is irrelevant here; only the table flag is used.
    val (tableExists, _) = checkTableAndPartition(project, table, "random")
    if (!tableExists) {
      true
    } else {
      odps.setDefaultProject(project)
      odps.tables().delete(table)
      true
    }
  } catch {
    case e: OdpsException =>
      logError("somethings wrong happens when delete table " + table + ".")
      logError(e.getMessage)
      false
  }
}
/**
 * Creates a partition on an existing table.
 *
 * @return true when the partition exists afterwards (created now or already
 *         present); false when the table is missing or creation failed.
 */
private def createPartition(
    project: String,
    table: String,
    pname: String): Boolean = {
  val partitionSpec_ = new PartitionSpec(pname)
  val (tableE, partitionE) = checkTableAndPartition(project, table, pname)
  if (!tableE) {
    logWarning("table " + table + " do not exist, FAILED.")
    return false
  } else if (partitionE) {
    logWarning("table " + table + " partition " + pname +
      " exist, no need to create.")
    return true
  }
  try {
    odps.tables().get(table).createPartition(partitionSpec_)
  } catch {
    case e: OdpsException =>
      logError("somethings wrong happens when create table " + table +
        " partition " + pname + ".")
      // Consistency fix: log the exception detail like dropPartition and
      // dropTable do, instead of swallowing it.
      logError(e.getMessage)
      return false
  }
  true
}
}
object OdpsOps {
  /** Convenience factory mirroring the class constructor. */
  def apply(
      sc: SparkContext,
      accessKeyId: String,
      accessKeySecret: String,
      odpsUrl: String,
      tunnelUrl: String): OdpsOps =
    new OdpsOps(sc, accessKeyId, accessKeySecret, odpsUrl, tunnelUrl)
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import java.io.File
import java.util.concurrent.{Executors, TimeUnit}
import org.scalatest.BeforeAndAfterEach
import org.apache.spark.metrics.source.HiveCatalogMetrics
import org.apache.spark.sql.QueryTest
import org.apache.spark.sql.execution.datasources.FileStatusCache
import org.apache.spark.sql.hive.test.TestHiveSingleton
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SQLTestUtils
class PartitionedTablePerfStatsSuite
extends QueryTest with TestHiveSingleton with SQLTestUtils with BeforeAndAfterEach {
// Reset the shared file-status cache before each test so per-test cache
// metrics start from a clean slate.
override def beforeEach(): Unit = {
super.beforeEach()
FileStatusCache.resetForTesting()
}
// Reset the shared file-status cache after each test so leftover entries
// cannot leak into the next test.
override def afterEach(): Unit = {
super.afterEach()
FileStatusCache.resetForTesting()
}
// Per-flavor test parameters: how to build the table and whether it is a
// native datasource table (vs. a converted hive table).
private case class TestSpec(setupTable: (String, File) => Unit, isDatasourceTable: Boolean)
/**
 * Registers the same test body twice: once against a converted hive table
 * and once against a native datasource table. The TestSpec passed to the
 * body carries the table-setup function and the table flavor.
 */
private def genericTest(testName: String)(fn: TestSpec => Unit): Unit = {
  Seq(
    "hive table: " -> TestSpec(setupPartitionedHiveTable, false),
    "datasource table: " -> TestSpec(setupPartitionedDatasourceTable, true)
  ).foreach { case (prefix, spec) =>
    test(prefix + testName) {
      fn(spec)
    }
  }
}
/** Creates a 5-row, 5-partition hive table and repairs its metadata. */
private def setupPartitionedHiveTable(tableName: String, dir: File): Unit =
  setupPartitionedHiveTable(tableName, dir, scale = 5)
/**
 * Writes `scale` rows of parquet data partitioned by (partCol1, partCol2)
 * under `dir`, creates an external hive table over it, and (optionally)
 * registers the partitions via msck repair.
 */
private def setupPartitionedHiveTable(
    tableName: String, dir: File, scale: Int, repair: Boolean = true): Unit = {
  val data = spark.range(scale)
    .selectExpr("id as fieldOne", "id as partCol1", "id as partCol2")
  data.write
    .partitionBy("partCol1", "partCol2")
    .mode("overwrite")
    .parquet(dir.getAbsolutePath)
  spark.sql(s"""
    |create external table $tableName (fieldOne long)
    |partitioned by (partCol1 int, partCol2 int)
    |stored as parquet
    |location "${dir.toURI}"""".stripMargin)
  if (repair) {
    spark.sql(s"msck repair table $tableName")
  }
}
/** Creates a 5-row, 5-partition datasource table and repairs its metadata. */
private def setupPartitionedDatasourceTable(tableName: String, dir: File): Unit =
  setupPartitionedDatasourceTable(tableName, dir, scale = 5)
/**
 * Writes `scale` rows of parquet data partitioned by (partCol1, partCol2)
 * under `dir`, creates a native datasource table over it, and (optionally)
 * registers the partitions via msck repair.
 */
private def setupPartitionedDatasourceTable(
    tableName: String, dir: File, scale: Int, repair: Boolean = true): Unit = {
  val data = spark.range(scale)
    .selectExpr("id as fieldOne", "id as partCol1", "id as partCol2")
  data.write
    .partitionBy("partCol1", "partCol2")
    .mode("overwrite")
    .parquet(dir.getAbsolutePath)
  spark.sql(s"""
    |create table $tableName (fieldOne long, partCol1 int, partCol2 int)
    |using parquet
    |options (path "${dir.toURI}")
    |partitioned by (partCol1, partCol2)""".stripMargin)
  if (repair) {
    spark.sql(s"msck repair table $tableName")
  }
}
// Verifies that partition pruning reduces the file set reported by
// `inputFiles`: predicates on partition columns (case-insensitive) prune,
// while predicates on data columns do not.
genericTest("partitioned pruned table reports only selected files") { spec =>
assert(spark.sqlContext.getConf(HiveUtils.CONVERT_METASTORE_PARQUET.key) == "true")
withTable("test") {
withTempDir { dir =>
spec.setupTable("test", dir)
val df = spark.sql("select * from test")
assert(df.count() == 5)
assert(df.inputFiles.length == 5) // unpruned
val df2 = spark.sql("select * from test where partCol1 = 3 or partCol2 = 4")
assert(df2.count() == 2)
assert(df2.inputFiles.length == 2) // pruned, so we have less files
// Partition-column names are resolved case-insensitively.
val df3 = spark.sql("select * from test where PARTCOL1 = 3 or partcol2 = 4")
assert(df3.count() == 2)
assert(df3.inputFiles.length == 2)
val df4 = spark.sql("select * from test where partCol1 = 999")
assert(df4.count() == 0)
assert(df4.inputFiles.length == 0)
// Filtering on a data column cannot prune: all files remain inputs.
val df5 = spark.sql("select * from test where fieldOne = 4")
assert(df5.count() == 1)
assert(df5.inputFiles.length == 5)
}
}
}
// With the file-status cache disabled (size 0), each query should fetch and
// list exactly the partitions its predicate selects — no more, no less.
genericTest("lazy partition pruning reads only necessary partition data") { spec =>
withSQLConf(
SQLConf.HIVE_MANAGE_FILESOURCE_PARTITIONS.key -> "true",
SQLConf.HIVE_FILESOURCE_PARTITION_FILE_CACHE_SIZE.key -> "0") {
withTable("test") {
withTempDir { dir =>
spec.setupTable("test", dir)
HiveCatalogMetrics.reset()
spark.sql("select * from test where partCol1 = 999").count()
assert(HiveCatalogMetrics.METRIC_PARTITIONS_FETCHED.getCount() == 0)
assert(HiveCatalogMetrics.METRIC_FILES_DISCOVERED.getCount() == 0)
HiveCatalogMetrics.reset()
spark.sql("select * from test where partCol1 < 2").count()
assert(HiveCatalogMetrics.METRIC_PARTITIONS_FETCHED.getCount() == 2)
assert(HiveCatalogMetrics.METRIC_FILES_DISCOVERED.getCount() == 2)
HiveCatalogMetrics.reset()
spark.sql("select * from test where partCol1 < 3").count()
assert(HiveCatalogMetrics.METRIC_PARTITIONS_FETCHED.getCount() == 3)
assert(HiveCatalogMetrics.METRIC_FILES_DISCOVERED.getCount() == 3)
// should read all
HiveCatalogMetrics.reset()
spark.sql("select * from test").count()
assert(HiveCatalogMetrics.METRIC_PARTITIONS_FETCHED.getCount() == 5)
assert(HiveCatalogMetrics.METRIC_FILES_DISCOVERED.getCount() == 5)
// read all should not be cached
HiveCatalogMetrics.reset()
spark.sql("select * from test").count()
assert(HiveCatalogMetrics.METRIC_PARTITIONS_FETCHED.getCount() == 5)
assert(HiveCatalogMetrics.METRIC_FILES_DISCOVERED.getCount() == 5)
// cache should be disabled
assert(HiveCatalogMetrics.METRIC_FILE_CACHE_HITS.getCount() == 0)
}
}
}
}
// With a large file-status cache, re-listing previously seen partitions
// should produce cache hits instead of fresh file discoveries.
genericTest("lazy partition pruning with file status caching enabled") { spec =>
withSQLConf(
SQLConf.HIVE_MANAGE_FILESOURCE_PARTITIONS.key -> "true",
SQLConf.HIVE_FILESOURCE_PARTITION_FILE_CACHE_SIZE.key -> "9999999") {
withTable("test") {
withTempDir { dir =>
spec.setupTable("test", dir)
HiveCatalogMetrics.reset()
assert(spark.sql("select * from test where partCol1 = 999").count() == 0)
assert(HiveCatalogMetrics.METRIC_PARTITIONS_FETCHED.getCount() == 0)
assert(HiveCatalogMetrics.METRIC_FILES_DISCOVERED.getCount() == 0)
assert(HiveCatalogMetrics.METRIC_FILE_CACHE_HITS.getCount() == 0)
HiveCatalogMetrics.reset()
assert(spark.sql("select * from test where partCol1 < 2").count() == 2)
assert(HiveCatalogMetrics.METRIC_PARTITIONS_FETCHED.getCount() == 2)
assert(HiveCatalogMetrics.METRIC_FILES_DISCOVERED.getCount() == 2)
assert(HiveCatalogMetrics.METRIC_FILE_CACHE_HITS.getCount() == 0)
// Only the one new partition is listed; the other two hit the cache.
HiveCatalogMetrics.reset()
assert(spark.sql("select * from test where partCol1 < 3").count() == 3)
assert(HiveCatalogMetrics.METRIC_PARTITIONS_FETCHED.getCount() == 3)
assert(HiveCatalogMetrics.METRIC_FILES_DISCOVERED.getCount() == 1)
assert(HiveCatalogMetrics.METRIC_FILE_CACHE_HITS.getCount() == 2)
HiveCatalogMetrics.reset()
assert(spark.sql("select * from test").count() == 5)
assert(HiveCatalogMetrics.METRIC_PARTITIONS_FETCHED.getCount() == 5)
assert(HiveCatalogMetrics.METRIC_FILES_DISCOVERED.getCount() == 2)
assert(HiveCatalogMetrics.METRIC_FILE_CACHE_HITS.getCount() == 3)
// Everything cached now: a full scan discovers no new files.
HiveCatalogMetrics.reset()
assert(spark.sql("select * from test").count() == 5)
assert(HiveCatalogMetrics.METRIC_PARTITIONS_FETCHED.getCount() == 5)
assert(HiveCatalogMetrics.METRIC_FILES_DISCOVERED.getCount() == 0)
assert(HiveCatalogMetrics.METRIC_FILE_CACHE_HITS.getCount() == 5)
}
}
}
}
// Both `refresh table` and `refreshByPath` must invalidate the file-status
// cache so the next scan re-lists all files (zero cache hits each time).
genericTest("file status caching respects refresh table and refreshByPath") { spec =>
withSQLConf(
SQLConf.HIVE_MANAGE_FILESOURCE_PARTITIONS.key -> "true",
SQLConf.HIVE_FILESOURCE_PARTITION_FILE_CACHE_SIZE.key -> "9999999") {
withTable("test") {
withTempDir { dir =>
spec.setupTable("test", dir)
HiveCatalogMetrics.reset()
assert(spark.sql("select * from test").count() == 5)
assert(HiveCatalogMetrics.METRIC_FILES_DISCOVERED.getCount() == 5)
assert(HiveCatalogMetrics.METRIC_FILE_CACHE_HITS.getCount() == 0)
HiveCatalogMetrics.reset()
spark.sql("refresh table test")
assert(spark.sql("select * from test").count() == 5)
assert(HiveCatalogMetrics.METRIC_FILES_DISCOVERED.getCount() == 5)
assert(HiveCatalogMetrics.METRIC_FILE_CACHE_HITS.getCount() == 0)
spark.catalog.cacheTable("test")
HiveCatalogMetrics.reset()
spark.catalog.refreshByPath(dir.getAbsolutePath)
assert(spark.sql("select * from test").count() == 5)
assert(HiveCatalogMetrics.METRIC_FILES_DISCOVERED.getCount() == 5)
assert(HiveCatalogMetrics.METRIC_FILE_CACHE_HITS.getCount() == 0)
}
}
}
}
// With a 1-byte cache nothing fits, so the second scan re-discovers all
// files (counter doubles) and there are never any cache hits.
genericTest("file status cache respects size limit") { spec =>
withSQLConf(
SQLConf.HIVE_MANAGE_FILESOURCE_PARTITIONS.key -> "true",
SQLConf.HIVE_FILESOURCE_PARTITION_FILE_CACHE_SIZE.key -> "1" /* 1 byte */) {
withTable("test") {
withTempDir { dir =>
spec.setupTable("test", dir)
HiveCatalogMetrics.reset()
assert(spark.sql("select * from test").count() == 5)
assert(HiveCatalogMetrics.METRIC_FILES_DISCOVERED.getCount() == 5)
assert(HiveCatalogMetrics.METRIC_FILE_CACHE_HITS.getCount() == 0)
assert(spark.sql("select * from test").count() == 5)
assert(HiveCatalogMetrics.METRIC_FILES_DISCOVERED.getCount() == 10)
assert(HiveCatalogMetrics.METRIC_FILE_CACHE_HITS.getCount() == 0)
}
}
}
}
// Creating a datasource table (without msck repair) must not trigger any
// file listing.
test("datasource table: table setup does not scan filesystem") {
withSQLConf(SQLConf.HIVE_MANAGE_FILESOURCE_PARTITIONS.key -> "true") {
withTable("test") {
withTempDir { dir =>
HiveCatalogMetrics.reset()
setupPartitionedDatasourceTable("test", dir, scale = 10, repair = false)
assert(HiveCatalogMetrics.METRIC_FILES_DISCOVERED.getCount() == 0)
assert(HiveCatalogMetrics.METRIC_FILE_CACHE_HITS.getCount() == 0)
}
}
}
}
// Creating a hive table (without msck repair) must not trigger any file
// listing.
test("hive table: table setup does not scan filesystem") {
withSQLConf(SQLConf.HIVE_MANAGE_FILESOURCE_PARTITIONS.key -> "true") {
withTable("test") {
withTempDir { dir =>
HiveCatalogMetrics.reset()
setupPartitionedHiveTable("test", dir, scale = 10, repair = false)
assert(HiveCatalogMetrics.METRIC_FILES_DISCOVERED.getCount() == 0)
assert(HiveCatalogMetrics.METRIC_FILE_CACHE_HITS.getCount() == 0)
}
}
}
}
// Query planning over a 100-partition hive table must issue a small,
// bounded number of metastore client calls, not O(partitions).
test("hive table: num hive client calls does not scale with partition count") {
withSQLConf(SQLConf.HIVE_MANAGE_FILESOURCE_PARTITIONS.key -> "true") {
withTable("test") {
withTempDir { dir =>
setupPartitionedHiveTable("test", dir, scale = 100)
HiveCatalogMetrics.reset()
assert(spark.sql("select * from test where partCol1 = 1").count() == 1)
assert(HiveCatalogMetrics.METRIC_HIVE_CLIENT_CALLS.getCount() > 0)
assert(HiveCatalogMetrics.METRIC_HIVE_CLIENT_CALLS.getCount() < 10)
HiveCatalogMetrics.reset()
assert(spark.sql("select * from test").count() == 100)
assert(HiveCatalogMetrics.METRIC_HIVE_CLIENT_CALLS.getCount() < 10)
HiveCatalogMetrics.reset()
assert(spark.sql("show partitions test").count() == 100)
assert(HiveCatalogMetrics.METRIC_HIVE_CLIENT_CALLS.getCount() <= 10)
}
}
}
}
// Same bound as the hive-table variant, for a 100-partition datasource
// table: metastore client calls stay below a small constant.
test("datasource table: num hive client calls does not scale with partition count") {
withSQLConf(SQLConf.HIVE_MANAGE_FILESOURCE_PARTITIONS.key -> "true") {
withTable("test") {
withTempDir { dir =>
setupPartitionedDatasourceTable("test", dir, scale = 100)
HiveCatalogMetrics.reset()
assert(spark.sql("select * from test where partCol1 = 1").count() == 1)
assert(HiveCatalogMetrics.METRIC_HIVE_CLIENT_CALLS.getCount() > 0)
assert(HiveCatalogMetrics.METRIC_HIVE_CLIENT_CALLS.getCount() < 10)
HiveCatalogMetrics.reset()
assert(spark.sql("select * from test").count() == 100)
assert(HiveCatalogMetrics.METRIC_HIVE_CLIENT_CALLS.getCount() < 10)
HiveCatalogMetrics.reset()
assert(spark.sql("show partitions test").count() == 100)
assert(HiveCatalogMetrics.METRIC_HIVE_CLIENT_CALLS.getCount() <= 10)
}
}
}
}
// Legacy mode (partition management off): the first query lists everything
// once; later queries re-fetch partitions but rely on cached file listings.
test("hive table: files read and cached when filesource partition management is off") {
withSQLConf(SQLConf.HIVE_MANAGE_FILESOURCE_PARTITIONS.key -> "false") {
withTable("test") {
withTempDir { dir =>
setupPartitionedHiveTable("test", dir)
// We actually query the partitions from hive each time the table is resolved in this
// mode. This is kind of terrible, but is needed to preserve the legacy behavior
// of doing plan cache validation based on the entire partition set.
HiveCatalogMetrics.reset()
assert(spark.sql("select * from test where partCol1 = 999").count() == 0)
// 5 from table resolution, another 5 from InMemoryFileIndex
assert(HiveCatalogMetrics.METRIC_PARTITIONS_FETCHED.getCount() == 10)
assert(HiveCatalogMetrics.METRIC_FILES_DISCOVERED.getCount() == 5)
HiveCatalogMetrics.reset()
assert(spark.sql("select * from test where partCol1 < 2").count() == 2)
assert(HiveCatalogMetrics.METRIC_PARTITIONS_FETCHED.getCount() == 5)
assert(HiveCatalogMetrics.METRIC_FILES_DISCOVERED.getCount() == 0)
HiveCatalogMetrics.reset()
assert(spark.sql("select * from test").count() == 5)
assert(HiveCatalogMetrics.METRIC_PARTITIONS_FETCHED.getCount() == 5)
assert(HiveCatalogMetrics.METRIC_FILES_DISCOVERED.getCount() == 0)
}
}
}
}
  // With partition management off, a datasource table never consults the
  // metastore for partitions (METRIC_PARTITIONS_FETCHED stays 0): all files are
  // discovered once up front and served from the in-memory cache afterwards.
  test("datasource table: all partition data cached in memory when partition management is off") {
    withSQLConf(SQLConf.HIVE_MANAGE_FILESOURCE_PARTITIONS.key -> "false") {
      withTable("test") {
        withTempDir { dir =>
          setupPartitionedDatasourceTable("test", dir)
          HiveCatalogMetrics.reset()
          assert(spark.sql("select * from test where partCol1 = 999").count() == 0)
          // not using metastore
          assert(HiveCatalogMetrics.METRIC_PARTITIONS_FETCHED.getCount() == 0)
          // reads and caches all the files initially
          assert(HiveCatalogMetrics.METRIC_FILES_DISCOVERED.getCount() == 5)
          HiveCatalogMetrics.reset()
          // Subsequent queries hit the file cache: no fetches, no discovery.
          assert(spark.sql("select * from test where partCol1 < 2").count() == 2)
          assert(HiveCatalogMetrics.METRIC_PARTITIONS_FETCHED.getCount() == 0)
          assert(HiveCatalogMetrics.METRIC_FILES_DISCOVERED.getCount() == 0)
          HiveCatalogMetrics.reset()
          assert(spark.sql("select * from test").count() == 5)
          assert(HiveCatalogMetrics.METRIC_PARTITIONS_FETCHED.getCount() == 0)
          assert(HiveCatalogMetrics.METRIC_FILES_DISCOVERED.getCount() == 0)
        }
      }
    }
  }
test("SPARK-18700: table loaded only once even when resolved concurrently") {
withSQLConf(SQLConf.HIVE_MANAGE_FILESOURCE_PARTITIONS.key -> "false") {
withTable("test") {
withTempDir { dir =>
setupPartitionedHiveTable("test", dir, 50)
HiveCatalogMetrics.reset()
// select the table in multi-threads
val executorPool = Executors.newFixedThreadPool(10)
(1 to 10).map(threadId => {
val runnable = new Runnable {
override def run(): Unit = {
spark.sql("select * from test where partCol1 = 999").count()
}
}
executorPool.execute(runnable)
None
})
executorPool.shutdown()
executorPool.awaitTermination(30, TimeUnit.SECONDS)
assert(HiveCatalogMetrics.METRIC_FILES_DISCOVERED.getCount() == 50)
assert(HiveCatalogMetrics.METRIC_PARALLEL_LISTING_JOB_COUNT.getCount() == 1)
}
}
}
}
  // A schema-less read of a FileFormat datasource should list the directory
  // exactly once (1 file discovered, 0 cache hits) during relation resolution.
  test("resolveRelation for a FileFormat DataSource without userSchema scan filesystem only once") {
    withTempDir { dir =>
      import spark.implicits._
      Seq(1).toDF("a").write.mode("overwrite").save(dir.getAbsolutePath)
      HiveCatalogMetrics.reset()
      spark.read.load(dir.getAbsolutePath)
      assert(HiveCatalogMetrics.METRIC_FILES_DISCOVERED.getCount() == 1)
      assert(HiveCatalogMetrics.METRIC_FILE_CACHE_HITS.getCount() == 0)
    }
  }
}
| maropu/spark | sql/hive/src/test/scala/org/apache/spark/sql/hive/PartitionedTablePerfStatsSuite.scala | Scala | apache-2.0 | 17,736 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions.xml
import org.apache.spark.sql.catalyst.analysis.TypeCheckResult
import org.apache.spark.sql.catalyst.analysis.TypeCheckResult.TypeCheckFailure
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.codegen.CodegenFallback
import org.apache.spark.sql.catalyst.util.GenericArrayData
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
/**
 * Base class for xpath_boolean, xpath_double, xpath_int, etc.
 *
 * This is not the world's most efficient implementation due to type conversion, but works.
 */
abstract class XPathExtract extends BinaryExpression with ExpectsInputTypes with CodegenFallback {
  override def left: Expression = xml
  override def right: Expression = path

  /** XPath expressions are always nullable, e.g. if the xml string is empty. */
  override def nullable: Boolean = true
  override def inputTypes: Seq[AbstractDataType] = Seq(StringType, StringType)

  override def checkInputDataTypes(): TypeCheckResult = {
    // `pathString` evaluates the path once with no input row, so the path
    // expression must be foldable (a literal); reject anything else up front.
    if (path.foldable) {
      super.checkInputDataTypes()
    } else {
      TypeCheckFailure("path should be a string literal")
    }
  }

  // Lazily-built, per-partition XPath evaluator and the pre-evaluated path.
  @transient protected lazy val xpathUtil = new UDFXPathUtil
  @transient protected lazy val pathString: String = path.eval().asInstanceOf[UTF8String].toString

  /** Concrete implementations need to override the following three methods. */
  def xml: Expression
  def path: Expression
}
// scalastyle:off line.size.limit
@ExpressionDescription(
  usage = "_FUNC_(xml, xpath) - Returns true if the XPath expression evaluates to true, or if a matching node is found.",
  examples = """
    Examples:
      > SELECT _FUNC_('<a><b>1</b></a>','a/b');
       true
  """)
// scalastyle:on line.size.limit
case class XPathBoolean(xml: Expression, path: Expression) extends XPathExtract {
  override def prettyName: String = "xpath_boolean"
  override def dataType: DataType = BooleanType

  // Delegates the boolean XPath evaluation to the shared utility.
  override def nullSafeEval(xml: Any, path: Any): Any = {
    val xmlString = xml.asInstanceOf[UTF8String].toString
    xpathUtil.evalBoolean(xmlString, pathString)
  }
}
// scalastyle:off line.size.limit
@ExpressionDescription(
  usage = "_FUNC_(xml, xpath) - Returns a short integer value, or the value zero if no match is found, or a match is found but the value is non-numeric.",
  examples = """
    Examples:
      > SELECT _FUNC_('<a><b>1</b><b>2</b></a>', 'sum(a/b)');
       3
  """)
// scalastyle:on line.size.limit
case class XPathShort(xml: Expression, path: Expression) extends XPathExtract {
  override def prettyName: String = "xpath_short"
  override def dataType: DataType = ShortType

  // Evaluates the numeric XPath result and narrows it to Short; a null result
  // (no numeric match) propagates as SQL NULL.
  override def nullSafeEval(xml: Any, path: Any): Any = {
    xpathUtil.evalNumber(xml.asInstanceOf[UTF8String].toString, pathString) match {
      case null => null
      case num => num.shortValue()
    }
  }
}
// scalastyle:off line.size.limit
@ExpressionDescription(
  usage = "_FUNC_(xml, xpath) - Returns an integer value, or the value zero if no match is found, or a match is found but the value is non-numeric.",
  examples = """
    Examples:
      > SELECT _FUNC_('<a><b>1</b><b>2</b></a>', 'sum(a/b)');
       3
  """)
// scalastyle:on line.size.limit
case class XPathInt(xml: Expression, path: Expression) extends XPathExtract {
  override def prettyName: String = "xpath_int"
  override def dataType: DataType = IntegerType

  // Evaluates the numeric XPath result and narrows it to Int; a null result
  // (no numeric match) propagates as SQL NULL.
  override def nullSafeEval(xml: Any, path: Any): Any = {
    xpathUtil.evalNumber(xml.asInstanceOf[UTF8String].toString, pathString) match {
      case null => null
      case num => num.intValue()
    }
  }
}
// scalastyle:off line.size.limit
@ExpressionDescription(
  usage = "_FUNC_(xml, xpath) - Returns a long integer value, or the value zero if no match is found, or a match is found but the value is non-numeric.",
  examples = """
    Examples:
      > SELECT _FUNC_('<a><b>1</b><b>2</b></a>', 'sum(a/b)');
       3
  """)
// scalastyle:on line.size.limit
case class XPathLong(xml: Expression, path: Expression) extends XPathExtract {
  override def prettyName: String = "xpath_long"
  override def dataType: DataType = LongType

  // Evaluates the numeric XPath result and narrows it to Long; a null result
  // (no numeric match) propagates as SQL NULL.
  override def nullSafeEval(xml: Any, path: Any): Any = {
    xpathUtil.evalNumber(xml.asInstanceOf[UTF8String].toString, pathString) match {
      case null => null
      case num => num.longValue()
    }
  }
}
// scalastyle:off line.size.limit
@ExpressionDescription(
  usage = "_FUNC_(xml, xpath) - Returns a float value, the value zero if no match is found, or NaN if a match is found but the value is non-numeric.",
  examples = """
    Examples:
      > SELECT _FUNC_('<a><b>1</b><b>2</b></a>', 'sum(a/b)');
       3.0
  """)
// scalastyle:on line.size.limit
case class XPathFloat(xml: Expression, path: Expression) extends XPathExtract {
  override def prettyName: String = "xpath_float"
  override def dataType: DataType = FloatType

  // Evaluates the numeric XPath result and narrows it to Float; a null result
  // (no numeric match) propagates as SQL NULL.
  override def nullSafeEval(xml: Any, path: Any): Any = {
    xpathUtil.evalNumber(xml.asInstanceOf[UTF8String].toString, pathString) match {
      case null => null
      case num => num.floatValue()
    }
  }
}
// scalastyle:off line.size.limit
@ExpressionDescription(
  usage = "_FUNC_(xml, xpath) - Returns a double value, the value zero if no match is found, or NaN if a match is found but the value is non-numeric.",
  examples = """
    Examples:
      > SELECT _FUNC_('<a><b>1</b><b>2</b></a>', 'sum(a/b)');
       3.0
  """)
// scalastyle:on line.size.limit
case class XPathDouble(xml: Expression, path: Expression) extends XPathExtract {
  // Bug fix: this previously returned "xpath_float" (copy-paste from XPathFloat),
  // which mislabeled this expression in query plans and error messages. The SQL
  // function this class implements is xpath_double.
  override def prettyName: String = "xpath_double"
  override def dataType: DataType = DoubleType

  // Evaluates the numeric XPath result as Double; a null result (no numeric
  // match) propagates as SQL NULL.
  override def nullSafeEval(xml: Any, path: Any): Any = {
    val ret = xpathUtil.evalNumber(xml.asInstanceOf[UTF8String].toString, pathString)
    if (ret eq null) null else ret.doubleValue()
  }
}
// scalastyle:off line.size.limit
@ExpressionDescription(
  usage = "_FUNC_(xml, xpath) - Returns the text contents of the first xml node that matches the XPath expression.",
  examples = """
    Examples:
      > SELECT _FUNC_('<a><b>b</b><c>cc</c></a>','a/c');
       cc
  """)
// scalastyle:on line.size.limit
case class XPathString(xml: Expression, path: Expression) extends XPathExtract {
  override def prettyName: String = "xpath_string"
  override def dataType: DataType = StringType

  // Evaluates the XPath string result and wraps it as UTF8String.
  // UTF8String.fromString(null) yields null, so a missing match becomes SQL NULL.
  override def nullSafeEval(xml: Any, path: Any): Any = {
    val evaluated = xpathUtil.evalString(xml.asInstanceOf[UTF8String].toString, pathString)
    UTF8String.fromString(evaluated)
  }
}
// scalastyle:off line.size.limit
@ExpressionDescription(
  usage = "_FUNC_(xml, xpath) - Returns a string array of values within the nodes of xml that match the XPath expression.",
  examples = """
    Examples:
      > SELECT _FUNC_('<a><b>b1</b><b>b2</b><b>b3</b><c>c1</c><c>c2</c></a>','a/b/text()');
       ['b1','b2','b3']
  """)
// scalastyle:on line.size.limit
case class XPathList(xml: Expression, path: Expression) extends XPathExtract {
  override def prettyName: String = "xpath"
  override def dataType: DataType = ArrayType(StringType, containsNull = false)

  // Collects the node values of every match into an array; a null node list
  // (no matches / evaluation failure) propagates as SQL NULL.
  override def nullSafeEval(xml: Any, path: Any): Any = {
    val nodeList = xpathUtil.evalNodeList(xml.asInstanceOf[UTF8String].toString, pathString)
    if (nodeList eq null) {
      null
    } else {
      val values = Array.tabulate(nodeList.getLength) { i =>
        UTF8String.fromString(nodeList.item(i).getNodeValue)
      }
      new GenericArrayData(values)
    }
  }
}
| minixalpha/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/xml/xpath.scala | Scala | apache-2.0 | 8,280 |
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.accumulo.tools.ingest
import java.io.File
import java.text.SimpleDateFormat
import java.util.Date
import com.google.common.io.Files
import com.vividsolutions.jts.geom.Coordinate
import org.geotools.data.Transaction
import org.geotools.data.shapefile.ShapefileDataStoreFactory
import org.geotools.factory.Hints
import org.geotools.geometry.jts.JTSFactoryFinder
import org.junit.runner.RunWith
import org.locationtech.geomesa.accumulo.tools.{AccumuloDataStoreCommand, AccumuloRunner}
import org.locationtech.geomesa.utils.collection.SelfClosingIterator
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.collection.JavaConversions._
@RunWith(classOf[JUnitRunner])
class ShpIngestTest extends Specification {

  // Tests share the shapefile and mock Accumulo instance built below, so they
  // must run in declaration order.
  sequential

  "ShpIngest" >> {
    // --- Fixture: build a two-feature shapefile in a temp directory. All of this
    // runs once, eagerly, when the class is constructed.
    val geomBuilder = JTSFactoryFinder.getGeometryFactory
    val shpStoreFactory = new ShapefileDataStoreFactory
    val shpFile = new File(Files.createTempDir(), "shpingest.shp")
    val shpUrl = shpFile.toURI.toURL
    val params = Map("url" -> shpUrl)
    val shpStore = shpStoreFactory.createNewDataStore(params)
    val schema = SimpleFeatureTypes.createType("shpingest", "age:Integer,dtg:Date,*geom:Point:srid=4326")
    shpStore.createSchema(schema)
    val df = new SimpleDateFormat("dd-MM-yyyy")
    val (minDate, maxDate) = (df.parse("01-01-2011"), df.parse("01-01-2012"))
    val (minX, maxX, minY, maxY) = (10.0, 20.0, 30.0, 40.0)
    // Two features at the extremes of the date and coordinate ranges, so the
    // ingested bounds/stats can be asserted exactly below.
    // NOTE(review): the tuple elements are named (lat, lon) but are passed as
    // Coordinate(x, y) with the X-range values first — verify naming vs. intent.
    val data =
      List(
        ("1", 1, minDate, (minX, minY)),
        ("1", 2, maxDate, (maxX, maxY))
      )
    val writer = shpStore.getFeatureWriterAppend("shpingest", Transaction.AUTO_COMMIT)
    data.foreach { case (id, age, dtg, (lat, lon)) =>
      val f = writer.next()
      f.setAttribute("age", age)
      f.setAttribute("dtg", dtg)
      val pt = geomBuilder.createPoint(new Coordinate(lat, lon))
      f.setDefaultGeometry(pt)
      // Force the writer to use our explicit feature ID instead of generating one.
      f.getUserData.put(Hints.USE_PROVIDED_FID, java.lang.Boolean.TRUE)
      f.getUserData.put(Hints.PROVIDED_FID, id)
      writer.write()
    }
    writer.flush()
    writer.close()

    // CLI arguments for ingesting the shapefile into a mock Accumulo instance.
    val args = Array[String]("ingest", "--zookeepers", "zoo", "--mock", "--instance", "mycloud", "--user", "myuser",
      "--password", "mypassword", "--catalog", "testshpingestcatalog", shpFile.getAbsolutePath)

    "should properly ingest a shapefile" >> {
      val command = AccumuloRunner.parseCommand(args).asInstanceOf[AccumuloDataStoreCommand]
      command.execute()
      // Both features ingested, and bounds/date stats match the fixture extremes.
      val fs = command.withDataStore(_.getFeatureSource("shpingest"))
      SelfClosingIterator(fs.getFeatures.features).toList must haveLength(2)
      val bounds = fs.getBounds
      bounds.getMinX mustEqual minX
      bounds.getMaxX mustEqual maxX
      bounds.getMinY mustEqual minY
      bounds.getMaxY mustEqual maxY
      command.withDataStore { (ds) =>
        ds.stats.getAttributeBounds[Date](ds.getSchema("shpingest"), "dtg").map(_.tuple) must
          beSome((minDate, maxDate, 2L))
      }
    }

    "should support renaming the feature type" >> {
      // Same ingest, but with --feature-name overriding the type name.
      val newArgs = Array(args.head) ++ Array("--feature-name", "changed") ++ args.tail
      val command = AccumuloRunner.parseCommand(newArgs).asInstanceOf[AccumuloDataStoreCommand]
      command.execute()
      val fs = command.withDataStore(_.getFeatureSource("changed"))
      SelfClosingIterator(fs.getFeatures.features).toList must haveLength(2)
      val bounds = fs.getBounds
      bounds.getMinX mustEqual minX
      bounds.getMaxX mustEqual maxX
      bounds.getMinY mustEqual minY
      bounds.getMaxY mustEqual maxY
      command.withDataStore { (ds) =>
        ds.stats.getAttributeBounds[Date](ds.getSchema("changed"), "dtg").map(_.tuple) must
          beSome((minDate, maxDate, 2L))
      }
    }
  }
}
| ronq/geomesa | geomesa-accumulo/geomesa-accumulo-tools/src/test/scala/org/locationtech/geomesa/accumulo/tools/ingest/ShpIngestTest.scala | Scala | apache-2.0 | 4,298 |
/* sbt -- Simple Build Tool
* Copyright 2011 Mark Harrah
*/
package sbt
import java.io.File
import java.net.URI
import Project._
import Keys.{appConfiguration, stateBuildStructure, commands, configuration, historyPath, projectCommand, sessionSettings, sessionVars, shellPrompt, thisProject, thisProjectRef, watch}
import Scope.{GlobalScope,ThisScope}
import Load.BuildStructure
import Types.{idFun, Id}
import complete.DefaultParsers
/**
 * Common structure of a project definition: its identity (`id`, `base`), its
 * settings/configurations, and its relationships to other projects expressed as
 * references of type `PR` (`ProjectReference` before resolution, `ProjectRef` after).
 */
sealed trait ProjectDefinition[PR <: ProjectReference]
{
  def id: String
  def base: File
  def configurations: Seq[Configuration]
  def settings: Seq[Project.Setting[_]]
  def aggregate: Seq[PR]
  def delegates: Seq[PR]
  def dependencies: Seq[ClasspathDep[PR]]
  /** Projects used directly by this one: aggregated plus classpath dependencies. */
  def uses: Seq[PR] = aggregate ++ dependencies.map(_.project)
  /** Every referenced project: delegation targets plus `uses`. */
  def referenced: Seq[PR] = delegates ++ uses

  // Identity is runtime class + id + base only; settings and references are
  // deliberately excluded from equality/hashing.
  override final def hashCode: Int = id.hashCode ^ base.hashCode ^ getClass.hashCode
  override final def equals(o: Any) = o match {
    case p: ProjectDefinition[_] => p.getClass == this.getClass && p.id == id && p.base == base
    case _ => false
  }
  override def toString = "Project(id: " + id + ", base: " + base + ", aggregate: " + aggregate + ", dependencies: " + dependencies + ", delegates: " + delegates + ", configurations: " + configurations + ")"
}
/**
 * An unresolved project definition. Provides a copy constructor, resolution of
 * its references to concrete `ProjectRef`s, and a fluent builder API
 * (`dependsOn`, `aggregate`, `configs`, `settings`, ...).
 */
sealed trait Project extends ProjectDefinition[ProjectReference]
{
  // Note: aggregate/dependencies/delegates/settings are by-name so references
  // may be forward-declared; they are forced lazily in ProjectDef.
  def copy(id: String = id, base: File = base, aggregate: => Seq[ProjectReference] = aggregate, dependencies: => Seq[ClasspathDep[ProjectReference]] = dependencies, delegates: => Seq[ProjectReference] = delegates,
    settings: => Seq[Project.Setting[_]] = settings, configurations: Seq[Configuration] = configurations): Project =
      Project(id, base, aggregate = aggregate, dependencies = dependencies, delegates = delegates, settings, configurations)

  /** Resolves every reference in this project to a `ProjectRef` using `resolveRef`. */
  def resolve(resolveRef: ProjectReference => ProjectRef): ResolvedProject =
  {
    def resolveRefs(prs: Seq[ProjectReference]) = prs map resolveRef
    def resolveDeps(ds: Seq[ClasspathDep[ProjectReference]]) = ds map resolveDep
    def resolveDep(d: ClasspathDep[ProjectReference]) = ResolvedClasspathDependency(resolveRef(d.project), d.configuration)
    resolved(id, base, aggregate = resolveRefs(aggregate), dependencies = resolveDeps(dependencies), delegates = resolveRefs(delegates), settings, configurations)
  }
  /** Maps references to references (e.g. to fix the build URI) without fully resolving. */
  def resolveBuild(resolveRef: ProjectReference => ProjectReference): Project =
  {
    def resolveRefs(prs: Seq[ProjectReference]) = prs map resolveRef
    def resolveDeps(ds: Seq[ClasspathDep[ProjectReference]]) = ds map resolveDep
    def resolveDep(d: ClasspathDep[ProjectReference]) = ClasspathDependency(resolveRef(d.project), d.configuration)
    apply(id, base, aggregate = resolveRefs(aggregate), dependencies = resolveDeps(dependencies), delegates = resolveRefs(delegates), settings, configurations)
  }

  // Fluent builders: each returns a new Project with the extra pieces appended
  // (or, for overrideConfigs, with the configuration list overridden).
  def overrideConfigs(cs: Configuration*): Project = copy(configurations = Defaults.overrideConfigs(cs : _*)(configurations))
  def dependsOn(deps: ClasspathDep[ProjectReference]*): Project = copy(dependencies = dependencies ++ deps)
  def delegateTo(from: ProjectReference*): Project = copy(delegates = delegates ++ from)
  def aggregate(refs: ProjectReference*): Project = copy(aggregate = (aggregate: Seq[ProjectReference]) ++ refs)
  def configs(cs: Configuration*): Project = copy(configurations = configurations ++ cs)
  def settings(ss: Project.Setting[_]*): Project = copy(settings = (settings: Seq[Project.Setting[_]]) ++ ss)
}
/** A project whose references have all been resolved to concrete `ProjectRef`s. */
sealed trait ResolvedProject extends ProjectDefinition[ProjectRef]

/**
 * Snapshot of a loaded build: the build structure, the session settings, and the
 * currently selected project. Provides typed access to settings and task
 * execution scoped to the current project.
 */
final case class Extracted(structure: BuildStructure, session: SessionSettings, currentRef: ProjectRef)(implicit val showKey: Show[ScopedKey[_]])
{
  def rootProject = structure.rootProject
  lazy val currentUnit = structure units currentRef.build
  lazy val currentProject = currentUnit defined currentRef.project
  lazy val currentLoader: ClassLoader = currentUnit.loader

  def get[T](key: TaskKey[T]): Task[T] = get(key.task)
  def get[T](key: SettingKey[T]) = getOrError(inCurrent(key), key.key)
  def getOpt[T](key: SettingKey[T]): Option[T] = structure.data.get(inCurrent(key), key.key)
  // If the key has no explicit project axis, scope it to the current project.
  private[this] def inCurrent[T](key: SettingKey[T]): Scope = if(key.scope.project == This) key.scope.copy(project = Select(currentRef)) else key.scope

  @deprecated("This method does not apply state changes requested during task execution. Use 'runTask' instead, which does.", "0.11.1")
  def evalTask[T](key: TaskKey[T], state: State): T = runTask(key, state)._2
  /** Runs `key` for the current project and returns the updated State with the task result. */
  def runTask[T](key: TaskKey[T], state: State): (State, T) =
  {
    import EvaluateTask._
    val rkey = resolve(key.scopedKey)
    val config = extractedConfig(this, structure)
    val value: Option[(State, Result[T])] = apply(structure, key.task.scopedKey, state, currentRef, config)
    val (newS, result) = getOrError(rkey.scope, rkey.key, value)
    (newS, processResult(result, newS.log))
  }
  /** Runs `key` and all aggregated instances of it, as the `key` command would. */
  def runAggregated[T](key: TaskKey[T], state: State): State =
  {
    val rkey = resolve(key.scopedKey)
    val tasks = Aggregation.getTasks(rkey, structure, true)
    Aggregation.runTasks(state, structure, tasks, Aggregation.Dummies(KNil, HNil), show = false )(showKey)
  }
  // Resolves This/ThisBuild/etc. axes against the current build and root project.
  private[this] def resolve[T](key: ScopedKey[T]): ScopedKey[T] =
    Project.mapScope(Scope.resolveScope(GlobalScope, currentRef.build, rootProject) )( key.scopedKey )
  private def getOrError[T](scope: Scope, key: AttributeKey[_], value: Option[T])(implicit display: Show[ScopedKey[_]]): T =
    value getOrElse error(display(ScopedKey(scope, key)) + " is undefined.")
  private def getOrError[T](scope: Scope, key: AttributeKey[T])(implicit display: Show[ScopedKey[_]]): T =
    structure.data.get(scope, key) getOrElse error(display(ScopedKey(scope, key)) + " is undefined.")

  /** Appends `settings` to the session's original settings and reloads the structure into `state`. */
  def append(settings: Seq[Setting[_]], state: State): State =
  {
    val appendSettings = Load.transformSettings(Load.projectScope(currentRef), currentRef.build, rootProject, settings)
    val newStructure = Load.reapply(session.original ++ appendSettings, structure)
    Project.setProject(session, newStructure, state)
  }
}

/** A classpath dependency on `project`, optionally restricted to a configuration mapping. */
sealed trait ClasspathDep[PR <: ProjectReference] { def project: PR; def configuration: Option[String] }
final case class ResolvedClasspathDependency(project: ProjectRef, configuration: Option[String]) extends ClasspathDep[ProjectRef]
final case class ClasspathDependency(project: ProjectReference, configuration: Option[String]) extends ClasspathDep[ProjectReference]
object Project extends Init[Scope] with ProjectExtra
{
  // ---- Key display helpers ------------------------------------------------
  /** Shows a key fully qualified, including build URI and project. */
  lazy val showFullKey: Show[ScopedKey[_]] = new Show[ScopedKey[_]] { def apply(key: ScopedKey[_]) = displayFull(key) }
  def showContextKey(state: State): Show[ScopedKey[_]] =
    if(isProjectLoaded(state)) showContextKey( session(state), structure(state) ) else showFullKey
  def showContextKey(session: SessionSettings, structure: BuildStructure): Show[ScopedKey[_]] = showRelativeKey(session.current, structure.allProjects.size > 1)
  def showLoadingKey(loaded: Load.LoadedBuild): Show[ScopedKey[_]] = showRelativeKey( ProjectRef(loaded.root, loaded.units(loaded.root).rootProjects.head), loaded.allProjectRefs.size > 1 )
  /** Shows a key relative to `current`; the project part is abbreviated or dropped. */
  def showRelativeKey(current: ProjectRef, multi: Boolean): Show[ScopedKey[_]] = new Show[ScopedKey[_]] {
    def apply(key: ScopedKey[_]) = Scope.display(key.scope, key.key.label, ref => displayRelative(current, multi, ref))
  }
  def displayRelative(current: ProjectRef, multi: Boolean, project: Reference): String = project match {
    case BuildRef(current.build) => "{.}/"
    case `current` => if(multi) current.project + "/" else ""
    case ProjectRef(current.build, x) => x + "/"
    case _ => display(project) + "/"
  }

  // ---- Construction -------------------------------------------------------
  // Shared implementation for Project/ResolvedProject; forces the by-name
  // reference/settings arguments lazily.
  private abstract class ProjectDef[PR <: ProjectReference](val id: String, val base: File, aggregate0: => Seq[PR], dependencies0: => Seq[ClasspathDep[PR]], delegates0: => Seq[PR],
    settings0: => Seq[Setting[_]], val configurations: Seq[Configuration]) extends ProjectDefinition[PR]
  {
    lazy val aggregate = aggregate0
    lazy val dependencies = dependencies0
    lazy val delegates = delegates0
    lazy val settings = settings0

    Dag.topologicalSort(configurations)(_.extendsConfigs) // checks for cyclic references here instead of having to do it in Scope.delegates
  }

  /** Constructs an unresolved Project, validating that `id` is a legal identifier. */
  def apply(id: String, base: File, aggregate: => Seq[ProjectReference] = Nil, dependencies: => Seq[ClasspathDep[ProjectReference]] = Nil, delegates: => Seq[ProjectReference] = Nil,
    settings: => Seq[Setting[_]] = defaultSettings, configurations: Seq[Configuration] = Configurations.default): Project =
  {
    Command.parse(id, DefaultParsers.ID).left.foreach(errMsg => error("Invalid project ID: " + errMsg))
    new ProjectDef[ProjectReference](id, base, aggregate, dependencies, delegates, settings, configurations) with Project
  }

  /** Constructs a ResolvedProject from already-resolved references; no id validation. */
  def resolved(id: String, base: File, aggregate: => Seq[ProjectRef], dependencies: => Seq[ResolvedClasspathDependency], delegates: => Seq[ProjectRef],
    settings: Seq[Setting[_]], configurations: Seq[Configuration]): ResolvedProject =
      new ProjectDef[ProjectRef](id, base, aggregate, dependencies, delegates, settings, configurations) with ResolvedProject

  def defaultSettings: Seq[Setting[_]] = Defaults.defaultSettings

  /** Enables the `project % "config"` syntax for building classpath dependencies. */
  final class Constructor(p: ProjectReference) {
    def %(conf: Configuration): ClasspathDependency = %(conf.name)
    def %(conf: String): ClasspathDependency = new ClasspathDependency(p, Some(conf))
  }

  // ---- State lookups ------------------------------------------------------
  def getOrError[T](state: State, key: AttributeKey[T], msg: String): T = state get key getOrElse error(msg)
  def structure(state: State): BuildStructure = getOrError(state, stateBuildStructure, "No build loaded.")
  def session(state: State): SessionSettings = getOrError(state, sessionSettings, "Session not initialized.")
  def isProjectLoaded(state: State): Boolean = (state has sessionSettings) && (state has stateBuildStructure)

  def extract(state: State): Extracted = extract( session(state), structure(state) )
  def extract(se: SessionSettings, st: BuildStructure): Extracted = Extracted(st, se, se.current)( showContextKey(se, st) )

  def getProjectForReference(ref: Reference, structure: BuildStructure): Option[ResolvedProject] =
    ref match { case pr: ProjectRef => getProject(pr, structure); case _ => None }
  def getProject(ref: ProjectRef, structure: BuildStructure): Option[ResolvedProject] = getProject(ref, structure.units)
  def getProject(ref: ProjectRef, structure: Load.LoadedBuild): Option[ResolvedProject] = getProject(ref, structure.units)
  def getProject(ref: ProjectRef, units: Map[URI, Load.LoadedBuildUnit]): Option[ResolvedProject] =
    (units get ref.build).flatMap(_.defined get ref.project)

  // ---- Load/unload lifecycle ----------------------------------------------
  /** Runs exit hooks and the previously-registered onUnload hook before a (re)load. */
  def runUnloadHooks(s: State): State =
  {
    val previousOnUnload = orIdentity(s get Keys.onUnload.key)
    previousOnUnload(s.runExitHooks())
  }
  /** Installs `structure`/`session` into the state and runs the onLoad hook. */
  def setProject(session: SessionSettings, structure: BuildStructure, s: State): State =
  {
    val unloaded = runUnloadHooks(s)
    val (onLoad, onUnload) = getHooks(structure.data)
    val newAttrs = unloaded.attributes.put(stateBuildStructure, structure).put(sessionSettings, session).put(Keys.onUnload.key, onUnload)
    val newState = unloaded.copy(attributes = newAttrs)
    onLoad(updateCurrent( newState ))
  }
  def orIdentity[T](opt: Option[T => T]): T => T = opt getOrElse idFun
  def getHook[T](key: SettingKey[T => T], data: Settings[Scope]): T => T = orIdentity(key in GlobalScope get data)
  def getHooks(data: Settings[Scope]): (State => State, State => State) = (getHook(Keys.onLoad, data), getHook(Keys.onUnload, data))

  def current(state: State): ProjectRef = session(state).current

  /**
   * Refreshes state derived from the current project: prompt, history path,
   * watched sources, project-defined commands, and the onLoadMessage.
   */
  def updateCurrent(s: State): State =
  {
    val structure = Project.structure(s)
    val ref = Project.current(s)
    val project = Load.getProject(structure.units, ref.build, ref.project)
    val msg = Keys.onLoadMessage in ref get structure.data getOrElse ""
    if(!msg.isEmpty) s.log.info(msg)
    def get[T](k: SettingKey[T]): Option[T] = k in ref get structure.data
    def commandsIn(axis: ResolvedReference) = commands in axis get structure.data toList ;
    // Commands can be contributed at project, build, and global scope.
    val allCommands = commandsIn(ref) ++ commandsIn(BuildRef(ref.build)) ++ (commands in Global get structure.data toList )
    val history = get(historyPath) flatMap idFun
    val prompt = get(shellPrompt)
    val watched = get(watch)
    // Tag project commands so they can be removed on the next updateCurrent.
    val commandDefs = allCommands.distinct.flatten[Command].map(_ tag (projectCommand, true))
    val newDefinedCommands = commandDefs ++ BuiltinCommands.removeTagged(s.definedCommands, projectCommand)
    val newAttrs = setCond(Watched.Configuration, watched, s.attributes).put(historyPath.key, history)
    s.copy(attributes = setCond(shellPrompt.key, prompt, newAttrs), definedCommands = newDefinedCommands)
  }
  /** Puts or removes `key` in `attributes` depending on whether `vopt` is defined. */
  def setCond[T](key: AttributeKey[T], vopt: Option[T], attributes: AttributeMap): AttributeMap =
    vopt match { case Some(v) => attributes.put(key, v); case None => attributes.remove(key) }
  def makeSettings(settings: Seq[Setting[_]], delegates: Scope => Seq[Scope], scopeLocal: ScopedKey[_] => Seq[Setting[_]])(implicit display: Show[ScopedKey[_]]) =
    make(settings)(delegates, scopeLocal, display)

  // ---- Reference display --------------------------------------------------
  def displayFull(scoped: ScopedKey[_]): String = Scope.display(scoped.scope, scoped.key.label)
  def display(ref: Reference): String =
    ref match
    {
      case pr: ProjectReference => display(pr)
      case br: BuildReference => display(br)
    }
  def display(ref: BuildReference) =
    ref match
    {
      case ThisBuild => "{<this>}"
      case BuildRef(uri) => "{" + uri + "}"
    }
  def display(ref: ProjectReference) =
    ref match
    {
      case ThisProject => "{<this>}<this>"
      case LocalRootProject => "{<this>}<root>"
      case LocalProject(id) => "{<this>}" + id
      case RootProject(uri) => "{" + uri + " }<root>"
      case ProjectRef(uri, id) => "{" + uri + "}" + id
    }

  def fillTaskAxis(scoped: ScopedKey[_]): ScopedKey[_] =
    ScopedKey(Scope.fillTaskAxis(scoped.scope, scoped.key), scoped.key)

  /** Natural transformation applying `f` to the scope of any ScopedKey. */
  def mapScope(f: Scope => Scope) = new (ScopedKey ~> ScopedKey) { def apply[T](key: ScopedKey[T]) =
    ScopedKey( f(key.scope), key.key)
  }

  /** Rescopes both the defined keys and the referenced keys of `ss`. */
  def transform(g: Scope => Scope, ss: Seq[Setting[_]]): Seq[Setting[_]] = {
    val f = mapScope(g)
    ss.map(_ mapKey f mapReferenced f)
  }
  /** Rescopes only the referenced keys of `ss`; defined keys are untouched. */
  def transformRef(g: Scope => Scope, ss: Seq[Setting[_]]): Seq[Setting[_]] = {
    val f = mapScope(g)
    ss.map(_ mapReferenced f)
  }

  // ---- Inspection helpers (used by `inspect` and friends) ------------------
  def delegates(structure: BuildStructure, scope: Scope, key: AttributeKey[_]): Seq[ScopedKey[_]] =
    structure.delegates(scope).map(d => ScopedKey(d, key))
  def scopedKeyData(structure: BuildStructure, scope: Scope, key: AttributeKey[_]): Option[ScopedKeyData[_]] =
    structure.data.get(scope, key) map { v => ScopedKeyData(ScopedKey(scope, key), v) }

  /** Builds the multi-line description shown by `inspect` for one scoped key. */
  def details(structure: BuildStructure, actual: Boolean, scope: Scope, key: AttributeKey[_])(implicit display: Show[ScopedKey[_]]): String =
  {
    val scoped = ScopedKey(scope,key)
    val data = scopedKeyData(structure, scope, key) map {_.description} getOrElse {"No entry for key."}
    val description = key.description match { case Some(desc) => "Description:\\n\\t" + desc + "\\n"; case None => "" }
    val definedIn = structure.data.definingScope(scope, key) match {
      case Some(sc) => "Provided by:\\n\\t" + Scope.display(sc, key.label) + "\\n"
      case None => ""
    }
    val cMap = flattenLocals(compiled(structure.settings, actual)(structure.delegates, structure.scopeLocal, display))
    val related = cMap.keys.filter(k => k.key == key && k.scope != scope)
    val depends = cMap.get(scoped) match { case Some(c) => c.dependencies.toSet; case None => Set.empty }
    val reverse = reverseDependencies(cMap, scoped)
    def printScopes(label: String, scopes: Iterable[ScopedKey[_]]) =
      if(scopes.isEmpty) "" else scopes.map(display.apply).mkString(label + ":\\n\\t", "\\n\\t", "\\n")
    data + "\\n" +
      description +
      definedIn +
      printScopes("Dependencies", depends) +
      printScopes("Reverse dependencies", reverse) +
      printScopes("Delegates", delegates(structure, scope, key)) +
      printScopes("Related", related)
  }

  def settingGraph(structure: BuildStructure, basedir: File, scoped: ScopedKey[_])(implicit display: Show[ScopedKey[_]]): SettingGraph =
    SettingGraph(structure, basedir, scoped, 0)

  /** Writes DOT graphs of actual and declared setting dependencies under `basedir`. */
  def graphSettings(structure: BuildStructure, basedir: File)(implicit display: Show[ScopedKey[_]])
  {
    def graph(actual: Boolean, name: String) = graphSettings(structure, actual, name, new File(basedir, name + ".dot"))
    graph(true, "actual_dependencies")
    graph(false, "declared_dependencies")
  }
  def graphSettings(structure: BuildStructure, actual: Boolean, graphName: String, file: File)(implicit display: Show[ScopedKey[_]])
  {
    val rel = relation(structure, actual)
    val keyToString = display.apply _
    DotGraph.generateGraph(file, graphName, rel, keyToString, keyToString)
  }

  /** Relation from each defined key to the keys it depends on (actual or declared). */
  def relation(structure: BuildStructure, actual: Boolean)(implicit display: Show[ScopedKey[_]]) =
  {
    type Rel = Relation[ScopedKey[_], ScopedKey[_]]
    val cMap = flattenLocals(compiled(structure.settings, actual)(structure.delegates, structure.scopeLocal, display))
    ((Relation.empty: Rel) /: cMap) { case (r, (key, value)) =>
      r + (key, value.dependencies)
    }
  }

  def showDefinitions(key: AttributeKey[_], defs: Seq[Scope])(implicit display: Show[ScopedKey[_]]): String =
    defs.map(scope => display(ScopedKey(scope, key))).sorted.mkString("\\n\\t", "\\n\\t", "\\n\\n")
  def showUses(defs: Seq[ScopedKey[_]])(implicit display: Show[ScopedKey[_]]): String =
    defs.map(display.apply).sorted.mkString("\\n\\t", "\\n\\t", "\\n\\n")
  /** All scopes in which `key` is defined. */
  def definitions(structure: BuildStructure, actual: Boolean, key: AttributeKey[_])(implicit display: Show[ScopedKey[_]]): Seq[Scope] =
    relation(structure, actual)(display)._1s.toSeq flatMap { sk => if(sk.key == key) sk.scope :: Nil else Nil }
  /** All keys whose definitions depend on `key`. */
  def usedBy(structure: BuildStructure, actual: Boolean, key: AttributeKey[_])(implicit display: Show[ScopedKey[_]]): Seq[ScopedKey[_]] =
    relation(structure, actual)(display).all.toSeq flatMap { case (a,b) => if(b.key == key) List[ScopedKey[_]](a) else Nil }
  def reverseDependencies(cMap: Map[ScopedKey[_],Flattened], scoped: ScopedKey[_]): Iterable[ScopedKey[_]] =
    for( (key,compiled) <- cMap; dep <- compiled.dependencies if dep == scoped) yield key
	/**
	 * Appends `settings` to the current session after rescoping each one to Global
	 * and redirecting every existing definition of the same key to that global value.
	 */
	def setAll(extracted: Extracted, settings: Seq[Setting[_]]) =
	{
		import extracted._
		// All keys that currently have a definition, taken from the actual (compiled) relation.
		val allDefs = relation(extracted.structure, true)._1s.toSeq
		val projectScope = Load.projectScope(currentRef)
		// Resolves references in a single setting against the current project scope.
		def resolve(s: Setting[_]): Seq[Setting[_]] = Load.transformSettings(projectScope, currentRef.build, rootProject, s :: Nil)
		// Defines `setting` in the Global scope and points every existing definition of
		// the same underlying attribute key at that global definition.
		def rescope[T](setting: Setting[T]): Seq[Setting[_]] =
		{
			val akey = setting.key.key
			val global = ScopedKey(Global, akey)
			val globalSetting = resolve( Project.setting(global, setting.init) )
			globalSetting ++ allDefs.flatMap { d =>
				if(d.key == akey)
					Seq( SettingKey(akey) in d.scope <<= global)
				else
					Nil
			}
		}
		extracted.session.appendRaw(settings flatMap { x => rescope(x) } )
	}
	/** Actions understood by `reload`: return to the previous build, stay in the current one, or enter the plugin definition. */
	object LoadAction extends Enumeration {
		val Return, Current, Plugins = Value
	}
import LoadAction._
import DefaultParsers._
	// Parses an optional "plugins" or "return" argument to reload; defaults to Current.
	val loadActionParser = token(Space ~> ("plugins" ^^^ Plugins | "return" ^^^ Return)) ?? Current
	val ProjectReturn = AttributeKey[List[File]]("project-return", "Maintains a stack of builds visited using reload.")
	// The stack of build base directories visited via `reload plugins`, most recent first.
	def projectReturn(s: State): List[File] = s.attributes get ProjectReturn getOrElse Nil
	// Replaces the stored build stack on the state.
	def setProjectReturn(s: State, pr: List[File]): State = s.copy(attributes = s.attributes.put( ProjectReturn, pr) )
	/**
	 * Computes the build directory to load for the given reload `action`, returning
	 * the updated state (with the visited-build stack adjusted) and that directory.
	 */
	def loadAction(s: State, action: LoadAction.Value) = action match {
		case Return =>
			// Pop the current build off the stack and go back to the previous one.
			projectReturn(s) match
			{
				case current :: returnTo :: rest => (setProjectReturn(s, returnTo :: rest), returnTo)
				case _ => error("Not currently in a plugin definition")
			}
		case Current =>
			// Stay in the current build; seed the stack with the base directory if empty.
			val base = s.configuration.baseDirectory
			projectReturn(s) match { case Nil => (setProjectReturn(s, base :: Nil), base); case x :: xs => (s, x) }
		case Plugins =>
			// Descend into the plugin definition project, pushing it onto the stack.
			val extracted = Project.extract(s)
			val newBase = extracted.currentUnit.unit.plugins.base
			val newS = setProjectReturn(s, newBase :: projectReturn(s))
			(newS, newBase)
	}
	// Evaluates `taskKey` but discards the resulting State; kept only for source compatibility.
	@deprecated("This method does not apply state changes requested during task execution. Use 'runTask' instead, which does.", "0.11.1")
	def evaluateTask[T](taskKey: ScopedKey[Task[T]], state: State, checkCycles: Boolean = false, maxWorkers: Int = EvaluateTask.SystemProcessors): Option[Result[T]] =
		runTask(taskKey, state, EvaluateConfig(true, EvaluateTask.defaultRestrictions(maxWorkers), checkCycles)).map(_._2)
	/** Evaluates `taskKey` with the default restrictions, returning the transformed state and the task result, or None if the key is undefined. */
	def runTask[T](taskKey: ScopedKey[Task[T]], state: State, checkCycles: Boolean = false): Option[(State, Result[T])] =
		runTask(taskKey, state, EvaluateConfig(true, EvaluateTask.restrictions(state), checkCycles))
def runTask[T](taskKey: ScopedKey[Task[T]], state: State, config: EvaluateConfig): Option[(State, Result[T])] =
{
val extracted = Project.extract(state)
EvaluateTask(extracted.structure, taskKey, state, extracted.currentRef, config)
}
	// this is here instead of Scoped so that it is considered without need for import (because of Project.Initialize)
	/** Enriches `Initialize[Task[T]]` values with the convenience methods of Scoped.RichInitializeTask. */
	implicit def richInitializeTask[T](init: Initialize[Task[T]]): Scoped.RichInitializeTask[T] = new Scoped.RichInitializeTask(init)
	/** Enriches `Initialize[InputTask[T]]` values with the convenience methods of Scoped.RichInitializeInputTask. */
	implicit def richInitializeInputTask[T](init: Initialize[InputTask[T]]): Scoped.RichInitializeInputTask[T] = new Scoped.RichInitializeInputTask(init)
	/** Enriches plain `Initialize[T]` values with the convenience methods of Scoped.RichInitialize. */
	implicit def richInitialize[T](i: Initialize[T]): Scoped.RichInitialize[T] = new Scoped.RichInitialize[T](i)
}
/** Pairs a scoped key with its current value and renders type information derived from the key's manifest. */
final case class ScopedKeyData[A](scoped: ScopedKey[A], value: Any)
{
	import Types.const
	/** The underlying attribute key. */
	val key = scoped.key
	/** The scope the key is defined in. */
	val scope = scoped.scope
	/** Renders the key's type: Task[T], InputTask[T], or the plain manifest string for settings. */
	def typeName: String = fold(fmtMf("Task[%s]"), fmtMf("InputTask[%s]"), key.manifest.toString)
	/** The value, but only for settings; tasks have no immediate value. */
	def settingValue: Option[Any] = fold(const(None), const(None), Some(value))
	/** Human-readable description of the key and, for settings, its value. */
	def description: String = fold(fmtMf("Task: %s"), fmtMf("Input task: %s"),
		"Setting: %s = %s" format (key.manifest.toString, value.toString))
	/** Dispatches on the erased manifest type: tasks, input tasks, and everything else. */
	def fold[B](targ: OptManifest[_] => B, itarg: OptManifest[_] => B, s: => B): B =
	{
		val erased = key.manifest.erasure
		if (erased == classOf[Task[_]]) targ(key.manifest.typeArguments.head)
		else if (erased == classOf[InputTask[_]]) itarg(key.manifest.typeArguments.head)
		else s
	}
	/** Formats the pattern `s` with a manifest argument. */
	def fmtMf(s: String): OptManifest[_] => String = (mf: OptManifest[_]) => s format mf
}
/** Extra conveniences mixed into Project: dependency constructors and helpers for scoping batches of settings. */
trait ProjectExtra
{
	// Allows any type viewable as a ProjectReference where a configuration dependency constructor is expected.
	implicit def configDependencyConstructor[T <% ProjectReference](p: T): Constructor = new Constructor(p)
	// Allows any type viewable as a ProjectReference to be used as a classpath dependency (no configuration mapping).
	implicit def classpathDependency[T <% ProjectReference](p: T): ClasspathDependency = new ClasspathDependency(p, None)
	/** Scopes `ss` to configuration `conf` and records `conf` as the `configuration` setting. */
	def inConfig(conf: Configuration)(ss: Seq[Setting[_]]): Seq[Setting[_]] =
		inScope(ThisScope.copy(config = Select(conf)) )( (configuration :== conf) +: ss)
	/** Scopes `ss` to the task `t`. */
	def inTask(t: Scoped)(ss: Seq[Setting[_]]): Seq[Setting[_]] =
		inScope(ThisScope.copy(task = Select(t.key)) )( ss )
	/** Replaces `This` axes in the scopes of `ss` with the corresponding axes of `scope`. */
	def inScope(scope: Scope)(ss: Seq[Setting[_]]): Seq[Setting[_]] =
		Project.transform(Scope.replaceThis(scope), ss)
}
import sbinary.{Format, Operations}
/**
 * Session-scoped variables keyed by task.
 * Values live in the `sessionVars` attribute of `State` and may additionally be
 * persisted to the task's binary streams so they can be reloaded later.
 */
object SessionVar
{
	/** Identifier of the binary stream used to persist values. */
	val DefaultDataID = "data"

	// these are required because of inference+manifest limitations
	final case class Key[T](key: ScopedKey[Task[T]])

	/** Immutable map from task keys to their session values. */
	final case class Map(map: IMap[Key, Id]) {
		def get[T](k: ScopedKey[Task[T]]): Option[T] = map get Key(k)
		def put[T](k: ScopedKey[Task[T]], v: T): Map = Map(map put (Key(k), v))
	}
	def emptyMap = Map(IMap.empty)

	/** Persists `value` for `key` and records it in the session vars of `state`. */
	def persistAndSet[T](key: ScopedKey[Task[T]], state: State, value: T)(implicit f: sbinary.Format[T]): State =
	{
		persist(key, state, value)(f)
		set(key, state, value)
	}

	/** Writes `value` to the binary stream associated with `key`. */
	def persist[T](key: ScopedKey[Task[T]], state: State, value: T)(implicit f: sbinary.Format[T]): Unit =
		Project.structure(state).streams(state).use(key)( s =>
			Operations.write(s.binary(DefaultDataID), value)(f)
		)

	/** Drops all session variables from `s`. */
	def clear(s: State): State = s.put(sessionVars, SessionVar.emptyMap)
	/** Looks up the in-memory session value for `key`, if any. */
	def get[T](key: ScopedKey[Task[T]], state: State): Option[T] = orEmpty(state get sessionVars) get key
	/** Records `value` for `key` in the session vars of `state`. */
	def set[T](key: ScopedKey[Task[T]], state: State, value: T): State = state.update(sessionVars)(om => orEmpty(om) put (key, value))
	def orEmpty(opt: Option[Map]) = opt getOrElse emptyMap

	/** Attaches `f` as a state transformation to run with the result of `task` after it completes. */
	def transform[S](task: Task[S], f: (State, S) => State): Task[S] =
	{
		val g = (s: S, map: AttributeMap) => map.put(Keys.transformState, (state: State) => f(state, s))
		task.copy(info = task.info.postTransform(g))
	}

	/** Resolves `key` against `context`, preferring the scope in which the key is actually defined. */
	def resolveContext[T](key: ScopedKey[Task[T]], context: Scope, state: State): ScopedKey[Task[T]] =
	{
		val subScope = Scope.replaceThis(context)(key.scope)
		val scope = Project.structure(state).data.definingScope(subScope, key.key) getOrElse subScope
		ScopedKey(scope, key.key)
	}

	/** Reads the persisted value for `key`, or None if it is absent or unreadable. */
	def read[T](key: ScopedKey[Task[T]], state: State)(implicit f: Format[T]): Option[T] =
		Project.structure(state).streams(state).use(key) { s =>
			try { Some(Operations.read(s.readBinary(key, DefaultDataID))) }
			catch { case e: Exception => None }
		}

	/** Looks up `key` in the session vars, falling back to the persisted value. */
	def load[T](key: ScopedKey[Task[T]], state: State)(implicit f: Format[T]): Option[T] =
		get(key, state) orElse read(key, state)(f)

	/**
	 * Looks up `key` in the session vars; if absent, falls back to the persisted value.
	 * When the value came from persistent storage and `setIfUnset` is true, it is also
	 * recorded in the session vars of the returned state.
	 */
	def loadAndSet[T](key: ScopedKey[Task[T]], state: State, setIfUnset: Boolean = true)(implicit f: Format[T]): (State, Option[T]) =
		get(key, state) match {
			case s: Some[T] => (state, s)
			case None => read(key, state)(f) match {
				case s @ Some(t) =>
					// Fix: the previous condition `setIfUnset && get(key, state).isDefined` was
					// always false here (get just returned None), so the value was stored even
					// when setIfUnset was false. Honor the flag directly.
					val newState = if (setIfUnset) set(key, state, t) else state
					(newState, s)
				case None => (state, None)
			}
		}
}
| kuochaoyi/xsbt | main/Project.scala | Scala | bsd-3-clause | 25,250 |
package net.hotelling.harold.audiowidget
/**
 * Discrete Fourier Transform
 *
 * Naive O(n^^2) just to play around for now.
 */
object DFT {

  /**
   * Take the raw data points and test for the strength of the signal
   * at various frequencies going from minFreqHz to maxFreqHz in increments
   * of stepFreqHz.
   *
   * @param data raw sample values
   * @param samplesPerSecond sampling rate, used to map sample indices to seconds
   * @return one magnitude per tested frequency, in ascending frequency order
   */
  def dft(data: Array[Int], samplesPerSecond: Double,
      minFreqHz: Int = 20, maxFreqHz: Int = 20000, stepFreqHz: Int = 15): Array[Double] = {

    // Spin the data around the origin at the given frequency
    def spin(freq: Int): Point = {
      data.zipWithIndex.map({ case (value, i) =>
        val time = i.toDouble / samplesPerSecond
        // Fix: a frequency of `freq` Hz corresponds to an angular rate of
        // 2*pi*freq radians per second; the previous code omitted the 2*pi
        // factor, so the tested frequencies were not actually in Hz.
        val angle = 2.0 * Math.PI * time * freq
        val magnitude = value.toDouble
        // Convert polar coordinates to cartesian:
        Point(magnitude * Math.cos(angle), magnitude * Math.sin(angle))
        // fold (not reduce) so an empty input yields zero instead of throwing
      }).foldLeft(Point(0.0, 0.0))(_ + _)
    }

    // Test the strength of signal at each frequency of interest:
    (minFreqHz until maxFreqHz by stepFreqHz).map { freq =>
      spin(freq).magnitude
    }.toArray
  }

  /** 2D point supporting vector addition and distance from the origin. */
  case class Point(x: Double, y: Double) {
    def +(that: Point): Point = Point(this.x + that.x, this.y + that.y)
    def magnitude: Double = Math.sqrt((x * x) + (y * y))
  }
}
| haroldl/audiowidget | src/main/scala/net/hotelling/harold/audiowidget/DFT.scala | Scala | apache-2.0 | 1,244 |
/**
* Copyright (C) 2015 Pants project contributors (see CONTRIBUTORS.md).
* Licensed under the Apache License, Version 2.0 (see LICENSE).
*/
package org.pantsbuild.zinc.cache
import com.google.common.{cache => gcache}
import java.util.concurrent.Callable
import scala.collection.JavaConverters._
/**
 * An LRU cache using soft references.
 */
object Cache {
  /** Initial capacity hint; the backing cache never starts larger than this. */
  final val DefaultInitialSize = 8

  /** Creates a size-bounded guava cache whose values are held via soft references. */
  def apply[K<:AnyRef, V<:AnyRef](maxSize: Int): gcache.Cache[K, V] =
    gcache.CacheBuilder.newBuilder()
      .softValues()
      .initialCapacity(maxSize min DefaultInitialSize)
      .maximumSize(maxSize)
      .build()

  /**
   * Implicitly add conveniences to the guava Cache.
   *
   * NB: This should become a value class after we're on scala 2.11.x: see SI-8011.
   */
  implicit class Implicits[K, V](val c: gcache.Cache[K, V]) {
    /** Returns the cached value for `key`, computing and caching `value` if absent. */
    def getOrElseUpdate(key: K)(value: => V): V =
      c.get(key, new Callable[V] { def call = value })

    /** Snapshot of the current cache contents as key/value pairs. */
    def entries: Seq[(K,V)] =
      c.asMap.entrySet.asScala.toSeq.map { e => e.getKey -> e.getValue }
  }
}
| pombredanne/pants | src/scala/org/pantsbuild/zinc/cache/Cache.scala | Scala | apache-2.0 | 1,060 |
package cromwell.backend.impl.jes
import cromwell.backend.BackendJobDescriptorKey
import cromwell.backend.io.JobPaths
import cromwell.core.path.Path
import cromwell.services.metadata.CallMetadataKeys
object JesJobPaths {
  // Keys used in the detritus/log maps defined on the companion case class below.
  val JesLogPathKey = "jesLog"
  val JesMonitoringKey = "monitoring"
  val JesExecParamName = "exec"
}
/** File locations for a single JES job: logs, monitoring output and the exec script. */
final case class JesJobPaths(override val workflowPaths: JesWorkflowPaths, jobKey: BackendJobDescriptorKey) extends JobPaths {

  // Base name for this job's files: the call's local name plus "-<index>" when sharded.
  val jesLogBasename = {
    val index = jobKey.index.map(s => s"-$s").getOrElse("")
    s"${jobKey.node.localName}$index"
  }

  override val returnCodeFilename: String = s"$jesLogBasename-rc.txt"
  override val defaultStdoutFilename: String = s"$jesLogBasename-stdout.log"
  override val defaultStderrFilename: String = s"$jesLogBasename-stderr.log"
  override val scriptFilename: String = s"${JesJobPaths.JesExecParamName}.sh"

  // JES operation log for this call, under the call's execution root.
  val jesLogFilename: String = s"$jesLogBasename.log"
  lazy val jesLogPath: Path = callExecutionRoot.resolve(jesLogFilename)

  // Output of the optional monitoring script.
  val jesMonitoringLogFilename: String = s"${JesJobPaths.JesMonitoringKey}.log"
  lazy val jesMonitoringLogPath: Path = callExecutionRoot.resolve(jesMonitoringLogFilename)

  val jesMonitoringScriptFilename: String = s"${JesJobPaths.JesMonitoringKey}.sh"

  /*
  TODO: Move various monitoring files path generation here.

  "/cromwell_root" is a well known path, called in the regular JobPaths callDockerRoot.
  This JesCallPaths should know about that root, and be able to create the monitoring file paths.
  Instead of the AsyncActor creating the paths, the paths could then be shared with the CachingActor.

  Those monitoring paths could then be returned by metadataFiles and detritusFiles.
   */

  // Published to metadata: the backend log, plus the monitoring script/log when a monitoring script is configured.
  override lazy val customMetadataPaths = Map(
    CallMetadataKeys.BackendLogsPrefix + ":log" -> jesLogPath
  ) ++ (
      workflowPaths.monitoringScriptPath map { p => Map(JesMetadataKeys.MonitoringScript -> p,
        JesMetadataKeys.MonitoringLog -> jesMonitoringLogPath) } getOrElse Map.empty
      )

  // Extra detritus entry: the JES operation log.
  override lazy val customDetritusPaths: Map[String, Path] = Map(
    JesJobPaths.JesLogPathKey -> jesLogPath
  )

  // Extra log entry: the JES operation log.
  override lazy val customLogPaths: Map[String, Path] = Map(
    JesJobPaths.JesLogPathKey -> jesLogPath
  )
}
| ohsu-comp-bio/cromwell | supportedBackends/jes/src/main/scala/cromwell/backend/impl/jes/JesJobPaths.scala | Scala | bsd-3-clause | 2,288 |
package controllers
import model.{FatalError, Game}
import models.GameRowRel._
import models.TicketType.PriorityPointTicketType
import models._
import play.api.mvc.{AnyContent, Request}
/**
* Created by alex on 13/02/15.
*/
/** Factories for the HATEOAS link sets attached to API responses. */
trait LinkFactories extends Secure with Secret {

  /**
   * Builds the links for a game row: self, location, match report and logo links,
   * plus attend/unattend update links when `includeUpdates` is true.
   */
  def gameRowLinksFactory(includeUpdates: Boolean)(implicit request: Request[_ <: AnyContent]): Game => Links[GameRowRel] = game => {
    val links = Links
      .withSelf[GameRowRel](controllers.routes.Application.game(game.id).absoluteURL())
      .withLink(GameRowRel.LOCATION, controllers.routes.Location.location(game.id).absoluteURL())
      .withLink(GameRowRel.MATCH_REPORT, game.matchReport)
      .withLink(GameRowRel.HOME_LOGO, game.homeTeamImageLink)
      .withLink(GameRowRel.AWAY_LOGO, game.awayTeamImageLink)
      .withLink(GameRowRel.COMPETITION_LOGO, game.competitionImageLink)
    if (includeUpdates) {
      links.withLink(UNATTEND, controllers.routes.Update.unattend(game.id).absoluteURL())
        .withLink(ATTEND, controllers.routes.Update.attend(game.id).absoluteURL())
    }
    else {
      links
    }
  }

  /** Builds the links for a fatal error report; the message link carries the shared secret token. */
  def fatalErrorReportLinksFactory(implicit request: Request[_ <: AnyContent]): FatalError => Links[FatalErrorReportRel] = fatalError => {
    Links.withLink(FatalErrorReportRel.MESSAGE, routes.Errors.message(secret.token, fatalError.id).absoluteURL())
  }

  /** Applies `f` to `links` only when a user is signed in (the foldLeft over the Option applies `f` zero or one times). */
  def secureLinks[R <: Rel](game: Game, links: Links[R])(f: Links[R] => Links[R])(implicit request: Request[_ <: AnyContent]): Links[R] = {
    emailAndUsername.foldLeft(links) { (newLinks, _) => f(newLinks) }
  }
}
| unclealex72/west-ham-calendar | app/controllers/LinkFactories.scala | Scala | apache-2.0 | 1,595 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.adam.util
import org.scalatest.FunSuite
/** Tests for [[FileExtensions]] gzip/VCF filename extension detection. */
class FileExtensionsSuite extends FunSuite {

  // .gz and .bgz are gzip suffixes; .bgzf and bare extensions are not.
  test("ends in gzip extension") {
    assert(FileExtensions.isGzip("file.vcf.gz"))
    assert(FileExtensions.isGzip("file.fastq.bgz"))
    assert(!FileExtensions.isGzip("file.fastq.bgzf"))
    assert(!FileExtensions.isGzip("file.vcf"))
    assert(!FileExtensions.isGzip("file.fastq"))
  }

  // .vcf optionally followed by .bgz/.gz counts as VCF; .bcf and .vcf.bgzf do not.
  test("is a vcf extension") {
    assert(FileExtensions.isVcfExt("file.vcf"))
    assert(FileExtensions.isVcfExt("file.vcf.bgz"))
    assert(!FileExtensions.isVcfExt("file.bcf"))
    assert(FileExtensions.isVcfExt("file.vcf.gz"))
    assert(!FileExtensions.isVcfExt("file.vcf.bgzf"))
  }
}
| laserson/adam | adam-core/src/test/scala/org/bdgenomics/adam/util/FileExtensionsSuite.scala | Scala | apache-2.0 | 1,487 |
package argonaut.example
import argonaut._, Argonaut._
import org.specs2._
object CursorExample extends Specification {

  // Fixture document exercised by all the cursor examples below.
  val json =
    """
{
  "abc" :
    {
      "def" : 7
    },
  "ghi" :
    {
      "ata" : null,
      "jkl" :
        {
          "mno" : "argo"
        }
    },
  "pqr" : false,
  "operator": "is",
  "values": [
      ["cat", "lol"]
    , "dog"
    , "rabbit"
  ],
  "xyz" : 24
}
    """

  // Each example parses the fixture and navigates it with cursor operations:
  // --\\ descends to a field, downArray enters an array, := replaces the focus,
  // ! deletes the focus, and - rebuilds the document. Each must yield Some.
  def is = s2"""
  Replace '["cat", "lol"]' with 'false' ${
    json.parseOption flatMap (k =>
      +k --\\ "values" flatMap (_.downArray) map (_ := jBool(false)) map (-_)
    ) must beSome
  }
  Visit the 'values' array ${
    json.parseOption flatMap (k =>
      +k --\\ "values" flatMap (_.downArray) map (-_)
    ) must beSome
  }
  Delete the element '"dog"' from the 'values' array. ${
    json.parseOption flatMap (k =>
      +k --\\ "values" flatMap (_.downArray) flatMap (_.right) flatMap (!_) map (-_)
    ) must beSome
  }
  Replace '["cat", "lol"]' with 'false' and '"rabbit"' with 'true' ${
    json.parseOption flatMap (k =>
      +k --\\ "values" flatMap (_.downArray) map (_ := jBool(false)) flatMap (_.right) flatMap (_.right) map (_ := jBool(true)) map (-_)
    ) must beSome
  }
  """
}
| etorreborre/argonaut | src/test/scala/argonaut/example/CursorExample.scala | Scala | bsd-3-clause | 1,429 |
/*
* Copyright (C) 2005, The OpenURP Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openurp.edu.clazz.model
import org.beangle.commons.collection.Collections
import org.beangle.data.model.LongId
import org.beangle.data.model.pojo.Updated
import org.openurp.base.edu.model.Semester
import org.openurp.base.model.User
import java.time.Instant
import java.util.Locale
import scala.collection.mutable
/** Teaching plan (syllabus).
 * Unique per teaching task.
 */
class TeachingPlan extends LongId with Updated {

  /** The teaching task (clazz) this plan belongs to */
  var clazz: Clazz = _

  /** Locale of the teaching-plan document */
  var docLocale: Locale = _

  /** Semester */
  var semester: Semester = _

  /** Author */
  var author: Option[User] = None

  /** Lesson contents */
  var lessons: mutable.Buffer[Lesson] = Collections.newBuffer[Lesson]

  /** Attached file size */
  var fileSize: Int = _

  /** File MIME type */
  var mimeType: Option[String] = None

  /** File path */
  var filePath: Option[String] = None

  /** Whether the plan passed review */
  var passed: Option[Boolean] = None

  /** Reviewer */
  var auditor: Option[User] = None

  /** Review time */
  var auditAt: Option[Instant] = None
}
| openurp/api | edu/src/main/scala/org/openurp/edu/clazz/model/TeachingPlan.scala | Scala | lgpl-3.0 | 1,782 |
/**
* Copyright (c) 2017-2018 BusyMachines
*
* See company homepage at: https://www.busymachines.com/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package busymachines.json
/**
*
* @author Lorand Szakacs, lsz@lorandszakacs.com, lorand.szakacs@busymachines.com
* @since 10 Aug 2017
*
*/
/** A combined [[Encoder]] and [[Decoder]] for values of type `A`. */
trait Codec[A] extends Encoder[A] with Decoder[A]

object Codec {
  /** Summons the implicit codec instance for `A`. */
  def apply[A](implicit instance: Codec[A]): Codec[A] = instance

  /** Builds a codec from separate encoding and decoding halves. */
  def instance[A](encode: Encoder[A], decode: Decoder[A]): Codec[A] = {
    new Codec[A] {
      private val enc = encode
      private val dec = decode

      override def apply(a: A): Json = enc(a)

      override def apply(c: HCursor): io.circe.Decoder.Result[A] = dec(c)
    }
  }
}
| busymachines/busymachines-commons | json/src/main/scala/busymachines/json/Codec.scala | Scala | apache-2.0 | 1,253 |
package actors.ws
import akka.actor._
import play.api.libs.json._
import play.api.libs.iteratee._
import play.api.libs.concurrent._
import akka.util.Timeout
import akka.pattern.ask
import play.api.Play.current
import play.api.mvc._
import play.api.libs.concurrent.Execution.Implicits._
import scala.concurrent.duration._
import java.util.Date
/**
 * Base for WebSocket entry points: creates a single manager actor of type `M`
 * (optionally named) and exposes `control` to open a JSON WebSocket through it.
 */
abstract class WebSocketManager[M <: AbstractWSManagerActor](implicit ct: scala.reflect.ClassTag[M]) {

  // Optional actor name; when None an anonymous actor is created.
  // NOTE(review): `name` is read during construction of `_actor`; overriding it
  // with a strict val in a subclass may not be visible here — confirm or use a def/lazy val.
  val name: Option[String] = None

  private val _actor =
    (name) match {
      case Some(n) => Akka.system.actorOf(Props[M],n)
      case _ => Akka.system.actorOf(Props[M])
    }

  /** The singleton manager actor handling all connections for this endpoint. */
  def actor: ActorRef = _actor

  // Timeout for the Connect ask below.
  implicit val timeout = Timeout(30 seconds)

  import WSInnerMsgs._

  /** Asks the manager actor for a connection, yielding the (inbound iteratee, outbound enumerator) pair for the socket. */
  def control(implicit request: RequestHeader):scala.concurrent.Future[(Iteratee[JsValue,_],Enumerator[JsValue])] = {
    (actor ? Connect(request)).map {
      case Connected(iteratee,enumerator) =>
        (iteratee,enumerator)
    }
  }
}
/** Messages exchanged between WebSocketManager and its manager actor. */
object WSInnerMsgs {
  // Request to open a new client connection for the given HTTP request.
  case class Connect(request: RequestHeader)
  // Reply carrying the inbound iteratee and outbound enumerator for the socket.
  case class Connected(iteratee: Iteratee[JsValue,_],enumerator: Enumerator[JsValue])
  // Stops the sending client actor.
  case object Disconnect
}
/**
 * Base manager actor: accepts WebSocket connections, spawns one child actor per
 * client, and forwards every other message to all children.
 */
abstract class AbstractWSManagerActor extends Actor {
  import WSInnerMsgs._

  // Grace period granted to a freshly connected client (used by subclasses' client actors).
  val initTimeout = 4 seconds
  // Maximum silence tolerated from the browser before dropping the client.
  val browserTimeout = 2 seconds

  def receive = {
    connectionManagement orElse
    dispatch
  }

  // Fallback: broadcast anything that isn't connection management to every child.
  def dispatch: PartialFunction[Any,Unit] = {
    case msg =>
      context.children.foreach(act => act ! msg)
  }

  /** Props used to create the per-client actor for an incoming request. */
  def clientProp(implicit request: RequestHeader): Props

  import java.util.Date

  def connectionManagement: PartialFunction[Any,Unit] = {
    case Connect(r) =>
      import WSClientInnerMsgs._
      implicit val request: RequestHeader = r
      // One child actor per connected client.
      val act: ActorRef = context.actorOf(clientProp)
      // Outbound stream: hand the push channel to the client actor once ready;
      // tell it to quit when the stream completes or errors.
      val outChannel: Enumerator[JsValue] =
        Concurrent.unicast[JsValue](
          onStart = (
            (c) => {
              act ! InitDone(c)
            }),
          onComplete = () => act ! Quit,
          onError = {(_,_) => act ! Quit})
      // Inbound stream: {"pong": true} feeds the liveness check; anything else
      // is delivered to the client actor as JsFromClient.
      val inChannel: Iteratee[JsValue,Unit] =
        Iteratee.foreach[JsValue](msg =>
          (msg.\("pong").asOpt[Boolean]) match {
            case Some(true) =>
              act ! Pong
            case _ =>
              import WSClientMsgs._
              act ! JsFromClient(msg)
          })
      sender ! Connected(inChannel,outChannel)
    case Disconnect =>
      context.stop(sender)
  }
}
/** Messages exchanged between the socket streams and the per-client actor. */
object WSClientMsgs {
  // JSON received from the browser; retains the originating request.
  case class JsFromClient(elem: JsValue)(implicit _request: RequestHeader) {
    def request = _request
  }
  // JSON to push to the browser.
  case class JsToClient(elem: JsValue)
}
/** Internal lifecycle messages of the per-client actor. */
object WSClientInnerMsgs {
  // The outbound push channel is ready.
  case class InitDone(channel: Concurrent.Channel[JsValue])
  // Liveness probe sent to the browser / reply received from it.
  case object Ping
  case object Pong
  // Terminate this client: close the stream and stop the actor.
  case object Quit
}
/** Mixin providing a push of JSON to the client channel that self-terminates on failure. */
trait JsPushee {
  me : Actor =>
  // Pushes `topush` to the channel; on any failure the actor asks itself to Quit.
  // NOTE(review): this catches Throwable, including fatal errors — consider
  // scala.util.control.NonFatal so OutOfMemoryError and friends still propagate.
  def --->(topush: JsValue)(implicit channel: Concurrent.Channel[JsValue]) {
    import WSClientInnerMsgs._
    try channel.push(topush) catch {case _ : Throwable => me.self ! Quit}
  }
}
/** Manager whose clients are plain WsClientActor instances driven by the subclass-supplied `operative` behavior. */
abstract class WSManagerActor extends AbstractWSManagerActor {

  /** Application behavior for a client, parameterized by the client actor's ref. */
  def operative(implicit request: RequestHeader) :
  ((ActorRef) => Receive)

  def clientProp(implicit request: RequestHeader): Props =
    Props(
      new WsClientActor(
        initTimeout,
        browserTimeout,
        operative)(request)
    )
}
/** Manager whose clients delegate all messages to a dedicated device actor built from `wsDevice`. */
abstract class StatefullWSManagerActor extends AbstractWSManagerActor {

  /** Props of the device actor created for each client. */
  def wsDevice: Props

  def clientProp(implicit request: RequestHeader): Props =
    Props(
      new WsDispatcherClientActor(
        initTimeout,
        browserTimeout,
        wsDevice)(request)
    )
}
/**
 * Per-client actor: buffers messages until the push channel is available, then
 * runs a ping/pong liveness loop around the subclass-provided `operative` behavior.
 */
abstract class AbstractWsClientActor(implicit request: RequestHeader) extends Actor with JsPushee {

  // Grace period before the first pong is required.
  val initTimeout: FiniteDuration
  // Maximum silence between pongs before the client is dropped.
  val browserTimeout: FiniteDuration

  /** Application-specific message handling, parameterized by this actor's ref. */
  def operative(implicit request: RequestHeader): ((ActorRef) => PartialFunction[Any,Unit])

  import WSClientInnerMsgs._

  // Delay before the next ping once the operative state is (re)entered.
  val pingTimeout = 500 milliseconds

  def receive = {
    case InitDone(channel) =>
      // Push channel ready: enter the operative state with the initial grace period.
      rescheduleOperative(initTimeout)(channel)
    case any =>
      // Channel not ready yet: retry this message shortly instead of dropping it.
      context.system.scheduler.scheduleOnce(50 milliseconds)(
        self forward any)
  }

  /** Arms a Quit timer for `timeout`, switches to the operative state and schedules the next ping. */
  def rescheduleOperative(timeout: FiniteDuration)(implicit channel: Concurrent.Channel[JsValue]): Unit = {
    // Fix: use the supplied `timeout` — it was previously ignored in favor of a
    // hard-coded browserTimeout, so initTimeout never took effect.
    val newNextStop =
      context.system.scheduler.scheduleOnce(timeout, self, Quit)
    context.become(operativePingPong(newNextStop), true)
    context.system.scheduler.scheduleOnce(pingTimeout, self, Ping)
  }

  /** Re-arms the liveness loop with the standard browser timeout. */
  def rescheduleOperative(implicit channel: Concurrent.Channel[JsValue]): Unit =
    rescheduleOperative(browserTimeout)

  /** Operative behavior: liveness handling first, then outbound pushes, then application logic. */
  def operativePingPong(nextStop: Cancellable)(implicit channel: Concurrent.Channel[JsValue]): PartialFunction[Any,Unit] = {
    pingReceive orElse
    pongReceive(nextStop) orElse
    quitReceive orElse
    jsToClientReceive orElse
    operative.apply(self)
  }

  import WSClientMsgs._

  // Pushes application JSON out to the browser.
  def jsToClientReceive(implicit channel: Concurrent.Channel[JsValue]): PartialFunction[Any,Unit] = {
    case JsToClient(js) =>
      --->(js)
  }

  // Sends a liveness probe to the browser.
  def pingReceive(implicit channel: Concurrent.Channel[JsValue]): PartialFunction[Any,Unit] = {
    case Ping =>
      --->(Json.obj("ping" -> true))
  }

  // A pong arrived in time: cancel the pending Quit and restart the cycle.
  def pongReceive(nextStop: Cancellable)(implicit channel: Concurrent.Channel[JsValue]): PartialFunction[Any,Unit] = {
    case Pong =>
      nextStop.cancel
      rescheduleOperative
  }

  // Ends the outbound stream and stops this actor.
  def quitReceive(implicit channel: Concurrent.Channel[JsValue]): PartialFunction[Any,Unit] = {
    case Quit =>
      channel.eofAndEnd
      context.stop(self)
  }
}
/** Concrete client actor whose behavior is supplied directly as a function. */
class WsClientActor(
  _initTimeout: FiniteDuration,
  _browserTimeout: FiniteDuration,
  _operative: ((ActorRef) => PartialFunction[Any,Unit]))
  (implicit request: RequestHeader) extends AbstractWsClientActor {
  val initTimeout = _initTimeout
  val browserTimeout = _browserTimeout
  def operative(implicit request: RequestHeader) = _operative
}
/** Client actor that forwards every application message to a dedicated child device actor. */
class WsDispatcherClientActor(
  _initTimeout: FiniteDuration,
  _browserTimeout: FiniteDuration,
  deviceProp: Props)
  (implicit request: RequestHeader) extends AbstractWsClientActor {
  val initTimeout = _initTimeout
  val browserTimeout = _browserTimeout

  // The stateful device handling this client's messages.
  val device =
    context.actorOf(deviceProp)

  import WSClientMsgs._

  // All non-lifecycle messages are delegated to the device actor.
  def operative(implicit request: RequestHeader): ((ActorRef) => PartialFunction[Any,Unit]) = {
    (wsClient: ActorRef) => {
      case msg =>
        device ! msg
    }
  }
}
} | TPTeam/play_websocket_plugin | app/actors/ws/WebSocketManager.scala | Scala | gpl-2.0 | 6,559 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.types
import java.util.Locale
import scala.util.control.NonFatal
import org.json4s._
import org.json4s.JsonAST.JValue
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import org.apache.spark.annotation.Stable
import org.apache.spark.sql.catalyst.analysis.Resolver
import org.apache.spark.sql.catalyst.expressions.{Cast, Expression}
import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.util.Utils
/**
 * The base type of all Spark SQL data types.
 *
 * @since 1.3.0
 */
@Stable
abstract class DataType extends AbstractDataType {
  /**
   * Enables matching against DataType for expressions:
   * {{{
   * case Cast(child @ BinaryType(), StringType) =>
   * ...
   * }}}
   */
  private[sql] def unapply(e: Expression): Boolean = e.dataType == this

  /**
   * The default size of a value of this data type, used internally for size estimation.
   */
  def defaultSize: Int

  /** Name of the type used in JSON serialization. */
  def typeName: String = {
    this.getClass.getSimpleName
      .stripSuffix("$").stripSuffix("Type").stripSuffix("UDT")
      .toLowerCase(Locale.ROOT)
  }

  // JSON value for this type; container types override this with structured objects.
  private[sql] def jsonValue: JValue = typeName

  /** The compact JSON representation of this data type. */
  def json: String = compact(render(jsonValue))

  /** The pretty (i.e. indented) JSON representation of this data type. */
  def prettyJson: String = pretty(render(jsonValue))

  /** Readable string representation for the type. */
  def simpleString: String = typeName

  /** String representation for the type saved in external catalogs. */
  def catalogString: String = simpleString

  /** Readable string representation for the type with truncation */
  private[sql] def simpleString(maxNumberFields: Int): String = simpleString

  /** SQL-facing (upper-cased) name of the type. */
  def sql: String = simpleString.toUpperCase(Locale.ROOT)

  /**
   * Check if `this` and `other` are the same data type when ignoring nullability
   * (`StructField.nullable`, `ArrayType.containsNull`, and `MapType.valueContainsNull`).
   */
  private[spark] def sameType(other: DataType): Boolean =
    if (SQLConf.get.caseSensitiveAnalysis) {
      DataType.equalsIgnoreNullability(this, other)
    } else {
      DataType.equalsIgnoreCaseAndNullability(this, other)
    }

  /**
   * Returns the same data type but set all nullability fields are true
   * (`StructField.nullable`, `ArrayType.containsNull`, and `MapType.valueContainsNull`).
   */
  private[spark] def asNullable: DataType

  /**
   * Returns true if any `DataType` of this DataType tree satisfies the given function `f`.
   */
  private[spark] def existsRecursively(f: (DataType) => Boolean): Boolean = f(this)

  // As an AbstractDataType, a concrete type stands for itself and accepts
  // exactly the types it is sameType-equal to.
  override private[sql] def defaultConcreteType: DataType = this

  override private[sql] def acceptsType(other: DataType): Boolean = sameType(other)
}
/**
* @since 1.3.0
*/
@Stable
object DataType {
  // Matches e.g. "decimal(10, 2)"; the scale may be negative.
  private val FIXED_DECIMAL = """decimal\\(\\s*(\\d+)\\s*,\\s*(\\-?\\d+)\\s*\\)""".r

  /**
   * Parses a DDL string into a [[DataType]]. Tries a single data type first and
   * falls back to parsing it as a table schema (comma-separated column list).
   */
  def fromDDL(ddl: String): DataType = {
    try {
      CatalystSqlParser.parseDataType(ddl)
    } catch {
      case NonFatal(_) => CatalystSqlParser.parseTableSchema(ddl)
    }
  }

  /** Parses the JSON representation produced by [[DataType#json]]. */
  def fromJson(json: String): DataType = parseDataType(parse(json))

  // Lookup from type name (e.g. "integer") to singleton type, for all atomic
  // types without parameters; decimal is handled separately in nameToType.
  private val nonDecimalNameToType = {
    Seq(NullType, DateType, TimestampType, BinaryType, IntegerType, BooleanType, LongType,
      DoubleType, FloatType, ShortType, ByteType, StringType, CalendarIntervalType)
      .map(t => t.typeName -> t).toMap
  }
  /** Given the string representation of a type, return its DataType */
  private def nameToType(name: String): DataType = {
    name match {
      // Bare "decimal" means the default user precision/scale.
      case "decimal" => DecimalType.USER_DEFAULT
      // "decimal(p, s)" with explicit precision and scale.
      case FIXED_DECIMAL(precision, scale) => DecimalType(precision.toInt, scale.toInt)
      case other => nonDecimalNameToType.getOrElse(
        other,
        throw new IllegalArgumentException(
          s"Failed to convert the JSON string '$name' to a data type."))
    }
  }
  // Extractor that yields a JSON object's fields sorted by name, enabling
  // field-order-insensitive structural matching in parseDataType/parseStructField.
  private object JSortedObject {
    def unapplySeq(value: JValue): Option[List[(String, JValue)]] = value match {
      case JObject(seq) => Some(seq.toList.sortBy(_._1))
      case _ => None
    }
  }
  // NOTE: Map fields must be sorted in alphabetical order to keep consistent with the Python side.
  /** Converts a parsed JSON value into the DataType it describes. */
  private[sql] def parseDataType(json: JValue): DataType = json match {
    // Atomic types are encoded as their bare name.
    case JString(name) =>
      nameToType(name)

    case JSortedObject(
    ("containsNull", JBool(n)),
    ("elementType", t: JValue),
    ("type", JString("array"))) =>
      ArrayType(parseDataType(t), n)

    case JSortedObject(
    ("keyType", k: JValue),
    ("type", JString("map")),
    ("valueContainsNull", JBool(n)),
    ("valueType", v: JValue)) =>
      MapType(parseDataType(k), parseDataType(v), n)

    case JSortedObject(
    ("fields", JArray(fields)),
    ("type", JString("struct"))) =>
      StructType(fields.map(parseStructField))

    // Scala/Java UDT
    case JSortedObject(
    ("class", JString(udtClass)),
    ("pyClass", _),
    ("sqlType", _),
    ("type", JString("udt"))) =>
      Utils.classForName[UserDefinedType[_]](udtClass).getConstructor().newInstance()

    // Python UDT
    case JSortedObject(
    ("pyClass", JString(pyClass)),
    ("serializedClass", JString(serialized)),
    ("sqlType", v: JValue),
    ("type", JString("udt"))) =>
      new PythonUserDefinedType(parseDataType(v), pyClass, serialized)

    case other =>
      throw new IllegalArgumentException(
        s"Failed to convert the JSON string '${compact(render(other))}' to a data type.")
  }
  /** Converts a JSON value into a StructField; accepts objects with and without a metadata entry. */
  private def parseStructField(json: JValue): StructField = json match {
    case JSortedObject(
    ("metadata", metadata: JObject),
    ("name", JString(name)),
    ("nullable", JBool(nullable)),
    ("type", dataType: JValue)) =>
      StructField(name, parseDataType(dataType), nullable, Metadata.fromJObject(metadata))
    // Support reading schema when 'metadata' is missing.
    case JSortedObject(
    ("name", JString(name)),
    ("nullable", JBool(nullable)),
    ("type", dataType: JValue)) =>
      StructField(name, parseDataType(dataType), nullable)

    case other =>
      throw new IllegalArgumentException(
        s"Failed to convert the JSON string '${compact(render(other))}' to a field.")
  }
/**
 * Appends a tree-style description of `dataType` to `builder`, delegating to
 * the complex type's own formatter. Non-complex (leaf) types contribute
 * nothing here.
 *
 * @param dataType the type to render
 * @param prefix   indentation prefix for nested lines
 * @param builder  accumulator receiving the formatted output
 */
protected[types] def buildFormattedString(
    dataType: DataType,
    prefix: String,
    builder: StringBuilder): Unit = dataType match {
  case arrayType: ArrayType => arrayType.buildFormattedString(prefix, builder)
  case mapType: MapType => mapType.buildFormattedString(prefix, builder)
  case structType: StructType => structType.buildFormattedString(prefix, builder)
  case _ => // leaf types: nothing to add
}
/**
 * Compares two types, ignoring nullability of ArrayType, MapType, StructType.
 * Struct field names must still match exactly.
 */
private[types] def equalsIgnoreNullability(left: DataType, right: DataType): Boolean =
  (left, right) match {
    case (ArrayType(leftElem, _), ArrayType(rightElem, _)) =>
      equalsIgnoreNullability(leftElem, rightElem)
    case (MapType(leftKey, leftValue, _), MapType(rightKey, rightValue, _)) =>
      equalsIgnoreNullability(leftKey, rightKey) &&
        equalsIgnoreNullability(leftValue, rightValue)
    case (StructType(leftFields), StructType(rightFields)) =>
      // corresponds is false on a length mismatch, so no explicit size check is needed
      leftFields.corresponds(rightFields) { (l, r) =>
        l.name == r.name && equalsIgnoreNullability(l.dataType, r.dataType)
      }
    case (l, r) => l == r
  }
/**
 * Compares two types, ignoring compatible nullability of ArrayType, MapType, StructType.
 *
 * Compatible nullability is defined as follows:
 *   - If `from` and `to` are ArrayTypes, `from` has a compatible nullability with `to`
 *     if and only if `to.containsNull` is true, or both of `from.containsNull` and
 *     `to.containsNull` are false.
 *   - If `from` and `to` are MapTypes, `from` has a compatible nullability with `to`
 *     if and only if `to.valueContainsNull` is true, or both of `from.valueContainsNull`
 *     and `to.valueContainsNull` are false.
 *   - If `from` and `to` are StructTypes, `from` has a compatible nullability with `to`
 *     if and only if for every pair of fields, `to.nullable` is true, or both
 *     of `fromField.nullable` and `toField.nullable` are false.
 */
private[sql] def equalsIgnoreCompatibleNullability(from: DataType, to: DataType): Boolean = {
  // `from` can flow into `to` iff `to` accepts nulls or `from` never produces them.
  def nullabilityCompatible(fromNullable: Boolean, toNullable: Boolean): Boolean =
    toNullable || !fromNullable

  (from, to) match {
    case (ArrayType(fromElement, fn), ArrayType(toElement, tn)) =>
      nullabilityCompatible(fn, tn) &&
        equalsIgnoreCompatibleNullability(fromElement, toElement)
    case (MapType(fromKey, fromValue, fn), MapType(toKey, toValue, tn)) =>
      nullabilityCompatible(fn, tn) &&
        equalsIgnoreCompatibleNullability(fromKey, toKey) &&
        equalsIgnoreCompatibleNullability(fromValue, toValue)
    case (StructType(fromFields), StructType(toFields)) =>
      // corresponds is false on a length mismatch, matching the old length check
      fromFields.corresponds(toFields) { (fromField, toField) =>
        fromField.name == toField.name &&
          nullabilityCompatible(fromField.nullable, toField.nullable) &&
          equalsIgnoreCompatibleNullability(fromField.dataType, toField.dataType)
      }
    case (fromDataType, toDataType) => fromDataType == toDataType
  }
}
/**
 * Compares two types, ignoring nullability of ArrayType, MapType, StructType, and ignoring case
 * sensitivity of field names in StructType.
 */
private[sql] def equalsIgnoreCaseAndNullability(from: DataType, to: DataType): Boolean =
  (from, to) match {
    case (ArrayType(fromElement, _), ArrayType(toElement, _)) =>
      equalsIgnoreCaseAndNullability(fromElement, toElement)
    case (MapType(fromKey, fromValue, _), MapType(toKey, toValue, _)) =>
      equalsIgnoreCaseAndNullability(fromKey, toKey) &&
        equalsIgnoreCaseAndNullability(fromValue, toValue)
    case (StructType(fromFields), StructType(toFields)) =>
      // corresponds is false on a length mismatch, matching the old length check
      fromFields.corresponds(toFields) { (fromField, toField) =>
        fromField.name.equalsIgnoreCase(toField.name) &&
          equalsIgnoreCaseAndNullability(fromField.dataType, toField.dataType)
      }
    case (fromDataType, toDataType) => fromDataType == toDataType
  }
/**
 * Returns true if the two data types share the same "shape", i.e. the types
 * are the same, but the field names don't need to be the same.
 *
 * @param ignoreNullability whether to ignore nullability when comparing the types
 */
def equalsStructurally(
    from: DataType,
    to: DataType,
    ignoreNullability: Boolean = false): Boolean = {
  (from, to) match {
    case (left: ArrayType, right: ArrayType) =>
      // FIX: propagate ignoreNullability into the recursive calls. Previously it
      // was dropped, so nested nullability differences (e.g. an array inside a
      // struct) were still compared even when the caller asked to ignore them.
      equalsStructurally(left.elementType, right.elementType, ignoreNullability) &&
        (ignoreNullability || left.containsNull == right.containsNull)
    case (left: MapType, right: MapType) =>
      equalsStructurally(left.keyType, right.keyType, ignoreNullability) &&
        equalsStructurally(left.valueType, right.valueType, ignoreNullability) &&
        (ignoreNullability || left.valueContainsNull == right.valueContainsNull)
    case (StructType(fromFields), StructType(toFields)) =>
      fromFields.length == toFields.length &&
        fromFields.zip(toFields)
          .forall { case (l, r) =>
            equalsStructurally(l.dataType, r.dataType, ignoreNullability) &&
              (ignoreNullability || l.nullable == r.nullable)
          }
    // Leaf types: structural equality is plain equality.
    case (fromDataType, toDataType) => fromDataType == toDataType
  }
}
// Matches Spark's auto-generated column names: "col" followed by digits ("col1", "col42", ...).
// FIX: inside a triple-quoted (raw) string `\d` already reaches the regex engine
// as the digit class; the previous `\\d` denoted a literal backslash followed by
// 'd', so no generated name ever matched.
private val SparkGeneratedName = """col\d+""".r
/** True iff `name` looks like a Spark-generated column name (full match). */
private def isSparkGeneratedName(name: String): Boolean = name match {
  case SparkGeneratedName(_*) => true
  case _ => false
}
/**
 * Returns true if the write data type can be read using the read data type.
 *
 * The write type is compatible with the read type if:
 * - Both types are arrays, the array element types are compatible, and element nullability is
 *   compatible (read allows nulls or write does not contain nulls).
 * - Both types are maps and the map key and value types are compatible, and value nullability
 *   is compatible (read allows nulls or write does not contain nulls).
 * - Both types are structs and have the same number of fields. The type and nullability of each
 *   field from read/write is compatible. If byName is true, the name of each field from
 *   read/write needs to be the same.
 * - Both types are atomic and the write type can be safely cast to the read type.
 *
 * Extra fields in write-side structs are not allowed to avoid accidentally writing data that
 * the read schema will not read, and to ensure map key equality is not changed when data is read.
 *
 * @param write a write-side data type to validate against the read type
 * @param read a read-side data type
 * @param byName whether struct fields must match by name (vs. by position)
 * @param resolver name-equality function honoring case sensitivity settings
 * @param context dotted path used in error messages, e.g. "table.col.element"
 * @param addError callback that accumulates human-readable error messages;
 *                 all applicable errors are reported, not just the first
 * @return true if data written with the write type can be read using the read type
 */
def canWrite(
    write: DataType,
    read: DataType,
    byName: Boolean,
    resolver: Resolver,
    context: String,
    addError: String => Unit): Boolean = {
  (write, read) match {
    case (wArr: ArrayType, rArr: ArrayType) =>
      // run compatibility check first to produce all error messages
      val typesCompatible = canWrite(
        wArr.elementType, rArr.elementType, byName, resolver, context + ".element", addError)
      if (wArr.containsNull && !rArr.containsNull) {
        addError(s"Cannot write nullable elements to array of non-nulls: '$context'")
        false
      } else {
        typesCompatible
      }
    case (wMap: MapType, rMap: MapType) =>
      // map keys cannot include data fields not in the read schema without changing equality when
      // read. map keys can be missing fields as long as they are nullable in the read schema.
      // run compatibility check first to produce all error messages
      val keyCompatible = canWrite(
        wMap.keyType, rMap.keyType, byName, resolver, context + ".key", addError)
      val valueCompatible = canWrite(
        wMap.valueType, rMap.valueType, byName, resolver, context + ".value", addError)
      if (wMap.valueContainsNull && !rMap.valueContainsNull) {
        addError(s"Cannot write nullable values to map of non-nulls: '$context'")
        false
      } else {
        keyCompatible && valueCompatible
      }
    case (StructType(writeFields), StructType(readFields)) =>
      // Accumulate into a var instead of short-circuiting so every field
      // problem is reported via addError in a single pass.
      var fieldCompatible = true
      // zip truncates to the shorter side; missing/extra fields are diagnosed
      // separately after this loop.
      readFields.zip(writeFields).zipWithIndex.foreach {
        case ((rField, wField), i) =>
          // Spark-generated names (col1, col2, ...) are treated as positional matches.
          val nameMatch = resolver(wField.name, rField.name) || isSparkGeneratedName(wField.name)
          val fieldContext = s"$context.${rField.name}"
          val typesCompatible = canWrite(
            wField.dataType, rField.dataType, byName, resolver, fieldContext, addError)
          if (byName && !nameMatch) {
            addError(s"Struct '$context' $i-th field name does not match " +
              s"(may be out of order): expected '${rField.name}', found '${wField.name}'")
            fieldCompatible = false
          } else if (!rField.nullable && wField.nullable) {
            addError(s"Cannot write nullable values to non-null field: '$fieldContext'")
            fieldCompatible = false
          } else if (!typesCompatible) {
            // errors are added in the recursive call to canWrite above
            fieldCompatible = false
          }
      }
      if (readFields.size > writeFields.size) {
        val missingFieldsStr = readFields.takeRight(readFields.size - writeFields.size)
          .map(f => s"'${f.name}'").mkString(", ")
        if (missingFieldsStr.nonEmpty) {
          addError(s"Struct '$context' missing fields: $missingFieldsStr")
          fieldCompatible = false
        }
      } else if (writeFields.size > readFields.size) {
        val extraFieldsStr = writeFields.takeRight(writeFields.size - readFields.size)
          .map(f => s"'${f.name}'").mkString(", ")
        addError(s"Cannot write extra fields to struct '$context': $extraFieldsStr")
        fieldCompatible = false
      }
      fieldCompatible
    case (w: AtomicType, r: AtomicType) =>
      if (!Cast.canUpCast(w, r)) {
        addError(s"Cannot safely cast '$context': $w to $r")
        false
      } else {
        true
      }
    // Identical non-atomic leaf types (except NullType, which is never writable).
    case (w, r) if w.sameType(r) && !w.isInstanceOf[NullType] =>
      true
    case (w, r) =>
      addError(s"Cannot write '$context': $w is incompatible with $r")
      false
  }
}
}
| aosagie/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/types/DataType.scala | Scala | apache-2.0 | 17,552 |
package scala.tasty.internal.dotc
package core
import scala.language.implicitConversions
import scala.collection.{mutable, immutable}
import scala.annotation.switch
import Names._
import Decorators.StringDecorator
import scala.collection.breakOut
object StdNames {
/**
 * Base strings from which synthetic names are derived.
 *
 * Subclasses supply the string-to-name lifting; keyword names declared via
 * `kw` are recorded as a side effect and exposed read-only through `keywords`.
 */
abstract class TastyDefinedNames[N <: Name] {
  /** Lifts a raw string into a name of kind `N`; supplied by subclasses. */
  protected implicit def fromString(s: String): N
  /** Converts an existing name (of any kind) into a name of kind `N`. */
  protected def fromName(name: Name): N = fromString(name.toString)
  // Mutable registry populated by `kw`; never mutated after class initialization.
  private val registered = mutable.Set.empty[N]
  /** Records `name` in the keyword registry and returns it unchanged. */
  protected def kw(name: N) = {
    registered += name
    name
  }
  final val keywords: collection.Set[N] = registered
}
/**
 * Scala-specific name constants (keywords, synthetic-name prefixes/suffixes,
 * well-known type and term names, operator encodings), parameterized over the
 * name kind `N` so the same table serves both term and type names.
 *
 * NOTE: many `val`s below reference earlier `val`s (e.g. BITMAP_TRANSIENT uses
 * BITMAP_PREFIX), so declaration order is significant and must not be changed.
 */
abstract class TastyScalaNames[N <: Name] extends TastyDefinedNames[N] {
  // Encodes operator characters (e.g. "<" becomes "$less") before lifting to N.
  protected def encode(s: String): N = fromName(fromString(s).encode)
  // Keywords, need to come first -----------------------
  final val ABSTRACTkw: N = kw("abstract")
  final val CASEkw: N = kw("case")
  final val CLASSkw: N = kw("class")
  final val CATCHkw: N = kw("catch")
  final val DEFkw: N = kw("def")
  final val DOkw: N = kw("do")
  final val ELSEkw: N = kw("else")
  final val EXTENDSkw: N = kw("extends")
  final val FALSEkw: N = kw("false")
  final val FINALkw: N = kw("final")
  final val FINALLYkw: N = kw("finally")
  final val FORkw: N = kw("for")
  final val FORSOMEkw: N = kw("forSome")
  final val IFkw: N = kw("if")
  final val IMPLICITkw: N = kw("implicit")
  final val IMPORTkw: N = kw("import")
  final val LAZYkw: N = kw("lazy")
  final val MACROkw: N = kw("macro")
  final val MATCHkw: N = kw("match")
  final val NEWkw: N = kw("new")
  final val NULLkw: N = kw("null")
  final val OBJECTkw: N = kw("object")
  final val OVERRIDEkw: N = kw("override")
  final val PACKAGEkw: N = kw("package")
  final val PRIVATEkw: N = kw("private")
  final val PROTECTEDkw: N = kw("protected")
  final val RETURNkw: N = kw("return")
  final val SEALEDkw: N = kw("sealed")
  final val SUPERkw: N = kw("super")
  final val THENkw: N = kw("then")
  final val THISkw: N = kw("this")
  final val THROWkw: N = kw("throw")
  final val TRAITkw: N = kw("trait")
  final val TRUEkw: N = kw("true")
  final val TRYkw: N = kw("try")
  final val TYPEkw: N = kw("type")
  final val VALkw: N = kw("val")
  final val VARkw: N = kw("var")
  final val WITHkw: N = kw("with")
  final val WHILEkw: N = kw("while")
  final val YIELDkw: N = kw("yield")
  final val DOTkw: N = kw(".")
  final val USCOREkw: N = kw("_")
  final val COLONkw: N = kw(":")
  final val EQUALSkw: N = kw("=")
  final val ARROWkw: N = kw("=>")
  final val LARROWkw: N = kw("<-")
  final val SUBTYPEkw: N = kw("<:")
  final val VIEWBOUNDkw: N = kw("<%")
  final val SUPERTYPEkw: N = kw(">:")
  final val HASHkw: N = kw("#")
  final val ATkw: N = kw("@")
  // ----- Synthetic-name prefixes and suffixes ---------------
  val ANON_CLASS: N = "$anon"
  val ANON_FUN: N = "$anonfun"
  val BITMAP_PREFIX: N = "bitmap$"
  val BITMAP_NORMAL: N = BITMAP_PREFIX // initialization bitmap for public/protected lazy vals
  val BITMAP_TRANSIENT: N = BITMAP_PREFIX + "trans$" // initialization bitmap for transient lazy vals
  val BITMAP_CHECKINIT: N = BITMAP_PREFIX + "init$" // initialization bitmap for checkinit values
  val BITMAP_CHECKINIT_TRANSIENT: N = BITMAP_PREFIX + "inittrans$" // initialization bitmap for transient checkinit values
  val DEFAULT_GETTER: N = "$default$"
  val DO_WHILE_PREFIX: N = "doWhile$"
  val EMPTY: N = ""
  val EMPTY_PACKAGE: N = Names.EMPTY_PACKAGE.toString
  val EVIDENCE_PARAM_PREFIX: N = "evidence$"
  val EXCEPTION_RESULT_PREFIX: N = "exceptionResult"
  //*
  val EXPAND_SEPARATOR: N = "$$"
  val IMPL_CLASS_SUFFIX: N = "$class"
  val IMPORT: N = "<import>"
  val INTERPRETER_IMPORT_WRAPPER: N = "$iw"
  val INTERPRETER_LINE_PREFIX: N = "line"
  val INTERPRETER_VAR_PREFIX: N = "res"
  val INTERPRETER_WRAPPER_SUFFIX: N = "$object"
  val LOCALDUMMY_PREFIX: N = "<local " // owner of local blocks
  val AVOID_CLASH_SUFFIX: N = "$_avoid_name_clash_$"
  val MODULE_VAR_SUFFIX: N = "$module"
  val USCORE_PARAM_PREFIX: N = "_$"
  val OPS_PACKAGE: N = "<special-ops>"
  val OVERLOADED: N = "<overloaded>"
  val PACKAGE: N = "package"
  val PACKAGE_CLS: N = "package$"
  val PROTECTED_PREFIX: N = "protected$"
  val PROTECTED_SET_PREFIX: N = PROTECTED_PREFIX + "set"
  val ROOT: N = "<root>"
  val SHADOWED: N = "(shadowed)" // tag to be used until we have proper name kinds
  val SINGLETON_SUFFIX: N = ".type"
  val SPECIALIZED_SUFFIX: N = "$sp"
  val SUPER_PREFIX: N = "super$"
  val WHILE_PREFIX: N = "while$"
  val DEFAULT_EXCEPTION_NAME: N = "ex$"
  val INITIALIZER_PREFIX: N = "initial$"
  val COMPANION_MODULE_METHOD: N = "companion$module"
  val COMPANION_CLASS_METHOD: N = "companion$class"
  val TRAIT_SETTER_SEPARATOR: N = "$_setter_$"
  // value types (and AnyRef) are all used as terms as well
  // as (at least) arguments to the @specialize annotation.
  final val Boolean: N = "Boolean"
  final val Byte: N = "Byte"
  final val Char: N = "Char"
  final val Double: N = "Double"
  final val Float: N = "Float"
  final val Int: N = "Int"
  final val Long: N = "Long"
  final val Short: N = "Short"
  final val Unit: N = "Unit"
  final val ScalaValueNames: scala.List[N] =
    scala.List(Byte, Char, Short, Int, Long, Float, Double, Boolean, Unit)
  // some types whose companions we utilize
  final val AnyRef: N = "AnyRef"
  final val Array: N = "Array"
  final val List: N = "List"
  final val Seq: N = "Seq"
  final val Symbol: N = "Symbol"
  final val ClassTag: N = "ClassTag"
  final val classTag: N = "classTag"
  final val WeakTypeTag: N = "WeakTypeTag"
  final val TypeTag : N = "TypeTag"
  final val typeTag: N = "typeTag"
  final val Expr: N = "Expr"
  final val String: N = "String"
  final val Annotation: N = "Annotation"
  // fictions we use as both types and terms
  final val ERROR: N = "<error>"
  final val ERRORenc: N = encode("<error>")
  final val NO_NAME: N = "<none>" // formerly NOSYMBOL
  final val WILDCARD: N = "_"
  // ----- Type names -----------------------------------------
  final val BYNAME_PARAM_CLASS: N = "<byname>"
  final val EQUALS_PATTERN: N = "<equals>"
  final val LOCAL_CHILD: N = "<local child>"
  final val REPEATED_PARAM_CLASS: N = "<repeated>"
  final val WILDCARD_STAR: N = "_*"
  final val REIFY_TREECREATOR_PREFIX: N = "$treecreator"
  final val REIFY_TYPECREATOR_PREFIX: N = "$typecreator"
  final val LAMBDA_ARG_PREFIX: N = "HK$"
  final val LAMBDA_ARG_PREFIXhead: Char = LAMBDA_ARG_PREFIX.head
  final val Any: N = "Any"
  final val AnyVal: N = "AnyVal"
  final val ExprApi: N = "ExprApi"
  final val Mirror: N = "Mirror"
  final val Nothing: N = "Nothing"
  final val Null: N = "Null"
  final val Object: N = "Object"
  final val PartialFunction: N = "PartialFunction"
  final val PrefixType: N = "PrefixType"
  final val Product: N = "Product"
  final val Serializable: N = "Serializable"
  final val Singleton: N = "Singleton"
  final val Throwable: N = "Throwable"
  final val ClassfileAnnotation: N = "ClassfileAnnotation"
  final val ClassManifest: N = "ClassManifest"
  final val Enum: N = "Enum"
  final val Group: N = "Group"
  final val Tree: N = "Tree"
  final val Type : N = "Type"
  final val TypeTree: N = "TypeTree"
  // Annotation simple names, used in Namer
  final val BeanPropertyAnnot: N = "BeanProperty"
  final val BooleanBeanPropertyAnnot: N = "BooleanBeanProperty"
  final val bridgeAnnot: N = "bridge"
  // Classfile Attributes
  final val AnnotationDefaultATTR: N = "AnnotationDefault"
  final val BridgeATTR: N = "Bridge"
  final val ClassfileAnnotationATTR: N = "RuntimeInvisibleAnnotations" // RetentionPolicy.CLASS. Currently not used (Apr 2009).
  final val CodeATTR: N = "Code"
  final val ConstantValueATTR: N = "ConstantValue"
  final val DeprecatedATTR: N = "Deprecated"
  final val ExceptionsATTR: N = "Exceptions"
  final val InnerClassesATTR: N = "InnerClasses"
  final val LineNumberTableATTR: N = "LineNumberTable"
  final val LocalVariableTableATTR: N = "LocalVariableTable"
  final val RuntimeAnnotationATTR: N = "RuntimeVisibleAnnotations" // RetentionPolicy.RUNTIME
  final val RuntimeParamAnnotationATTR: N = "RuntimeVisibleParameterAnnotations" // RetentionPolicy.RUNTIME (annotations on parameters)
  final val ScalaATTR: N = "Scala"
  final val ScalaSignatureATTR: N = "ScalaSig"
  final val TASTYATTR: N = "TASTY"
  final val SignatureATTR: N = "Signature"
  final val SourceFileATTR: N = "SourceFile"
  final val SyntheticATTR: N = "Synthetic"
  // ----- Term names -----------------------------------------
  // Compiler-internal
  val ANYname: N = "<anyname>"
  val CONSTRUCTOR: N = Names.CONSTRUCTOR.toString
  val DEFAULT_CASE: N = "defaultCase$"
  val EVT2U: N = "evt2u$"
  val EQEQ_LOCAL_VAR: N = "eqEqTemp$"
  val FAKE_LOCAL_THIS: N = "this$"
  val LAZY_LOCAL: N = "$lzy"
  val LAZY_LOCAL_INIT: N = "$lzyINIT"
  val LAZY_FIELD_OFFSET: N = "OFFSET$"
  val LAZY_SLOW_SUFFIX: N = "$lzycompute"
  val LOCAL_SUFFIX: N = "$$local"
  val UNIVERSE_BUILD_PREFIX: N = "$u.build."
  val UNIVERSE_BUILD: N = "$u.build"
  val UNIVERSE_PREFIX: N = "$u."
  val UNIVERSE_SHORT: N = "$u"
  val MIRROR_PREFIX: N = "$m."
  val MIRROR_SHORT: N = "$m"
  val MIRROR_UNTYPED: N = "$m$untyped"
  val REIFY_FREE_PREFIX: N = "free$"
  val REIFY_FREE_THIS_SUFFIX: N = "$this"
  val REIFY_FREE_VALUE_SUFFIX: N = "$value"
  val REIFY_SYMDEF_PREFIX: N = "symdef$"
  val OUTER: N = "$outer"
  val OUTER_LOCAL: N = "$outer "
  val OUTER_SYNTH: N = "<outer>" // emitted by virtual pattern matcher, replaced by outer accessor in explicitouter
  val REFINE_CLASS: N = "<refinement>"
  val ROOTPKG: N = "_root_"
  val SELECTOR_DUMMY: N = "<unapply-selector>"
  val SELF: N = "$this"
  val SETTER_SUFFIX: N = encode("_=")
  val SKOLEM: N = "<skolem>"
  val SPECIALIZED_INSTANCE: N = "specInstance$"
  val THIS: N = "_$this"
  val TRAIT_CONSTRUCTOR: N = "$init$"
  val U2EVT: N = "u2evt$"
  final val Nil: N = "Nil"
  final val Predef: N = "Predef"
  final val ScalaRunTime: N = "ScalaRunTime"
  final val Some: N = "Some"
  // val x_0 : N = "x$0"
  // val x_1 : N = "x$1"
  // val x_2 : N = "x$2"
  // val x_3 : N = "x$3"
  // val x_4 : N = "x$4"
  // val x_5 : N = "x$5"
  // val x_6 : N = "x$6"
  // val x_7 : N = "x$7"
  // val x_8 : N = "x$8"
  // val x_9 : N = "x$9"
  // val _1 : N = "_1"
  // val _2 : N = "_2"
  // val _3 : N = "_3"
  // val _4 : N = "_4"
  // val _5 : N = "_5"
  // val _6 : N = "_6"
  // val _7 : N = "_7"
  // val _8 : N = "_8"
  // val _9 : N = "_9"
  // val _10 : N = "_10"
  // val _11 : N = "_11"
  // val _12 : N = "_12"
  // val _13 : N = "_13"
  // val _14 : N = "_14"
  // val _15 : N = "_15"
  // val _16 : N = "_16"
  // val _17 : N = "_17"
  // val _18 : N = "_18"
  // val _19 : N = "_19"
  // val _20 : N = "_20"
  // val _21 : N = "_21"
  // val _22 : N = "_22"
  val ??? = encode("???")
  val genericWrapArray: N = "genericWrapArray"
  def wrapRefArray: N = "wrapRefArray"
  def wrapXArray(clsName: Name): N = "wrap" + clsName + "Array"
  // Compiler utilized names
  val AnnotatedType: N = "AnnotatedType"
  val AppliedTypeTree: N = "AppliedTypeTree"
  val hkApply: N = "$apply"
  val ArrayAnnotArg: N = "ArrayAnnotArg"
  val Constant: N = "Constant"
  val ConstantType: N = "ConstantType"
  val ExistentialTypeTree: N = "ExistentialTypeTree"
  val Flag : N = "Flag"
  val Ident: N = "Ident"
  val Import: N = "Import"
  val LambdaPrefix: N = "Lambda$"
  val Literal: N = "Literal"
  val LiteralAnnotArg: N = "LiteralAnnotArg"
  val Modifiers: N = "Modifiers"
  val NestedAnnotArg: N = "NestedAnnotArg"
  val NoFlags: N = "NoFlags"
  val NoPrefix: N = "NoPrefix"
  val NoSymbol: N = "NoSymbol"
  val NoType: N = "NoType"
  val Pair: N = "Pair"
  val Ref: N = "Ref"
  val RootPackage: N = "RootPackage"
  val RootClass: N = "RootClass"
  val Select: N = "Select"
  val StringContext: N = "StringContext"
  val This: N = "This"
  val ThisType: N = "ThisType"
  val Tuple2: N = "Tuple2"
  val TYPE_ : N = "TYPE"
  val TypeApply: N = "TypeApply"
  val TypeRef: N = "TypeRef"
  val UNIT : N = "UNIT"
  val add_ : N = "add"
  val annotation: N = "annotation"
  val anyValClass: N = "anyValClass"
  val append: N = "append"
  val apply: N = "apply"
  val applyDynamic: N = "applyDynamic"
  val applyDynamicNamed: N = "applyDynamicNamed"
  val applyOrElse: N = "applyOrElse"
  val args : N = "args"
  val argv : N = "argv"
  val arrayClass: N = "arrayClass"
  val arrayElementClass: N = "arrayElementClass"
  val arrayValue: N = "arrayValue"
  val array_apply : N = "array_apply"
  val array_clone : N = "array_clone"
  val array_length : N = "array_length"
  val array_update : N = "array_update"
  val arraycopy: N = "arraycopy"
  val asTerm: N = "asTerm"
  val asModule: N = "asModule"
  val asMethod: N = "asMethod"
  val asType: N = "asType"
  val asClass: N = "asClass"
  val asInstanceOf_ : N = "asInstanceOf"
  val assert_ : N = "assert"
  val assume_ : N = "assume"
  val box: N = "box"
  val build : N = "build"
  val bytes: N = "bytes"
  val canEqual_ : N = "canEqual"
  val checkInitialized: N = "checkInitialized"
  val ClassManifestFactory: N = "ClassManifestFactory"
  val classOf: N = "classOf"
  val clone_ : N = "clone"
  // val conforms : N = "conforms" // Dotty deviation: no special treatment of conforms, so the occurrence of the name here would cause to unintended implicit shadowing. Should find a less common name for it in Predef.
  val copy: N = "copy"
  val currentMirror: N = "currentMirror"
  val create: N = "create"
  val definitions: N = "definitions"
  val delayedInit: N = "delayedInit"
  val delayedInitArg: N = "delayedInit$body"
  val drop: N = "drop"
  val dummyApply: N = "<dummy-apply>"
  val elem: N = "elem"
  val emptyValDef: N = "emptyValDef"
  val ensureAccessible : N = "ensureAccessible"
  val eq: N = "eq"
  val equalsNumChar : N = "equalsNumChar"
  val equalsNumNum : N = "equalsNumNum"
  val equalsNumObject : N = "equalsNumObject"
  val equals_ : N = "equals"
  val error: N = "error"
  val eval: N = "eval"
  val ex: N = "ex"
  val experimental: N = "experimental"
  val f: N = "f"
  val false_ : N = "false"
  val filter: N = "filter"
  val finalize_ : N = "finalize"
  val find_ : N = "find"
  val flagsFromBits : N = "flagsFromBits"
  val flatMap: N = "flatMap"
  val foreach: N = "foreach"
  val genericArrayOps: N = "genericArrayOps"
  val get: N = "get"
  val getClass_ : N = "getClass"
  val getOrElse: N = "getOrElse"
  val hasNext: N = "hasNext"
  val hashCode_ : N = "hashCode"
  val hash_ : N = "hash"
  val head: N = "head"
  val higherKinds: N = "higherKinds"
  val identity: N = "identity"
  val implicitly: N = "implicitly"
  val in: N = "in"
  val info: N = "info"
  val inlinedEquals: N = "inlinedEquals"
  val isArray: N = "isArray"
  val isDefined: N = "isDefined"
  val isDefinedAt: N = "isDefinedAt"
  val isDefinedAtImpl: N = "$isDefinedAt"
  val isEmpty: N = "isEmpty"
  val isInstanceOf_ : N = "isInstanceOf"
  val java: N = "java"
  val keepUnions: N = "keepUnions"
  val key: N = "key"
  val lang: N = "lang"
  val length: N = "length"
  val lengthCompare: N = "lengthCompare"
  val liftedTree: N = "liftedTree"
  val `macro` : N = "macro"
  val macroThis : N = "_this"
  val macroContext : N = "c"
  val main: N = "main"
  val manifest: N = "manifest"
  val ManifestFactory: N = "ManifestFactory"
  val manifestToTypeTag: N = "manifestToTypeTag"
  val map: N = "map"
  val materializeClassTag: N = "materializeClassTag"
  val materializeWeakTypeTag: N = "materializeWeakTypeTag"
  val materializeTypeTag: N = "materializeTypeTag"
  val mirror : N = "mirror"
  val moduleClass : N = "moduleClass"
  val name: N = "name"
  val ne: N = "ne"
  val newFreeTerm: N = "newFreeTerm"
  val newFreeType: N = "newFreeType"
  val newNestedSymbol: N = "newNestedSymbol"
  val newScopeWith: N = "newScopeWith"
  val next: N = "next"
  val nmeNewTermName: N = "newTermName"
  val nmeNewTypeName: N = "newTypeName"
  val noAutoTupling: N = "noAutoTupling"
  val normalize: N = "normalize"
  val notifyAll_ : N = "notifyAll"
  val notify_ : N = "notify"
  val null_ : N = "null"
  val ofDim: N = "ofDim"
  val origin: N = "origin"
  val prefix : N = "prefix"
  val productArity: N = "productArity"
  val productElement: N = "productElement"
  val productIterator: N = "productIterator"
  val productPrefix: N = "productPrefix"
  val readResolve: N = "readResolve"
  val reflect : N = "reflect"
  val reify : N = "reify"
  val rootMirror : N = "rootMirror"
  val runOrElse: N = "runOrElse"
  val runtime: N = "runtime"
  val runtimeClass: N = "runtimeClass"
  val runtimeMirror: N = "runtimeMirror"
  val sameElements: N = "sameElements"
  val scala_ : N = "scala"
  val selectDynamic: N = "selectDynamic"
  val selectOverloadedMethod: N = "selectOverloadedMethod"
  val selectTerm: N = "selectTerm"
  val selectType: N = "selectType"
  val self: N = "self"
  val seqToArray: N = "seqToArray"
  val setAccessible: N = "setAccessible"
  val setAnnotations: N = "setAnnotations"
  val setSymbol: N = "setSymbol"
  val setType: N = "setType"
  val setTypeSignature: N = "setTypeSignature"
  val splice: N = "splice"
  val staticClass : N = "staticClass"
  val staticModule : N = "staticModule"
  val staticPackage : N = "staticPackage"
  val synchronized_ : N = "synchronized"
  val tail: N = "tail"
  val `then` : N = "then"
  val this_ : N = "this"
  val thisPrefix : N = "thisPrefix"
  val throw_ : N = "throw"
  val toArray: N = "toArray"
  val toList: N = "toList"
  val toObjectArray : N = "toObjectArray"
  val toSeq: N = "toSeq"
  val toString_ : N = "toString"
  val toTypeConstructor: N = "toTypeConstructor"
  val tpe : N = "tpe"
  val tree : N = "tree"
  val true_ : N = "true"
  val typedProductIterator: N = "typedProductIterator"
  val typeTagToManifest: N = "typeTagToManifest"
  val unapply: N = "unapply"
  val unapplySeq: N = "unapplySeq"
  val unbox: N = "unbox"
  val universe: N = "universe"
  val update: N = "update"
  val updateDynamic: N = "updateDynamic"
  val value: N = "value"
  val valueOf : N = "valueOf"
  val values : N = "values"
  val view_ : N = "view"
  val wait_ : N = "wait"
  val withFilter: N = "withFilter"
  val withFilterIfRefutable: N = "withFilterIfRefutable$"
  val wrap: N = "wrap"
  val zero: N = "zero"
  val zip: N = "zip"
  val nothingRuntimeClass: N = "scala.runtime.Nothing$"
  val nullRuntimeClass: N = "scala.runtime.Null$"
  val synthSwitch: N = "$synthSwitch"
  // unencoded operators
  object raw {
    final val AMP : N = "&"
    final val BANG : N = "!"
    final val BAR : N = "|"
    final val DOLLAR: N = "$"
    final val GE: N = ">="
    final val LE: N = "<="
    final val MINUS: N = "-"
    final val NE: N = "!="
    final val PLUS : N = "+"
    final val SLASH: N = "/"
    final val STAR : N = "*"
    final val TILDE: N = "~"
    final val isUnary: Set[Name] = Set(MINUS, PLUS, TILDE, BANG)
  }
  // single-character JVM signature codes used in specialized method names
  object specializedTypeNames {
    final val Boolean: N = "Z"
    final val Byte: N = "B"
    final val Char: N = "C"
    final val Short: N = "S"
    final val Int: N = "I"
    final val Long: N = "J"
    final val Float: N = "F"
    final val Double: N = "D"
    final val Void: N = "V"
    final val Object: N = "L"
    final val prefix: N = "$mc"
    final val suffix: N = "$sp"
  }
  // value-conversion methods
  val toByte: N = "toByte"
  val toShort: N = "toShort"
  val toChar: N = "toChar"
  val toInt: N = "toInt"
  val toLong: N = "toLong"
  val toFloat: N = "toFloat"
  val toDouble: N = "toDouble"
  // primitive operation methods for structural types mostly
  // overlap with the above, but not for these two.
  val toCharacter: N = "toCharacter"
  val toInteger: N = "toInteger"
  def newLazyValSlowComputeName(lzyValName: N) = lzyValName ++ LAZY_SLOW_SUFFIX
  // ASCII names for operators
  val ADD = encode("+")
  val AND = encode("&")
  val ASR = encode(">>")
  val DIV = encode("/")
  val EQ = encode("==")
  val EQL = encode("=")
  val GE = encode(">=")
  val GT = encode(">")
  val HASHHASH = encode("##")
  val LE = encode("<=")
  val LSL = encode("<<")
  val LSR = encode(">>>")
  val LT = encode("<")
  val MINUS = encode("-")
  val MOD = encode("%")
  val MUL = encode("*")
  val NE = encode("!=")
  val OR = encode("|")
  val PLUS = ADD // technically redundant, but ADD looks funny with MINUS
  val SUB = MINUS // ... as does SUB with PLUS
  val XOR = encode("^")
  val ZAND = encode("&&")
  val ZOR = encode("||")
  // unary operators
  val UNARY_PREFIX: N = "unary_"
  val UNARY_~ = encode("unary_~")
  val UNARY_+ = encode("unary_+")
  val UNARY_- = encode("unary_-")
  val UNARY_! = encode("unary_!")
  // Grouped here so Cleanup knows what tests to perform.
  val CommonOpNames = Set[Name](OR, XOR, AND, EQ, NE)
  val ConversionNames = Set[Name](toByte, toChar, toDouble, toFloat, toInt, toLong, toShort)
  val BooleanOpNames = Set[Name](ZOR, ZAND, UNARY_!) ++ CommonOpNames
  val NumberOpNames = (
    Set[Name](ADD, SUB, MUL, DIV, MOD, LSL, LSR, ASR, LT, LE, GE, GT)
      ++ Set(UNARY_+, UNARY_-, UNARY_!)
      ++ ConversionNames
      ++ CommonOpNames
  )
  // names of the runtime helper methods backing primitive operations
  val add: N = "add"
  val complement: N = "complement"
  val divide: N = "divide"
  val multiply: N = "multiply"
  val negate: N = "negate"
  val positive: N = "positive"
  val shiftLogicalRight: N = "shiftLogicalRight"
  val shiftSignedLeft: N = "shiftSignedLeft"
  val shiftSignedRight: N = "shiftSignedRight"
  val subtract: N = "subtract"
  val takeAnd: N = "takeAnd"
  val takeConditionalAnd: N = "takeConditionalAnd"
  val takeConditionalOr: N = "takeConditionalOr"
  val takeModulo: N = "takeModulo"
  val takeNot: N = "takeNot"
  val takeOr: N = "takeOr"
  val takeXor: N = "takeXor"
  val testEqual: N = "testEqual"
  val testGreaterOrEqualThan: N = "testGreaterOrEqualThan"
  val testGreaterThan: N = "testGreaterThan"
  val testLessOrEqualThan: N = "testLessOrEqualThan"
  val testLessThan: N = "testLessThan"
  val testNotEqual: N = "testNotEqual"
  val isBoxedNumberOrBoolean: N = "isBoxedNumberOrBoolean"
  val isBoxedNumber: N = "isBoxedNumber"
  // names of fields caching reflective lookups in structural-type call sites
  val reflPolyCacheName: N = "reflPoly$Cache"
  val reflClassCacheName: N = "reflClass$Cache"
  val reflParamsCacheName: N = "reflParams$Cache"
  val reflMethodCacheName: N = "reflMethod$Cache"
  val reflMethodName: N = "reflMethod$Method"
  private val reflectionCacheNames = Set[N](
    reflPolyCacheName,
    reflClassCacheName,
    reflParamsCacheName,
    reflMethodCacheName,
    reflMethodName
  )
  /** True iff `name` starts with one of the reflection-cache field names above. */
  def isReflectionCacheName(name: Name) = reflectionCacheNames exists (name startsWith _)
}
/**
 * Term-name constants and factory helpers, built on the shared Scala name table.
 */
class TastyScalaTermNames extends TastyScalaNames[TermName] {
  /** Lifts a raw string into a [[TermName]]. */
  protected implicit def fromString(s: String): TermName = termName(s)

  /** Name of the n-th initialization bitmap for the given bitmap prefix. */
  def newBitmapName(bitmapPrefix: TermName, n: Int): TermName = bitmapPrefix ++ n.toString

  /** Name of the (n+1)-th tuple selector, e.g. selectorName(0) == "_1". */
  def selectorName(n: Int): TermName = fromString(s"_${n + 1}")

  /** Names of the compiler-internal primitive array operations. */
  object primitive {
    val arrayApply: TermName = "[]apply"
    val arrayUpdate: TermName = "[]update"
    val arrayLength: TermName = "[]length"
    val names: Set[Name] = Set(arrayApply, arrayUpdate, arrayLength)
  }

  /** True iff `name` denotes one of the primitive array operations. */
  def isPrimitiveName(name: Name) = primitive.names(name)
}
/**
 * Type-name constants and helpers, built on the shared Scala name table.
 */
class TastyScalaTypeNames extends TastyScalaNames[TypeName] {
  /** Lifts a raw string into a [[TypeName]]. */
  protected implicit def fromString(s: String): TypeName = typeName(s)
  /** Name of the i-th synthetic type parameter, e.g. "T0", "T1", ... */
  // FIX: dropped the misplaced `@switch` annotation. `scala.annotation.switch`
  // applies to a match scrutinee expression, not to a method definition, so it
  // had no effect here and was misleading.
  def syntheticTypeParamName(i: Int): TypeName = "T" + i
  //Problem arises (ClassBType.info not yet assigned) if uncomment this def (only if name of the def starts with lambdaTraitName)
  //def lambdaTraitName(vcs: List[Int]): TypeName = LambdaPrefix ++ vcs.map(varianceSuffix).mkString
  /** Single-character suffix encoding variance: N (-1), I (0), P (+1). */
  def varianceSuffix(v: Int): Char = varianceSuffixes.charAt(v + 1)
  val varianceSuffixes = "NIP"
}
/** Standard Java names — keywords, boxed types and `java.lang` members — parameterised
 *  over the name kind `N`. `kw` registers each string as a keyword name. */
abstract class TastyJavaNames[N <: Name] extends TastyDefinedNames[N] {
  // Java language keywords.
  final val ABSTRACTkw: N = kw("abstract")
  final val ASSERTkw: N = kw("assert")
  final val BOOLEANkw: N = kw("boolean")
  final val BREAKkw: N = kw("break")
  final val BYTEkw: N = kw("byte")
  final val CASEkw: N = kw("case")
  final val CATCHkw: N = kw("catch")
  final val CHARkw: N = kw("char")
  final val CLASSkw: N = kw("class")
  final val CONSTkw: N = kw("const")
  final val CONTINUEkw: N = kw("continue")
  final val DEFAULTkw: N = kw("default")
  final val DOkw: N = kw("do")
  final val DOUBLEkw: N = kw("double")
  final val ELSEkw: N = kw("else")
  final val ENUMkw: N = kw("enum")
  final val EXTENDSkw: N = kw("extends")
  final val FINALkw: N = kw("final")
  final val FINALLYkw: N = kw("finally")
  final val FLOATkw: N = kw("float")
  final val FORkw: N = kw("for")
  final val IFkw: N = kw("if")
  final val GOTOkw: N = kw("goto")
  final val IMPLEMENTSkw: N = kw("implements")
  final val IMPORTkw: N = kw("import")
  final val INSTANCEOFkw: N = kw("instanceof")
  final val INTkw: N = kw("int")
  final val INTERFACEkw: N = kw("interface")
  final val LONGkw: N = kw("long")
  final val NATIVEkw: N = kw("native")
  final val NEWkw: N = kw("new")
  final val PACKAGEkw: N = kw("package")
  final val PRIVATEkw: N = kw("private")
  final val PROTECTEDkw: N = kw("protected")
  final val PUBLICkw: N = kw("public")
  final val RETURNkw: N = kw("return")
  final val SHORTkw: N = kw("short")
  final val STATICkw: N = kw("static")
  final val STRICTFPkw: N = kw("strictfp")
  final val SUPERkw: N = kw("super")
  final val SWITCHkw: N = kw("switch")
  final val SYNCHRONIZEDkw: N = kw("synchronized")
  final val THISkw: N = kw("this")
  final val THROWkw: N = kw("throw")
  final val THROWSkw: N = kw("throws")
  final val TRANSIENTkw: N = kw("transient")
  final val TRYkw: N = kw("try")
  final val VOIDkw: N = kw("void")
  final val VOLATILEkw: N = kw("volatile")
  final val WHILEkw: N = kw("while")
  // Fully-qualified names of boxed primitives and other frequently used JDK types.
  final val BoxedBoolean: N = "java.lang.Boolean"
  final val BoxedByte: N = "java.lang.Byte"
  final val BoxedCharacter: N = "java.lang.Character"
  final val BoxedDouble: N = "java.lang.Double"
  final val BoxedFloat: N = "java.lang.Float"
  final val BoxedInteger: N = "java.lang.Integer"
  final val BoxedLong: N = "java.lang.Long"
  final val BoxedNumber: N = "java.lang.Number"
  final val BoxedShort: N = "java.lang.Short"
  final val Class: N = "java.lang.Class"
  final val IOOBException: N = "java.lang.IndexOutOfBoundsException"
  final val InvTargetException: N = "java.lang.reflect.InvocationTargetException"
  final val MethodAsObject: N = "java.lang.reflect.Method"
  final val NPException: N = "java.lang.NullPointerException"
  final val Object: N = "java.lang.Object"
  final val String: N = "java.lang.String"
  final val Throwable: N = "java.lang.Throwable"
  // Reflection / classloading member names.
  final val ForName: N = "forName"
  final val GetCause: N = "getCause"
  final val GetClass: N = "getClass"
  final val GetClassLoader: N = "getClassLoader"
  final val GetComponentType: N = "getComponentType"
  final val GetMethod: N = "getMethod"
  final val Invoke: N = "invoke"
  final val JavaLang: N = "java.lang"
  // Bean-property annotations and serialization marker.
  final val BeanProperty: N = "scala.beans.BeanProperty"
  final val BooleanBeanProperty: N = "scala.beans.BooleanBeanProperty"
  final val JavaSerializable: N = "java.io.Serializable"
}
/** Java names at the term level. */
class TastyJavaTermNames extends TastyJavaNames[TermName] {
  protected def fromString(s: String): TermName = termName(s)
}
/** Java names at the type level. */
class TastyJavaTypeNames extends TastyJavaNames[TypeName] {
  protected def fromString(s: String): TypeName = typeName(s)
}
// Singleton name tables exposed to the rest of the compiler.
val nme: TastyScalaTermNames = new TastyScalaTermNames
val jtpnme: TastyJavaTypeNames = new TastyJavaTypeNames
val tpnme: TastyScalaTypeNames = new TastyScalaTypeNames
}
| VladimirNik/tasty | plugin/src/main/scala/scala/tasty/internal/dotc/core/StdNames.scala | Scala | bsd-3-clause | 33,781 |
package mesosphere.marathon.core.appinfo.impl
import mesosphere.marathon.core.task.Task
import mesosphere.marathon.core.health.Health
import mesosphere.marathon.state.Timestamp
import org.apache.mesos.Protos.TaskState
/** Precalculated task infos for internal calculations. */
private[appinfo] class TaskForStatistics(
    val version: Timestamp,       // run-spec version the task was launched from
    val running: Boolean,         // Mesos reports TASK_RUNNING
    val staging: Boolean,         // staged (no state reported yet, or TASK_STAGING)
    val healthy: Boolean,         // has health results and all of them are alive
    val unhealthy: Boolean,       // at least one health result is not alive
    val maybeLifeTime: Option[Double]) // seconds since start, if the task has started
private[appinfo] object TaskForStatistics {
  /** Converts `tasks` into lightweight [[TaskForStatistics]] snapshots relative to `now`.
    * Tasks without a `launched` component are dropped.
    *
    * @param statuses health-check results per task id; a missing entry means "no health info"
    */
  def forTasks(
    now: Timestamp,
    tasks: Iterable[Task],
    statuses: Map[Task.Id, Seq[Health]]): Iterable[TaskForStatistics] = {

    val nowTs: Long = now.toDateTime.getMillis

    // Maps one task, if launched, to its statistics snapshot.
    def taskForStatistics(task: Task): Option[TaskForStatistics] = {
      task.launched.map { launched =>
        val maybeTaskState = launched.status.mesosStatus.map(_.getState)
        val healths = statuses.getOrElse(task.taskId, Seq.empty)
        // Lifetime in seconds since the task started, if it has started at all.
        val maybeTaskLifeTime = launched.status.startedAt.map { startedAt =>
          (nowTs - startedAt.toDateTime.getMillis) / 1000.0
        }

        new TaskForStatistics(
          version = launched.runSpecVersion,
          running = maybeTaskState.contains(TaskState.TASK_RUNNING),
          // Tasks that are staged do not have the taskState set at all, currently.
          // To make this a bit more robust, we also allow it to be set explicitly.
          staging = maybeTaskState.isEmpty || maybeTaskState.contains(TaskState.TASK_STAGING),
          healthy = healths.nonEmpty && healths.forall(_.alive),
          unhealthy = healths.exists(!_.alive),
          maybeLifeTime = maybeTaskLifeTime
        )
      }
    }

    tasks.iterator.flatMap(taskForStatistics).toVector
  }
}
| timcharper/marathon | src/main/scala/mesosphere/marathon/core/appinfo/impl/TaskForStatistics.scala | Scala | apache-2.0 | 1,756 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.k8s.submit
import java.io.{File, StringWriter}
import java.nio.charset.MalformedInputException
import java.util.Properties
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.io.{Codec, Source}
import io.fabric8.kubernetes.api.model.{ConfigMap, ConfigMapBuilder, KeyToPath}
import org.apache.spark.SparkConf
import org.apache.spark.deploy.k8s.{Config, Constants, KubernetesUtils}
import org.apache.spark.deploy.k8s.Config.{KUBERNETES_DNSNAME_MAX_LENGTH, KUBERNETES_NAMESPACE}
import org.apache.spark.deploy.k8s.Constants.ENV_SPARK_CONF_DIR
import org.apache.spark.internal.Logging
private[spark] object KubernetesClientUtils extends Logging {

  // Config map name can be 63 chars at max.
  /** Builds a config map name by truncating `prefix` so that the full name including
   *  the "-conf-map" suffix fits within the Kubernetes DNS-name length limit. */
  def configMapName(prefix: String): String = {
    val suffix = "-conf-map"
    s"${prefix.take(KUBERNETES_DNSNAME_MAX_LENGTH - suffix.length)}$suffix"
  }

  // Per-submission unique config map names for executor and driver pods.
  val configMapNameExecutor: String = configMapName(s"spark-exec-${KubernetesUtils.uniqueID()}")
  val configMapNameDriver: String = configMapName(s"spark-drv-${KubernetesUtils.uniqueID()}")

  /** Serializes `propertiesMap` into java.util.Properties text format, with a header
   *  comment naming the config map the properties were built for. */
  private def buildStringFromPropertiesMap(configMapName: String,
      propertiesMap: Map[String, String]): String = {
    val properties = new Properties()
    propertiesMap.foreach { case (k, v) =>
      properties.setProperty(k, v)
    }
    val propertiesWriter = new StringWriter()
    properties.store(propertiesWriter,
      s"Java properties built from Kubernetes config map with name: $configMapName")
    propertiesWriter.toString
  }

  /**
   * Build, file -> 'file's content' map of all the selected files in SPARK_CONF_DIR.
   */
  def buildSparkConfDirFilesMap(
      configMapName: String,
      sparkConf: SparkConf,
      resolvedPropertiesMap: Map[String, String]): Map[String, String] = synchronized {
    val loadedConfFilesMap = KubernetesClientUtils.loadSparkConfDirFiles(sparkConf)
    // Add resolved spark conf to the loaded configuration files map.
    if (resolvedPropertiesMap.nonEmpty) {
      val resolvedProperties: String = KubernetesClientUtils
        .buildStringFromPropertiesMap(configMapName, resolvedPropertiesMap)
      loadedConfFilesMap ++ Map(Constants.SPARK_CONF_FILE_NAME -> resolvedProperties)
    } else {
      loadedConfFilesMap
    }
  }

  /** One KeyToPath volume entry per conf file, mounted under its own file name. */
  def buildKeyToPathObjects(confFilesMap: Map[String, String]): Seq[KeyToPath] = {
    confFilesMap.map {
      case (fileName: String, _: String) =>
        val filePermissionMode = 420 // 420 is decimal for octal literal 0644.
        new KeyToPath(fileName, filePermissionMode, fileName)
    }.toList.sortBy(x => x.getKey) // List is sorted to make mocking based tests work
  }

  /**
   * Build a Config Map that will hold the content for environment variable SPARK_CONF_DIR
   * on remote pods.
   */
  def buildConfigMap(configMapName: String, confFileMap: Map[String, String],
      withLabels: Map[String, String] = Map()): ConfigMap = {
    // Namespace may be carried inside the conf file map itself; fall back to the default.
    val configMapNameSpace =
      confFileMap.getOrElse(KUBERNETES_NAMESPACE.key, KUBERNETES_NAMESPACE.defaultValueString)
    new ConfigMapBuilder()
      .withNewMetadata()
        .withName(configMapName)
        .withNamespace(configMapNameSpace)
        .withLabels(withLabels.asJava)
        .endMetadata()
      .withImmutable(true)
      .addToData(confFileMap.asJava)
      .build()
  }

  /** Orders files ascending by (name length + content length); the initial sort by
   *  file path makes the result deterministic when sizes tie (the second sort is stable). */
  private def orderFilesBySize(confFiles: Seq[File]): Seq[File] = {
    val fileToFileSizePairs = confFiles.map(f => (f, f.getName.length + f.length()))
    // sort first by name and then by length, so that during tests we have consistent results.
    fileToFileSizePairs.sortBy(f => f._1).sortBy(f => f._2).map(_._1)
  }

  // exposed for testing
  /** Loads the eligible files of SPARK_CONF_DIR (or $spark.home/conf) into a
   *  fileName -> content map, skipping files that would exceed CONFIG_MAP_MAXSIZE
   *  and files that are not valid UTF-8. */
  private[submit] def loadSparkConfDirFiles(conf: SparkConf): Map[String, String] = {
    val confDir = Option(conf.getenv(ENV_SPARK_CONF_DIR)).orElse(
      conf.getOption("spark.home").map(dir => s"$dir/conf"))
    val maxSize = conf.get(Config.CONFIG_MAP_MAXSIZE)
    if (confDir.isDefined) {
      val confFiles: Seq[File] = listConfFiles(confDir.get, maxSize)
      // Smallest files first, so we keep as many files as possible under the size cap.
      val orderedConfFiles = orderFilesBySize(confFiles)
      var truncatedMapSize: Long = 0
      val truncatedMap = mutable.HashMap[String, String]()
      val skippedFiles = mutable.HashSet[String]()
      var source: Source = Source.fromString("") // init with empty source.
      for (file <- orderedConfFiles) {
        try {
          source = Source.fromFile(file)(Codec.UTF8)
          val (fileName, fileContent) = file.getName -> source.mkString
          if ((truncatedMapSize + fileName.length + fileContent.length) < maxSize) {
            truncatedMap.put(fileName, fileContent)
            truncatedMapSize = truncatedMapSize + (fileName.length + fileContent.length)
          } else {
            skippedFiles.add(fileName)
          }
        } catch {
          case e: MalformedInputException =>
            // Non-UTF-8 files are skipped rather than failing the submission.
            logWarning(
              s"Unable to read a non UTF-8 encoded file ${file.getAbsolutePath}. Skipping...", e)
            None
        } finally {
          source.close()
        }
      }
      if (truncatedMap.nonEmpty) {
        logInfo(s"Spark configuration files loaded from $confDir :" +
          s" ${truncatedMap.keys.mkString(",")}")
      }
      if (skippedFiles.nonEmpty) {
        logWarning(s"Skipped conf file(s) ${skippedFiles.mkString(",")}, due to size constraint." +
          s" Please see, config: `${Config.CONFIG_MAP_MAXSIZE.key}` for more details.")
      }
      truncatedMap.toMap
    } else {
      Map.empty[String, String]
    }
  }

  /** Lists the regular files of `confDir`, excluding binaries/oversized files and
   *  spark conf/template files (the spark conf is resolved separately). */
  private def listConfFiles(confDir: String, maxSize: Long): Seq[File] = {
    // At the moment configmaps do not support storing binary content (i.e. skip jar,tar,gzip,zip),
    // and configMaps do not allow for size greater than 1.5 MiB(configurable).
    // https://etcd.io/docs/v3.4.0/dev-guide/limit/
    def testIfTooLargeOrBinary(f: File): Boolean = (f.length() + f.getName.length > maxSize) ||
      f.getName.matches(".*\\\\.(gz|zip|jar|tar)")
    // We exclude all the template files and user provided spark conf or properties,
    // Spark properties are resolved in a different step.
    def testIfSparkConfOrTemplates(f: File) = f.getName.matches(".*\\\\.template") ||
      f.getName.matches("spark.*(conf|properties)")
    val fileFilter = (f: File) => {
      f.isFile && !testIfTooLargeOrBinary(f) && !testIfSparkConfOrTemplates(f)
    }
    val confFiles: Seq[File] = {
      val dir = new File(confDir)
      if (dir.isDirectory) {
        dir.listFiles.filter(x => fileFilter(x)).toSeq
      } else {
        Nil
      }
    }
    confFiles
  }
}
| ueshin/apache-spark | resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/KubernetesClientUtils.scala | Scala | apache-2.0 | 7,422 |
/*
* This file is part of EasyForger which is released under GPLv3 License.
* See file LICENSE.txt or go to http://www.gnu.org/licenses/gpl-3.0.en.html for full license details.
*/
package com.easyforger.recipes
import net.minecraftforge.fml.common.registry.GameRegistry
object Crafting {
  /** Registers each recipe with Forge's [[GameRegistry]]: recipes with a defined
   *  shape go through `addRecipe` with the computed Minecraft parameter array,
   *  shapeless ones through `addShapelessRecipe` with their source item stacks. */
  def crafting(craftingRecipes: CraftingRecipe*): Unit =
    for (recipe <- craftingRecipes) {
      val result = recipe.result
      if (recipe.shape.isDefined)
        GameRegistry.addRecipe(result.result, RecipeOps.calcMCParamsArray(recipe): _*)
      else
        GameRegistry.addShapelessRecipe(result.result, recipe.sources.map(_.itemStack).toArray: _*)
    }
}
| easyforger/easyforger | src/main/scala/com/easyforger/recipes/Crafting.scala | Scala | gpl-3.0 | 663 |
/*
* Copyright 2014 Treode, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.treode.disk.stubs
import com.treode.async.Async
import com.treode.disk.{DiskLaunch, DiskRecovery}
/** Recovery phase for the stub disk system: like [[DiskRecovery]], but reattaches
 *  an in-memory [[StubDiskDrive]] instead of real disk paths. */
trait StubDiskRecovery extends DiskRecovery {

  /** Reattaches the given stub drive, asynchronously yielding the launch phase. */
  def reattach (disk: StubDiskDrive): Async [DiskLaunch]
}
| Treode/store | disk/stub/com/treode/disk/stubs/StubDiskRecovery.scala | Scala | apache-2.0 | 815 |
/*
* Copyright (C) FuseSource, Inc.
* http://fusesource.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.fusesource.fabric.api.monitor
/**
* Creates a Poller for a given DataSourceDTO
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
trait PollerFactory {
  /** JAXB package associated with this factory's DTOs. */
  def jaxb_package: String

  /** True if this factory can build a [[Poller]] for the given data source. */
  def accepts(source: DataSourceDTO): Boolean

  /** Creates a [[Poller]] for `source`. */
  def create(source: DataSourceDTO): Poller
}
/**
* Capable of polling for a value
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
trait Poller {
  /** Releases any resources held by this poller. */
  def close: Unit

  /** The data source this poller reads from. */
  def source: DataSourceDTO

  /** Polls the data source once and returns the current value. */
  def poll: Double
}
package controllers.backend
import com.google.inject.ImplementedBy
import javax.inject.Inject
import scala.collection.immutable
import scala.concurrent.Future
import com.overviewdocs.database.Database
import com.overviewdocs.metadata.MetadataSchema
import com.overviewdocs.models.{ApiToken,DocumentSet,DocumentSetUser,View}
import com.overviewdocs.models.tables.{ApiTokens,DocumentSetUsers,DocumentSets,Plugins,Views}
import models.pagination.{Page,PageRequest}
import models.tables.Users
/** Persistence operations on DocumentSets and their ownership/visibility. */
@ImplementedBy(classOf[DbDocumentSetBackend])
trait DocumentSetBackend {
  /** Creates a DocumentSet, a DocumentSetUser, and one View per autocreate Plugin.
    *
    * Throws an error if something unfathomable happens.
    */
  def create(attributes: DocumentSet.CreateAttributes, userEmail: String): Future[DocumentSet]

  /** Sets whether the DocumentSet is public.
    *
    * Ignores a missing DocumentSet.
    */
  def updatePublic(documentSetId: Long, public: Boolean): Future[Unit]

  /** Sets whether the DocumentSet is deleted.
    *
    * Ignores a missing DocumentSet.
    */
  def updateDeleted(documentSetId: Long, deleted: Boolean): Future[Unit]

  /** Sets a new MetadataSchema on a DocumentSet.
    *
    * Ignores a missing DocumentSet.
    */
  def updateMetadataSchema(documentSetId: Long, schema: MetadataSchema): Future[Unit]

  /** Finds a Page of DocumentSets for a User.
    *
    * The DocumentSets will be sorted by createdAt, newest to oldest.
    *
    * DocumentSets for which the User is a Viewer will not be returned.
    */
  def indexPageByOwner(email: String, pageRequest: PageRequest): Future[Page[DocumentSet]]

  /** Finds all DocumentSets viewed by a User, with their owners.
    *
    * The DocumentSets will be sorted by createdAt, newest to oldest.
    *
    * DocumentSets for which the User is an Owner will not be returned.
    */
  def indexByViewerEmail(email: String): Future[immutable.Seq[(DocumentSet,String)]]

  /** Finds all public DocumentSets, with their owners.
    *
    * The DocumentSets will be sorted by createdAt, newest to oldest.
    */
  def indexPublic: Future[immutable.Seq[(DocumentSet,String)]]

  /** Returns a single DocumentSet. */
  def show(documentSetId: Long): Future[Option[DocumentSet]]

  /** Returns the number of document sets owned (not viewed) by the given user. */
  def countByOwnerEmail(userEmail: String): Future[Int]
}
/** Slick-backed implementation of [[DocumentSetBackend]]. Queries are pre-compiled
 *  where possible (the `Compiled` vals below) to avoid re-planning on every call. */
class DbDocumentSetBackend @Inject() (
  val database: Database
) extends DocumentSetBackend with DbBackend {
  import database.api._
  import database.executionContext

  // The "owner" role marker used throughout the queries below.
  private val Owner = DocumentSetUser.Role(true)

  /** Inserts the DocumentSet, its owner row, and one (ApiToken, View) pair per
   *  autocreate plugin, all in a single transaction. */
  override def create(attributes: DocumentSet.CreateAttributes, userEmail: String) = {
    database.seq(autocreatePlugins).flatMap { plugins =>
      val queries = for {
        documentSet <- documentSetInserter.+=(attributes)
        _ <- documentSetUserInserter.+=(DocumentSetUser(documentSet.id, userEmail, Owner))
        apiTokens <- apiTokenInserter.++=(plugins.map { plugin =>
          ApiToken.generate(userEmail, Some(documentSet.id), "[plugin-autocreate] " + plugin.name)
        })
        _ <- viewInserter.++=(plugins.zip(apiTokens).map { case (plugin, apiToken) =>
          (documentSet.id, View.CreateAttributes(plugin.url, plugin.serverUrlFromPlugin, apiToken.token, plugin.name))
        })
      } yield documentSet

      database.run(queries.transactionally)
    }
  }

  override def show(documentSetId: Long) = database.option(byIdCompiled(documentSetId))

  override def indexPageByOwner(email: String, pageRequest: PageRequest) = {
    page(
      pageByOwnerCompiled(email, pageRequest.offset, pageRequest.limit),
      countByOwnerCompiled(email),
      pageRequest
    )
  }

  override def indexByViewerEmail(email: String) = {
    database.seq(indexByViewerEmailCompiled(email))
  }

  override def indexPublic = {
    database.seq(indexPublicCompiled)
  }

  override def updatePublic(documentSetId: Long, public: Boolean) = {
    database.runUnit(updatePublicCompiled(documentSetId).update(public))
  }

  override def updateDeleted(documentSetId: Long, deleted: Boolean) = {
    database.runUnit(updateDeletedCompiled(documentSetId).update(deleted))
  }

  override def updateMetadataSchema(documentSetId: Long, metadataSchema: MetadataSchema) = {
    database.runUnit(updateMetadataSchemaCompiled(documentSetId).update(metadataSchema))
  }

  override def countByOwnerEmail(userEmail: String) = {
    database.run(countByOwnerCompiled(userEmail).result)
  }

  // Insert helpers; "returning" makes the insert yield the stored row (with its id).
  protected lazy val apiTokenInserter = (ApiTokens returning ApiTokens)
  protected lazy val documentSetInserter = (DocumentSets.map(_.createAttributes) returning DocumentSets)
  protected lazy val documentSetUserInserter = DocumentSetUsers
  protected lazy val viewInserter = (Views.map((v) => (v.documentSetId, v.createAttributes)) returning Views)

  // Plugins flagged for auto-creation, in their configured order.
  private lazy val autocreatePlugins = Compiled {
    Plugins
      .filter(_.autocreate === true)
      .sortBy((p) => (p.autocreateOrder, p.id))
  }

  private lazy val byIdCompiled = Compiled { (documentSetId: Rep[Long]) =>
    DocumentSets.filter(_.id === documentSetId)
  }

  // DocumentSets the user views (role = viewer), joined with each set's owner email.
  private lazy val indexByViewerEmailCompiled = Compiled { (email: Rep[String]) =>
    val ids = DocumentSetUsers
      .filter(dsu => dsu.userEmail === email && dsu.role === DocumentSetUser.Role(false))
      .map(_.documentSetId)

    DocumentSets
      .filter(_.id in ids)
      .join(DocumentSetUsers).on((ds, dsu) => ds.id === dsu.documentSetId && dsu.role === DocumentSetUser.Role(true))
      .map(t => t._1 -> t._2.userEmail)
      .sortBy(_._1.createdAt.desc)
  }

  // Public DocumentSets with their owner emails. The join on Users restricts the
  // result to owners that still exist in the users table.
  private lazy val indexPublicCompiled = {
    DocumentSets
      .filter(_.isPublic)
      .join(DocumentSetUsers).on((ds, dsu) => ds.id === dsu.documentSetId && dsu.role === DocumentSetUser.Role(true))
      .join(Users).on((dsAndDsu, users) => dsAndDsu._2.userEmail === users.email)
      .map(t => t._1._1 -> t._1._2.userEmail)
      .sortBy(_._1.createdAt.desc)
  }

  private lazy val updatePublicCompiled = Compiled { (documentSetId: Rep[Long]) =>
    DocumentSets.filter(_.id === documentSetId).map(_.isPublic)
  }

  private lazy val updateDeletedCompiled = Compiled { (documentSetId: Rep[Long]) =>
    DocumentSets.filter(_.id === documentSetId).map(_.deleted)
  }

  private lazy val updateMetadataSchemaCompiled = Compiled { (documentSetId: Rep[Long]) =>
    DocumentSets.filter(_.id === documentSetId).map(_.metadataSchema)
  }

  // Ids of the DocumentSets the given user owns.
  private def documentSetIdsByOwner(email: Rep[String]) = {
    DocumentSetUsers
      .filter(_.userEmail === email)
      .filter(_.role === Owner)
      .map(_.documentSetId)
  }

  private lazy val pageByOwnerCompiled = Compiled { (email: Rep[String], offset: ConstColumn[Long], limit: ConstColumn[Long]) =>
    DocumentSets
      .filter(!_.deleted)
      .filter(_.id in documentSetIdsByOwner(email))
      .sortBy(_.createdAt.desc)
      .drop(offset).take(limit)
  }

  private lazy val countByOwnerCompiled = Compiled { email: Rep[String] =>
    DocumentSets
      .filter(!_.deleted)
      .filter(_.id in documentSetIdsByOwner(email))
      .length
  }
}
| overview/overview-server | web/app/controllers/backend/DocumentSetBackend.scala | Scala | agpl-3.0 | 7,133 |
package uk.co.turingatemyhamster
package owl2
package ast
/**
*
*
* @author Matthew Pocock
*/
/** Binds the abstract class-expression types of the propositional-connectives module
 *  to the concrete AST case classes defined in this package. */
trait PropositionalConnectivesAndEnumerationOfIndividualsModuleAst extends owl2.PropositionalConnectivesAndEnumerationOfIndividualsModule {
  // Self-type: must be mixed into the IRI and entities modules, with ClassExpression
  // already fixed to this package's AST type.
  importedModules : owl2.IriModule with owl2.EntitiesLiteralsAnonymousIndividualsModule {
    type ClassExpression = ast.ClassExpression
  } =>

  override final type ObjectUnionOf = ast.ObjectUnionOf
  override final type ObjectComplementOf = ast.ObjectComplementOf
  override final type ObjectOneOf = ast.ObjectOneOf
  override final type ObjectIntersectionOf = ast.ObjectIntersectionOf
}
/** OWL 2 ObjectUnionOf(CE1 ... CEn): union of class expressions. */
case class ObjectUnionOf(classExpressions: List[ClassExpression]) extends ClassExpression

/** OWL 2 ObjectComplementOf(CE): complement of a class expression. */
case class ObjectComplementOf(classExpression: ClassExpression) extends ClassExpression

/** OWL 2 ObjectOneOf(a1 ... an): enumeration of individuals. */
case class ObjectOneOf(individuals: List[Individual]) extends ClassExpression
case class ObjectIntersectionOf(classExpressions: List[ClassExpression]) extends ClassExpression | drdozer/owl2 | core/src/main/scala/uk/co/turingatemyhamster/owl2/ast/PropositionalConnectivesAndEnumerationOfIndividualsModuleAst.scala | Scala | apache-2.0 | 995 |
package go3d
import collection.mutable
def newGame(size: Int): Game = Game(size, newGoban(size), Array(), Map[Int, Array[Move]]())
/** One state of a 3-D Go game: board, move history and captures.
 *  Instances are immutable — makeMove/setStone return new Game values. */
class Game(val size: Int, val goban: Goban, val moves: Array[Move | Pass],
    val captures: Map[Int, Array[Move]]) extends GoGame:

  /** Total number of `color` stones captured so far. */
  def captures(color: Color): Int = captures.values.filter(_(0).color == color).flatten.size

  /** The stones removed by the most recent capture, or empty if none yet.
   *  NOTE(review): relies on `captures.last`, i.e. Map iteration order — confirm the
   *  map implementation preserves insertion order for all sizes. */
  def lastCapture: Array[Move] = if captures.isEmpty then Array() else captures.last._2

  // Games are equal when their boards match and they render identically
  // (the rendering also encodes captures and score).
  override def equals(obj: Any): Boolean =
    obj match
      case g: Game => goban == g.goban && toString == g.toString
      case _ => false

  /** Color at `pos` on the board. */
  def at(pos: Position): Color = goban.at(pos)
  def at(x: Int, y: Int, z: Int): Color = at(Position(x, y, z))

  /** Game ends when the board is full or the last two moves were passes. */
  def isOver: Boolean =
    moves.length >= size * size * size || (
      moves.length >= 2 && moves.last.isInstanceOf[Pass] && moves.init.last.isInstanceOf[Pass]
    )

  /** True if it is `color`'s turn; Black opens the game. */
  def isTurn(color: Color): Boolean =
    if moves.isEmpty then color == Black else color != moves.last.color

  /** Applies a move or pass and returns the resulting game.
   *  Throws (via checkValid) when a Move is illegal. */
  def makeMove(move: Move | Pass): Game =
    move match
      case p: Pass =>
        return Game(size, goban, moves.appended(move), captures)
      case m: Move =>
        checkValid(m)
        val newboard = setStone(m)
        return Game(size, newboard.goban, moves.appended(move), newboard.captures)

  /** Validates turn order, board legality, and the ko rule; throws on violation. */
  def checkValid(move: Move): Unit =
    if !isDifferentPlayer(move.color) then throw WrongTurn(move)
    goban.checkValid(move)
    if isKo(move) then throw Ko(move)

  // Renders the board layer by layer, plus capture markers, the capture history,
  // and the current score.
  override def toString: String =
    var out = "\\n"
    for y <- 0 to size + 1 do
      for z <- 1 to size do
        for x <- 0 to size + 1 do
          out += goban.at(x, y, z)
        if z < size then out += "|"
        else if y == 1 then out += " "+Black.toString*captures(Black)
        else if y == 3 then out += " "+White.toString*captures(White)
      out += "\\n"
    for (move, caps) <- captures.toSeq.sortBy(x => x._1) do out += s"$move: ${caps.toList}\\n"
    out += score.toString
    out

  /** Places a stone and resolves any resulting captures. */
  def setStone(move: Move): Game = doCaptures(move, goban.setStone(move))

  def hasLiberties(move: Move): Boolean = goban.hasLiberties(move)

  def connectedStones(move: Move): Set[Move] = goban.connectedStones(move)

  /** All legal board positions for `color`, or empty when it is not `color`'s turn
   *  or the board is full. */
  def possibleMoves(color: Color): List[Position] =
    if !isDifferentPlayer(color) then return List()
    if moves.size >= size*size*size then return List()
    if moves.isEmpty && color == White then return List()
    if moves.nonEmpty && color == moves.last.color then return List()
    return goban.emptyPositions.toList.filter(isPossibleMove(_, color))

  /** Current score: stones on the board minus own captures, plus empty areas
   *  bounded by a single color. */
  def score: Map[Color, Int] =
    var scores = mutable.Map[Color, Int]().withDefaultValue(0)
    for color <- List(Black, White) do
      for pos <- goban.allPositions if at(pos) == color do scores(color) = scores(color) + 1
      scores(color) = scores(color) - captures(color)
    val emptyAreas = addToConnectedAreas(goban.emptyPositions, Set())
    for area <- emptyAreas do
      boundaryColor(area) match
        case Some(color) => scores(color) = scores(color) + area.size
        case None =>
    return scores.toMap

  // Returns the single color bounding `area`, or None if the boundary is mixed.
  private def boundaryColor(area: Set[Move]): Option[Color] =
    var boundaryColors = mutable.Set[Color]()
    for stone <- area do
      for neighbor <- goban.neighbors(stone.position) if at(neighbor) != stone.color do
        boundaryColors.add(at(neighbor))
    if boundaryColors.size == 1 then return Some(boundaryColors.head)
    return None

  // Recursively groups empty positions into connected areas.
  private def addToConnectedAreas(emptyPositions: Seq[Position], areas: Set[Set[Move]]): Set[Set[Move]] =
    if emptyPositions.isEmpty then return areas
    val connected = connectedStones(Move(emptyPositions.last, Empty))
    return addToConnectedAreas(emptyPositions.dropRight(1), areas + connected)

  // A position is playable if it has an empty neighbor, or placing there is valid.
  private def isPossibleMove(emptyPos: Position, color: Color): Boolean =
    try
      if !goban.hasEmptyNeighbor(emptyPos) then checkValid(Move(emptyPos, color))
    catch case e: IllegalMove => return false
    return true

  // NOTE(review): parameter `pass` is unused and this helper appears unreferenced
  // within this class — candidate for removal; confirm no external callers.
  private def gameOver(pass: Pass): Boolean = moves.nonEmpty && moves.last.isInstanceOf[Pass]

  private def isDifferentPlayer(color: Color): Boolean = moves.isEmpty || moves.last.color != color

  // Simple ko check: the move would immediately retake a single just-captured stone.
  private def isKo(move: Move): Boolean =
    captures.nonEmpty && lastCapture.length == 1 && lastCapture(0) == move

  // Removes opponent groups left without liberties and records them in `captures`.
  private def doCaptures(move: Move, board: Goban): Game =
    val newBoard = captureNeighbors(board, board.neighbors(move.position), move.color)
    val captured = (board-newBoard).toArray
    val newCaptures = if captured.nonEmpty then captures + (moves.length -> captured) else captures
    return Game(size, newBoard, moves, newCaptures)

  // Recursively clears capturable groups adjacent to the played stone.
  private def captureNeighbors(board: Goban, neighbors: Seq[Position], color: Color): Goban =
    if neighbors.isEmpty then return board
    val newBoard = board.checkAndClear(Move(neighbors.last, color))
    return captureNeighbors(newBoard, neighbors.dropRight(1), color)
import scala.language.implicitConversions
// Compiler regression test (Dotty issue #7413). Uses historical experimental syntax
// (`def (x: T).name` extension methods, `given ... as`) on purpose — do not modernise.
trait Fixture[A] extends Conversion[0, A]

trait TestFramework[A]:
  // Extension method on String: registers a test body that receives the fixture implicitly.
  def (testName: String).in(test: Fixture[A] ?=> Unit): Unit = ???

trait Greeter:
  def greet(name: String): String = s"Hello $name"

case class MyFixture(name: String, greeter: Greeter)

object Test1:
  // Implicit conversion from the literal type 0 to Greeter.
  given conv as Conversion[0, Greeter]:
    def apply(x: 0): Greeter = ???

  val g: Greeter = 0

class MyTest extends TestFramework[MyFixture]:
  // `0` converts to MyFixture through the ambient Fixture conversion.
  "say hello" in {
    assert(0.greeter.greet(0.name) == s"Hello ${0.name}")
  }
| som-snytt/dotty | tests/pos/i7413.scala | Scala | apache-2.0 | 539 |
package com.lucaongaro.similaria.lmdb
import scala.language.implicitConversions
import java.nio.ByteBuffer
/** Value object wrapping a record count stored in LMDB. */
case class Count( value: Int ) {
  /** Returns a new Count incremented by `increment` (which may be negative). */
  def +( increment: Int ): Count = {
    Count( value + increment )
  }
}

object Count {
  /** Extractor: decodes the first 4 bytes of `bytes` as a big-endian Int.
    * Returns None when `bytes` is null (e.g. a missing LMDB value).
    * Explicit result type added — extractors and implicits should be annotated. */
  def unapply( bytes: Array[Byte] ): Option[Int] = {
    if ( bytes == null )
      None
    else {
      val value = ByteBuffer.wrap( bytes ).getInt
      Some( value )
    }
  }

  /** Implicit serialization of a Count to its 4-byte big-endian representation.
    * Explicit result type added: implicit definitions require one for predictable
    * implicit resolution. */
  implicit def countToBytes( count: Count ): Array[Byte] = {
    val bb = ByteBuffer.allocate(4)
    bb.putInt( count.value ).array()
  }
}
| lucaong/similaria | src/main/scala/com/lucaongaro/similaria/lmdb/Count.scala | Scala | mit | 520 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.{TypeCheckResult, TypeCoercion}
import org.apache.spark.sql.catalyst.expressions.codegen._
import org.apache.spark.sql.catalyst.expressions.codegen.Block._
import org.apache.spark.sql.types._
// scalastyle:off line.size.limit
@ExpressionDescription(
  usage = "_FUNC_(expr1, expr2, expr3) - If `expr1` evaluates to true, then returns `expr2`; otherwise returns `expr3`.",
  examples = """
    Examples:
      > SELECT _FUNC_(1 < 2, 'a', 'b');
       a
  """,
  since = "1.0.0")
// scalastyle:on line.size.limit
case class If(predicate: Expression, trueValue: Expression, falseValue: Expression)
  extends ComplexTypeMergingExpression {

  // Only the two branch values participate in result-type merging; the predicate
  // must be boolean and does not influence the output type.
  @transient
  override lazy val inputTypesForMerging: Seq[DataType] = {
    Seq(trueValue.dataType, falseValue.dataType)
  }

  override def children: Seq[Expression] = predicate :: trueValue :: falseValue :: Nil
  override def nullable: Boolean = trueValue.nullable || falseValue.nullable

  // Checks the predicate is boolean and both branches share a data type.
  override def checkInputDataTypes(): TypeCheckResult = {
    if (predicate.dataType != BooleanType) {
      TypeCheckResult.TypeCheckFailure(
        "type of predicate expression in If should be boolean, " +
          s"not ${predicate.dataType.catalogString}")
    } else if (!TypeCoercion.haveSameType(inputTypesForMerging)) {
      TypeCheckResult.TypeCheckFailure(s"differing types in '$sql' " +
        s"(${trueValue.dataType.catalogString} and ${falseValue.dataType.catalogString}).")
    } else {
      TypeCheckResult.TypeCheckSuccess
    }
  }

  // Interpreted path: a null predicate falls through to the false branch.
  override def eval(input: InternalRow): Any = {
    if (java.lang.Boolean.TRUE.equals(predicate.eval(input))) {
      trueValue.eval(input)
    } else {
      falseValue.eval(input)
    }
  }

  // Codegen path: mirrors eval() — the false branch also handles a null predicate.
  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    val condEval = predicate.genCode(ctx)
    val trueEval = trueValue.genCode(ctx)
    val falseEval = falseValue.genCode(ctx)

    val code =
      code"""
         |${condEval.code}
         |boolean ${ev.isNull} = false;
         |${CodeGenerator.javaType(dataType)} ${ev.value} = ${CodeGenerator.defaultValue(dataType)};
         |if (!${condEval.isNull} && ${condEval.value}) {
         |  ${trueEval.code}
         |  ${ev.isNull} = ${trueEval.isNull};
         |  ${ev.value} = ${trueEval.value};
         |} else {
         |  ${falseEval.code}
         |  ${ev.isNull} = ${falseEval.isNull};
         |  ${ev.value} = ${falseEval.value};
         |}
      """.stripMargin
    ev.copy(code = code)
  }

  override def toString: String = s"if ($predicate) $trueValue else $falseValue"
  override def sql: String = s"(IF(${predicate.sql}, ${trueValue.sql}, ${falseValue.sql}))"
}
/**
 * Case statements of the form "CASE WHEN a THEN b [WHEN c THEN d]* [ELSE e] END".
 * When a = true, returns b; when c = true, returns d; else returns e.
 *
 * @param branches seq of (branch condition, branch value)
 * @param elseValue optional value for the else branch
 */
// scalastyle:off line.size.limit
@ExpressionDescription(
  usage = "CASE WHEN expr1 THEN expr2 [WHEN expr3 THEN expr4]* [ELSE expr5] END - When `expr1` = true, returns `expr2`; else when `expr3` = true, returns `expr4`; else returns `expr5`.",
  arguments = """
    Arguments:
      * expr1, expr3 - the branch condition expressions should all be boolean type.
      * expr2, expr4, expr5 - the branch value expressions and else value expression should all be
          same type or coercible to a common type.
  """,
  examples = """
    Examples:
      > SELECT CASE WHEN 1 > 0 THEN 1 WHEN 2 > 0 THEN 2.0 ELSE 1.2 END;
       1.0
      > SELECT CASE WHEN 1 < 0 THEN 1 WHEN 2 > 0 THEN 2.0 ELSE 1.2 END;
       2.0
      > SELECT CASE WHEN 1 < 0 THEN 1 WHEN 2 < 0 THEN 2.0 END;
       NULL
  """,
  since = "1.0.1")
// scalastyle:on line.size.limit
case class CaseWhen(
    branches: Seq[(Expression, Expression)],
    elseValue: Option[Expression] = None)
  extends ComplexTypeMergingExpression with Serializable {

  // Children in order: every (condition, value) pair, then the optional else value.
  override def children: Seq[Expression] = branches.flatMap(b => b._1 :: b._2 :: Nil) ++ elseValue

  // both then and else expressions should be considered.
  @transient
  override lazy val inputTypesForMerging: Seq[DataType] = {
    branches.map(_._2.dataType) ++ elseValue.map(_.dataType)
  }

  override def nullable: Boolean = {
    // Result is nullable if any of the branch is nullable, or if the else value is nullable.
    // A missing ELSE yields NULL when nothing matches, hence the `getOrElse(true)`.
    branches.exists(_._2.nullable) || elseValue.map(_.nullable).getOrElse(true)
  }

  /**
   * Validates that all THEN/ELSE values share a common type and that every
   * WHEN condition is boolean.
   */
  override def checkInputDataTypes(): TypeCheckResult = {
    if (TypeCoercion.haveSameType(inputTypesForMerging)) {
      // Make sure all branch conditions are boolean types.
      if (branches.forall(_._1.dataType == BooleanType)) {
        TypeCheckResult.TypeCheckSuccess
      } else {
        val index = branches.indexWhere(_._1.dataType != BooleanType)
        TypeCheckResult.TypeCheckFailure(
          s"WHEN expressions in CaseWhen should all be boolean type, " +
            s"but the ${index + 1}th when expression's type is ${branches(index)._1}")
      }
    } else {
      val branchesStr = branches.map(_._2.dataType).map(dt => s"WHEN ... THEN ${dt.catalogString}")
        .mkString(" ")
      val elseStr = elseValue.map(expr => s" ELSE ${expr.dataType.catalogString}").getOrElse("")
      TypeCheckResult.TypeCheckFailure(
        "THEN and ELSE expressions should all be same type or coercible to a common type," +
          s" got CASE $branchesStr$elseStr END")
    }
  }

  override def eval(input: InternalRow): Any = {
    var i = 0
    val size = branches.size
    // Evaluate conditions in order; only a condition that is exactly TRUE
    // matches (a NULL condition does not), and only the matching value is
    // evaluated.
    while (i < size) {
      if (java.lang.Boolean.TRUE.equals(branches(i)._1.eval(input))) {
        return branches(i)._2.eval(input)
      }
      i += 1
    }
    // No branch matched: evaluate the ELSE value, or yield NULL when absent.
    // (Replaces the former redundant tail-position `return`s and `Option.get`.)
    elseValue.map(_.eval(input)).getOrElse(null)
  }

  override def toString: String = {
    val cases = branches.map { case (c, v) => s" WHEN $c THEN $v" }.mkString
    val elseCase = elseValue.map(" ELSE " + _).getOrElse("")
    "CASE" + cases + elseCase + " END"
  }

  override def sql: String = {
    val cases = branches.map { case (c, v) => s" WHEN ${c.sql} THEN ${v.sql}" }.mkString
    val elseCase = elseValue.map(" ELSE " + _.sql).getOrElse("")
    "CASE" + cases + elseCase + " END"
  }

  /**
   * Code generation for the multi-branch case. The generated evaluation is
   * split into multiple Java methods when it grows too large, with a byte
   * "result state" threaded between them to record whether (and how) a branch
   * already matched.
   */
  private def multiBranchesCodegen(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    // This variable holds the state of the result:
    // -1 means the condition is not met yet and the result is unknown.
    val NOT_MATCHED = -1
    // 0 means the condition is met and result is not null.
    val HAS_NONNULL = 0
    // 1 means the condition is met and result is null.
    val HAS_NULL = 1
    // It is initialized to `NOT_MATCHED`, and if it's set to `HAS_NULL` or `HAS_NONNULL`,
    // We won't go on anymore on the computation.
    val resultState = ctx.freshName("caseWhenResultState")
    // The value must live in a mutable (global) field so the split-out helper
    // methods can all write to it.
    ev.value = JavaCode.global(
      ctx.addMutableState(CodeGenerator.javaType(dataType), ev.value),
      dataType)

    // these blocks are meant to be inside a
    // do {
    //   ...
    // } while (false);
    // loop
    val cases = branches.map { case (condExpr, valueExpr) =>
      val cond = condExpr.genCode(ctx)
      val res = valueExpr.genCode(ctx)
      s"""
         |${cond.code}
         |if (!${cond.isNull} && ${cond.value}) {
         |  ${res.code}
         |  $resultState = (byte)(${res.isNull} ? $HAS_NULL : $HAS_NONNULL);
         |  ${ev.value} = ${res.value};
         |  continue;
         |}
       """.stripMargin
    }

    val elseCode = elseValue.map { elseExpr =>
      val res = elseExpr.genCode(ctx)
      s"""
         |${res.code}
         |$resultState = (byte)(${res.isNull} ? $HAS_NULL : $HAS_NONNULL);
         |${ev.value} = ${res.value};
       """.stripMargin
    }

    val allConditions = cases ++ elseCode

    // This generates code like:
    //   caseWhenResultState = caseWhen_1(i);
    //   if(caseWhenResultState != -1) {
    //     continue;
    //   }
    //   caseWhenResultState = caseWhen_2(i);
    //   if(caseWhenResultState != -1) {
    //     continue;
    //   }
    //   ...
    // and the declared methods are:
    //   private byte caseWhen_1234() {
    //     byte caseWhenResultState = -1;
    //     do {
    //       // here the evaluation of the conditions
    //     } while (false);
    //     return caseWhenResultState;
    //   }
    val codes = ctx.splitExpressionsWithCurrentInputs(
      expressions = allConditions,
      funcName = "caseWhen",
      returnType = CodeGenerator.JAVA_BYTE,
      makeSplitFunction = func =>
        s"""
           |${CodeGenerator.JAVA_BYTE} $resultState = $NOT_MATCHED;
           |do {
           |  $func
           |} while (false);
           |return $resultState;
         """.stripMargin,
      foldFunctions = _.map { funcCall =>
        s"""
           |$resultState = $funcCall;
           |if ($resultState != $NOT_MATCHED) {
           |  continue;
           |}
         """.stripMargin
      }.mkString)

    ev.copy(code =
      code"""
         |${CodeGenerator.JAVA_BYTE} $resultState = $NOT_MATCHED;
         |do {
         |  $codes
         |} while (false);
         |// TRUE if any condition is met and the result is null, or no any condition is met.
         |final boolean ${ev.isNull} = ($resultState != $HAS_NONNULL);
       """.stripMargin)
  }

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    if (branches.length == 1) {
      // If we have only single branch we can use If expression and its codeGen
      If(
        branches(0)._1,
        branches(0)._2,
        elseValue.getOrElse(Literal.create(null, branches(0)._2.dataType))).doGenCode(ctx, ev)
    } else {
      multiBranchesCodegen(ctx, ev)
    }
  }
}
/** Factory methods for CaseWhen. */
object CaseWhen {

  /** Creates a [[CaseWhen]] with an explicit (possibly null) else expression. */
  def apply(branches: Seq[(Expression, Expression)], elseValue: Expression): CaseWhen = {
    CaseWhen(branches, Option(elseValue))
  }

  /**
   * A factory method to facilitate the creation of this expression when used in parsers.
   *
   * @param branches Expressions at even position are the branch conditions, and expressions at odd
   *                 position are branch values.
   */
  def createFromParser(branches: Seq[Expression]): CaseWhen = {
    // Use `Seq` extractors instead of `::` list patterns: `grouped` on a
    // non-List Seq (e.g. a Vector) yields chunks that a `::` pattern would
    // fail to match with a MatchError. This also matches the style of
    // CaseKeyWhen below and avoids binding an unused variable.
    val cases = branches.grouped(2).flatMap {
      case Seq(cond, value) => Some((cond, value))
      case Seq(_) => None
    }.toArray.toSeq // force materialization to make the seq serializable
    // A trailing unpaired expression is the ELSE value.
    val elseValue = if (branches.size % 2 != 0) Some(branches.last) else None
    CaseWhen(cases, elseValue)
  }
}
/**
 * Case statements of the form "CASE a WHEN b THEN c [WHEN d THEN e]* [ELSE f] END".
 * When a = b, returns c; when a = d, returns e; else returns f.
 */
object CaseKeyWhen {
  def apply(key: Expression, branches: Seq[Expression]): CaseWhen = {
    // Pair up (WHEN, THEN) expressions, guarding each value with `key = when`.
    // `collect` silently drops a trailing single-element chunk, which is the
    // ELSE expression and is handled separately below.
    val whenThenPairs = branches
      .grouped(2)
      .collect { case Seq(whenExpr, thenExpr) => (EqualTo(key, whenExpr), thenExpr) }
      .toArray
      .toSeq // force materialization to make the seq serializable
    // An odd number of expressions means the last one is the ELSE branch.
    val defaultBranch = if (branches.size % 2 == 0) None else Some(branches.last)
    CaseWhen(whenThenPairs, defaultBranch)
  }
}
| shuangshuangwang/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/conditionalExpressions.scala | Scala | apache-2.0 | 12,114 |
package com.twitter.finagle.stats
import com.twitter.common.metrics.{Histogram, Metrics}
import com.twitter.finagle.benchmark.StdBenchAnnotations
import com.twitter.ostrich.stats.StatsSummary
import com.twitter.util.events.Sink
import java.util
import org.openjdk.jmh.annotations._
import scala.util.Random
// ./sbt 'project finagle-benchmark' 'run .*StatsReceiverBenchmark.*'
// JMH benchmarks comparing stat creation, recording, counter increment, and
// querying across the Ostrich, Commons Metrics (plain and bucketed), and
// Commons Stats receiver implementations.
@Threads(3)
class StatsReceiverBenchmark extends StdBenchAnnotations {
  import StatsReceiverBenchmark._

  // Creates a fresh histogram-backed stat on the given receiver.
  private[this] def newStat(statRecv: StatsReceiver): Stat =
    statRecv.stat("stats_receiver_histogram")

  // Records the per-thread counter value into `stat`, then advances it so
  // successive calls record distinct values.
  private[this] def add(addState: AddState, stat: Stat): Unit = {
    val i = addState.i
    addState.i += 1
    stat.add(i)
  }

  // ----- Ostrich ------

  @Benchmark
  def newStatOstrich(state: StatsReceiverState): Stat =
    newStat(state.ostrichStatsReceiver)

  @Benchmark
  def addOstrich(addState: AddState, state: StatState): Unit =
    add(addState, state.ostrichStat)

  @Benchmark
  def incrOstrich(state: CounterState): Unit =
    state.ostrichCounter.incr()

  @Benchmark
  def queryOstrich(state: QueryState): StatsSummary =
    state.ostrichGet()

  // ----- Commons Metrics ------

  @Benchmark
  def newStatMetricsCommons(state: StatsReceiverState): Stat =
    newStat(state.metricsStatsReceiver)

  @Benchmark
  def addMetricsCommons(addState: AddState, state: StatState): Unit =
    add(addState, state.metricsStat)

  @Benchmark
  def incrMetricsCommons(state: CounterState): Unit =
    state.metricsCounter.incr()

  @Benchmark
  def queryMetricsCommons(state: QueryState): util.Map[String, Number] =
    state.metricsGet()

  // ----- Commons Metrics (Bucketed) ------

  @Benchmark
  def newStatMetricsBucketed(state: StatsReceiverState): Stat =
    newStat(state.metricsBucketedStatsReceiver)

  @Benchmark
  def addMetricsBucketed(addState: AddState, state: StatState): Unit =
    add(addState, state.metricsBucketedStat)

  @Benchmark
  def queryMetricsBucketed(state: QueryState): util.Map[String, Number] =
    state.metricsBucketedGet()

  // ----- Commons Stats ------
  // NOTE(review): there is no newStat/query benchmark for Commons Stats,
  // unlike the other implementations — presumably intentional; confirm.

  @Benchmark
  def addStatsCommons(addState: AddState, state: StatState): Unit =
    add(addState, state.statsStat)

  @Benchmark
  def incrStatsCommons(state: CounterState): Unit =
    state.statsCounter.incr()
}
object StatsReceiverBenchmark {
  // Shared receiver instances exercised by all benchmark methods. Detached
  // registries keep measurements isolated from any global metrics state.
  private[this] val ostrich = new OstrichStatsReceiver
  private[this] val metrics =
    new MetricsStatsReceiver(
      Metrics.createDetached(),
      Sink.default,
      (name: String) => new Histogram(name))
  private[this] val metricsBucketed =
    new MetricsStatsReceiver(
      Metrics.createDetached(),
      Sink.default,
      (name: String) => new MetricsBucketedHistogram(name))
  private[this] val stats = new CommonsStatsReceiver

  /** Exposes each receiver for the `newStat*` benchmarks. */
  @State(Scope.Benchmark)
  class StatsReceiverState {
    val ostrichStatsReceiver: StatsReceiver = ostrich
    val metricsStatsReceiver: StatsReceiver = metrics
    val metricsBucketedStatsReceiver: StatsReceiver = metricsBucketed
  }

  /** Pre-created stats for the `add*` benchmarks. */
  @State(Scope.Benchmark)
  class StatState {
    val ostrichStat: Stat = ostrich.stat("histo")
    val metricsStat: Stat = metrics.stat("histo")
    val metricsBucketedStat: Stat = metricsBucketed.stat("histo")
    val statsStat: Stat = stats.stat("histo")
  }

  /** Pre-created counters for the `incr*` benchmarks. */
  @State(Scope.Benchmark)
  class CounterState {
    val ostrichCounter: Counter = ostrich.counter("cnt")
    val metricsCounter: Counter = metrics.counter("cnt")
    val statsCounter: Counter = stats.counter("cnt")
  }

  /** Per-thread monotonically increasing value fed into stats. */
  @State(Scope.Thread)
  class AddState {
    var i = 0
  }

  /** Populated histograms queried by the `query*` benchmarks. */
  @State(Scope.Benchmark)
  class QueryState {
    var statName: String = ""
    // Fixed seed keeps the populated data identical across runs.
    val rng = new Random(31415926535897932L)

    def ostrichGet(): StatsSummary = ostrich.repr.get()
    def metricsGet(): util.Map[String, Number] = metrics.registry.sample()
    def metricsBucketedGet(): util.Map[String, Number] = metricsBucketed.registry.sample()

    @Setup(Level.Trial)
    def setup(): Unit = {
      // Feed the same pseudo-random samples into each implementation so the
      // query benchmarks read comparably sized histograms.
      val ostrichStat = ostrich.stat("my_stat")
      val metricsStat = metrics.stat("my_stat")
      val bucketedStat = metricsBucketed.stat("my_stat")
      var n = 1
      while (n <= 100000) {
        val sample = rng.nextInt(n)
        ostrichStat.add(sample)
        metricsStat.add(sample)
        bucketedStat.add(sample)
        n += 1
      }
    }

    @TearDown(Level.Trial)
    def teardown(): Unit = {
      ostrich.repr.clearAll()
    }
  }
}
| kingtang/finagle | finagle-benchmark/src/main/scala/com/twitter/finagle/stats/StatsReceiverBenchmark.scala | Scala | apache-2.0 | 4,328 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.controller.test
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.server.Route
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import spray.json.DefaultJsonProtocol._
import spray.json._
import org.apache.openwhisk.core.controller.WhiskPackagesApi
import org.apache.openwhisk.core.entitlement.Collection
import org.apache.openwhisk.core.entity._
import org.apache.openwhisk.http.{ErrorResponse, Messages}
import scala.language.postfixOps
/**
* Tests Packages API.
*
* Unit tests of the controller service as a standalone component.
* These tests exercise a fresh instance of the service object in memory -- these
* tests do NOT communication with a whisk deployment.
*
* @Idioglossia
* "using Specification DSL to write unit tests, as in should, must, not, be"
* "using Specs2RouteTest DSL to chain HTTP requests for unit testing, as in ~>"
*/
@RunWith(classOf[JUnitRunner])
class PackagesApiTests extends ControllerTestCommon with WhiskPackagesApi {
  /** Packages API tests */
  behavior of "Packages API"

  // Identity and default namespace used by most requests in this suite.
  val creds = WhiskAuthHelpers.newIdentity()
  val namespace = EntityPath(creds.subject.asString)
  val collectionPath = s"/${EntityPath.DEFAULT}/${collection.path}"
  // Generates a unique entity name per call to keep tests isolated.
  def aname() = MakeName.next("packages_tests")
  val parametersLimit = Parameters.sizeLimit

  // Builds the binding annotation the controller attaches to package references.
  private def bindingAnnotation(binding: Binding) = {
    Parameters(WhiskPackage.bindingFieldName, Binding.serdes.write(binding))
  }

  // Asserts the collection's ?count=true endpoint reports `expected` for
  // `user`, retrying to absorb eventual consistency of the backing view.
  def checkCount(path: String = collectionPath, expected: Long, user: Identity = creds) = {
    implicit val tid = transid()
    withClue(s"count did not match") {
      org.apache.openwhisk.utils.retry {
        Get(s"$path?count=true") ~> Route.seal(routes(user)) ~> check {
          status should be(OK)
          responseAs[JsObject].fields(collection.path).convertTo[Long] shouldBe (expected)
        }
      }
    }
  }
  //// GET /packages
  it should "list all packages/references" in {
    implicit val tid = transid()
    // create packages and package bindings, and confirm API lists all of them
    val providers = (1 to 4).map { i =>
      if (i % 2 == 0) {
        WhiskPackage(namespace, aname(), None)
      } else {
        val binding = Some(Binding(namespace.root, aname()))
        WhiskPackage(namespace, aname(), binding)
      }
    }.toList
    providers foreach { put(entityStore, _) }
    waitOnView(entityStore, WhiskPackage, namespace, providers.length)
    checkCount(expected = providers.length)
    org.apache.openwhisk.utils.retry {
      Get(s"$collectionPath") ~> Route.seal(routes(creds)) ~> check {
        status should be(OK)
        val response = responseAs[List[JsObject]]
        providers.length should be(response.length)
        response should contain theSameElementsAs providers.map(_.summaryAsJson)
      }
    }

    {
      // A different subject must not see another namespace's private packages.
      val path = s"/$namespace/${collection.path}"
      val auser = WhiskAuthHelpers.newIdentity()
      checkCount(path, 0, auser)
      Get(path) ~> Route.seal(routes(auser)) ~> check {
        val response = responseAs[List[JsObject]]
        response should be(List.empty) // cannot list packages that are private in another namespace
      }
    }
  }
  it should "list all public packages in explicit namespace excluding bindings" in {
    implicit val tid = transid()
    // create packages and package bindings, set some public and confirm API lists only public packages
    val namespaces = Seq(namespace, EntityPath(aname().toString), EntityPath(aname().toString))
    val providers = Seq(
      WhiskPackage(namespaces(0), aname(), None, publish = true),
      WhiskPackage(namespaces(1), aname(), None, publish = true),
      WhiskPackage(namespaces(2), aname(), None, publish = true))
    val references = Seq(
      WhiskPackage(namespaces(0), aname(), providers(0).bind, publish = true),
      WhiskPackage(namespaces(0), aname(), providers(0).bind, publish = false),
      WhiskPackage(namespaces(0), aname(), providers(1).bind, publish = true),
      WhiskPackage(namespaces(0), aname(), providers(1).bind, publish = false))
    (providers ++ references) foreach { put(entityStore, _) }
    waitOnView(entityStore, WhiskPackage, namespaces(1), 1)
    waitOnView(entityStore, WhiskPackage, namespaces(2), 1)
    waitOnView(entityStore, WhiskPackage, namespaces(0), 1 + 4)

    {
      // The owner sees their own packages plus all of their references.
      val expected = providers.filter(_.namespace == namespace) ++ references
      checkCount(expected = expected.length)
      Get(s"$collectionPath") ~> Route.seal(routes(creds)) ~> check {
        status should be(OK)
        val response = responseAs[List[JsObject]]
        response should have size expected.size
        response should contain theSameElementsAs expected.map(_.summaryAsJson)
      }
    }

    {
      // Another subject only sees published packages, and no bindings at all.
      val path = s"/$namespace/${collection.path}"
      val auser = WhiskAuthHelpers.newIdentity()
      val expected = providers.filter(p => p.namespace == namespace && p.publish) ++
        references.filter(p => p.publish && p.binding == None)
      checkCount(path, expected.length, auser)
      Get(path) ~> Route.seal(routes(auser)) ~> check {
        status should be(OK)
        val response = responseAs[List[JsObject]]
        response should have size expected.size
        response should contain theSameElementsAs expected.map(_.summaryAsJson)
      }
    }
  }
it should "reject list when limit is greater than maximum allowed value" in {
implicit val tid = transid()
val exceededMaxLimit = Collection.MAX_LIST_LIMIT + 1
val response = Get(s"$collectionPath?limit=$exceededMaxLimit") ~> Route.seal(routes(creds)) ~> check {
status should be(BadRequest)
responseAs[String] should include {
Messages.listLimitOutOfRange(Collection.PACKAGES, exceededMaxLimit, Collection.MAX_LIST_LIMIT)
}
}
}
it should "reject list when limit is not an integer" in {
implicit val tid = transid()
val notAnInteger = "string"
val response = Get(s"$collectionPath?limit=$notAnInteger") ~> Route.seal(routes(creds)) ~> check {
status should be(BadRequest)
responseAs[String] should include {
Messages.argumentNotInteger(Collection.PACKAGES, notAnInteger)
}
}
}
it should "reject list when skip is negative" in {
implicit val tid = transid()
val negativeSkip = -1
val response = Get(s"$collectionPath?skip=$negativeSkip") ~> Route.seal(routes(creds)) ~> check {
status should be(BadRequest)
responseAs[String] should include {
Messages.listSkipOutOfRange(Collection.PACKAGES, negativeSkip)
}
}
}
it should "reject list when skip is not an integer" in {
implicit val tid = transid()
val notAnInteger = "string"
val response = Get(s"$collectionPath?skip=$notAnInteger") ~> Route.seal(routes(creds)) ~> check {
status should be(BadRequest)
responseAs[String] should include {
Messages.argumentNotInteger(Collection.PACKAGES, notAnInteger)
}
}
}
  // Disabled: the ?public query parameter is currently not supported.
  ignore should "list all public packages excluding bindings" in {
    implicit val tid = transid()
    // create packages and package bindings, set some public and confirm API lists only public packages
    val namespaces = Seq(namespace, EntityPath(aname().toString), EntityPath(aname().toString))
    val providers = Seq(
      WhiskPackage(namespaces(0), aname(), None, publish = false),
      WhiskPackage(namespaces(1), aname(), None, publish = true),
      WhiskPackage(namespaces(2), aname(), None, publish = true))
    val references = Seq(
      WhiskPackage(namespaces(0), aname(), providers(0).bind, publish = true),
      WhiskPackage(namespaces(0), aname(), providers(0).bind, publish = false),
      WhiskPackage(namespaces(0), aname(), providers(1).bind, publish = true),
      WhiskPackage(namespaces(0), aname(), providers(1).bind, publish = false))
    (providers ++ references) foreach { put(entityStore, _) }
    waitOnView(entityStore, WhiskPackage, namespaces(1), 1)
    waitOnView(entityStore, WhiskPackage, namespaces(2), 1)
    waitOnView(entityStore, WhiskPackage, namespaces(0), 1 + 4)
    Get(s"$collectionPath?public=true") ~> Route.seal(routes(creds)) ~> check {
      status should be(OK)
      val response = responseAs[List[JsObject]]
      val expected = providers filter { _.publish }
      response.length should be >= (expected.length)
      expected forall { p =>
        (response contains p.summaryAsJson) && p.binding == None
      } should be(true)
    }
  }

  // ?public disabled
  ignore should "list all public packages including ones with same name but in different namespaces" in {
    implicit val tid = transid()
    // create packages and package bindings, set some public and confirm API lists only public packages
    val namespaces = Seq(namespace, EntityPath(aname().toString), EntityPath(aname().toString))
    val pkgname = aname()
    val providers = Seq(
      WhiskPackage(namespaces(0), pkgname, None, publish = false),
      WhiskPackage(namespaces(1), pkgname, None, publish = true),
      WhiskPackage(namespaces(2), pkgname, None, publish = true))
    providers foreach { put(entityStore, _) }
    waitOnView(entityStore, WhiskPackage, namespaces(0), 1)
    waitOnView(entityStore, WhiskPackage, namespaces(1), 1)
    waitOnView(entityStore, WhiskPackage, namespaces(2), 1)
    Get(s"$collectionPath?public=true") ~> Route.seal(routes(creds)) ~> check {
      status should be(OK)
      val response = responseAs[List[JsObject]]
      val expected = providers filter { _.publish }
      response.length should be >= (expected.length)
      expected forall { p =>
        (response contains p.summaryAsJson) && p.binding == None
      } should be(true)
    }
  }
  // confirm ?public disabled
  it should "ignore ?public on list all packages" in {
    implicit val tid = transid()
    // NOTE(review): all of the setup below (and the asserting inner Get) is
    // nested inside the check block of this outer request, so the outer
    // response is produced before any packages exist. This looks like an
    // accidental nesting — confirm against the suite's history.
    Get(s"$collectionPath?public=true") ~> Route.seal(routes(creds)) ~> check {
      implicit val tid = transid()
      // create packages and package bindings, set some public and confirm API lists only public packages
      val namespaces = Seq(namespace, EntityPath(aname().toString), EntityPath(aname().toString))
      val pkgname = aname()
      val providers = Seq(
        WhiskPackage(namespaces(0), pkgname, None, publish = true),
        WhiskPackage(namespaces(1), pkgname, None, publish = true),
        WhiskPackage(namespaces(2), pkgname, None, publish = true))
      providers foreach { put(entityStore, _) }
      waitOnView(entityStore, WhiskPackage, namespaces(0), 1)
      waitOnView(entityStore, WhiskPackage, namespaces(1), 1)
      waitOnView(entityStore, WhiskPackage, namespaces(2), 1)
      val expected = providers filter (_.namespace == creds.namespace.name.toPath)
      Get(s"$collectionPath?public=true") ~> Route.seal(routes(creds)) ~> check {
        status should be(OK)
        val response = responseAs[List[JsObject]]
        response.length should be >= (expected.length)
        expected forall { p =>
          (response contains p.summaryAsJson) && p.binding == None
        } should be(true)
      }
    }
  }

  // ?public disabled
  ignore should "reject list all public packages with invalid parameters" in {
    implicit val tid = transid()
    Get(s"$collectionPath?public=true&docs=true") ~> Route.seal(routes(creds)) ~> check {
      status should be(BadRequest)
    }
  }
  //// GET /packages/name
  it should "get package" in {
    implicit val tid = transid()
    val provider = WhiskPackage(namespace, aname(), None)
    put(entityStore, provider)
    Get(s"$collectionPath/${provider.name}") ~> Route.seal(routes(creds)) ~> check {
      status should be(OK)
      val response = responseAs[WhiskPackageWithActions]
      response should be(provider.withActions())
    }
  }

  it should "get package with updated field" in {
    implicit val tid = transid()
    val provider = WhiskPackage(namespace, aname(), None)
    put(entityStore, provider)
    // `updated` field should be compared with a document in DB
    val pkg = get(entityStore, provider.docid, WhiskPackage)
    Get(s"$collectionPath/${provider.name}") ~> Route.seal(routes(creds)) ~> check {
      status should be(OK)
      val response = responseAs[WhiskPackageWithActions]
      response should be(provider.copy(updated = pkg.updated).withActions())
    }
  }

  it should "get package reference for private package in same namespace" in {
    implicit val tid = transid()
    // The reference's parameters override the provider's on key collision ("b").
    val provider = WhiskPackage(namespace, aname(), None, Parameters("a", "A") ++ Parameters("b", "B"))
    val reference = WhiskPackage(namespace, aname(), provider.bind, Parameters("b", "b") ++ Parameters("c", "C"))
    put(entityStore, provider)
    put(entityStore, reference)
    Get(s"$collectionPath/${reference.name}") ~> Route.seal(routes(creds)) ~> check {
      status should be(OK)
      val response = responseAs[WhiskPackageWithActions]
      response should be(reference.inherit(provider.parameters).withActions())
      // this is redundant in case the precedence orders on inherit are changed incorrectly
      response.wp.parameters should be(Parameters("a", "A") ++ Parameters("b", "b") ++ Parameters("c", "C"))
    }
  }

  it should "not get package reference for a private package in other namespace" in {
    implicit val tid = transid()
    val privateCreds = WhiskAuthHelpers.newIdentity()
    val privateNamespace = EntityPath(privateCreds.subject.asString)
    val provider = WhiskPackage(privateNamespace, aname())
    val reference = WhiskPackage(namespace, aname(), provider.bind)
    put(entityStore, provider)
    put(entityStore, reference)
    Get(s"$collectionPath/${reference.name}") ~> Route.seal(routes(creds)) ~> check {
      status should be(Forbidden)
    }
  }
  it should "get package with its actions and feeds" in {
    implicit val tid = transid()
    val provider = WhiskPackage(namespace, aname())
    val action = WhiskAction(provider.namespace.addPath(provider.name), aname(), jsDefault("??"))
    // A feed is an action carrying the "feed" annotation.
    val feed = WhiskAction(
      provider.namespace.addPath(provider.name),
      aname(),
      jsDefault("??"),
      annotations = Parameters(Parameters.Feed, "true"))
    put(entityStore, provider)
    put(entityStore, action)
    put(entityStore, feed)

    // it should "reject get private package from other subject" in {
    val auser = WhiskAuthHelpers.newIdentity()
    Get(s"/$namespace/${collection.path}/${provider.name}") ~> Route.seal(routes(auser)) ~> check {
      status should be(Forbidden)
    }

    Get(s"$collectionPath/${provider.name}") ~> Route.seal(routes(creds)) ~> check {
      status should be(OK)
      val response = responseAs[WhiskPackageWithActions]
      response should be(provider withActions (List(action, feed)))
    }
  }

  it should "get package reference with its actions and feeds" in {
    implicit val tid = transid()
    val provider = WhiskPackage(namespace, aname())
    val reference = WhiskPackage(namespace, aname(), provider.bind)
    val action = WhiskAction(provider.namespace.addPath(provider.name), aname(), jsDefault("??"))
    val feed = WhiskAction(
      provider.namespace.addPath(provider.name),
      aname(),
      jsDefault("??"),
      annotations = Parameters(Parameters.Feed, "true"))
    put(entityStore, provider)
    put(entityStore, reference)
    put(entityStore, action)
    put(entityStore, feed)
    waitOnView(entityStore, WhiskAction, provider.fullPath, 2)

    // it should "reject get package reference from other subject" in {
    val auser = WhiskAuthHelpers.newIdentity()
    Get(s"/$namespace/${collection.path}/${reference.name}") ~> Route.seal(routes(auser)) ~> check {
      status should be(Forbidden)
    }

    // The owner resolves the binding and sees the provider's actions/feeds.
    Get(s"$collectionPath/${reference.name}") ~> Route.seal(routes(creds)) ~> check {
      status should be(OK)
      val response = responseAs[WhiskPackageWithActions]
      response should be(reference withActions List(action, feed))
    }
  }

  it should "not get package reference with its actions and feeds from private package" in {
    implicit val tid = transid()
    val privateCreds = WhiskAuthHelpers.newIdentity()
    val privateNamespace = EntityPath(privateCreds.subject.asString)
    val provider = WhiskPackage(privateNamespace, aname())
    val reference = WhiskPackage(namespace, aname(), provider.bind)
    val action = WhiskAction(provider.namespace.addPath(provider.name), aname(), jsDefault("??"))
    val feed = WhiskAction(
      provider.namespace.addPath(provider.name),
      aname(),
      jsDefault("??"),
      annotations = Parameters(Parameters.Feed, "true"))
    put(entityStore, provider)
    put(entityStore, reference)
    put(entityStore, action)
    put(entityStore, feed)

    // it should "reject get package reference from other subject" in {
    val auser = WhiskAuthHelpers.newIdentity()
    Get(s"/$namespace/${collection.path}/${reference.name}") ~> Route.seal(routes(auser)) ~> check {
      status should be(Forbidden)
    }

    // Even the reference's owner cannot resolve a binding to a private package.
    Get(s"$collectionPath/${reference.name}") ~> Route.seal(routes(creds)) ~> check {
      status should be(Forbidden)
    }
  }
  //// PUT /packages/name
  it should "create package" in {
    implicit val tid = transid()
    val provider = WhiskPackage(namespace, aname(), None, annotations = Parameters("a", "b"))
    // binding annotation should be removed
    val someBindingAnnotation = Parameters(WhiskPackage.bindingFieldName, "???")
    val content = WhiskPackagePut(annotations = Some(someBindingAnnotation ++ Parameters("a", "b")))
    Put(s"$collectionPath/${provider.name}", content) ~> Route.seal(routes(creds)) ~> check {
      deletePackage(provider.docid)
      status should be(OK)
      val response = responseAs[WhiskPackage]
      checkWhiskEntityResponse(response, provider)
    }
  }

  it should "reject create/update package when package name is reserved" in {
    implicit val tid = transid()
    // Both create (overwrite=false) and update (overwrite=true) must fail.
    Set(true, false) foreach { overwrite =>
      RESERVED_NAMES foreach { reservedName =>
        val provider = WhiskPackage(namespace, EntityName(reservedName), None)
        val content = WhiskPackagePut()
        Put(s"$collectionPath/${provider.name}?overwrite=$overwrite", content) ~> Route.seal(routes(creds)) ~> check {
          status should be(BadRequest)
          responseAs[ErrorResponse].error shouldBe Messages.packageNameIsReserved(reservedName)
        }
      }
    }
  }

  it should "not allow package update of pre-existing package with a reserved" in {
    implicit val tid = transid()
    RESERVED_NAMES foreach { reservedName =>
      val provider = WhiskPackage(namespace, EntityName(reservedName), None)
      put(entityStore, provider)
      val content = WhiskPackagePut()
      Put(s"$collectionPath/${provider.name}?overwrite=true", content) ~> Route.seal(routes(creds)) ~> check {
        status should be(BadRequest)
        responseAs[ErrorResponse].error shouldBe Messages.packageNameIsReserved(reservedName)
      }
    }
  }

  it should "allow package get/delete for pre-existing package with a reserved name" in {
    implicit val tid = transid()
    RESERVED_NAMES foreach { reservedName =>
      val provider = WhiskPackage(namespace, EntityName(reservedName), None)
      // garbageCollect=false: the Delete below removes the document itself.
      put(entityStore, provider, garbageCollect = false)
      val content = WhiskPackagePut()
      Get(s"$collectionPath/${provider.name}") ~> Route.seal(routes(creds)) ~> check {
        status should be(OK)
        responseAs[WhiskPackage] shouldBe provider
      }
      Delete(s"$collectionPath/${provider.name}") ~> Route.seal(routes(creds)) ~> check {
        status should be(OK)
      }
    }
  }
// Creating a binding (package reference) whose target is addressed with an
// explicit namespace. Also folds in the authorization check for a foreign user.
it should "create package reference with explicit namespace" in {
  implicit val tid = transid()
  val provider = WhiskPackage(namespace, aname())
  val reference = WhiskPackage(
    namespace,
    aname(),
    provider.bind,
    annotations = bindingAnnotation(provider.bind.get) ++ Parameters("a", "b"))
  // binding annotation should be removed and set by controller
  val someBindingAnnotation = Parameters(WhiskPackage.bindingFieldName, "???")
  val content = WhiskPackagePut(reference.binding, annotations = Some(someBindingAnnotation ++ Parameters("a", "b")))
  put(entityStore, provider)
  // it should "reject create package reference in some other namespace" in {
  val auser = WhiskAuthHelpers.newIdentity()
  // A different identity may not create entities in this namespace.
  Put(s"/$namespace/${collection.path}/${reference.name}", content) ~> Route.seal(routes(auser)) ~> check {
    status should be(Forbidden)
  }
  // The owner can create the binding; the bogus "???" binding annotation from
  // the payload is expected to be replaced by the controller.
  Put(s"/$namespace/${collection.path}/${reference.name}", content) ~> Route.seal(routes(creds)) ~> check {
    deletePackage(reference.docid)
    status should be(OK)
    val response = responseAs[WhiskPackage]
    checkWhiskEntityResponse(response, reference)
  }
}
// A binding may not target a private package owned by another subject.
it should "not create package reference from private package in another namespace" in {
  implicit val tid = transid()
  val privateCreds = WhiskAuthHelpers.newIdentity()
  val privateNamespace = EntityPath(privateCreds.subject.asString)
  val provider = WhiskPackage(privateNamespace, aname())
  val reference = WhiskPackage(namespace, aname(), provider.bind)
  // binding annotation should be removed and set by controller
  val content = WhiskPackagePut(reference.binding)
  put(entityStore, provider)
  Put(s"/$namespace/${collection.path}/${reference.name}", content) ~> Route.seal(routes(creds)) ~> check {
    status should be(Forbidden)
  }
}
// A binding using the default-namespace marker should resolve to the caller's
// own namespace; the controller fills in the binding annotation on the result.
it should "create package reference with implicit namespace" in {
  implicit val tid = transid()
  val provider = WhiskPackage(namespace, aname())
  val reference = WhiskPackage(namespace, aname(), Some(Binding(EntityPath.DEFAULT.root, provider.name)))
  val content = WhiskPackagePut(reference.binding)
  put(entityStore, provider)
  Put(s"$collectionPath/${reference.name}", content) ~> Route.seal(routes(creds)) ~> check {
    deletePackage(reference.docid)
    status should be(OK)
    val response = responseAs[WhiskPackage]
    checkWhiskEntityResponse(
      response,
      WhiskPackage(
        reference.namespace,
        reference.name,
        provider.bind,
        annotations = bindingAnnotation(provider.bind.get)))
  }
}
// Binding to a package that does not exist in the caller's own namespace is a
// client error with an explicit message.
it should "reject create package reference when referencing non-existent package in same namespace" in {
  implicit val tid = transid()
  val binding = Some(Binding(namespace.root, aname()))
  val content = WhiskPackagePut(binding)
  Put(s"$collectionPath/${aname()}", content) ~> Route.seal(routes(creds)) ~> check {
    status should be(BadRequest)
    responseAs[ErrorResponse].error should include(Messages.bindingDoesNotExist)
  }
}
// Binding to a non-existent package in a foreign namespace is rejected as
// Forbidden (the caller cannot even probe existence there).
it should "reject create package reference when referencing non-existent package in another namespace" in {
  implicit val tid = transid()
  val privateCreds = WhiskAuthHelpers.newIdentity()
  val privateNamespace = EntityPath(privateCreds.subject.asString)
  val binding = Some(Binding(privateNamespace.root, aname()))
  val content = WhiskPackagePut(binding)
  Put(s"$collectionPath/${aname()}", content) ~> Route.seal(routes(creds)) ~> check {
    status should be(Forbidden)
  }
}
// A binding may not target another binding (only real packages are valid).
it should "reject create package reference when referencing a non-package" in {
  implicit val tid = transid()
  val provider = WhiskPackage(namespace, aname())
  val reference = WhiskPackage(namespace, aname(), provider.bind)
  val content = WhiskPackagePut(Some(Binding(reference.namespace.root, reference.name)))
  put(entityStore, provider)
  put(entityStore, reference)
  Put(s"$collectionPath/${aname()}", content) ~> Route.seal(routes(creds)) ~> check {
    status should be(BadRequest)
    responseAs[ErrorResponse].error should include(Messages.bindingCannotReferenceBinding)
  }
}
// Builds an annotations map just past the configured size limit and expects a
// 413 with a precise size-error message.
it should "reject create package reference when annotations are too big" in {
  implicit val tid = transid()
  // Enough distinct 10-digit keys, each with a 10-char value, to exceed the limit.
  val keys: List[Long] =
    List.range(Math.pow(10, 9) toLong, (parametersLimit.toBytes / 20 + Math.pow(10, 9) + 2) toLong)
  val annotations = keys map { key =>
    Parameters(key.toString, "a" * 10)
  } reduce (_ ++ _)
  val content = s"""{"annotations":$annotations}""".parseJson.asJsObject
  Put(s"$collectionPath/${aname()}", content) ~> Route.seal(routes(creds)) ~> check {
    status should be(PayloadTooLarge)
    responseAs[String] should include {
      Messages.entityTooBig(SizeError(WhiskEntity.annotationsFieldName, annotations.size, Parameters.sizeLimit))
    }
  }
}
// Same oversize scenario as above, but for the parameters field.
it should "reject create package reference when parameters are too big" in {
  implicit val tid = transid()
  val keys: List[Long] =
    List.range(Math.pow(10, 9) toLong, (parametersLimit.toBytes / 20 + Math.pow(10, 9) + 2) toLong)
  val parameters = keys map { key =>
    Parameters(key.toString, "a" * 10)
  } reduce (_ ++ _)
  val content = s"""{"parameters":$parameters}""".parseJson.asJsObject
  Put(s"$collectionPath/${aname()}", content) ~> Route.seal(routes(creds)) ~> check {
    status should be(PayloadTooLarge)
    responseAs[String] should include {
      Messages.entityTooBig(SizeError(WhiskEntity.paramsFieldName, parameters.size, Parameters.sizeLimit))
    }
  }
}
// Oversize parameters are also rejected on update (overwrite=true) of an
// existing package.
it should "reject update package reference when parameters are too big" in {
  implicit val tid = transid()
  val keys: List[Long] =
    List.range(Math.pow(10, 9) toLong, (parametersLimit.toBytes / 20 + Math.pow(10, 9) + 2) toLong)
  val parameters = keys map { key =>
    Parameters(key.toString, "a" * 10)
  } reduce (_ ++ _)
  val provider = WhiskPackage(namespace, aname())
  val content = s"""{"parameters":$parameters}""".parseJson.asJsObject
  put(entityStore, provider)
  Put(s"$collectionPath/${aname()}?overwrite=true", content) ~> Route.seal(routes(creds)) ~> check {
    status should be(PayloadTooLarge)
    responseAs[String] should include {
      Messages.entityTooBig(SizeError(WhiskEntity.paramsFieldName, parameters.size, Parameters.sizeLimit))
    }
  }
}
// Updating a package bumps the patch version and applies the new publish flag.
// Also folds in the authorization check for a foreign user.
it should "update package" in {
  implicit val tid = transid()
  val provider = WhiskPackage(namespace, aname())
  val content = WhiskPackagePut(publish = Some(true))
  put(entityStore, provider)
  // it should "reject update package owned by different user" in {
  val auser = WhiskAuthHelpers.newIdentity()
  Put(s"/$namespace/${collection.path}/${provider.name}?overwrite=true", content) ~> Route.seal(routes(auser)) ~> check {
    status should be(Forbidden)
  }
  Put(s"$collectionPath/${provider.name}?overwrite=true", content) ~> Route.seal(routes(creds)) ~> check {
    deletePackage(provider.docid)
    val response = responseAs[WhiskPackage]
    checkWhiskEntityResponse(
      response,
      WhiskPackage(namespace, provider.name, None, version = provider.version.upPatch, publish = true))
  }
}
// Updating a binding keeps its binding annotation authoritative: the bogus
// client-supplied one is replaced while other annotations are merged in.
it should "update package reference" in {
  implicit val tid = transid()
  val provider = WhiskPackage(namespace, aname())
  val reference = WhiskPackage(namespace, aname(), provider.bind, annotations = bindingAnnotation(provider.bind.get))
  // create a bogus binding annotation which should be replaced by the PUT
  val someBindingAnnotation = Some(Parameters(WhiskPackage.bindingFieldName, "???") ++ Parameters("a", "b"))
  val content = WhiskPackagePut(publish = Some(true), annotations = someBindingAnnotation)
  put(entityStore, provider)
  put(entityStore, reference)
  // it should "reject update package reference owned by different user"
  val auser = WhiskAuthHelpers.newIdentity()
  Put(s"/$namespace/${collection.path}/${reference.name}?overwrite=true", content) ~> Route.seal(routes(auser)) ~> check {
    status should be(Forbidden)
  }
  Put(s"$collectionPath/${reference.name}?overwrite=true", content) ~> Route.seal(routes(creds)) ~> check {
    deletePackage(reference.docid)
    status should be(OK)
    val response = responseAs[WhiskPackage]
    checkWhiskEntityResponse(
      response,
      WhiskPackage(
        reference.namespace,
        reference.name,
        reference.binding,
        version = reference.version.upPatch,
        publish = true,
        annotations = reference.annotations ++ Parameters("a", "b")))
  }
}
// A plain package cannot be converted into a binding via update.
it should "reject update package with binding" in {
  implicit val tid = transid()
  val provider = WhiskPackage(namespace, aname())
  val content = WhiskPackagePut(provider.bind)
  put(entityStore, provider)
  Put(s"$collectionPath/${provider.name}?overwrite=true", content) ~> Route.seal(routes(creds)) ~> check {
    status should be(Conflict)
    responseAs[ErrorResponse].error should include(Messages.packageCannotBecomeBinding)
  }
}
// Re-pointing a binding at a package that does not exist in the same namespace
// is a client error. Note: only the reference is stored, not the provider.
it should "reject update package reference when new binding refers to non-existent package in same namespace" in {
  implicit val tid = transid()
  val provider = WhiskPackage(namespace, aname())
  val reference = WhiskPackage(namespace, aname(), provider.bind)
  val content = WhiskPackagePut(reference.binding)
  put(entityStore, reference)
  Put(s"$collectionPath/${reference.name}?overwrite=true", content) ~> Route.seal(routes(creds)) ~> check {
    status should be(BadRequest)
    responseAs[ErrorResponse].error should include(Messages.bindingDoesNotExist)
  }
}
// Re-pointing at a non-existent package in a foreign namespace yields
// Forbidden rather than a not-found style error.
it should "reject update package reference when new binding refers to non-existent package in another namespace" in {
  implicit val tid = transid()
  val privateCreds = WhiskAuthHelpers.newIdentity()
  val privateNamespace = EntityPath(privateCreds.subject.asString)
  val provider = WhiskPackage(privateNamespace, aname())
  val reference = WhiskPackage(namespace, aname(), provider.bind)
  val content = WhiskPackagePut(reference.binding)
  put(entityStore, reference)
  Put(s"$collectionPath/${reference.name}?overwrite=true", content) ~> Route.seal(routes(creds)) ~> check {
    status should be(Forbidden)
  }
}
// A binding must not be updated to point at itself; the original target must
// be left intact after the rejected update.
it should "reject update package reference when new binding refers to itself" in {
  implicit val tid = transid()
  // create package and valid reference binding to it
  val provider = WhiskPackage(namespace, aname())
  val reference = WhiskPackage(namespace, aname(), provider.bind)
  put(entityStore, provider)
  put(entityStore, reference)
  // manipulate package reference such that it attempts to bind to itself
  val content = WhiskPackagePut(Some(Binding(namespace.root, reference.name)))
  Put(s"$collectionPath/${reference.name}?overwrite=true", content) ~> Route.seal(routes(creds)) ~> check {
    status should be(BadRequest)
    responseAs[ErrorResponse].error should include(Messages.bindingCannotReferenceBinding)
  }
  // verify that the reference is still pointing to the original provider
  Get(s"$collectionPath/${reference.name}") ~> Route.seal(routes(creds)) ~> check {
    status should be(OK)
    val response = responseAs[WhiskPackage]
    response should be(reference)
    response.binding should be(provider.bind)
  }
}
// Re-pointing a binding at a private package in another namespace is Forbidden.
it should "reject update package reference when new binding refers to private package in another namespace" in {
  implicit val tid = transid()
  val privateCreds = WhiskAuthHelpers.newIdentity()
  val privateNamespace = EntityPath(privateCreds.subject.asString)
  val provider = WhiskPackage(privateNamespace, aname())
  val reference = WhiskPackage(namespace, aname(), provider.bind)
  val content = WhiskPackagePut(reference.binding)
  put(entityStore, provider)
  put(entityStore, reference)
  Put(s"$collectionPath/${reference.name}?overwrite=true", content) ~> Route.seal(routes(creds)) ~> check {
    status should be(Forbidden)
  }
}
//// DEL /packages/name
// Deleting a package returns the deleted entity; foreign users may not even
// read it.
it should "delete package" in {
  implicit val tid = transid()
  val provider = WhiskPackage(namespace, aname())
  put(entityStore, provider)
  // it should "reject deleting package owned by different user" in {
  val auser = WhiskAuthHelpers.newIdentity()
  Get(s"/$namespace/${collection.path}/${provider.name}") ~> Route.seal(routes(auser)) ~> check {
    status should be(Forbidden)
  }
  Delete(s"$collectionPath/${provider.name}") ~> Route.seal(routes(creds)) ~> check {
    status should be(OK)
    val response = responseAs[WhiskPackage]
    response should be(provider)
  }
}
// A dangling binding (its provider was never stored) is still deletable.
it should "delete package reference regardless of package existence" in {
  implicit val tid = transid()
  val provider = WhiskPackage(namespace, aname())
  val reference = WhiskPackage(namespace, aname(), provider.bind)
  put(entityStore, reference)
  // it should "reject deleting package reference owned by different user" in {
  val auser = WhiskAuthHelpers.newIdentity()
  Get(s"/$namespace/${collection.path}/${reference.name}") ~> Route.seal(routes(auser)) ~> check {
    status should be(Forbidden)
  }
  Delete(s"$collectionPath/${reference.name}") ~> Route.seal(routes(creds)) ~> check {
    status should be(OK)
    val response = responseAs[WhiskPackage]
    response should be(reference)
  }
}
// A package that still contains an action may not be deleted; the GET is
// retried because the action listing may be eventually consistent.
it should "reject delete non-empty package" in {
  implicit val tid = transid()
  val provider = WhiskPackage(namespace, aname())
  val action = WhiskAction(provider.namespace.addPath(provider.name), aname(), jsDefault("??"))
  put(entityStore, provider)
  put(entityStore, action)
  org.apache.openwhisk.utils.retry {
    Get(s"$collectionPath/${provider.name}") ~> Route.seal(routes(creds)) ~> check {
      status should be(OK)
      val response = responseAs[JsObject]
      response.fields("actions").asInstanceOf[JsArray].elements.length should be(1)
    }
  }
  Delete(s"$collectionPath/${provider.name}") ~> Route.seal(routes(creds)) ~> check {
    status should be(Conflict)
    val response = responseAs[ErrorResponse]
    response.error should include("Package not empty (contains 1 entity)")
    response.code.id should not be empty
  }
}
//// invalid resource
// Sub-resources of a package (e.g. /packages/name/bar) are not routable.
it should "reject invalid resource" in {
  implicit val tid = transid()
  val provider = WhiskPackage(namespace, aname())
  put(entityStore, provider)
  Get(s"$collectionPath/${provider.name}/bar") ~> Route.seal(routes(creds)) ~> check {
    status should be(NotFound)
  }
}
// Listing a namespace that holds no entities ("whisk.systsdf" is a deliberate
// misspelling) returns an empty list rather than an error.
it should "return empty list for invalid namespace" in {
  implicit val tid = transid()
  val path = s"/whisk.systsdf/${collection.path}"
  Get(path) ~> Route.seal(routes(creds)) ~> check {
    status should be(OK)
    responseAs[List[JsObject]] should be(List.empty)
  }
}
// A binding must target a package document; pointing at an action is invalid.
it should "reject bind to non-package" in {
  implicit val tid = transid()
  val action = WhiskAction(namespace, aname(), jsDefault("??"))
  val reference = WhiskPackage(namespace, aname(), Some(Binding(action.namespace.root, action.name)))
  val content = WhiskPackagePut(reference.binding)
  put(entityStore, action)
  Put(s"$collectionPath/${reference.name}", content) ~> Route.seal(routes(creds)) ~> check {
    status should be(Conflict)
    responseAs[ErrorResponse].error should include(Messages.requestedBindingIsNotValid)
  }
}
// The next three tests store an undeserializable record (BadEntity) and check
// that each verb surfaces a 500 instead of leaking a raw exception.
it should "report proper error when record is corrupted on delete" in {
  implicit val tid = transid()
  val entity = BadEntity(namespace, aname())
  put(entityStore, entity)
  Delete(s"$collectionPath/${entity.name}") ~> Route.seal(routes(creds)) ~> check {
    status should be(InternalServerError)
    responseAs[ErrorResponse].error shouldBe Messages.corruptedEntity
  }
}
it should "report proper error when record is corrupted on get" in {
  implicit val tid = transid()
  val entity = BadEntity(namespace, aname())
  put(entityStore, entity)
  Get(s"$collectionPath/${entity.name}") ~> Route.seal(routes(creds)) ~> check {
    status should be(InternalServerError)
  }
}
it should "report proper error when record is corrupted on put" in {
  implicit val tid = transid()
  val entity = BadEntity(namespace, aname())
  put(entityStore, entity)
  val content = WhiskPackagePut()
  Put(s"$collectionPath/${entity.name}", content) ~> Route.seal(routes(creds)) ~> check {
    status should be(InternalServerError)
    responseAs[ErrorResponse].error shouldBe Messages.corruptedEntity
  }
}
}
| jeremiaswerner/openwhisk | tests/src/test/scala/org/apache/openwhisk/core/controller/test/PackagesApiTests.scala | Scala | apache-2.0 | 37,282 |
package com.mdsol.mauth
import java.util.UUID
import com.mdsol.mauth.exception.MAuthValidationException
import com.mdsol.mauth.test.utils.FakeMAuthServer.EXISTING_CLIENT_APP_UUID
import com.mdsol.mauth.util.MAuthKeysHelper
import com.mdsol.mauth.utils.ClientPublicKeyProvider
import org.scalamock.scalatest.MockFactory
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
/**
 * Tests for `RequestAuthenticator` covering both mAuth header generations:
 * v1 (X-MWS-* time header constants) and v2 (MCC-* constants), plus the
 * v2-only mode enabled via the authenticator's trailing boolean flag.
 *
 * The epoch-time and public-key providers are ScalaMock mocks, so each test
 * pins the clock relative to the request's signed timestamp.
 */
class RequestAuthenticatorSpec extends AnyFlatSpec with RequestAuthenticatorBaseSpec with Matchers with MockFactory {

  val mockClientPublicKeyProvider: ClientPublicKeyProvider = mock[ClientPublicKeyProvider]
  // Accepts both v1 and v2 authentication headers.
  val authenticator: RequestAuthenticator = new RequestAuthenticator(mockClientPublicKeyProvider, REQUEST_VALIDATION_TIMEOUT_SECONDS, mockEpochTimeProvider)
  // Trailing `true` presumably enables v2-only validation (see the
  // "requires mAuth v2 authentication headers" expectation below).
  val authenticatorV2: RequestAuthenticator =
    new RequestAuthenticator(mockClientPublicKeyProvider, REQUEST_VALIDATION_TIMEOUT_SECONDS, mockEpochTimeProvider, true)

  behavior of "RequestAuthenticator"

  it should "validate a valid request" in {
    //noinspection ConvertibleToMethodValue
    // Clock is 3s past the signed timestamp: well within the timeout window.
    (mockEpochTimeProvider.inSeconds _: () => Long).expects().returns(CLIENT_X_MWS_TIME_HEADER_VALUE.toLong + 3)
    (mockClientPublicKeyProvider.getPublicKey _).expects(EXISTING_CLIENT_APP_UUID).returns(MAuthKeysHelper.getPublicKeyFromString(PUBLIC_KEY))

    authenticator.authenticate(getSimpleRequest) shouldBe true
  }

  it should "validate a valid request with special characters" in {
    //noinspection ConvertibleToMethodValue
    (mockEpochTimeProvider.inSeconds _: () => Long).expects().returns(CLIENT_UNICODE_X_MWS_TIME_HEADER_VALUE.toLong + 3)
    (mockClientPublicKeyProvider.getPublicKey _).expects(EXISTING_CLIENT_APP_UUID).returns(MAuthKeysHelper.getPublicKeyFromString(PUBLIC_KEY))

    authenticator.authenticate(getRequestWithUnicodeCharactersInBody) shouldBe true
  }

  it should "validate a valid request without body" in {
    //noinspection ConvertibleToMethodValue
    (mockEpochTimeProvider.inSeconds _: () => Long).expects().returns(CLIENT_NO_BODY_X_MWS_TIME_HEADER_VALUE.toLong + 3)
    (mockClientPublicKeyProvider.getPublicKey _).expects(EXISTING_CLIENT_APP_UUID).returns(MAuthKeysHelper.getPublicKeyFromString(PUBLIC_KEY))

    authenticator.authenticate(getRequestWithoutMessageBody) shouldBe true
  }

  it should "fail validating request sent after 5 minutes" in {
    //noinspection ConvertibleToMethodValue
    // 600s after signing exceeds the 300s validation timeout.
    (mockEpochTimeProvider.inSeconds _: () => Long).expects().returns(CLIENT_UNICODE_X_MWS_TIME_HEADER_VALUE.toLong + 600)

    val expectedException = intercept[MAuthValidationException] {
      authenticator.authenticate(getRequestWithUnicodeCharactersInBody)
    }
    expectedException.getMessage shouldBe "MAuth request validation failed because of timeout 300s"
  }

  it should "fail validating invalid request" in {
    //noinspection ConvertibleToMethodValue
    (mockEpochTimeProvider.inSeconds _: () => Long).expects().returns(CLIENT_X_MWS_TIME_HEADER_VALUE.toLong + 3)
    (mockClientPublicKeyProvider.getPublicKey _).expects(EXISTING_CLIENT_APP_UUID).returns(MAuthKeysHelper.getPublicKeyFromString(PUBLIC_KEY))

    authenticator.authenticate(getSimpleRequestWithWrongSignature) shouldBe false
  }

  it should "validate a valid request for V2" in {
    //noinspection ConvertibleToMethodValue
    (mockEpochTimeProvider.inSeconds _: () => Long).expects().returns(CLIENT_MCC_TIME_HEADER_VALUE.toLong + 3)
    (mockClientPublicKeyProvider.getPublicKey _).expects(EXISTING_CLIENT_APP_UUID).returns(MAuthKeysHelper.getPublicKeyFromString(PUBLIC_KEY))

    authenticatorV2.authenticate(getSimpleRequestV2) shouldBe true
  }

  it should "validate a valid request with the headers of V1 and V2" in {
    //noinspection ConvertibleToMethodValue
    (mockEpochTimeProvider.inSeconds _: () => Long).expects().returns(CLIENT_MCC_TIME_HEADER_VALUE.toLong + 3)
    (mockClientPublicKeyProvider.getPublicKey _).expects(EXISTING_CLIENT_APP_UUID).returns(MAuthKeysHelper.getPublicKeyFromString(PUBLIC_KEY))

    authenticator.authenticate(getRequestWithAllHeaders) shouldBe true
  }

  it should "fail validating request if V1 is disabled but V2 headers are missing" in {
    //noinspection ConvertibleToMethodValue
    (mockEpochTimeProvider.inSeconds _: () => Long).expects().returns(CLIENT_X_MWS_TIME_HEADER_VALUE.toLong + 3)

    val expectedException = intercept[MAuthValidationException] {
      authenticatorV2.authenticate(getSimpleRequest)
    }
    expectedException.getMessage shouldBe "The service requires mAuth v2 authentication headers."
  }

  it should "validate a valid request with binary body for V1" in {
    //noinspection ConvertibleToMethodValue
    (mockEpochTimeProvider.inSeconds _: () => Long).expects().returns(CLIENT_X_MWS_TIME_HEADER_BINARY_VALUE.toLong + 3)
    // Binary-body fixture is signed by a different app, hence the second key.
    (mockClientPublicKeyProvider.getPublicKey _)
      .expects(UUID.fromString(CLIENT_REQUEST_BINARY_APP_UUID))
      .returns(MAuthKeysHelper.getPublicKeyFromString(PUBLIC_KEY2))

    authenticator.authenticate(getRequestWithBinaryBodyV1) shouldBe true
  }

  it should "validate the request with the validated V1 headers and wrong V2 signature" in {
    //noinspection ConvertibleToMethodValue
    // Dual-version authenticator falls back to the valid v1 signature.
    (mockEpochTimeProvider.inSeconds _: () => Long).expects().returns(CLIENT_MCC_TIME_HEADER_VALUE.toLong + 3)
    (mockClientPublicKeyProvider.getPublicKey _).expects(EXISTING_CLIENT_APP_UUID).returns(MAuthKeysHelper.getPublicKeyFromString(PUBLIC_KEY))

    authenticator.authenticate(getRequestWithWrongV2Signature) shouldBe true
  }

  it should "fail validating request with validated V1 headers and wrong V2 signature if V2 only is enabled" in {
    //noinspection ConvertibleToMethodValue
    (mockEpochTimeProvider.inSeconds _: () => Long).expects().returns(CLIENT_MCC_TIME_HEADER_VALUE.toLong + 3)
    (mockClientPublicKeyProvider.getPublicKey _).expects(EXISTING_CLIENT_APP_UUID).returns(MAuthKeysHelper.getPublicKeyFromString(PUBLIC_KEY))

    authenticatorV2.authenticate(getRequestWithWrongV2Signature) shouldBe false
  }
}
| mdsol/mauth-java-client | modules/mauth-authenticator-scala/src/test/scala/com/mdsol/mauth/RequestAuthenticatorSpec.scala | Scala | mit | 6,034 |
package com.ebay.neutrino
import com.ebay.neutrino.config.{NeutrinoSettings, VirtualServer}
import org.scalatest.{FlatSpec, Matchers}
/** Placeholder suite for `NeutrinoNode` update behavior; not yet implemented. */
class NeutrinoNodeTest extends FlatSpec with Matchers {

  it should "provide testing for NeutrinoNode update" in {
    // TODO
  }
}
/**
 * Exercises the NeutrinoNodes wrapper: updating the node set with varying
 * server lists and checking the resulting membership by server id.
 */
class NeutrinoNodesTest extends FlatSpec with Matchers with NeutrinoTestSupport {

  implicit val core = new NeutrinoCore(NeutrinoSettings.Empty)

  // Test-fixture factory for a VirtualServer.
  // NOTE(review): parameter `post` looks like a typo for `port` (it is passed
  // as the port argument) — renaming would change named-argument call sites,
  // so it is only flagged here.
  def server(id: String="id", host: String="www.ebay.com", post: Int=80): VirtualServer =
    VirtualServer(id, host, post)

  it should "ensure apply() maps to underlying state" in {
    // TODO
  }

  it should "rudmintary test of neutrino-nodes wrapper" in {
    val nodes = neutrinoNodes()
    nodes() shouldBe empty

    // Add a single node
    nodes.update(server(id="1"))
    nodes().size should be (1)

    // Add two nodes
    nodes.update(server(id="1"), server(id="2"))
    nodes().size should be (2)
    nodes() map (_.settings.id.toInt) should be (Seq(1,2))

    // Add two nodes
    nodes.update(server(id="3"))
    nodes().size should be (1)
    nodes() map (_.settings.id.toInt) should be (Seq(3))

    // Remove all nodes
    nodes.update()
    nodes().size should be (0)
    nodes() map (_.settings.id.toInt) should be (Seq())
  }

  it should "test massive concurrency access for safety" in {
    // TODO ...
  }

  it should "resolve pool by name" in {
  }
}
| eBay/Neutrino | src/test/scala/com/ebay/neutrino/NeutrinoNodesTest.scala | Scala | apache-2.0 | 1,406 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.runtime.stream.table
import java.lang.{Boolean => JBoolean}
import org.apache.flink.api.scala._
import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment}
import org.apache.flink.table.api.scala._
import org.apache.flink.table.api.{Types, ValidationException}
import org.apache.flink.table.expressions.utils.{Func18, Func20, RichFunc2}
import org.apache.flink.table.runtime.utils.{StreamITCase, StreamTestData, _}
import org.apache.flink.table.utils._
import org.apache.flink.test.util.AbstractTestBase
import org.apache.flink.types.Row
import org.junit.Assert._
import org.junit.{Before, Test}
import scala.collection.mutable
/**
 * Streaming Table API integration tests for correlate (table function join)
 * operations: joinLateral / leftOuterJoinLateral, UDTFs with parameters,
 * constructor arguments, varargs, row types, and flatMap.
 */
class CorrelateITCase extends AbstractTestBase {

  val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
  val tEnv: StreamTableEnvironment = StreamTableEnvironment.create(env)

  // Reset the shared sink buffer before each test.
  @Before
  def clear(): Unit = {
    StreamITCase.clear
  }

  // Cross join against two table functions chained back to back, with a
  // filter on a column produced by the second one.
  @Test
  def testCrossJoin(): Unit = {
    val t = testData(env).toTable(tEnv).as('a, 'b, 'c)
    val func0 = new TableFunc0
    val pojoFunc0 = new PojoTableFunc()

    val result = t
      .joinLateral(func0('c) as('d, 'e))
      .select('c, 'd, 'e)
      .joinLateral(pojoFunc0('c))
      .where('age > 20)
      .select('c, 'name, 'age)
      .toAppendStream[Row]

    result.addSink(new StreamITCase.StringSink[Row])
    env.execute()

    val expected = mutable.MutableList("Jack#22,Jack,22", "Anna#44,Anna,44")
    assertEquals(expected.sorted, StreamITCase.testResults.sorted)
  }
  // Left outer join keeps rows with no UDTF match ("nosharp") padded with nulls.
  @Test
  def testLeftOuterJoinWithoutPredicates(): Unit = {
    val t = testData(env).toTable(tEnv).as('a, 'b, 'c)
    val func0 = new TableFunc0

    val result = t
      .leftOuterJoinLateral(func0('c) as('d, 'e))
      .select('c, 'd, 'e)
      .toAppendStream[Row]

    result.addSink(new StreamITCase.StringSink[Row])
    env.execute()

    val expected = mutable.MutableList(
      "nosharp,null,null", "Jack#22,Jack,22",
      "John#19,John,19", "Anna#44,Anna,44")
    assertEquals(expected.sorted, StreamITCase.testResults.sorted)
  }

  /**
   * Common join predicates are temporarily forbidden (see FLINK-7865).
   */
  // NOTE(review): because the ValidationException is expected to be raised
  // while building the plan, everything after the query definition (the sink,
  // execute, and the assertions on a String `expected`) is unreachable.
  @Test (expected = classOf[ValidationException])
  def testLeftOuterJoinWithPredicates(): Unit = {
    val t = testData(env).toTable(tEnv).as('a, 'b, 'c)
    val func0 = new TableFunc0

    val result = t
      .leftOuterJoinLateral(func0('c) as ('s, 'l), 'a === 'l)
      .select('c, 's, 'l)
      .toAppendStream[Row]

    result.addSink(new StreamITCase.StringSink[Row])
    env.execute()

    val expected = "John#19,null,null\\n" + "John#22,null,null\\n" + "Anna44,null,null\\n" +
      "nosharp,null,null"
    assertEquals(expected.sorted, StreamITCase.testResults.sorted)
  }
  // UDTF output filtered by a scalar function applied to a correlated column.
  @Test
  def testUserDefinedTableFunctionWithScalarFunction(): Unit = {
    val t = testData(env).toTable(tEnv).as('a, 'b, 'c)
    val func0 = new TableFunc0

    val result = t
      .joinLateral(func0('c) as('d, 'e))
      .where(Func18('d, "J"))
      .select('c, 'd, 'e)
      .toAppendStream[Row]

    result.addSink(new StreamITCase.StringSink[Row])
    env.execute()

    val expected = mutable.MutableList("Jack#22,Jack,22", "John#19,John,19")
    assertEquals(expected.sorted, StreamITCase.testResults.sorted)
  }

  // A rich table function reading its separator from the job parameters.
  @Test
  def testUserDefinedTableFunctionWithParameter(): Unit = {
    val tableFunc1 = new RichTableFunc1
    tEnv.registerFunction("RichTableFunc1", tableFunc1)
    UserDefinedFunctionTestUtils.setJobParameters(env, Map("word_separator" -> " "))
    StreamITCase.testResults = mutable.MutableList()

    val result = StreamTestData.getSmall3TupleDataStream(env)
      .toTable(tEnv, 'a, 'b, 'c)
      .joinLateral(tableFunc1('c) as 's)
      .select('a, 's)

    val results = result.toAppendStream[Row]
    results.addSink(new StreamITCase.StringSink[Row])
    env.execute()

    val expected = mutable.MutableList("3,Hello", "3,world")
    assertEquals(expected.sorted, StreamITCase.testResults.sorted)
  }

  // Rich UDTF fed by a rich scalar function; both configured via job parameters.
  @Test
  def testUserDefinedTableFunctionWithUserDefinedScalarFunction(): Unit = {
    val tableFunc1 = new RichTableFunc1
    val richFunc2 = new RichFunc2
    tEnv.registerFunction("RichTableFunc1", tableFunc1)
    tEnv.registerFunction("RichFunc2", richFunc2)
    UserDefinedFunctionTestUtils.setJobParameters(
      env,
      Map("word_separator" -> "#", "string.value" -> "test"))
    StreamITCase.testResults = mutable.MutableList()

    val result = StreamTestData.getSmall3TupleDataStream(env)
      .toTable(tEnv, 'a, 'b, 'c)
      .joinLateral(tableFunc1(richFunc2('c)) as 's)
      .select('a, 's)

    val results = result.toAppendStream[Row]
    results.addSink(new StreamITCase.StringSink[Row])
    env.execute()

    val expected = mutable.MutableList(
      "1,Hi",
      "1,test",
      "2,Hello",
      "2,test",
      "3,Hello world",
      "3,test")
    assertEquals(expected.sorted, StreamITCase.testResults.sorted)
  }
  // Three instances of the same UDTF class configured through different
  // constructor arguments (null prefix, string prefix, prefix + config map).
  @Test
  def testTableFunctionConstructorWithParams(): Unit = {
    val t = testData(env).toTable(tEnv).as('a, 'b, 'c)
    val config = Map("key1" -> "value1", "key2" -> "value2")
    val func30 = new TableFunc3(null)
    val func31 = new TableFunc3("OneConf_")
    val func32 = new TableFunc3("TwoConf_", config)

    val result = t
      .joinLateral(func30('c) as('d, 'e))
      .select('c, 'd, 'e)
      .joinLateral(func31('c) as ('f, 'g))
      .select('c, 'd, 'e, 'f, 'g)
      .joinLateral(func32('c) as ('h, 'i))
      .select('c, 'd, 'f, 'h, 'e, 'g, 'i)
      .toAppendStream[Row]

    result.addSink(new StreamITCase.StringSink[Row])
    env.execute()

    val expected = mutable.MutableList(
      "Anna#44,Anna,OneConf_Anna,TwoConf__key=key1_value=value1_Anna,44,44,44",
      "Anna#44,Anna,OneConf_Anna,TwoConf__key=key2_value=value2_Anna,44,44,44",
      "Jack#22,Jack,OneConf_Jack,TwoConf__key=key1_value=value1_Jack,22,22,22",
      "Jack#22,Jack,OneConf_Jack,TwoConf__key=key2_value=value2_Jack,22,22,22",
      "John#19,John,OneConf_John,TwoConf__key=key1_value=value1_John,19,19,19",
      "John#19,John,OneConf_John,TwoConf__key=key2_value=value2_John,19,19,19"
    )
    assertEquals(expected.sorted, StreamITCase.testResults.sorted)
  }

  // Varargs UDTF: each input row fans out into one row per vararg value.
  @Test
  def testTableFunctionWithVariableArguments(): Unit = {
    val varArgsFunc0 = new VarArgsFunc0
    tEnv.registerFunction("VarArgsFunc0", varArgsFunc0)

    val result = testData(env)
      .toTable(tEnv, 'a, 'b, 'c)
      .select('c)
      .joinLateral(varArgsFunc0("1", "2", 'c))

    result.addSink(new StreamITCase.StringSink[Row])
    env.execute()

    val expected = mutable.MutableList(
      "Anna#44,1",
      "Anna#44,2",
      "Anna#44,Anna#44",
      "Jack#22,1",
      "Jack#22,2",
      "Jack#22,Jack#22",
      "John#19,1",
      "John#19,2",
      "John#19,John#19",
      "nosharp,1",
      "nosharp,2",
      "nosharp,nosharp")
    assertEquals(expected.sorted, StreamITCase.testResults.sorted)
  }
  // Nested ROW-typed column passed into a UDTF that emits its components.
  @Test
  def testRowType(): Unit = {
    val row = Row.of(
      12.asInstanceOf[Integer],
      true.asInstanceOf[JBoolean],
      Row.of(1.asInstanceOf[Integer], 2.asInstanceOf[Integer], 3.asInstanceOf[Integer])
    )

    val rowType = Types.ROW(Types.INT, Types.BOOLEAN, Types.ROW(Types.INT, Types.INT, Types.INT))
    val in = env.fromElements(row, row)(rowType).toTable(tEnv).as('a, 'b, 'c)

    val tableFunc5 = new TableFunc5()
    val result = in
      .joinLateral(tableFunc5('c) as ('f0, 'f1, 'f2))
      .select('c, 'f2)

    result.addSink(new StreamITCase.StringSink[Row])
    env.execute()

    val expected = mutable.MutableList(
      "1,2,3,3",
      "1,2,3,3")
    assertEquals(expected.sorted, StreamITCase.testResults.sorted)
  }

  // Verifies that open/close lifecycle of the function used by the generated
  // table-function collector (Func20 in the where clause) works end to end.
  @Test
  def testTableFunctionCollectorOpenClose(): Unit = {
    val t = testData(env).toTable(tEnv).as('a, 'b, 'c)
    val func0 = new TableFunc0
    val func20 = new Func20

    val result = t
      .joinLateral(func0('c) as('d, 'e))
      .where(func20('e))
      .select('c, 'd, 'e)
      .toAppendStream[Row]

    result.addSink(new StreamITCase.StringSink[Row])
    env.execute()

    val expected = Seq (
      "Jack#22,Jack,22",
      "John#19,John,19",
      "Anna#44,Anna,44"
    )

    assertEquals(
      expected.sorted,
      StreamITCase.testResults.sorted
    )
  }

  // Exercises collector code generation that requires member-field
  // initialization; no row matches the date filter, so the result is empty.
  @Test
  def testTableFunctionCollectorInit(): Unit = {
    val t = testData(env).toTable(tEnv).as('a, 'b, 'c)
    val func0 = new TableFunc0

    // this case will generate 'timestamp' member field and 'DateFormatter'
    val result = t
      .joinLateral(func0('c) as('d, 'e))
      .where(dateFormat(currentTimestamp(), "yyyyMMdd") === 'd)
      .select('c, 'd, 'e)
      .toAppendStream[Row]

    result.addSink(new StreamITCase.StringSink[Row])
    env.execute()

    assertEquals(
      Seq(),
      StreamITCase.testResults.sorted
    )
  }
  // flatMap with a UDTF, chained twice, including output field naming via `as`.
  @Test
  def testFlatMap(): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    val tEnv = StreamTableEnvironment.create(env)
    StreamITCase.testResults = mutable.MutableList()

    val func2 = new TableFunc2
    val ds = testData(env).toTable(tEnv, 'a, 'b, 'c)
      // test non alias
      .flatMap(func2('c))
      .select('f0, 'f1)
      // test the output field name of flatMap is the same as the field name of the input table
      .flatMap(func2(concat('f0, "#")))
      .as ('f0, 'f1)
      .select('f0, 'f1)

    val results = ds.toAppendStream[Row]
    results.addSink(new StreamITCase.StringSink[Row])
    env.execute()

    val expected = mutable.MutableList(
      "Jack,4",
      "22,2",
      "John,4",
      "19,2",
      "Anna,4",
      "44,2")
    assertEquals(expected.sorted, StreamITCase.testResults.sorted)
  }

  // Shared fixture: four (int, long, name) rows; "nosharp" has no '#' and is
  // used to exercise the no-match cases above.
  private def testData(
    env: StreamExecutionEnvironment)
  : DataStream[(Int, Long, String)] = {

    val data = new mutable.MutableList[(Int, Long, String)]
    data.+=((1, 1L, "Jack#22"))
    data.+=((2, 2L, "John#19"))
    data.+=((3, 2L, "Anna#44"))
    data.+=((4, 3L, "nosharp"))
    env.fromCollection(data)
  }
}
| fhueske/flink | flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/runtime/stream/table/CorrelateITCase.scala | Scala | apache-2.0 | 10,825 |
package com.twitter.finagle.thriftmux
import com.twitter.finagle.client
import com.twitter.finagle._
import com.twitter.finagle.builder.ClientBuilder
import com.twitter.finagle.builder.ClientConfig
import com.twitter.finagle.client.MethodPool
import com.twitter.finagle.service.ResponseClassifier
import com.twitter.finagle.thrift.exp.partitioning.PartitioningStrategy
import com.twitter.finagle.thrift.exp.partitioning.ThriftPartitioningService
import com.twitter.finagle.thrift.service.Filterable
import com.twitter.finagle.thrift.service.ServicePerEndpointBuilder
import com.twitter.finagle.thrift.ServiceIfaceBuilder
import com.twitter.finagle.thrift.ThriftClientRequest
import com.twitter.finagle.thriftmux.exp.partitioning.DynamicPartitioningService
import com.twitter.util.Duration
import com.twitter.util.Future
import com.twitter.util.Time
import com.twitter.util.tunable.Tunable
object MethodBuilder {
  import client.MethodBuilder._

  /**
   * Create a [[MethodBuilder]] for a given destination.
   *
   * Note that metrics will be scoped (e.g. "clnt/your_client_label/method_name").
   *
   * The value for "your_client_label" is taken from the `withLabel` setting
   * (from [[param.Label]]). If that is not set, `dest` is used.
   * The value for "method_name" is set when an method-specific client
   * is constructed, as in [[MethodBuilder.servicePerEndpoint]].
   *
   * @param dest where requests are dispatched to.
   *             See the [[https://twitter.github.io/finagle/guide/Names.html user guide]]
   *             for details on destination names.
   *
   * @see [[com.twitter.finagle.ThriftMux.Client.methodBuilder(String)]]
   */
  def from(dest: String, thriftMuxClient: ThriftMux.Client): MethodBuilder =
    // Parse the destination string into a Name and delegate to the Name overload.
    from(Resolver.eval(dest), thriftMuxClient)

  /**
   * Create a [[MethodBuilder]] for a given destination.
   *
   * Note that metrics will be scoped (e.g. "clnt/your_client_label/method_name").
   *
   * The value for "your_client_label" is taken from the `withLabel` setting
   * (from [[param.Label]]). If that is not set, `dest` is used.
   * The value for "method_name" is set when an method-specific client
   * is constructed, as in [[MethodBuilder.servicePerEndpoint]].
   *
   * @param dest where requests are dispatched to.
   *             See the [[https://twitter.github.io/finagle/guide/Names.html user guide]]
   *             for details on destination names.
   *
   * @see [[com.twitter.finagle.ThriftMux.Client.methodBuilder(Name)]]
   */
  def from(dest: Name, thriftMuxClient: ThriftMux.Client): MethodBuilder = {
    // Swap the client's partitioning module for the per-request variant so that
    // `MethodBuilder.withPartitioningStrategy` can scope a strategy to a single
    // method (via DynamicPartitioningService.letStrategy in partitioningFilter).
    val stack = modifiedStack(thriftMuxClient.stack)
      .replace(ThriftPartitioningService.role, DynamicPartitioningService.perRequestModule)
    val params = thriftMuxClient.params
    val mb = new client.MethodBuilder[ThriftClientRequest, Array[Byte]](
      // NOTE(review): the MethodPool is constructed with param.Label.Default
      // rather than any label configured on thriftMuxClient — confirm this is
      // intentional (per-method labels are applied later, at endpoint creation).
      new MethodPool[ThriftClientRequest, Array[Byte]](
        thriftMuxClient.withStack(stack),
        dest,
        param.Label.Default),
      dest,
      stack,
      params,
      Config.create(thriftMuxClient.stack, params)
    )
    new MethodBuilder(thriftMuxClient, mb)
  }

  /**
   * '''NOTE:''' Prefer using [[com.twitter.finagle.ThriftMux.Client.methodBuilder]] over using
   * this approach to construction. The functionality is available through
   * [[ThriftMux.Client]] and [[MethodBuilder]] while addressing the various issues
   * of `ClientBuilder`.
   *
   * Creates a [[MethodBuilder]] from the given [[ClientBuilder]].
   *
   * Note that metrics will be scoped (e.g. "clnt/clientbuilders_name/method_name").
   *
   * The value for "clientbuilders_name" is taken from the [[ClientBuilder.name]]
   * configuration, using "client" if unspecified.
   * The value for "method_name" is set when an method-specific client
   * is constructed, as in [[MethodBuilder.servicePerEndpoint]].
   *
   *  - The [[ClientBuilder.timeout]] configuration will be used as the default
   *  value for [[MethodBuilder.withTimeoutTotal]].
   *
   *  - The [[ClientBuilder.requestTimeout]] configuration will be used as the
   *  default value for [[MethodBuilder.withTimeoutPerRequest]].
   *
   *  - The [[ClientBuilder]] must have been constructed using
   *  [[ClientBuilder.stack]] passing an instance of a [[ThriftMux.Client]].
   *
   *  - The [[ClientBuilder]] metrics scoped to "tries" are not included
   *  as they are superseded by metrics scoped to "logical".
   *
   *  - The [[ClientBuilder]] retry policy will not be applied and must
   *  be migrated to using [[MethodBuilder.withRetryForClassifier]].
   *
   *  - The [[ClientBuilder]] retries will not be applied and must
   *  be migrated to using [[MethodBuilder.withMaxRetries]].
   *
   * @see [[https://twitter.github.io/finagle/guide/Clients.html#migrating-from-clientbuilder user guide]]
   */
  def from(
    clientBuilder: ClientBuilder[ThriftClientRequest, Array[Byte], ClientConfig.Yes, _, _]
  ): MethodBuilder = {
    // A dest is mandatory: it determines both where requests are dispatched and
    // the default metrics scope.
    if (!clientBuilder.params.contains[ClientConfig.DestName])
      throw new IllegalArgumentException("ClientBuilder must be configured with a dest")
    val dest = clientBuilder.params[ClientConfig.DestName].name
    // Requires the ClientBuilder to have been built via ClientBuilder.stack with
    // a ThriftMux.Client (documented above); otherwise this cast fails at runtime.
    val client = clientBuilder.client.asInstanceOf[ThriftMux.Client]
    from(dest, client)
  }
}
/**
* `MethodBuilder` is a collection of APIs for client configuration at
* a higher level than the Finagle 6 APIs while improving upon the deprecated
* [[ClientBuilder]]. `MethodBuilder` provides:
*
* - Logical success rate metrics.
* - Retries based on application-level requests and responses (e.g. a code in
* the Thrift response).
* - Configuration of per-attempt and total timeouts.
*
* All of these can be customized per method (or endpoint) while sharing a single
* underlying Finagle client. Concretely, a single service might offer both
* `getOneTweet` as well as `deleteTweets`, whilst each having
* wildly different characteristics. The get is idempotent and has a tight latency
* distribution while the delete is not idempotent and has a wide latency
* distribution. If users want different configurations, without `MethodBuilder`
* they must create separate Finagle clients for each grouping. While long-lived
* clients in Finagle are not expensive, they are not free. They create
* duplicate metrics and waste heap, file descriptors, and CPU.
*
* = Example =
*
* Given an example IDL:
* {{{
* exception AnException {
* 1: i32 errorCode
* }
*
* service SomeService {
* i32 TheMethod(
* 1: i32 input
* ) throws (
* 1: AnException ex1,
* )
* }
* }}}
*
* This gives you a `Service` that has timeouts and retries on
* `AnException` when the `errorCode` is `0`:
* {{{
* import com.twitter.conversions.DurationOps._
* import com.twitter.finagle.ThriftMux
* import com.twitter.finagle.service.{ReqRep, ResponseClass}
* import com.twitter.util.Throw
*
* val client: ThriftMux.Client = ???
* val svc: Service[TheMethod.Args, TheMethod.SuccessType] =
* client.methodBuilder("inet!example.com:5555")
* .withTimeoutPerRequest(50.milliseconds)
* .withTimeoutTotal(100.milliseconds)
* .withRetryForClassifier {
* case ReqRep(_, Throw(AnException(errCode))) if errCode == 0 =>
* ResponseClass.RetryableFailure
* }
* .newServiceIface("the_method")
* .theMethod
* }}}
*
* = Timeouts =
*
* Defaults to using the StackClient's configuration.
*
* An example of setting a per-request timeout of 50 milliseconds and a total
* timeout of 100 milliseconds:
* {{{
* import com.twitter.conversions.DurationOps._
* import com.twitter.finagle.thriftmux.MethodBuilder
*
* val builder: MethodBuilder = ???
* builder
* .withTimeoutPerRequest(50.milliseconds)
* .withTimeoutTotal(100.milliseconds)
* }}}
*
* = Retries =
*
* Retries are intended to help clients improve success rate by trying
* failed requests additional times. Care must be taken by developers
* to only retry when it is known to be safe to issue the request multiple
* times. This is because the client cannot always be sure what the
* backend service has done. An example of a request that is safe to
* retry would be a read-only request.
*
* Defaults to using the client's [[ResponseClassifier]] to retry failures
* [[com.twitter.finagle.service.ResponseClass.RetryableFailure marked as retryable]].
* See [[withRetryForClassifier]] for details.
*
* An example of configuring classifiers for ChannelClosed and Timeout exceptions:
* {{{
* import com.twitter.finagle.service.ResponseClassifier._
* import com.twitter.finagle.thriftmux.MethodBuilder
*
* val builder: MethodBuilder = ???
* builder
* .withRetryForClassifier(RetryOnChannelClosed.orElse(RetryOnTimeout))
* }}}
*
* A [[com.twitter.finagle.service.RetryBudget]] is used to prevent retries from overwhelming
* the backend service. The budget is shared across clients created from
* an initial `MethodBuilder`. As such, even if the retry rules
* deem the request retryable, it may not be retried if there is insufficient
* budget.
*
* Finagle will automatically retry failures that are known to be safe
* to retry via [[com.twitter.finagle.service.RequeueFilter]]. This includes
* [[com.twitter.finagle.WriteException WriteExceptions]] and
* [[com.twitter.finagle.FailureFlags.Retryable retryable nacks]]. As these should have
* already been retried, we avoid retrying them again by ignoring them at this layer.
*
* Additional information regarding retries can be found in the
* [[https://twitter.github.io/finagle/guide/Clients.html#retries user guide]].
*
* The classifier is also used to determine the logical success metrics of
* the method. Logical here means after any retries are run. For example
* should a request result in retryable failure on the first attempt, but
* succeed upon retry, this is exposed through metrics as a success.
* Logical success rate metrics are scoped to
* "clnt/your_client_label/method_name/logical" and get "success" and
* "requests" counters along with a "request_latency_ms" stat.
*
* Unsuccessful requests are logged at `com.twitter.logging.Level.DEBUG` level.
* Further details, including the request and response, are available at
* `TRACE` level.
*
* @see [[com.twitter.finagle.ThriftMux.Client.methodBuilder]] to construct instances.
*
* @see The [[https://twitter.github.io/finagle/guide/MethodBuilder.html user guide]].
*/
class MethodBuilder(
    thriftMuxClient: ThriftMux.Client,
    mb: client.MethodBuilder[ThriftClientRequest, Array[Byte]])
    extends client.BaseMethodBuilder[MethodBuilder] {

  /**
   * Configured client label. The `label` is used to assign a label to the underlying Thrift client.
   * The label is used to display stats, etc.
   *
   * @see [[com.twitter.finagle.Client]]
   * @see [[https://twitter.github.io/finagle/guide/Clients.html#clients]]
   */
  def label: String = mb.params[param.Label].label

  // Each `with*` method below is immutable-style: it returns a new MethodBuilder
  // wrapping a reconfigured underlying client.MethodBuilder and leaves this
  // instance untouched.

  def withTimeoutTotal(howLong: Duration): MethodBuilder =
    new MethodBuilder(thriftMuxClient, mb.withTimeout.total(howLong))

  def withTimeoutTotal(howLong: Tunable[Duration]): MethodBuilder =
    new MethodBuilder(thriftMuxClient, mb.withTimeout.total(howLong))

  def withTimeoutPerRequest(howLong: Duration): MethodBuilder =
    new MethodBuilder(thriftMuxClient, mb.withTimeout.perRequest(howLong))

  def withTimeoutPerRequest(howLong: Tunable[Duration]): MethodBuilder =
    new MethodBuilder(thriftMuxClient, mb.withTimeout.perRequest(howLong))

  def withRetryForClassifier(classifier: ResponseClassifier): MethodBuilder =
    new MethodBuilder(thriftMuxClient, mb.withRetry.forClassifier(classifier))

  def withMaxRetries(value: Int): MethodBuilder =
    new MethodBuilder(thriftMuxClient, mb.withRetry.maxRetries(value))

  def withRetryDisabled: MethodBuilder =
    new MethodBuilder(thriftMuxClient, mb.withRetry.disabled)

  /**
   * Set a [[PartitioningStrategy]] for a MethodBuilder endpoint to enable
   * partitioning awareness. See [[PartitioningStrategy]].
   *
   * Default is [[com.twitter.finagle.thrift.exp.partitioning.Disabled]]
   *
   * @example
   * To set a hashing strategy to MethodBuilder:
   * {{{
   *   import com.twitter.finagle.ThriftMux.Client
   *   import com.twitter.finagle.thrift.exp.partitioning.MethodBuilderHashingStrategy
   *
   *   val hashingStrategy = new MethodBuilderHashingStrategy[RequestType, ResponseType](...)
   *
   *   val client: ThriftMux.Client = ???
   *   val builder = client.methodBuilder($address)
   *
   *   builder
   *     .withPartitioningStrategy(hashingStrategy)
   *     .servicePerEndpoint...
   *     ...
   * }}}
   */
  def withPartitioningStrategy(strategy: PartitioningStrategy): MethodBuilder =
    new MethodBuilder(thriftMuxClient, mb.filtered(partitioningFilter(strategy)))

  // Wraps every request so that, for the duration of the call, the
  // DynamicPartitioningService (installed into the stack by MethodBuilder.from)
  // observes `partitionStrategy` via letStrategy's local context.
  private[this] def partitioningFilter(
    partitionStrategy: PartitioningStrategy
  ): Filter.TypeAgnostic = {
    new Filter.TypeAgnostic {
      def toFilter[Req, Rep] = new SimpleFilter[Req, Rep] {
        def apply(request: Req, service: Service[Req, Rep]): Future[Rep] = {
          DynamicPartitioningService.letStrategy(partitionStrategy) {
            service(request)
          }
        }
      }
    }
  }

  /**
   * @inheritdoc
   *
   * This additionally causes Thrift Exceptions to be retried.
   */
  def idempotent(maxExtraLoad: Double): MethodBuilder =
    new MethodBuilder(
      thriftMuxClient,
      mb.idempotent(maxExtraLoad, sendInterrupts = true, ResponseClassifier.RetryOnThrows)
    )

  /**
   * @inheritdoc
   *
   * This additionally causes Thrift Exceptions to be retried.
   */
  def idempotent(maxExtraLoad: Double, minSendBackupAfterMs: Int): MethodBuilder =
    new MethodBuilder(
      thriftMuxClient,
      mb.idempotent(
        maxExtraLoad,
        sendInterrupts = true,
        minSendBackupAfterMs,
        ResponseClassifier.RetryOnThrows)
    )

  /**
   * @inheritdoc
   *
   * This additionally causes Thrift Exceptions to be retried.
   */
  def idempotent(maxExtraLoad: Tunable[Double]): MethodBuilder =
    new MethodBuilder(
      thriftMuxClient,
      mb.idempotent(maxExtraLoad, sendInterrupts = true, ResponseClassifier.RetryOnThrows)
    )

  /**
   * @inheritdoc
   *
   * This additionally causes Thrift Exceptions to be retried.
   */
  def idempotent(maxExtraLoad: Tunable[Double], minSendBackupAfterMs: Int): MethodBuilder =
    new MethodBuilder(
      thriftMuxClient,
      mb.idempotent(
        maxExtraLoad,
        sendInterrupts = true,
        minSendBackupAfterMs,
        ResponseClassifier.RetryOnThrows)
    )

  def nonIdempotent: MethodBuilder =
    new MethodBuilder(thriftMuxClient, mb.nonIdempotent)

  /**
   * Construct a `ServiceIface` to be used for the `methodName` function.
   *
   * @param methodName used for scoping metrics (e.g. "clnt/your_client_label/method_name").
   */
  @deprecated("Use servicePerEndpoint", "2017-11-29")
  def newServiceIface[ServiceIface <: Filterable[ServiceIface]](
    methodName: String
  )(
    implicit builder: ServiceIfaceBuilder[ServiceIface]
  ): ServiceIface = {
    val clientBuilder = new ClientServiceIfaceBuilder[ServiceIface](builder)
    mb.configured(Thrift.param.ServiceClass(Option(clientBuilder.serviceClass)))
      .newServicePerEndpoint(clientBuilder, methodName)
  }

  /**
   * Construct a `ServicePerEndpoint` to be used for the `methodName` function.
   *
   * @param methodName used for scoping metrics (e.g. "clnt/your_client_label/method_name").
   */
  def servicePerEndpoint[ServicePerEndpoint <: Filterable[ServicePerEndpoint]](
    methodName: String
  )(
    implicit builder: ServicePerEndpointBuilder[ServicePerEndpoint]
  ): ServicePerEndpoint = {
    val clientBuilder = new ClientServicePerEndpointBuilder[ServicePerEndpoint](builder)
    // The wrapper returned by newServicePerEndpoint delays filter/service creation;
    // unwrap to hand callers the plain ServicePerEndpoint.
    mb.configured(Thrift.param.ServiceClass(Option(clientBuilder.serviceClass)))
      .newServicePerEndpoint(clientBuilder, methodName).getServicePerEndpoint
  }

  /**
   * Construct a `ServicePerEndpoint` to be used for the client.
   */
  def servicePerEndpoint[ServicePerEndpoint <: Filterable[ServicePerEndpoint]](
    implicit builder: ServicePerEndpointBuilder[ServicePerEndpoint]
  ): ServicePerEndpoint = {
    val clientBuilder = new ClientServicePerEndpointBuilder[ServicePerEndpoint](builder)
    // `builder.serviceClass` is equivalent to `clientBuilder.serviceClass`, which
    // simply delegates to it (see ClientServicePerEndpointBuilder below).
    mb.configured(Thrift.param.ServiceClass(Option(builder.serviceClass)))
      .newServicePerEndpoint(clientBuilder).getServicePerEndpoint
  }

  /**
   * Create a [[Service]] from the current configuration.
   *
   * @note It's very likely that you wanted/needed to use {{servicePerEndpoint}} instead.
   */
  def newService(methodName: String): Service[ThriftClientRequest, Array[Byte]] =
    mb.newService(methodName)

  /**
   * Create a [[Service]] from the current configuration.
   *
   * @note It's very likely that you wanted/needed to use {{servicePerEndpoint}} instead.
   */
  def newService: Service[ThriftClientRequest, Array[Byte]] =
    mb.newService

  // Adapts a thrift ServiceIfaceBuilder to the generic per-endpoint builder
  // interface expected by client.MethodBuilder.
  final private class ClientServiceIfaceBuilder[ServiceIface <: Filterable[ServiceIface]](
    builder: ServiceIfaceBuilder[ServiceIface])
      extends client.ServicePerEndpointBuilder[
        ThriftClientRequest,
        Array[Byte],
        ServiceIface
      ] {
    override def servicePerEndpoint(
      service: => Service[ThriftClientRequest, Array[Byte]]
    ): ServiceIface = thriftMuxClient.newServiceIface(service, label)(builder)
    override def serviceClass: Class[_] = builder.serviceClass
  }

  // Adapts a thrift ServicePerEndpointBuilder, additionally wrapping the result
  // so that both the Service and any filters are created lazily (see the
  // Delayed* helpers below).
  final private class ClientServicePerEndpointBuilder[
    ServicePerEndpoint <: Filterable[ServicePerEndpoint]
  ](
    builder: ServicePerEndpointBuilder[ServicePerEndpoint])
      extends client.ServicePerEndpointBuilder[
        ThriftClientRequest,
        Array[Byte],
        DelayedTypeAgnosticFilterable[ServicePerEndpoint]
      ] {
    override def servicePerEndpoint(
      service: => Service[ThriftClientRequest, Array[Byte]]
    ): DelayedTypeAgnosticFilterable[ServicePerEndpoint] = {
      new DelayedTypeAgnosticFilterable(
        thriftMuxClient.servicePerEndpoint(
          new DelayedService(service),
          label
        )(builder)
      )
    }
    override def serviceClass: Class[_] = builder.serviceClass
  }

  // used to delay creation of the Service until the first request
  // as `mb.wrappedService` eagerly creates some metrics that are best
  // avoided until the first request.
  final private class DelayedService(service: => Service[ThriftClientRequest, Array[Byte]])
      extends Service[ThriftClientRequest, Array[Byte]] {
    // The by-name `service` is forced exactly once, on first use.
    private[this] lazy val svc: Service[ThriftClientRequest, Array[Byte]] =
      service
    def apply(request: ThriftClientRequest): Future[Array[Byte]] =
      svc(request)
    override def close(deadline: Time): Future[Unit] =
      svc.close(deadline)
    override def status: Status =
      svc.status
  }

  // A filterable that wraps each filter with DelayedTypeAgnostic before applying
  // it to the underlying servicePerEndpoint
  final private class DelayedTypeAgnosticFilterable[T <: Filterable[T]](servicePerEndpoint: T)
      extends Filterable[DelayedTypeAgnosticFilterable[T]] {
    def getServicePerEndpoint: T = servicePerEndpoint
    override def filtered(filter: Filter.TypeAgnostic): DelayedTypeAgnosticFilterable[T] =
      new DelayedTypeAgnosticFilterable[T](
        servicePerEndpoint.filtered(new DelayedTypeAgnostic(filter))
      )
  }

  // used to delay creation of the Filters until the first request
  // as `servicePerEndpoint.filtered` eagerly creates some metrics
  // that are best avoided until the first request.
  final private class DelayedTypeAgnostic(typeAgnostic: Filter.TypeAgnostic)
      extends Filter.TypeAgnostic {
    def toFilter[Req, Rep]: Filter[Req, Rep, Req, Rep] = new Filter[Req, Rep, Req, Rep] {
      // Forced on the first request through this filter.
      private lazy val filter: Filter[Req, Rep, Req, Rep] =
        typeAgnostic.toFilter
      def apply(request: Req, service: Service[Req, Rep]): Future[Rep] =
        filter(request, service)
    }
  }

  override def toString: String = mb.toString
}
| twitter/finagle | finagle-thriftmux/src/main/scala/com/twitter/finagle/thriftmux/MethodBuilder.scala | Scala | apache-2.0 | 20,059 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gearpump.streaming.examples.complexdag
import org.apache.gearpump.cluster.ClientToMaster.SubmitApplication
import org.apache.gearpump.cluster.MasterToClient.SubmitApplicationResult
import org.apache.gearpump.cluster.{MasterHarness, TestUtil}
import org.scalatest._
import org.scalatest.prop.PropertyChecks
import scala.concurrent.Future
import scala.util.Success
class DagSpec extends PropSpec with PropertyChecks
  with Matchers with BeforeAndAfterAll with MasterHarness {

  override def beforeAll {
    startActorSystem()
  }

  override def afterAll {
    shutdownActorSystem()
  }

  protected override def config = TestUtil.DEFAULT_CONFIG

  property("Dag should succeed to submit application with required arguments") {
    // The Dag example has no mandatory CLI arguments.
    val noArgs = Array.empty[String]
    val mockMaster = createMockMaster()
    // Run the application off the test thread; it presumably blocks awaiting
    // the master's reply, which the mock master sends below.
    Future(Dag.main(masterConfig, noArgs))
    mockMaster.expectMsgType[SubmitApplication](PROCESS_BOOT_TIME)
    mockMaster.reply(SubmitApplicationResult(Success(0)))
  }
}
| manuzhang/incubator-gearpump | examples/streaming/complexdag/src/test/scala/org/apache/gearpump/streaming/examples/complexdag/DagSpec.scala | Scala | apache-2.0 | 1,867 |
/*
* sbt
* Copyright 2011 - 2018, Lightbend, Inc.
* Copyright 2008 - 2010, Mark Harrah
* Licensed under Apache License 2.0 (see LICENSE)
*/
package sbt.util
import java.io.File
import scala.util.control.NonFatal
import sbt.io.{ Hash, IO }
import sjsonnew.{ Builder, DeserializationException, JsonFormat, Unbuilder, deserializationError }
import CacheImplicits.{ arrayFormat => _, _ }
import sbt.nio.file._
import sbt.nio.file.syntax._
/** A snapshot of some attribute(s) of a file, keyed by the file itself. */
sealed trait FileInfo { def file: File }

/** A [[FileInfo]] that records a content hash of the file. */
sealed trait HashFileInfo extends FileInfo {
  @deprecated("Use hashArray instead", "1.3.0")
  def hash: List[Byte] = hashArray.toList
  // Raw hash bytes; the internal representation backing the deprecated `hash`.
  private[util] def hashArray: Array[Byte]
}

/** A [[FileInfo]] that records the file's last-modified timestamp (millis). */
sealed trait ModifiedFileInfo extends FileInfo { def lastModified: Long }

/** A [[FileInfo]] that records only whether the file exists. */
sealed trait PlainFileInfo extends FileInfo { def exists: Boolean }

/** A [[FileInfo]] that records both a content hash and the last-modified time. */
sealed trait HashModifiedFileInfo extends HashFileInfo with ModifiedFileInfo

// The companions below expose the JSON codec for each trait by delegating to
// the corresponding FileInfo.Style's format.
object HashFileInfo {
  implicit val format: JsonFormat[HashFileInfo] = FileInfo.hash.format
}
object ModifiedFileInfo {
  implicit val format: JsonFormat[ModifiedFileInfo] = FileInfo.lastModified.format
}
object PlainFileInfo {
  implicit val format: JsonFormat[PlainFileInfo] = FileInfo.exists.format
}
object HashModifiedFileInfo {
  implicit val format: JsonFormat[HashModifiedFileInfo] = FileInfo.full.format
}
/** Existence-only snapshot implementation. */
private final case class PlainFile(file: File, exists: Boolean) extends PlainFileInfo

/** Last-modified-time snapshot implementation. */
private final case class FileModified(file: File, lastModified: Long) extends ModifiedFileInfo

/** Legacy List[Byte]-based hash representation, retained only for plugin compatibility. */
@deprecated("Kept for plugin compat, but will be removed in sbt 2.0", "1.3.0")
private final case class FileHash(file: File, override val hash: List[Byte]) extends HashFileInfo {
  override val hashArray: Array[Byte] = hash.toArray
}

/**
 * Hash snapshot backed by an `Array[Byte]`.
 *
 * Arrays compare by reference, so the case-class-generated `equals`/`hashCode`
 * would treat identical hashes held in distinct arrays as unequal; both are
 * overridden to compare the hash contents.
 */
private final case class FileHashArrayRepr(file: File, override val hashArray: Array[Byte])
  extends HashFileInfo {
  override def hashCode(): Int = (file, java.util.Arrays.hashCode(hashArray)).hashCode()
  override def equals(obj: Any): Boolean = obj match {
    case that: FileHashArrayRepr =>
      this.file == that.file && java.util.Arrays.equals(this.hashArray, that.hashArray)
    case _ => false
  }
}

/** Legacy List[Byte]-based hash+mtime representation, retained only for plugin compatibility. */
@deprecated("Kept for plugin compat, but will be removed in sbt 2.0", "1.3.0")
private final case class FileHashModified(
  file: File,
  override val hash: List[Byte],
  lastModified: Long
) extends HashModifiedFileInfo {
  override val hashArray: Array[Byte] = hash.toArray
}

/**
 * Hash+mtime snapshot backed by an `Array[Byte]`.
 *
 * `equals`/`hashCode` are overridden for the same reason as in
 * [[FileHashArrayRepr]]: the generated implementations would compare the hash
 * array by reference, so two snapshots with byte-identical hashes would not be
 * equal and caches comparing them would spuriously report a change.
 */
private final case class FileHashModifiedArrayRepr(
  file: File,
  override val hashArray: Array[Byte],
  lastModified: Long
) extends HashModifiedFileInfo {
  override def hashCode(): Int =
    (file, java.util.Arrays.hashCode(hashArray), lastModified).hashCode()
  override def equals(obj: Any): Boolean = obj match {
    case that: FileHashModifiedArrayRepr =>
      this.file == that.file &&
        java.util.Arrays.equals(this.hashArray, that.hashArray) &&
        this.lastModified == that.lastModified
    case _ => false
  }
}
/** An immutable set of [[FileInfo]] snapshots; constructed via [[FileInfo.Style.apply]]. */
final case class FilesInfo[F <: FileInfo] private (files: Set[F])

object FilesInfo {
  /** The empty snapshot set. */
  def empty[F <: FileInfo]: FilesInfo[F] = FilesInfo(Set.empty[F])
  // Serializes FilesInfo as the underlying Set[F], given a format for F.
  implicit def format[F <: FileInfo: JsonFormat]: JsonFormat[FilesInfo[F]] =
    projectFormat(_.files, (fs: Set[F]) => FilesInfo(fs))
  // Aliases to the snapshot styles defined on FileInfo.
  def full: FileInfo.Style = FileInfo.full
  def hash: FileInfo.Style = FileInfo.hash
  def lastModified: FileInfo.Style = FileInfo.lastModified
  def exists: FileInfo.Style = FileInfo.exists
}
object FileInfo {

  /**
   * Stores byte arrays as hex encoded strings, but falls back to reading an array of integers,
   * which is how it used to be stored, if that fails.
   */
  implicit val byteArrayFormat: JsonFormat[Array[Byte]] = new JsonFormat[Array[Byte]] {
    override def read[J](jsOpt: Option[J], unbuilder: Unbuilder[J]): Array[Byte] = {
      jsOpt match {
        case Some(js) =>
          try {
            Hash.fromHex(unbuilder.readString(js))
          } catch {
            case _: DeserializationException =>
              // Legacy cache files stored the hash as a JSON array of ints;
              // retry with the old codec so existing caches remain readable.
              CacheImplicits.arrayFormat[Byte].read(jsOpt, unbuilder)
          }
        case None => Array.empty
      }
    }
    override def write[J](obj: Array[Byte], builder: Builder[J]): Unit = {
      builder.writeString(Hash.toHex(obj))
    }
  }

  /**
   * A snapshotting strategy: determines which attributes of a file are captured
   * in its `F <: FileInfo` and provides the JSON codec for `F`. Concrete styles
   * are [[full]], [[hash]], [[lastModified]] and [[exists]] below.
   */
  sealed trait Style {
    type F <: FileInfo
    implicit def format: JsonFormat[F]
    // Codec for a whole set of snapshots, derived from the single-file format.
    implicit def formats: JsonFormat[FilesInfo[F]] =
      projectFormat(_.files, (fs: Set[F]) => FilesInfo(fs))
    /** Take a snapshot of `file`. */
    def apply(file: File): F
    /** Take snapshots of all `files`. */
    def apply(files: Set[File]): FilesInfo[F] = FilesInfo(files map apply)
    // Extractors recovering the underlying File(s) from snapshots.
    def unapply(info: F): File = info.file
    def unapply(infos: FilesInfo[F]): Set[File] = infos.files map (_.file)
  }

  /** Captures both the content hash and the last-modified time. */
  object full extends Style {
    type F = HashModifiedFileInfo
    implicit val format: JsonFormat[HashModifiedFileInfo] = new JsonFormat[HashModifiedFileInfo] {
      def write[J](obj: HashModifiedFileInfo, builder: Builder[J]) = {
        builder.beginObject()
        builder.addField("file", obj.file)
        builder.addField("hash", obj.hashArray)
        builder.addField("lastModified", obj.lastModified)
        builder.endObject()
      }
      def read[J](jsOpt: Option[J], unbuilder: Unbuilder[J]) = jsOpt match {
        case Some(js) =>
          unbuilder.beginObject(js)
          val file = unbuilder.readField[File]("file")
          val hash = unbuilder.readField[Array[Byte]]("hash")
          val lastModified = unbuilder.readField[Long]("lastModified")
          unbuilder.endObject()
          // The file is used as deserialized; apply() normalizes paths to
          // absolute before snapshots are written, so this is presumably
          // already absolute — confirm if paths can come from elsewhere.
          FileHashModifiedArrayRepr(file, hash, lastModified)
        case None => deserializationError("Expected JsObject but found None")
      }
    }
    // NOTE(review): unlike `hash.apply`, this does not guard Hash(file) with
    // NonFatal, so an unreadable file throws here rather than yielding an
    // empty hash — confirm the asymmetry is intentional.
    implicit def apply(file: File): HashModifiedFileInfo =
      FileHashModifiedArrayRepr(file.getAbsoluteFile, Hash(file), IO.getModifiedTimeOrZero(file))
    /** Build a snapshot from already-known attributes without touching the filesystem. */
    def apply(file: File, hash: Array[Byte], lastModified: Long): HashModifiedFileInfo =
      FileHashModifiedArrayRepr(file.getAbsoluteFile, hash, lastModified)
  }

  /** Captures only the content hash. */
  object hash extends Style {
    type F = HashFileInfo
    implicit val format: JsonFormat[HashFileInfo] = new JsonFormat[HashFileInfo] {
      def write[J](obj: HashFileInfo, builder: Builder[J]) = {
        builder.beginObject()
        builder.addField("file", obj.file)
        builder.addField("hash", obj.hashArray)
        builder.endObject()
      }
      def read[J](jsOpt: Option[J], unbuilder: Unbuilder[J]) = jsOpt match {
        case Some(js) =>
          unbuilder.beginObject(js)
          val file = unbuilder.readField[File]("file")
          val hash = unbuilder.readField[Array[Byte]]("hash")
          unbuilder.endObject()
          FileHashArrayRepr(file, hash)
        case None => deserializationError("Expected JsObject but found None")
      }
    }
    implicit def apply(file: File): HashFileInfo =
      FileHashArrayRepr(file.getAbsoluteFile, computeHash(file))
    /** Build a snapshot from an already-known hash without touching the filesystem. */
    def apply(file: File, bytes: Array[Byte]): HashFileInfo =
      FileHashArrayRepr(file.getAbsoluteFile, bytes)
    // Best-effort: an unreadable/missing file yields an empty hash rather
    // than propagating the exception.
    private def computeHash(file: File): Array[Byte] =
      try Hash(file)
      catch { case NonFatal(_) => Array.empty }
  }

  /** Captures only the last-modified time. */
  object lastModified extends Style {
    type F = ModifiedFileInfo
    implicit val format: JsonFormat[ModifiedFileInfo] = new JsonFormat[ModifiedFileInfo] {
      override def read[J](jsOpt: Option[J], unbuilder: Unbuilder[J]): ModifiedFileInfo =
        jsOpt match {
          case Some(js) =>
            unbuilder.beginObject(js)
            val file = unbuilder.readField[File]("file")
            val lastModified = unbuilder.readField[Long]("lastModified")
            unbuilder.endObject()
            FileModified(file, lastModified)
          case None =>
            deserializationError("Expected JsObject but found None")
        }
      override def write[J](obj: ModifiedFileInfo, builder: Builder[J]): Unit = {
        builder.beginObject()
        builder.addField("file", obj.file)
        builder.addField("lastModified", obj.lastModified)
        builder.endObject()
      }
    }
    implicit def apply(file: File): ModifiedFileInfo =
      FileModified(file.getAbsoluteFile, IO.getModifiedTimeOrZero(file))
    /** Build a snapshot from an already-known timestamp without touching the filesystem. */
    def apply(file: File, lastModified: Long): ModifiedFileInfo =
      FileModified(file.getAbsoluteFile, lastModified)

    /**
     * Returns an instance of [[FileModified]] where, for any directory, the maximum last
     * modified time taken from its contents is used rather than the last modified time of the
     * directory itself. The specific motivation was to prevent the doc task from re-running when
     * the modified time changed for a directory classpath but none of the classfiles had changed.
     *
     * @param file the file or directory
     * @return the [[FileModified]]
     */
    private[sbt] def fileOrDirectoryMax(file: File): ModifiedFileInfo = {
      val maxLastModified =
        // Recursively (via the ** glob) take the max mtime over all non-directory
        // entries; directories themselves contribute 0.
        if (file.isDirectory) FileTreeView.default.list(file.toGlob / **).foldLeft(0L) {
          case (max, (path, attributes)) =>
            val lm = if (!attributes.isDirectory) IO.getModifiedTimeOrZero(path.toFile) else 0L
            if (lm > max) lm else max
        }
        else IO.getModifiedTimeOrZero(file)
      FileModified(file, maxLastModified)
    }
  }

  /** Captures only whether the file exists. */
  object exists extends Style {
    type F = PlainFileInfo
    implicit val format: JsonFormat[PlainFileInfo] = new JsonFormat[PlainFileInfo] {
      def write[J](obj: PlainFileInfo, builder: Builder[J]): Unit = {
        builder.beginObject()
        builder.addField("file", obj.file)
        builder.addField("exists", obj.exists)
        builder.endObject()
      }
      def read[J](jsOpt: Option[J], unbuilder: Unbuilder[J]) = jsOpt match {
        case Some(js) =>
          unbuilder.beginObject(js)
          val file = unbuilder.readField[File]("file")
          val exists = unbuilder.readField[Boolean]("exists")
          unbuilder.endObject()
          PlainFile(file, exists)
        case None => deserializationError("Expected JsObject but found None")
      }
    }
    implicit def apply(file: File): PlainFileInfo = {
      val abs = file.getAbsoluteFile
      PlainFile(abs, abs.exists)
    }
    /** Build a snapshot from an already-known existence flag without touching the filesystem. */
    def apply(file: File, exists: Boolean): PlainFileInfo = {
      val abs = file.getAbsoluteFile
      PlainFile(abs, exists)
    }
  }
}
| xuwei-k/xsbt | util-cache/src/main/scala/sbt/util/FileInfo.scala | Scala | apache-2.0 | 9,887 |
package org.scalaide.core.resources
/** Attribute keys for Scala-IDE problem markers. */
object ScalaMarkers {

  /**
   * Represents the full error message found by the typechecker or by the
   * builder. The full error message needs to be treated separately to a
   * truncated error message because the Problems view in Eclipse can't handle
   * multi line error messages.
   */
  // Marker attribute key under which the untruncated message is stored.
  final val FullErrorMessage = "fullErrorMessage"
}
package vexriscv.demo
import spinal.core._
import spinal.lib._
import spinal.lib.bus.avalon.AvalonMM
import spinal.lib.eda.altera.{InterruptReceiverTag, QSysify, ResetEmitterTag}
import vexriscv.ip.{DataCacheConfig, InstructionCacheConfig}
import vexriscv.plugin._
import vexriscv.{VexRiscv, VexRiscvConfig, plugin}
/**
* Created by spinalvm on 14.07.17.
*/
//class VexRiscvAvalon(debugClockDomain : ClockDomain) extends Component{
//
//}
// make clean run DBUS=CACHED_WISHBONE IBUS=CACHED_WISHBONE MMU=no CSR=no DEBUG_PLUGIN=no
/**
 * Generates Verilog for a VexRiscv core configured for simulation: 4 KiB,
 * direct-mapped (wayCount = 1, 32 bytes/line) instruction and data caches,
 * with both buses re-exposed as Wishbone masters (`iBusWishbone` /
 * `dBusWishbone`). A QSys TCL integration script is also emitted via QSysify.
 */
object VexRiscvCachedWishboneForSim{
  def main(args: Array[String]) {
    val report = SpinalVerilog{

      //CPU configuration
      val cpuConfig = VexRiscvConfig(
        plugins = List(
          // Simple (uncached) bus plugins, kept for reference:
          //          new IBusSimplePlugin(
          //            resetVector = 0x80000000l,
          //            prediction = STATIC
          //          ),
          //          new DBusSimplePlugin(
          //            catchAddressMisaligned = false,
          //            catchAccessFault = false
          //          ),
          new IBusCachedPlugin(
            resetVector = 0x80000000l,
            prediction = STATIC,
            config = InstructionCacheConfig(
              cacheSize = 4096,
              bytePerLine =32,
              wayCount = 1,
              addressWidth = 32,
              cpuDataWidth = 32,
              memDataWidth = 32,
              catchIllegalAccess = true,
              catchAccessFault = true,
              asyncTagMemory = false,
              twoCycleRam = true
            )
            //            askMemoryTranslation = true,
            //            memoryTranslatorPortConfig = MemoryTranslatorPortConfig(
            //              portTlbSize = 4
            //            )
          ),
          new DBusCachedPlugin(
            config = new DataCacheConfig(
              cacheSize = 4096,
              bytePerLine = 32,
              wayCount = 1,
              addressWidth = 32,
              cpuDataWidth = 32,
              memDataWidth = 32,
              catchAccessError = true,
              catchIllegal = true,
              catchUnaligned = true
            ),
            dBusCmdMasterPipe = true, //required for wishbone
            memoryTranslatorPortConfig = null
            //            memoryTranslatorPortConfig = MemoryTranslatorPortConfig(
            //              portTlbSize = 6
            //            )
          ),
          new StaticMemoryTranslatorPlugin(
            // Addresses with the top nibble 0xF are treated as the I/O region.
            ioRange      = _(31 downto 28) === 0xF
          ),
          new DecoderSimplePlugin(
            catchIllegalInstruction = true
          ),
          new RegFilePlugin(
            regFileReadyKind = plugin.SYNC,
            zeroBoot = false
          ),
          new IntAluPlugin,
          new SrcPlugin(
            separatedAddSub = false,
            executeInsertion = true
          ),
          new FullBarrelShifterPlugin,
          new MulPlugin,
          new DivPlugin,
          new HazardSimplePlugin(
            bypassExecute           = true,
            bypassMemory            = true,
            bypassWriteBack         = true,
            bypassWriteBackBuffer   = true,
            pessimisticUseSrc       = false,
            pessimisticWriteRegFile = false,
            pessimisticAddressMatch = false
          ),
          //          new DebugPlugin(ClockDomain.current.clone(reset = Bool().setName("debugReset"))),
          new BranchPlugin(
            earlyBranch = false,
            catchAddressMisaligned = true
          ),
          new CsrPlugin(
            config = CsrPluginConfig.small(mtvecInit = 0x80000020l)
          ),
          new YamlPlugin("cpu0.yaml")
        )
      )

      //CPU instanciation
      val cpu = new VexRiscv(cpuConfig)

      //CPU modifications to be an Avalon one
      //cpu.setDefinitionName("VexRiscvAvalon")
      cpu.rework {
        // Unset the IO direction of each native bus and re-expose it as a
        // Wishbone master. The Simple-bus cases are unreachable with the
        // cached config above but keep this rework generic.
        for (plugin <- cpuConfig.plugins) plugin match {
          case plugin: IBusSimplePlugin => {
            plugin.iBus.setAsDirectionLess() //Unset IO properties of iBus
            master(plugin.iBus.toWishbone()).setName("iBusWishbone")
          }
          case plugin: IBusCachedPlugin => {
            plugin.iBus.setAsDirectionLess()
            master(plugin.iBus.toWishbone()).setName("iBusWishbone")
          }
          case plugin: DBusSimplePlugin => {
            plugin.dBus.setAsDirectionLess()
            master(plugin.dBus.toWishbone()).setName("dBusWishbone")
          }
          case plugin: DBusCachedPlugin => {
            plugin.dBus.setAsDirectionLess()
            master(plugin.dBus.toWishbone()).setName("dBusWishbone")
          }
          case _ =>
        }
      }
      cpu
    }

    //Generate the QSys TCL script to integrate the CPU
    QSysify(report.toplevel)
  }
}
| SpinalHDL/VexRiscv | src/main/scala/vexriscv/demo/VexRiscvCachedWishboneForSim.scala | Scala | mit | 4,794 |
package com.hanhuy.android.protify
import android.app.Activity
import android.os.Bundle
/** Minimal activity that installs `R.layout.main` as its content view on creation. */
class MainActivity extends Activity {
  override def onCreate(savedInstanceState: Bundle): Unit = {
    super.onCreate(savedInstanceState)
    // R.layout.main is the generated resource id for the main layout.
    setContentView(R.layout.main)
  }
}
| tek/protify | android/src/main/scala/com/hanhuy/android/protify/MainActivity.scala | Scala | apache-2.0 | 275 |
// Copyright (C) 2011-2012 the original author or authors.
// See the LICENCE.txt file distributed with this work for additional
// information regarding copyright ownership.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.scalastyle
import java.lang.reflect.Modifier
import scala.collection.convert.WrapAsScala.asScalaSet
import scala.collection.convert.WrapAsScala.asScalaBuffer
import org.junit.Test
import org.scalatest.junit.AssertionsForJUnit
import com.typesafe.config.ConfigFactory
import java.lang.reflect.Method
import com.google.common.reflect.ClassPath
// scalastyle:off multiple.string.literals
/**
 * Verifies that every concrete [[Checker]] implementation found on the
 * classpath is declared in `scalastyle_definition.xml`, and that each declared
 * checker's `errorKey` matches the `id` configured for it in the XML.
 */
class ScalastyleDefinitionTest extends AssertionsForJUnit {

  /** True if `ci` names a concrete (non-abstract) `Checker` under the org.scalastyle package. */
  private def isChecker(ci: ClassPath.ClassInfo) = {
    ci.getName().startsWith("org.scalastyle") && {
      val clazz = ci.load
      classOf[Checker[_]].isAssignableFrom(clazz) && !Modifier.isAbstract(clazz.getModifiers())
    }
  }

  @Test
  def checkAllCheckersInScalastyleDefinition(): Unit = {
    val classLoader = this.getClass().getClassLoader()
    val cp = ClassPath.from(classLoader)
    val subTypes = asScalaBuffer(cp.getAllClasses().asList()).filter(isChecker)
    val definition = ScalastyleDefinition.readFromXml(classLoader.getResourceAsStream("scalastyle_definition.xml"))
    val checkers = definition.checkers
    // NOTE(review): the original also built a MessageHelper(ConfigFactory.load())
    // that was never read; removed as an unused local.

    // Every concrete checker class must be listed in the XML definition.
    val missing = subTypes.map(_.getName()).diff(checkers.map(c => c.className))
    assert(missing.isEmpty, "scalastyle_definition does not contain " + missing)

    checkers.foreach { c =>
      // getDeclaredConstructor().newInstance() replaces the deprecated
      // Class#newInstance, which swallows checked constructor exceptions.
      val checker = Class.forName(c.className).getDeclaredConstructor().newInstance().asInstanceOf[Checker[_]]
      val errorKey = checker.getClass().getMethod("errorKey").invoke(checker)
      assert(errorKey == c.id, "errorKey and id do not match for " + c.className)
    }
  }
}
| firebase/scalastyle | src/test/scala/org/scalastyle/ScalastyleDefinitionTest.scala | Scala | apache-2.0 | 2,408 |
package com.telegram.api
/**
 * Metadata for a file stored on Telegram's servers, ready to be downloaded.
 *
 * Introduced with the September 18, 2015 Bot API update, which added
 * getFile (https://core.telegram.org/bots/api#getFile) and
 * File (https://core.telegram.org/bots/api#file).
 *
 * The file itself can be fetched from
 * https://api.telegram.org/file/bot<token>/<file_path>. The link is guaranteed
 * valid for at least 1 hour; once it expires, a fresh one must be requested by
 * calling getFile again. Maximum downloadable file size is 20 MB.
 *
 * @param fileId   unique identifier for this file
 * @param fileSize file size, if known
 * @param filePath file path, if known; use
 *                 https://api.telegram.org/file/bot<token>/<file_path> to get the file
 */
case class TelegramFile(
  fileId: String,
  fileSize: Option[Int],
  filePath: Option[String]
)
package rocks.muki.graphql.schema
/** A named reference to one of sangria's `SchemaFilter` values. */
sealed trait SchemaFilterName {
  def name: String
}

/*
 * The set of filters available to be used in conjunction with `renderPretty` && `renderCompact`.
 * These mirror the `SchemaFilter`(s) defined in sangria, see:
 * https://github.com/sangria-graphql/sangria/blob/343d7a59eeb9392573751306f2b485bca2bee75f/src/main/scala/sangria/renderer/SchemaRenderer.scala#L298-L323
 */
object SchemaFilters {

  // Fully-qualified prefix shared by every sangria SchemaFilter reference.
  private val Prefix = "sangria.renderer.SchemaFilter"

  case object WithoutSangriaBuiltIn extends SchemaFilterName {
    val name: String = s"$Prefix.withoutSangriaBuiltIn"
  }

  case object WithoutGraphQLBuiltIn extends SchemaFilterName {
    val name: String = s"$Prefix.withoutGraphQLBuiltIn"
  }

  case object WithoutIntrospection extends SchemaFilterName {
    val name: String = s"$Prefix.withoutIntrospection"
  }

  case object BuiltIn extends SchemaFilterName {
    val name: String = s"$Prefix.builtIn"
  }

  case object Introspection extends SchemaFilterName {
    val name: String = s"$Prefix.introspection"
  }

  case object All extends SchemaFilterName {
    val name: String = s"$Prefix.all"
  }
}
| muuki88/sbt-graphql | src/main/scala/rocks/muki/graphql/schema/SchemaFilters.scala | Scala | apache-2.0 | 1,188 |
package eventstore
import akka.actor.Status.Failure
import akka.stream.actor.ActorPublisherMessage.{ Cancel, Request }
import akka.stream.actor.ActorSubscriberMessage.{ OnError, OnComplete, OnNext }
import akka.stream.actor.{ ActorPublisher, ActorSubscriber }
import eventstore.ReadDirection.Forward
import scala.concurrent.duration._
/**
 * Behavioural tests for the `AllStreamsPublisher` actor.
 *
 * Each scenario drives the publisher through its connection `TestProbe`
 * (read requests, subscription confirmations, appearing events) and asserts
 * the reactive-streams signals it emits (`OnNext`, `OnComplete`, `OnError`)
 * as well as actor termination.
 */
class AllStreamsPublisherSpec extends AbstractSubscriptionActorSpec {
  "AllStreamsPublisher" should {

    "read events from given position" in new SubscriptionScope {
      connection expectMsg readEvents(123)
      override def position = Some(Position(123))
    }

    "read events from start if no position given" in new SubscriptionScope {
      connection expectMsg readEvents(0)
    }

    "subscribe if last position given" in new SubscriptionScope {
      connection expectMsg subscribeTo
      actor ! subscribeCompleted(0)
      connection.expectNoMsg(duration)
      // event0 is at/before the confirmed position, so only event1/event2 are published
      actor ! StreamEventAppeared(event1)
      actor ! StreamEventAppeared(event0)
      actor ! StreamEventAppeared(event2)
      expectEvent(event1)
      expectEvent(event2)
      override def position = Some(Position.Last)
    }

    "ignore read events with position out of interest" in new SubscriptionScope {
      connection expectMsg readEvents(0)
      actor ! readCompleted(0, 3, event0, event1, event2)
      expectEvent(event0)
      expectEvent(event1)
      expectEvent(event2)
      connection expectMsg readEvents(3)
      actor ! readCompleted(3, 5, event0, event1, event2, event3, event4)
      expectEvent(event3)
      expectEvent(event4)
      connection expectMsg readEvents(5)
      actor ! readCompleted(3, 5, event0, event1, event2, event3, event4)
      expectNoMsg(duration)
      connection expectMsg readEvents(5)
    }

    "ignore read events with position out of interest when start position is given" in new SubscriptionScope {
      connection expectMsg readEvents(1)
      actor ! readCompleted(0, 3, event0, event1, event2)
      expectEvent(event2)
      expectNoMsg(duration)
      connection expectMsg readEvents(3)
      override def position = Some(Position(1))
    }

    "read events until none left and subscribe to new ones" in new SubscriptionScope {
      connection expectMsg readEvents(0)
      val nextPosition = 2
      actor ! readCompleted(1, nextPosition, event1)
      expectEvent(event1)
      connection expectMsg readEvents(nextPosition)
      actor ! readCompleted(nextPosition, nextPosition)
      connection.expectMsg(subscribeTo)
    }

    "subscribe to new events if nothing to read" in new SubscriptionScope {
      connection expectMsg readEvents(0)
      actor ! readCompleted(0, 0)
      connection.expectMsg(subscribeTo)
      actor ! subscribeCompleted(1)
      connection expectMsg readEvents(0)
      actor ! readCompleted(0, 0)
    }

    "stop reading events as soon as stop received" in new SubscriptionScope {
      connection expectMsg readEvents(0)
      system stop actor
      expectMsg(OnComplete)
      expectTerminated(actor)
    }

    "catch events that appear in between reading and subscribing" in new SubscriptionScope {
      connection expectMsg readEvents(0)
      actor ! readCompleted(0, 2, event0, event1)
      expectEvent(event0)
      expectEvent(event1)
      connection expectMsg readEvents(2)
      actor ! readCompleted(2, 2)
      expectNoMsg(duration)
      connection.expectMsg(subscribeTo)
      actor ! subscribeCompleted(4)
      connection expectMsg readEvents(2)
      actor ! StreamEventAppeared(event2)
      actor ! StreamEventAppeared(event3)
      actor ! StreamEventAppeared(event4)
      expectNoMsg(duration)
      actor ! readCompleted(2, 3, event1, event2)
      expectEvent(event2)
      connection expectMsg readEvents(3)
      actor ! StreamEventAppeared(event5)
      actor ! StreamEventAppeared(event6)
      expectNoMsg(duration)
      actor ! readCompleted(3, 6, event3, event4, event5)
      expectEvent(event3)
      expectEvent(event4)
      expectEvent(event5)
      expectEvent(event6)
      // already published positions are not re-emitted
      actor ! StreamEventAppeared(event5)
      actor ! StreamEventAppeared(event6)
      expectNoActivity()
    }

    "stop subscribing if stop received when subscription not yet confirmed" in new SubscriptionScope {
      connection expectMsg readEvents(0)
      actor ! readCompleted(0, 0)
      connection.expectMsg(subscribeTo)
      system stop actor
      expectMsg(OnComplete)
      expectTerminated(actor)
    }

    "not unsubscribe if subscription failed if stop received " in new SubscriptionScope {
      connection expectMsg readEvents(0)
      actor ! readCompleted(0, 0)
      connection.expectMsg(subscribeTo)
      system stop actor
      expectMsg(OnComplete)
      expectTerminated(actor)
    }

    "stop catching events that appear in between reading and subscribing if stop received" in new SubscriptionScope {
      connection expectMsg readEvents(0)
      actor ! readCompleted(0, 2, event0, event1)
      expectEvent(event0)
      expectEvent(event1)
      connection expectMsg readEvents(2)
      actor ! readCompleted(2, 2)
      expectNoMsg(duration)
      connection.expectMsg(subscribeTo)
      actor ! subscribeCompleted(5)
      connection expectMsg readEvents(2)
      actor ! StreamEventAppeared(event3)
      actor ! StreamEventAppeared(event4)
      system stop actor
      expectMsg(OnComplete)
      expectTerminated(actor)
    }

    "continue with subscription if no events appear in between reading and subscribing" in new SubscriptionScope {
      connection expectMsg readEvents(0)
      actor ! readCompleted(0, 0)
      connection.expectMsg(subscribeTo)
      expectNoMsg(duration)
      actor ! subscribeCompleted(1)
      connection expectMsg readEvents(0)
      actor ! readCompleted(0, 0)
      expectNoActivity()
    }

    "continue with subscription if no events appear in between reading and subscribing and position is given" in
      new SubscriptionScope {
        connection expectMsg readEvents(1)
        actor ! readCompleted(1, 1)
        connection.expectMsg(subscribeTo)
        expectNoMsg(duration)
        actor ! subscribeCompleted(1)
        expectNoActivity()
        override def position = Some(Position(1))
      }

    "forward events while subscribed" in new SubscriptionScope {
      connection expectMsg readEvents(0)
      actor ! readCompleted(0, 0)
      connection.expectMsg(subscribeTo)
      expectNoMsg(duration)
      actor ! subscribeCompleted(1)
      connection expectMsg readEvents(0)
      actor ! readCompleted(0, 0)
      actor ! StreamEventAppeared(event1)
      expectEvent(event1)
      expectNoMsg(duration)
      actor ! StreamEventAppeared(event2)
      actor ! StreamEventAppeared(event3)
      expectEvent(event2)
      expectEvent(event3)
    }

    "ignore wrong events while subscribed" in new SubscriptionScope {
      connection expectMsg readEvents(1)
      actor ! readCompleted(1, 1)
      connection.expectMsg(subscribeTo)
      actor ! subscribeCompleted(2)
      connection expectMsg readEvents(1)
      actor ! readCompleted(1, 1)
      // duplicates and out-of-order (stale) positions must be dropped
      actor ! StreamEventAppeared(event0)
      actor ! StreamEventAppeared(event1)
      actor ! StreamEventAppeared(event1)
      actor ! StreamEventAppeared(event2)
      expectEvent(event2)
      actor ! StreamEventAppeared(event2)
      actor ! StreamEventAppeared(event1)
      actor ! StreamEventAppeared(event3)
      expectEvent(event3)
      actor ! StreamEventAppeared(event5)
      expectEvent(event5)
      actor ! StreamEventAppeared(event4)
      expectNoMsg(duration)
      override def position = Some(Position(1))
    }

    "stop subscription when stop received" in new SubscriptionScope {
      connection expectMsg readEvents(1)
      actor ! readCompleted(1, 1)
      connection.expectMsg(subscribeTo)
      actor ! subscribeCompleted(1)
      actor ! StreamEventAppeared(event2)
      expectEvent(event2)
      system stop actor
      expectMsg(OnComplete)
      expectTerminated(actor)
      override def position = Some(Position(1))
    }

    "stop actor if connection stopped" in new SubscriptionScope {
      connection expectMsg readEvents(0)
      system stop connection.ref
      expectMsg(OnComplete)
      expectTerminated(actor)
    }

    "not stop subscription if actor stopped and not yet subscribed" in new SubscriptionScope {
      connection expectMsg readEvents(0)
      system stop actor
      expectMsg(OnComplete)
      expectTerminated(actor)
    }

    "stop actor if error while reading" in new SubscriptionScope {
      connection expectMsg readEvents(0)
      expectTerminatedOnFailure()
    }

    "stop actor if error while subscribing" in new SubscriptionScope {
      connection expectMsg readEvents(0)
      actor ! readCompleted(0, 0)
      connection expectMsg subscribeTo
      expectTerminatedOnFailure()
      override def position = Some(Position(0))
    }

    "stop actor if error while catching up" in new SubscriptionScope {
      connection expectMsg readEvents(0)
      actor ! readCompleted(0, 0)
      connection expectMsg subscribeTo
      actor ! subscribeCompleted(0)
      connection expectMsg readEvents(0)
      expectTerminatedOnFailure()
    }

    "stop actor if error while live processing" in new SubscriptionScope {
      connection expectMsg readEvents(0)
      actor ! readCompleted(0, 0)
      connection expectMsg subscribeTo
      actor ! subscribeCompleted(0)
      expectTerminatedOnFailure()
      override def position = Some(Position(0))
    }

    "resubscribe from same position" in new SubscriptionScope {
      connection expectMsg readEvents(0)
      actor ! readCompleted(0, 0)
      connection expectMsg subscribeTo
      actor ! subscribeCompleted(0)
      actor ! subscribeCompleted(0)
      expectNoActivity()
      override def position = Some(Position(0))
    }

    "resubscribe from different position" in new SubscriptionScope {
      connection expectMsg readEvents(0)
      actor ! readCompleted(0, 0)
      connection expectMsg subscribeTo
      actor ! subscribeCompleted(1)
      connection expectMsg readEvents(0)
      actor ! StreamEventAppeared(event1)
      actor ! StreamEventAppeared(event2)
      actor ! readCompleted(0, 3, event0, event1, event2)
      expectEvent(event1)
      expectEvent(event2)
      override def position = Some(Position(0))
    }

    "ignore resubscribed while catching up" in new SubscriptionScope {
      connection expectMsg readEvents(0)
      actor ! readCompleted(0, 0)
      connection expectMsg subscribeTo
      actor ! subscribeCompleted(0)
      connection expectMsg readEvents(0)
      actor ! StreamEventAppeared(event1)
      actor ! StreamEventAppeared(event2)
      actor ! StreamEventAppeared(event3)
      actor ! subscribeCompleted(1)
      actor ! StreamEventAppeared(event1)
      actor ! StreamEventAppeared(event2)
      actor ! StreamEventAppeared(event3)
      actor ! readCompleted(0, 3, event0, event1, event2)
      expectEvent(event0)
      expectEvent(event1)
      expectEvent(event2)
    }

    "use credentials if given" in new SubscriptionScope {
      connection expectMsg readEvents(0).withCredentials(credentials.get)
      actor ! readCompleted(0, 0)
      connection expectMsg subscribeTo.withCredentials(credentials.get)
      override def credentials = Some(UserCredentials("login", "password"))
    }

    "cancel while reading" in new SubscriptionScope {
      connection expectMsg readEvents(0)
      actor ! Cancel
      expectTerminated(actor)
    }

    "cancel while subscribing" in new SubscriptionScope {
      connection expectMsg readEvents(0)
      actor ! readCompleted(0, 0)
      connection expectMsg subscribeTo
      actor ! Cancel
      expectTerminated(actor)
    }

    "cancel while subscribed" in new SubscriptionScope {
      connection expectMsg readEvents(0)
      actor ! readCompleted(0, 0)
      connection expectMsg subscribeTo
      actor ! subscribeCompleted(0)
      actor ! Cancel
      expectTerminated(actor)
    }

    "cancel while catching up" in new SubscriptionScope {
      connection expectMsg readEvents(0)
      actor ! readCompleted(0, 0)
      connection expectMsg subscribeTo
      actor ! subscribeCompleted(1)
      connection expectMsg readEvents(0)
      actor ! Cancel
      expectTerminated(actor)
    }
  }

  // Scenarios where the publisher is finite: it must complete instead of subscribing.
  "AllStreamsPublisher finite" should {

    "stop immediately if last position passed" in new FiniteSubscriptionScope {
      connection.expectNoMsg(duration)
      expectMsg(OnComplete)
      expectTerminated(actor)
      override def position = Some(Position.Last)
    }

    "stop when no more events left" in new FiniteSubscriptionScope {
      connection expectMsg readEvents(0)
      actor ! readCompleted(0, 0)
      expectMsg(OnComplete)
      expectTerminated(actor)
    }

    "stop when retrieved last event" in new FiniteSubscriptionScope {
      connection expectMsg readEvents(0)
      actor ! readCompleted(0, 2, event0, event1)
      expectEvent(event0)
      expectEvent(event1)
      connection expectMsg readEvents(2)
      actor ! readCompleted(2, 2)
      expectMsg(OnComplete)
      expectTerminated(actor)
    }
  }

  // Shared fixture: wires the publisher actor to a subscriber test probe and
  // provides helpers for building the read/subscribe protocol messages.
  private trait SubscriptionScope extends AbstractScope {

    def createActor() = {
      val props = AllStreamsPublisher.props(
        connection = connection.ref,
        fromPositionExclusive = position,
        resolveLinkTos = resolveLinkTos,
        credentials = credentials,
        readBatchSize = readBatchSize,
        infinite = infinite)
      val actor = system actorOf props
      val publisher = ActorPublisher[IndexedEvent](actor)
      val subscriber = ActorSubscriber[IndexedEvent](testActor)
      publisher subscribe subscriber
      // The OnSubscribe message type is not referenced directly here, so it is
      // matched by its fully-qualified class name.
      expectMsgType[Any].getClass.getName shouldEqual "akka.stream.actor.ActorSubscriber$OnSubscribe"
      actor
    }

    lazy val streamId = EventStream.All

    val event0 = newEvent(0)
    val event1 = newEvent(1)
    val event2 = newEvent(2)
    val event3 = newEvent(3)
    val event4 = newEvent(4)
    val event5 = newEvent(5)
    val event6 = newEvent(6)

    def infinite = true

    // Issues demand for one element and asserts it is published next.
    def expectEvent(x: IndexedEvent) = {
      actor ! Request(1)
      expectMsg(OnNext(x))
    }

    def newEvent(x: Long) = IndexedEvent(mock[Event], Position.Exact(x))

    def readEvents(x: Long) = ReadAllEvents(Position(x), readBatchSize, Forward, resolveLinkTos = resolveLinkTos)

    def readCompleted(position: Long, next: Long, events: IndexedEvent*) =
      ReadAllEventsCompleted(events.toList, Position.Exact(position), Position.Exact(next), Forward)

    def position: Option[Position] = None

    def subscribeCompleted(lastCommit: Long) = SubscribeToAllCompleted(lastCommit)

    override def expectTerminatedOnFailure() = {
      val failure = new ServerErrorException("test")
      actor ! Failure(failure)
      expectMsg(OnError(failure))
      expectTerminated(actor)
      val duration = 1.seconds
      expectNoMsg(duration)
      connection.expectNoMsg(duration)
    }
  }

  // Same fixture but with a finite (non-subscribing) publisher.
  private trait FiniteSubscriptionScope extends SubscriptionScope {
    override def infinite = false
  }
}
| pawelkaczor/EventStore.JVM | src/test/scala/eventstore/AllStreamsPublisherSpec.scala | Scala | bsd-3-clause | 15,186 |
package wandou.math.algebra
/**
 * The basic interface including numerous convenience functions <p/> NOTE: All implementing classes must have a
 * constructor that takes an int for cardinality and a no-arg constructor that can be used for marshalling the Writable
 * instance <p/> NOTE: Implementations may choose to reuse the Vector.Element in the Iterable methods
 */
trait Vector extends Iterable[Vector.Element] with Cloneable {

  // Alias so implementations can refer to `Element` without the `Vector.` prefix.
  type Element = Vector.Element

  /**
   * @return a formatted String suitable for output
   */
  def asFormatString: String

  /**
   * Assign the value to all elements of the receiver
   *
   * @param value a Double value
   * @return the modified receiver
   */
  def assign(value: Double): Vector

  /**
   * Assign the values to the receiver
   *
   * @param values a Double[] of values
   * @return the modified receiver
   * @throws CardinalityException if the cardinalities differ
   */
  def assign(values: Array[Double]): Vector

  /**
   * Assign the other vector values to the receiver
   *
   * @param other a Vector
   * @return the modified receiver
   * @throws CardinalityException if the cardinalities differ
   */
  def assign(other: Vector): Vector

  /**
   * Apply the function to each element of the receiver
   *
   * @param function a Double => Double to apply
   * @return the modified receiver
   */
  def assign(function: Double => Double): Vector

  /**
   * Apply the function to each element of the receiver and the corresponding element of the other argument
   *
   * @param other a Vector containing the second arguments to the function
   * @param function a (Double, Double) => Double to apply
   * @return the modified receiver
   * @throws CardinalityException if the cardinalities differ
   */
  def assign(other: Vector, function: (Double, Double) => Double): Vector

  /**
   * Apply the function to each element of the receiver, using the y value as the second argument of the (Double, Double) => Double
   *
   * @param f a (Double, Double) => Double to be applied
   * @param y a Double value to be argument to the function
   * @return the modified receiver
   */
  def assign(f: (Double, Double) => Double, y: Double): Vector

  /**
   * Return the cardinality of the recipient (the maximum number of values)
   *
   * @return an int
   */
  def size: Int

  /**
   * @return true iff this implementation should be considered dense -- that it explicitly
   *  represents every value
   */
  def isDense: Boolean

  /**
   * @return true iff this implementation should be considered to be iterable in index order in an efficient way.
   *  In particular this implies that {@link #iterator()} and {@link #iterateNonZero()} return elements
   *  in ascending order by index.
   */
  def isSequentialAccess: Boolean

  /**
   * Return a copy of the recipient
   *
   * @return a new Vector
   */
  override def clone: Vector = {
    // Scala's compiler seems to complain that the clone method is the protected
    // one from Object instead of this overrided one when it's called outside the
    // protected scope. For instance:
    //   method clone in class Object cannot be accessed in ....
    //   Access to protected method clone not permitted because
    // To bypass it, we need to implement it with following statement
    // (concrete subclasses are expected to override this with a real copy).
    throw new CloneNotSupportedException
  }

  /**
   * Iterates over all elements <p/> * NOTE: Implementations may choose to reuse the Element returned for performance
   * reasons, so if you need a copy of it, you should call {@link #getElement(int)} for the given index
   *
   * @return An {@link Iterator} over all elements
   */
  def iterator: Iterator[Element]

  /**
   * Iterates over all non-zero elements. <p/> NOTE: Implementations may choose to reuse the Element returned for
   * performance reasons, so if you need a copy of it, you should call {@link #getElement(int)} for the given index
   *
   * @return An {@link Iterator} over all non-zero elements
   */
  def iterateNonZero: Iterator[Element]

  /**
   * Return an object of Vector.Element representing an element of this Vector. Useful when designing new iterator
   * types.
   *
   * @param index Index of the Vector.Element required
   * @return The Vector.Element Object
   */
  def getElement(index: Int): Element

  /**
   * Return a new vector containing the values of the recipient divided by the argument
   *
   * @param x a Double value
   * @return a new Vector
   */
  def divide(x: Double): Vector

  /**
   * Return the dot product of the recipient and the argument
   *
   * @param x a Vector
   * @return a new Vector
   * @throws CardinalityException if the cardinalities differ
   */
  def dot(x: Vector): Double

  /**
   * Return the value at the given index
   *
   * @param index an int index
   * @return the Double at the index
   * @throws IndexException if the index is out of bounds
   */
  def get(index: Int): Double

  /**
   * Return the value at the given index, without checking bounds
   *
   * @param index an int index
   * @return the Double at the index
   */
  def apply(index: Int): Double

  /**
   * Return an empty vector of the same underlying class as the receiver
   *
   * @return a Vector
   */
  def like(): Vector

  /**
   * Return a new vector containing the element by element difference of the recipient and the argument
   *
   * @param x a Vector
   * @return a new Vector
   * @throws CardinalityException if the cardinalities differ
   */
  def minus(x: Vector): Vector

  /**
   * Return a new vector containing the normalized (L_2 norm) values of the recipient
   *
   * @return a new Vector
   */
  def normalize: Vector

  /**
   * Return a new Vector containing the normalized (L_power norm) values of the recipient. <p/> See
   * http://en.wikipedia.org/wiki/Lp_space <p/> Technically, when 0 < power < 1, we don't have a norm, just a metric,
   * but we'll overload this here. <p/> Also supports power == 0 (number of non-zero elements) and power = {@link
   * Double#POSITIVE_INFINITY} (max element). Again, see the Wikipedia page for more info
   *
   * @param power The power to use. Must be >= 0. May also be {@link Double#POSITIVE_INFINITY}. See the Wikipedia link
   *              for more on this.
   * @return a new Vector x such that norm(x, power) == 1
   */
  def normalize(power: Double): Vector

  /**
   * Return a new vector containing the log(1 + entry)/ L_2 norm values of the recipient
   *
   * @return a new Vector
   */
  def logNormalize: Vector

  /**
   * Return a new Vector with a normalized value calculated as log_power(1 + entry)/ L_power norm. <p/>
   *
   * @param power The power to use. Must be > 1. Cannot be {@link Double#POSITIVE_INFINITY}.
   * @return a new Vector
   */
  def logNormalize(power: Double): Vector

  /**
   * Return the k-norm of the vector. <p/> See http://en.wikipedia.org/wiki/Lp_space <p/> Technically, when 0 <
   * power < 1, we don't have a norm, just a metric, but we'll overload this here. Also supports power == 0 (number of
   * non-zero elements) and power = {@link Double#POSITIVE_INFINITY} (max element). Again, see the Wikipedia page for
   * more info.
   *
   * @param power The power to use.
   * @see #normalize(Double)
   */
  def norm(power: Double): Double

  /** @return The minimum value in the Vector */
  def minValue: Double

  /** @return The index of the minimum value */
  def minValueIndex: Int

  /** @return The maximum value in the Vector */
  def maxValue: Double

  /** @return The index of the maximum value */
  def maxValueIndex: Int

  /**
   * Return a new vector containing the sum of each value of the recipient and the argument
   *
   * @param x a Double
   * @return a new Vector
   */
  def plus(x: Double): Vector

  /**
   * Return a new vector containing the element by element sum of the recipient and the argument
   *
   * @param x a Vector
   * @return a new Vector
   * @throws CardinalityException if the cardinalities differ
   */
  def plus(x: Vector): Vector

  /**
   * Set the value at the given index
   *
   * @param index an int index into the receiver
   * @param value a Double value to set
   * @throws IndexException if the index is out of bounds
   */
  def set(index: Int, value: Double)

  /**
   * Set the value at the given index, without checking bounds
   *
   * @param index an int index into the receiver
   * @param value a Double value to set
   */
  def update(index: Int, value: Double)

  /**
   * Return the number of values in the recipient which are not the default value. For instance, for a
   * sparse vector, this would be the number of non-zero values.
   *
   * @return an int
   */
  def getNumNondefaultElements: Int

  /**
   * Return a new vector containing the product of each value of the recipient and the argument
   *
   * @param x a Double argument
   * @return a new Vector
   */
  def times(x: Double): Vector

  /**
   * Return a new vector containing the element-wise product of the recipient and the argument
   *
   * @param x a Vector argument
   * @return a new Vector
   * @throws CardinalityException if the cardinalities differ
   */
  def times(x: Vector): Vector

  /**
   * Return a new vector containing the subset of the recipient
   *
   * @param offset an int offset into the receiver
   * @param length the cardinality of the desired result
   * @return a new Vector
   * @throws CardinalityException if the length is greater than the cardinality of the receiver
   * @throws IndexException if the offset is negative or the offset+length is outside of the receiver
   */
  def viewPart(offset: Int, length: Int): Vector

  /**
   * Return the sum of all the elements of the receiver
   *
   * @return a Double
   */
  def zSum: Double

  /**
   * Return the cross product of the receiver and the other vector
   *
   * @param other another Vector
   * @return a Matrix
   */
  def cross(other: Vector): Matrix

  /*
   * Need stories for these but keeping them here for now.
   */
  // void getNonZeros(IntArrayList jx, DoubleArrayList values)
  // void foreachNonZero(IntDoubleFunction f)
  // (Double, Double) => Double map)
  // NewVector assign(Vector y, (Double, Double) => Double function, IntArrayList
  // nonZeroIndexes)

  /**
   * Examples speak louder than words: aggregate(plus, pow(2)) is another way to say
   * getLengthSquared(), aggregate(max, abs) is norm(Double.POSITIVE_INFINITY). To sum all of the postive values,
   * aggregate(plus, max(0)).
   *
   * @param aggregator used to combine the current value of the aggregation with the result of map.apply(nextValue)
   * @param map a function to apply to each element of the vector in turn before passing to the aggregator
   * @return the final aggregation
   */
  def aggregate(aggregator: (Double, Double) => Double, map: Double => Double): Double

  /**
   * <p>Generalized inner product - take two vectors, iterate over them both, using the combiner to combine together
   * (and possibly map in some way) each pair of values, which are then aggregated with the previous accumulated
   * value in the combiner.</p>
   * <p>
   * Example: dot(other) could be expressed as aggregate(other, Plus, Times), and kernelized inner products (which
   * are symmetric on the indices) work similarly.
   *
   * @param other a vector to aggregate in combination with
   * @param aggregator
   * @param combiner
   * @return the final aggregation
   */
  def aggregate(other: Vector, aggregator: (Double, Double) => Double, combiner: (Double, Double) => Double): Double

  /** Return the sum of squares of all elements in the vector. Square root of this value is the length of the vector. */
  def getLengthSquared: Double

  /** Get the square of the distance between this vector and the other vector. */
  def getDistanceSquared(v: Vector): Double
}
object Vector {

  /**
   * A holder for information about a specific item in the Vector. <p/> When using with an Iterator, the implementation
   * may choose to reuse this element, so you may need to make a copy if you want to keep it
   */
  trait Element {

    /** @return the value of this vector element. */
    def get: Double

    /** @return the index of this vector element. */
    def index: Int

    /** @param value Set the current element to value. */
    def set(value: Double)
  }
}
package rpm4s
import org.scalatest._
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import org.scalatestplus.scalacheck.ScalaCheckPropertyChecks
import rpm4s.data.{Lead, OS, RPMType}
import rpm4s.codecs._
/** Round-trip check for the RPM [[Lead]] header codec. */
class LeadSpec extends AnyFlatSpec with Matchers with ScalaCheckPropertyChecks with CustomMatchers {

  "Lead" should "roundtrip" in {
    // `roundtrip` comes from CustomMatchers — presumably encode/decode symmetry; confirm there.
    roundtrip(Lead(3, 0, RPMType.Binary, 1, "kernel-default-4.8.12-1.1", OS.Linux, 5))
  }
}
| lucidd/rpm4s | jvm/src/test/scala/rpm4s/LeadSpec.scala | Scala | mit | 521 |
package org.broadinstitute.dsde.firecloud.model
import org.broadinstitute.dsde.firecloud.HealthChecks.termsOfServiceUrl
import org.broadinstitute.dsde.firecloud.mock.MockUtils
import org.scalatest.freespec.AnyFreeSpec
import org.scalatest.matchers.should.Matchers
class ProfileSpec extends AnyFreeSpec with Matchers {

  val randomString = MockUtils.randomAlpha()

  /**
   * Builds a fully-populated Profile whose required fields are random text,
   * varying only the contact email under test. Profile validation runs in
   * the constructor, so an invalid email throws IllegalArgumentException
   * from this call.
   */
  private def profileWithEmail(contactEmail: Option[String]): Profile =
    Profile(
      firstName = randomString,
      lastName = randomString,
      title = randomString,
      contactEmail = contactEmail,
      institute = randomString,
      institutionalProgram = randomString,
      programLocationCity = randomString,
      programLocationState = randomString,
      programLocationCountry = randomString,
      pi = randomString,
      nonProfitStatus = randomString
    )

  "Profile" - {

    "Correctly formed profiles" - {

      "BasicProfile with well-formed contact email is valid" in {
        val basicProfile = BasicProfile(
          firstName = randomString,
          lastName = randomString,
          title = randomString,
          contactEmail = Some("me@abc.com"),
          institute = randomString,
          institutionalProgram = randomString,
          programLocationCity = randomString,
          programLocationState = randomString,
          programLocationCountry = randomString,
          pi = randomString,
          nonProfitStatus = randomString,
          termsOfService = Some(termsOfServiceUrl)
        )
        basicProfile shouldNot be(null)
      }

      "Profile with blank contact email is valid" in {
        profileWithEmail(Some("")) shouldNot be(null)
      }

      "Profile with empty contact email is valid" in {
        profileWithEmail(Option.empty) shouldNot be(null)
      }

      "Profile with contact email containing '+' is valid" in {
        profileWithEmail(Some("a-z+a.b-x+y.z@gmail.com")) shouldNot be(null)
      }
    }

    "Incorrectly formed profiles" - {

      "BasicProfile with blank required info is invalid" in {
        val ex = intercept[IllegalArgumentException] {
          BasicProfile(
            firstName = "",
            lastName = "",
            title = "",
            contactEmail = None,
            institute = "",
            institutionalProgram = "",
            programLocationCity = "",
            programLocationState = "",
            programLocationCountry = "",
            pi = "",
            nonProfitStatus = "",
            // named (was positional) for consistency with the other arguments
            termsOfService = None
          )
        }
        ex shouldNot be(null)
      }

      "Profile with invalid contact email is invalid" in {
        val ex = intercept[IllegalArgumentException] {
          profileWithEmail(Some("invalid contact email address"))
        }
        ex shouldNot be(null)
      }
    }
  }

  "ProfileUtils" - {

    // Sample key/value data covering: present string, parseable long,
    // unparseable long, and a key with no value at all.
    val pw = ProfileWrapper("123", List(
      FireCloudKeyValue(Some("imastring"), Some("hello")),
      FireCloudKeyValue(Some("imalong"), Some("1556724034")),
      FireCloudKeyValue(Some("imnotalong"), Some("not-a-long")),
      FireCloudKeyValue(Some("imnothing"), None)
    ))

    // NOTE: leaf cases below use `in` (previously `-`). With `-` they were
    // nested scopes, so their assertions executed eagerly during suite
    // construction and were never registered as runnable tests.
    "getString" - {

      "returns None if key doesn't exist" in {
        val targetKey = "nonexistent"
        // assert key does not exist in sample data
        pw.keyValuePairs.find(_.key.contains(targetKey)) shouldBe None
        // and therefore getString returns None
        val actual = ProfileUtils.getString(targetKey, pw)
        actual shouldBe None
      }

      "returns None if key exists but value doesn't" in {
        val targetKey = "imnothing"
        // assert key exists in sample data with no value
        val targetKV = pw.keyValuePairs.find(_.key.contains(targetKey))
        targetKV.isDefined shouldBe true
        targetKV.get.value shouldBe None
        val actual = ProfileUtils.getString(targetKey, pw)
        actual shouldBe None
      }

      "returns Some(String) if key and value exist" in {
        val targetKey = "imastring"
        val actual = ProfileUtils.getString(targetKey, pw)
        actual shouldBe Some("hello")
      }
    }

    "getLong" - {

      "returns None if key doesn't exist" in {
        val targetKey = "nonexistent"
        // assert key does not exist in sample data
        pw.keyValuePairs.find(_.key.contains(targetKey)) shouldBe None
        // and therefore getLong returns None
        val actual = ProfileUtils.getLong(targetKey, pw)
        actual shouldBe None
      }

      "returns None if key exists but value doesn't" in {
        val targetKey = "imnothing"
        // assert key exists in sample data with no value
        val targetKV = pw.keyValuePairs.find(_.key.contains(targetKey))
        targetKV.isDefined shouldBe true
        targetKV.get.value shouldBe None
        val actual = ProfileUtils.getLong(targetKey, pw)
        actual shouldBe None
      }

      "returns None if key and value exist but value is not a Long" in {
        val targetKey = "imnotalong"
        // assert the key exists
        ProfileUtils.getString(targetKey, pw) shouldBe Some("not-a-long")
        // but can't be parsed as a Long
        val actual = ProfileUtils.getLong(targetKey, pw)
        actual shouldBe None
      }

      "returns Some(Long) if key and value exist and value is Long-able" in {
        val targetKey = "imalong"
        val actual = ProfileUtils.getLong(targetKey, pw)
        actual shouldBe Some(1556724034L)
      }
    }
  }
}
| broadinstitute/firecloud-orchestration | src/test/scala/org/broadinstitute/dsde/firecloud/model/ProfileSpec.scala | Scala | bsd-3-clause | 6,766 |
package streamz.example
import akka.actor.ActorSystem
import scalaz.concurrent.Task
import scalaz.stream.Process
import streamz.akka.camel._
/**
 * Demonstrates composing Camel endpoints as scalaz-stream processes.
 *
 * NOTE(review): all vals below are evaluated when the object is first
 * referenced, and `t.run` blocks until the stream terminates — this is
 * example code, not a reusable component.
 */
object CamelExample {
  // Actor system required implicitly by the streamz-camel DSL.
  implicit val system = ActorSystem("example")

  // Pipeline: seda:q1 -> tap to seda:q3 -> bean length -> seda:q2.
  val p: Process[Task,Unit] =
    // receive from endpoint
    receive[String]("seda:q1")
    // in-only message exchange with endpoint and continue stream with in-message
    .sendW("seda:q3")
    // in-only message exchange with endpoint and continue stream with out-message
    .request[Int]("bean:service?method=length")
    // in-only message exchange with endpoint
    .send("seda:q2")

  // create concurrent task from process
  val t: Task[Unit] = p.run

  // run task (side effects only here) ...
  t.run

  // Individual stages shown separately, with their types spelled out.
  val p1: Process[Task,String] = receive[String]("seda:q1")
  val p2: Process[Task,Unit] = p1.send("seda:q2")
  val p3: Process[Task,String] = p1.sendW("seda:q3")
  val p4: Process[Task,Int] = p1.request[Int]("bean:service?method=length")
}
| Astrac/streamz | streamz-akka-camel/src/test/scala/streamz/example/CamelExample.scala | Scala | apache-2.0 | 969 |
package contents
import random._
import insult._
/**
 * Base class for a weighted set of grammar productions.
 *
 * Each subclass describes one grammar symbol by overriding `set` with the
 * weighted alternatives that symbol can expand to.
 */
abstract class Contents {
  // Evaluated once at construction time. This calls the overridden abstract
  // `set` from the superclass constructor, so implementations must be pure
  // defs that do not read subclass state (all overrides in this file are).
  val odds = set
  // The weighted expansion alternatives for this grammar symbol.
  def set: Odds[Seq[ItemSet]]
  // Draws one alternative according to the weights — presumably random;
  // behavior depends on Odds.get, which is declared elsewhere.
  def get = odds.get
  // Convenience builder: packs the given items into one alternative.
  def seq(items: ItemSet*) = items
}
/** Expansions for a full noun phrase. */
object NounPhraseContents extends Contents {
  override def set = {
    val bareNoun      = seq(Noun)
    val adjectiveNoun = seq(AdjectivePhrase, Conjonction(" "), Noun)
    val compoundNoun  = seq(NounLeft, Conjonction(" "), Noun)
    // All three alternatives are equally likely.
    OddBuilder.add(1, bareNoun).add(1, adjectiveNoun).add(1, compoundNoun)
  }
}
/** Expansions for the left part of a compound noun. */
object NounLeftContents extends Contents {
  override def set = {
    val bareNoun      = seq(Noun)
    val adjectiveNoun = seq(AdjectivePhrase, Conjonction(" "), Noun)
    // A plain noun is twice as likely as an adjective-qualified one.
    OddBuilder.add(2, bareNoun).add(1, adjectiveNoun)
  }
}
/** Expansions for a noun: a thing, an activity, or a thing plus activity. */
object NounContents extends Contents {
  override def set = {
    val thing            = seq(NounThing)
    val activity         = seq(NounActivity)
    val thingWithActivity = seq(NounThing, NounActivityGeneric)
    // The combined form is weighted twice as heavily as either simple form.
    OddBuilder.add(1, thing).add(1, activity).add(2, thingWithActivity)
  }
}
/** Expansions for an adjective phrase: one adjective, adverb + adjective, or two adjectives. */
object AdjectivePhraseContents extends Contents {
  override def set = {
    val space = Conjonction(" ")
    OddBuilder
      .add(3, seq(AdjectiveStar))                          // single adjective (most likely)
      .add(1, seq(AdverbStar, space, AdjectiveStar))       // adverb-modified adjective
      .add(2, seq(AdjectiveStar, space, AdjectiveStar))    // adjective pair
  }
}
/** Expansions for an adverb: plain, or derived from a noun. */
object AdverbStarContents extends Contents {
  override def set =
    OddBuilder
      .add(4, seq(Adverb))                   // plain adverb (4:1 favourite)
      .add(1, seq(NounThing, AdverbGeneric)) // noun-derived adverb
}
/** Expansions for an adjective: plain, or derived from a noun. */
object AdjectiveStarContents extends Contents {
  override def set =
    OddBuilder
      .add(2, seq(Adjective))                    // plain adjective (2:1 favourite)
      .add(1, seq(NounThing, AdjectiveGeneric))  // noun-derived adjective
}
/** Top-level sentence templates for the generated output. */
object SentenceContents extends Contents {
  override def set = {
    val space   = Conjonction(" ")
    val you     = Conjonction("you ")
    val coupled = Conjonction(" coupled with a ")
    OddBuilder
      .add(4, seq(you, NounPhrase))
      .add(2, seq(Participle, space, NounPhrase))
      .add(2, seq(you, NounPhrase, coupled, NounPhrase))
      .add(1, seq(Participle, space, NounPhrase, coupled, NounPhrase))
      .add(2, seq(NounPhrase, Preposition))
    // .add(1, seq(Special)) — disabled alternative retained from the original.
  }
}
/* object NounPhraseContents extends Contents {
override def set = {
OddBuilder
.add(1, seq(NounStar))
.add(0, seq(AdjectivePhrase, Conjonction(" "), NounRight))
.add(0, seq(ParticipialPhrase, Conjonction(" "), NounRight))
.add(0, seq(NounLeft, Conjonction(" "), NounRight))
.add(0, seq(NounLeft, Preposition))
}
}
object NounLeftContents extends Contents {
override def set = {
OddBuilder
.add(1, seq(NounStar))
.add(1, seq(AdjectivePhrase, Conjonction(" "), NounStar))
.add(1, seq(ParticipialPhrase, Conjonction(" "), NounStar))
.add(1, seq(NounLeft, Conjonction(" "), NounStar))
}
}
object NounRightContents extends Contents {
override def set = {
OddBuilder
.add(1, seq(NounStar))
.add(1, seq(Noun, Preposition))
.add(1, seq(NounStar, Conjonction(" "), NounRight))
}
}
object NounStarContents extends Contents {
override def set = {
OddBuilder
.add(0, seq(Noun))
.add(1, seq(Prefix, Conjonction("-"), NounStar))
.add(0, seq(NounThing, Conjonction("-"), SuffixNouns))
.add(0, seq(NounThing, Noun))
}
}
object NounContents extends Contents {
override def set = {
OddBuilder
.add(1, seq(NounThing))
.add(1, seq(NounActivity))
}
}
object AdjectivePhraseContents extends Contents {
override def set = {
OddBuilder
.add(1, seq(AdjectiveStar))
.add(1, seq(AdverbStar, AdjectivePhrase))
.add(1, seq(AdjectivePhrase, AdjectiveStar))
.add(1, seq(AdjectiveStar, Conjonction(", "), AdjectivePhrase))
}
}
object AdverbStarContents extends Contents {
override def set = {
OddBuilder
.add(1, seq(Adverb))
.add(1, seq(Prefix, AdverbStar))
.add(1, seq(NounThing, Adverb))
}
}
object AdjectiveStarContents extends Contents {
override def set = {
OddBuilder
.add(1, seq(Adjective))
.add(1, seq(Prefix, AdjectiveStar))
.add(1, seq(Adjective, Suffix))
.add(1, seq(NounThing, Adjective))
}
}
object ParticipialPhraseContents extends Contents {
override def set = {
OddBuilder
.add(1, seq(ParticipleStar))
.add(1, seq(AdverbStar, ParticipialPhrase))
}
}
object ParticipleStarContents extends Contents {
override def set = {
OddBuilder
.add(1, seq(Participle))
.add(1, seq(Prefix, ParticipleStar))
.add(1, seq(Participle, Suffix))
.add(1, seq(NounThing, Participle))
}
}
object SentenceContents extends Contents {
override def set = {
OddBuilder
.add(0, seq(Conjonction("you "), NounPhrase))
.add(0, seq(NounPhrase))
.add(8, seq(Special))
}
} */ | HiinoFW/InsultGenerator | insult/contents/Contents.scala | Scala | mit | 4,392 |
/*
* Licensed to Intel Corporation under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* Intel Corporation licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.torch
import com.intel.analytics.bigdl.nn.{ClassNLLCriterion, MSECriterion, ParallelCriterion}
import com.intel.analytics.bigdl.tensor.{Storage, Tensor}
import com.intel.analytics.bigdl.utils.{T, Table}
import org.scalatest.{BeforeAndAfter, FlatSpec, Matchers}
import scala.util.Random
@com.intel.analytics.bigdl.tags.Serial
class ParallelCriterionSpec extends FlatSpec with BeforeAndAfter with Matchers {

  before {
    // These checks compare against a reference Torch implementation,
    // so they can only run where Torch is installed.
    if (!TH.hasTorch()) {
      cancel("Torch is not installed")
    }
  }

  "A ParallelCriterion " should "generate correct output and grad" in {
    val seed = 100
    Random.setSeed(seed)

    val criterion = new ParallelCriterion[Double]()

    // Tensor creation order is kept stable so the seeded Random sequence
    // fills each tensor with the same values as before.
    val firstInput = Tensor[Double](2, 10).apply1(_ => Random.nextDouble())
    val secondInput = Tensor[Double](2, 10).apply1(_ => Random.nextDouble())
    val input = T()
    input(1.0) = firstInput
    input(2.0) = secondInput

    val firstTarget = Tensor[Double](Storage(Array(2.0, 5.0)))
    val secondTarget = Tensor[Double](2, 10).apply1(_ => Random.nextDouble())
    val target = T()
    target(1.0) = firstTarget
    target(2.0) = secondTarget

    // Weighted combination: 0.3 * NLL on the first pair, 0.2 * MSE on the second.
    val nll = new ClassNLLCriterion[Double]()
    val mse = new MSECriterion[Double]()
    criterion.add(nll, 0.3).add(mse, 0.2)

    val begin = System.nanoTime()
    val loss = criterion.forward(input, target)
    val gradOutput = criterion.backward(input, target)
    val scalaTime = System.nanoTime() - begin

    // Reference computation executed by Torch via TH.run.
    val code = """
      nll = nn.ClassNLLCriterion()
      mse = nn.MSECriterion()
      pc = nn.ParallelCriterion():add(nll, 0.3):add(mse, 0.2)
      loss = pc:forward(input, target)
      gradOutput = pc:backward(input, target)
      gradOutput1 = gradOutput[1]
      gradOutput2 = gradOutput[2]
    """.stripMargin

    val (luaTime, torchResult) = TH.run(code, Map("input" -> input, "target" -> target),
      Array("loss", "gradOutput1", "gradOutput2"))

    val torchLoss = torchResult("loss").asInstanceOf[Double]
    val torchGrad1 = torchResult("gradOutput1").asInstanceOf[Tensor[Double]]
    val torchGrad2 = torchResult("gradOutput2").asInstanceOf[Tensor[Double]]
    val torchGrad = T(torchGrad1, torchGrad2)

    // The Scala implementation must match Torch exactly.
    torchLoss should be (loss)
    torchGrad should be (gradOutput)

    println("Test case : ParallelCriterion, Torch : " + luaTime +
      " s, Scala : " + scalaTime / 1e9 + " s")
  }
}
| SeaOfOcean/BigDL | dl/src/test/scala/com/intel/analytics/bigdl/torch/ParallelCriterionSpec.scala | Scala | apache-2.0 | 3,128 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and retrieves a sample of Scala code snippets matching specific criteria, giving a quick overview of the dataset's contents without deeper analysis.