| code (string, length 5–1M) | repo_name (string, length 5–109) | path (string, length 6–208) | language (string, 1 class) | license (string, 15 classes) | size (int64, 5–1M) |
|---|---|---|---|---|---|
/**
* This code is generated using [[https://www.scala-sbt.org/contraband/ sbt-contraband]].
*/
// DO NOT EDIT MANUALLY
package sbt.internal.bsp
/**
* Compile Response
* @param originId An optional request id to know the origin of this report.
* @param statusCode A status code for the execution.
*/
final class BspCompileResult private (
val originId: Option[String],
val statusCode: Int) extends Serializable {
override def equals(o: Any): Boolean = this.eq(o.asInstanceOf[AnyRef]) || (o match {
case x: BspCompileResult => (this.originId == x.originId) && (this.statusCode == x.statusCode)
case _ => false
})
override def hashCode: Int = {
37 * (37 * (37 * (17 + "sbt.internal.bsp.BspCompileResult".##) + originId.##) + statusCode.##)
}
override def toString: String = {
"BspCompileResult(" + originId + ", " + statusCode + ")"
}
private[this] def copy(originId: Option[String] = originId, statusCode: Int = statusCode): BspCompileResult = {
new BspCompileResult(originId, statusCode)
}
def withOriginId(originId: Option[String]): BspCompileResult = {
copy(originId = originId)
}
def withOriginId(originId: String): BspCompileResult = {
copy(originId = Option(originId))
}
def withStatusCode(statusCode: Int): BspCompileResult = {
copy(statusCode = statusCode)
}
}
object BspCompileResult {
def apply(originId: Option[String], statusCode: Int): BspCompileResult = new BspCompileResult(originId, statusCode)
def apply(originId: String, statusCode: Int): BspCompileResult = new BspCompileResult(Option(originId), statusCode)
}
| sbt/sbt | protocol/src/main/contraband-scala/sbt/internal/bsp/BspCompileResult.scala | Scala | apache-2.0 | 1,615 |
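A minimal usage sketch of the generated BspCompileResult API in the entry above; the originId and status values are made up for illustration:
import sbt.internal.bsp.BspCompileResult
// Construct via the companion apply, then derive updated copies with the with* setters.
val ok: BspCompileResult = BspCompileResult(originId = None, statusCode = 1)
val tagged: BspCompileResult = ok.withOriginId("build-request-42").withStatusCode(2)
// Generated structural equality compares both fields.
assert(tagged == BspCompileResult(Some("build-request-42"), 2))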
/*
* Copyright 2016-2020 47 Degrees Open Source <https://www.47deg.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package github4s.algebras
import github4s.GHResponse
import github4s.domain._
trait Activities[F[_]] {
/**
* Set a thread subscription
*
* @param id Conversation id for subscribe or unsubscribe
* @param ignored Determines if all notifications should be blocked from this thread
* @param subscribed Determines if notifications should be received from this thread
* @param headers Optional user headers to include in the request
* @return GHResponse with the Subscription
*/
def setThreadSub(
id: Long,
subscribed: Boolean,
ignored: Boolean,
headers: Map[String, String] = Map()
): F[GHResponse[Subscription]]
/**
* List the users having starred a particular repository
*
* @param owner of the repo
* @param repo name of the repo
* @param timeline Whether or not to include the date at which point a user starred the repo
* @param pagination Limit and Offset for pagination
* @param headers Optional user headers to include in the request
* @return GHResponse with the list of users starring this repo
*/
def listStargazers(
owner: String,
repo: String,
timeline: Boolean,
pagination: Option[Pagination] = None,
headers: Map[String, String] = Map()
): F[GHResponse[List[Stargazer]]]
/**
* List the repositories starred by a particular user
*
* @param username User for which we want to retrieve the starred repositories
* @param timeline Whether or not to include the date at which point a user starred the repo
* @param sort How to sort the result, can be "created" (when the repo was starred) or "updated"
* (when the repo was last pushed to)
* @param direction In which direction the results are sorted, can be "asc" or "desc"
* @param pagination Limit and Offset for pagination
* @param headers Optional user headers to include in the request
* @return GHResponse with the list of starred repositories for this user
*/
def listStarredRepositories(
username: String,
timeline: Boolean,
sort: Option[String] = None,
direction: Option[String] = None,
pagination: Option[Pagination] = None,
headers: Map[String, String] = Map()
): F[GHResponse[List[StarredRepository]]]
}
| 47deg/github4s | github4s/src/main/scala/github4s/algebras/Activities.scala | Scala | apache-2.0 | 2,904 |
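A sketch of programming against the Activities[F] algebra above; the owner/repo values are placeholders and the eventual interpreter for F (github4s' HTTP client) is outside this file:
import github4s.GHResponse
import github4s.algebras.Activities
import github4s.domain.Stargazer
// A program expressed purely in terms of the algebra; F stays abstract here.
def stargazersOf[F[_]](activities: Activities[F]): F[GHResponse[List[Stargazer]]] =
  activities.listStargazers(owner = "47degrees", repo = "github4s", timeline = false)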
import play.api.ApplicationLoader.Context
import play.api._
import play.api.i18n._
import play.api.routing.Router
import router.Routes
import com.softwaremill.macwire._
import com.softwaremill.tagging._
import akka.actor.ActorSystem
import akka.actor.Props
import com.mohiva.play.silhouette.api.{Environment => SilhouetteEnvironment}
import com.mohiva.play.silhouette.api.Silhouette
import com.mohiva.play.silhouette.api.SilhouetteProvider
import com.mohiva.play.silhouette.impl.authenticators.{SessionAuthenticator, SessionAuthenticatorSettings, SessionAuthenticatorService}
import com.mohiva.play.silhouette.api.services._
import utils.auth.DefaultEnv
import com.mohiva.play.silhouette.api.util.Clock
import com.mohiva.play.silhouette.api.EventBus
import com.mohiva.play.silhouette.api.RequestProvider
import com.mohiva.play.silhouette.impl.util.DefaultFingerprintGenerator
import com.mohiva.play.silhouette.impl.util.SecureRandomIDGenerator
import com.mohiva.play.silhouette.api.crypto.Base64AuthenticatorEncoder
import com.mohiva.play.silhouette.api.actions._
import net.ceedubs.ficus.Ficus._
import net.ceedubs.ficus.readers.ArbitraryTypeReader._
import com.mohiva.play.silhouette.impl.providers.OAuth1Settings
import com.mohiva.play.silhouette.impl.providers.oauth1.TwitterProvider
import com.mohiva.play.silhouette.api.util.PlayHTTPLayer
import play.api.libs.ws.ahc.AhcWSComponents
import com.mohiva.play.silhouette.impl.providers.oauth2.GitHubProvider
import com.mohiva.play.silhouette.impl.providers.OAuth2Settings
import com.mohiva.play.silhouette.impl.providers.oauth2.state.CookieStateProvider
import com.mohiva.play.silhouette.impl.providers.OAuth2StateProvider
import com.mohiva.play.silhouette.impl.providers.oauth2.state.CookieStateSettings
import com.mohiva.play.silhouette.api.crypto.CookieSigner
import com.mohiva.play.silhouette.api.util.HTTPLayer
import com.mohiva.play.silhouette.impl.providers.SocialProvider
import pdi.jwt.JwtJson
import pdi.jwt.JwtAlgorithm
import com.mohiva.play.silhouette.crypto.JcaCookieSigner
import com.mohiva.play.silhouette.crypto.JcaCookieSignerSettings
import com.mohiva.play.silhouette.impl.providers.SocialProviderRegistry
import scala.util.Try
import utils.auth.providers.RapidAAFProvider
import services.InstanceOAuthDataHandler
import utils.oauth.AuthorizationCodeGenerator
import controllers._
import utils.admin.SshRepl
import ammonite.util.Bind
import services._
import akka.cluster.Cluster
import play.api.libs.ws.WSClient
import play.api.libs.ws.ahc.AhcWSClient
import domain.PublicImage
import play.api.cache.EhCacheComponents
class AppApplicationLoader extends ApplicationLoader {
def load(context: Context) = {
LoggerConfigurator(context.environment.classLoader).foreach {
_.configure(context.environment)
}
(new AppComponents(context)).application
}
}
class AppComponents(context: Context)
extends BuiltInComponentsFromContext(context) with AhcWSComponents with EhCacheComponents {
implicit lazy val executionContext = materializer.executionContext
implicit val system = actorSystem
lazy val router: Router = {
lazy val prefix = "/"
wire[Routes]
}
lazy val langs: Langs = wire[DefaultLangs]
lazy val messsages: MessagesApi = wire[DefaultMessagesApi]
// Public images
val publicImages: Seq[PublicImage] =
for {
config <- configuration.getConfig("images.public").toSeq
key <- config.subKeys
c <- config.getConfig(key)
display <- c.getString("display").orElse(Some(key))
image <- c.getString("image")
tags = c.getStringSeq("tags").getOrElse(Nil)
} yield PublicImage(display, image, tags)
// Image save handling
val imageServerConfig =
for {
server <- configuration.getString("images.server")
} yield domain.ImageServerConfig(server)
val trackingScripts = TrackingScripts(play.twirl.api.HtmlFormat.fill(List[Option[play.twirl.api.Html]](
configuration.getString("tracking.ga.id").map { trackingId =>
views.html.includes.tracking.ga(trackingId,
configuration.getBoolean("tracking.ga.errors").getOrElse(false))
}
).flatten))
// Sharder/AggregateManager setup and event-bus subscription
val keyringSharder = KeyRingSharder()
.taggedWith[services.KeyRingSharder.type]
val schedulerSharder = SchedulerSharder(imageServerConfig, keyringSharder)
.taggedWith[services.SchedulerSharder.type]
system.eventStream.subscribe(schedulerSharder, classOf[SchedulerSharder.Envelope])
val instanceSharder = InstanceSharder(keyringSharder, schedulerSharder)
.taggedWith[services.InstanceSharder.type]
system.eventStream.subscribe(instanceSharder, classOf[InstanceSharder.Envelope])
val userSharder = UserSharder(instanceSharder, schedulerSharder)
.taggedWith[services.UserSharder.type]
system.eventStream.subscribe(userSharder, classOf[UserSharder.Envelope])
val identitySharder = IdentitySharder(userSharder)
.taggedWith[services.IdentitySharder.type]
system.eventStream.subscribe(identitySharder, classOf[IdentitySharder.Envelope])
lazy val instanceCreatorLookupService: services.InstanceCreatorLookupService = wire[services.InstanceCreatorLookupService]
lazy val identityService: services.IdentityService = wire[services.IdentityService]
lazy val sessionAuthenticatorSettings = SessionAuthenticatorSettings()
lazy val clock = Clock()
lazy val eventBus = EventBus()
lazy val fingerprintGenerator = new DefaultFingerprintGenerator()
lazy val authenticatorEncoder = new Base64AuthenticatorEncoder()
lazy val authenticatorService: AuthenticatorService[SessionAuthenticator] = wire[SessionAuthenticatorService]
lazy val httpLayer: HTTPLayer = wire[PlayHTTPLayer]
lazy val idGenerator = new SecureRandomIDGenerator()
lazy val cookieStateSettings = new CookieStateSettings(secureCookie=false)
lazy val jcaCookieSignerSettings =
new JcaCookieSignerSettings(configuration.underlying.as[String]("play.crypto.secret"))
lazy val silhouetteCookieSigner: CookieSigner = wire[JcaCookieSigner]
lazy val stateProvider: OAuth2StateProvider = wire[CookieStateProvider]
lazy val socialProviders: Seq[SocialProvider] =
Seq[Try[SocialProvider]](
Try {
val settings = configuration.underlying.as[OAuth2Settings]("silhouette.github")
new GitHubProvider(httpLayer, stateProvider, settings)
},
Try {
val settings = configuration.underlying.as[RapidAAFProvider.Settings]("silhouette.rapidaaf")
new RapidAAFProvider(httpLayer, settings)
}
).map(_.toOption).flatten
lazy val socialProviderRegistry = wire[SocialProviderRegistry]
lazy val silhouetteEnv: SilhouetteEnvironment[DefaultEnv] =
SilhouetteEnvironment[DefaultEnv](identityService, authenticatorService, Seq.empty, eventBus)
lazy val securedErrorHandler = wire[DefaultSecuredErrorHandler]
lazy val securedActionModule = wire[DefaultSecuredRequestHandler]
lazy val securedAction: SecuredAction = wire[DefaultSecuredAction]
lazy val unsecuredErrorHandler = wire[DefaultUnsecuredErrorHandler]
lazy val unsecuredActionModule = wire[DefaultUnsecuredRequestHandler]
lazy val unsecuredAction: UnsecuredAction = wire[DefaultUnsecuredAction]
lazy val userAwareActionModule = wire[DefaultUserAwareRequestHandler]
lazy val userAwareAction: UserAwareAction = wire[DefaultUserAwareAction]
lazy val silhouette: Silhouette[DefaultEnv] = wire[SilhouetteProvider[DefaultEnv]]
lazy val authorizationCodeGenerator: AuthorizationCodeGenerator =
new AuthorizationCodeGenerator(configuration.underlying.as[String]("play.crypto.secret"))
lazy val instanceOAuthDataHandler: InstanceOAuthDataHandler = wire[InstanceOAuthDataHandler]
// Controllers
lazy val oauthServerController = wire[OAuthServerController]
lazy val messagingController = wire[MessagingController]
lazy val webComponentsController = wire[WebComponentsController]
lazy val accessPassController = wire[AccessPassController]
lazy val imageServerController = wire[ImageServerController]
lazy val instanceController = wire[InstanceController]
lazy val keyRingController = wire[KeyRingController]
lazy val schedulerController = wire[SchedulerController]
lazy val injectedContentControllerCacheApi = defaultCacheApi.taggedWith[InjectedContentController]
lazy val injectedContentController = wire[InjectedContentController]
lazy val publicConfigController = wire[PublicConfigController]
lazy val mainController = wire[MainController]
lazy val assetsController = wire[Assets]
// SSH admin server
lazy val sshReplBindings: Seq[Bind[_]] =
Bind("app", application) ::
Nil
lazy val classloader = application.classloader
val sshRepl = wire[SshRepl]
}
| dit4c/dit4c | dit4c-portal/app/AppApplicationLoader.scala | Scala | mit | 8,726 |
package com.twitter.scrooge.ast
import scala.collection.mutable
import com.twitter.scrooge.frontend.ScroogeInternalException
sealed abstract class Identifier extends IdNode {
// It was intentional not to override toString. Instead, use
// "fullName" to indicate its purpose.
def fullName: String
def toCamelCase: Identifier
def toTitleCase: Identifier
def toUpperCase: Identifier
def toLowerCase: Identifier
// to prevent accidental use of Identifier as String
private[scrooge] def +(str: String): String =
throw new ScroogeInternalException("do not use \"+\" operation on Identifiers")
}
object Identifier {
// constructor
def apply(str: String): Identifier = {
assert(!str.isEmpty)
val ids = str.split("\\.")
if (ids.size == 1)
SimpleID(ids.head)
else
QualifiedID(ids)
}
def toTitleCase(str: String): String = toCamelCase(str, true)
/**
* convert string to camel case, with the following fine print:
* - leading underscores are preserved
* - internal underscores are removed. Character following an underscore
* is converted to upper case.
* - first character (non underscore char) is upper case if
* firstCharUp is true, lower case if false
* - first character of the second and following parts (text between underscores)
* is always in upper case
* - if a part is all upper case it is converted to lower case (except for first character),
* in other cases case is preserved
*
* Examples: (original, camel case, title case)
* (gen_html_report, genHtmlReport, GenHtmlReport)
* (GEN_HTML_REPORT, genHtmlReport, GenHtmlReport)
* (Gen_HTMLReport, genHTMLReport, GenHTMLReport)
* (Gen_HTML_Report, genHtmlReport, GenHtmlReport)
* (GENHTMLREPORT, genhtmlreport, Genhtmlreport)
* (genhtmlreport, genhtmlreport, Genhtmlreport)
* (genHtmlReport, genHtmlReport, GenHtmlReport)
* (genHTMLReport, genHTMLReport, GenHtmlReport)
* (_genHtmlReport, _genHtmlReport, _GenHtmlReport)
*/
def toCamelCase(str: String, firstCharUp: Boolean = false): String = {
str.takeWhile(_ == '_') + str.
split('_').
filterNot(_.isEmpty).
zipWithIndex.map { case (part, ind) =>
val first = if (ind == 0 && !firstCharUp) part(0).toLower else part(0).toUpper
val isAllUpperCase = part.forall { c => c.isUpper || !c.isLetter }
val rest = if (isAllUpperCase) part.drop(1).toLowerCase else part.drop(1)
new mutable.StringBuilder(part.size).append(first).append(rest)
}.
mkString
}
}
case class SimpleID(name: String) extends Identifier {
assert(!name.contains(".") && !name.isEmpty) // name is a simple string
val fullName: String = name
def toCamelCase = SimpleID(Identifier.toCamelCase(name))
def toTitleCase = SimpleID(Identifier.toTitleCase(name))
def toUpperCase = SimpleID(name.toUpperCase)
def toLowerCase = SimpleID(name.toLowerCase)
// append and prepend only available for SimpleID
// To encourage correct usage of SimpleID, we intentionally don't use implicit
// string conversions
def append(other: String): SimpleID = {
assert(!other.isEmpty && !other.contains("."))
SimpleID(name + other)
}
def prepend(other: String): SimpleID = {
assert(!other.isEmpty && !other.contains("."))
SimpleID(other + name)
}
def addScope(scope: Identifier): QualifiedID =
QualifiedID(scope match {
case SimpleID(s) => Seq(s, this.name)
case QualifiedID(names) => names :+ name
})
}
case class QualifiedID(names: Seq[String]) extends Identifier {
assert(names.size >= 2) // at least a scope and a name
assert(!names.exists(_.isEmpty))
val fullName: String = names.mkString(".")
// case conversion only happens on the last id
def toCamelCase =
QualifiedID(names.dropRight(1) :+ Identifier.toCamelCase(names.last))
def toTitleCase =
QualifiedID(names.dropRight(1) :+ Identifier.toTitleCase(names.last))
def toUpperCase =
QualifiedID(names.dropRight(1) :+ names.last.toUpperCase)
def toLowerCase =
QualifiedID(names.dropRight(1) :+ names.last.toLowerCase)
def head: SimpleID = SimpleID(names.head)
def tail: Identifier = Identifier(names.tail.mkString("."))
def qualifier: Identifier = Identifier(names.dropRight(1).mkString("."))
def name: SimpleID = SimpleID(names.last)
}
| elipoz/scrooge | scrooge-generator/src/main/scala/com/twitter/scrooge/AST/Identifier.scala | Scala | apache-2.0 | 4,408 |
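A small sketch exercising the conversions documented in the Identifier scaladoc above; the expected strings are taken from that comment's example table:
import com.twitter.scrooge.ast.{Identifier, SimpleID, QualifiedID}
// Camel/title case follow the underscore rules spelled out in the scaladoc.
assert(Identifier.toCamelCase("gen_html_report") == "genHtmlReport")
assert(Identifier.toTitleCase("GEN_HTML_REPORT") == "GenHtmlReport")
assert(Identifier.toCamelCase("_genHtmlReport") == "_genHtmlReport")
// Identifier.apply splits on dots: one segment yields a SimpleID, more yield a QualifiedID.
assert(Identifier("foo") == SimpleID("foo"))
assert(Identifier("ns.foo") == QualifiedID(Seq("ns", "foo")))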
/*
* Copyright 2014–2020 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.qsu.mra
import slamdata.Predef.{Boolean, Int, List}
import quasar.contrib.cats.data.nonEmptySet._
import scala.collection.immutable.SortedSet
import cats.{Order, Show}
import cats.data.NonEmptySet
import cats.kernel.{BoundedSemilattice, CommutativeMonoid}
import cats.syntax.functor._
import cats.syntax.order._
import cats.syntax.show._
import scalaz.@@
import scalaz.Tags.{Conjunction, Disjunction}
import scalaz.syntax.tag._
/** A sum of products of join keys. */
final class JoinKeys[S, V] private (protected val uop: Uop[NonEmptySet[JoinKey[S, V]]]) {
def ∧ (that: JoinKeys[S, V])(implicit sord: Order[S], vord: Order[V]): JoinKeys[S, V] =
and(that)
def ∨ (that: JoinKeys[S, V]): JoinKeys[S, V] =
or(that)
def and(that: JoinKeys[S, V])(implicit sord: Order[S], vord: Order[V]): JoinKeys[S, V] =
new JoinKeys(uop ∧ that.uop)
def isEmpty: Boolean =
uop.isEmpty
def mapKeys[T: Order, W: Order](f: JoinKey[S, V] => JoinKey[T, W]): JoinKeys[T, W] =
new JoinKeys(uop.map(_.map(f)))
def or(that: JoinKeys[S, V]): JoinKeys[S, V] =
new JoinKeys(uop ∨ that.uop)
def toList: List[NonEmptySet[JoinKey[S, V]]] =
uop.toList
def toSortedSet: SortedSet[NonEmptySet[JoinKey[S, V]]] =
uop.toSortedSet
def compare(that: JoinKeys[S, V])(implicit sord: Order[S], vord: Order[V]): Int =
uop.compare(that.uop)
}
object JoinKeys extends JoinKeysInstances {
def conj[S: Order, V: Order](k: JoinKey[S, V], ks: JoinKey[S, V]*): JoinKeys[S, V] =
new JoinKeys(Uop.one(NonEmptySet.of(k, ks: _*)))
def empty[S: Order, V: Order]: JoinKeys[S, V] =
new JoinKeys(Uop.empty)
def one[S: Order, V: Order](k: JoinKey[S, V]): JoinKeys[S, V] =
conj(k)
}
sealed abstract class JoinKeysInstances {
implicit def conjCommutativeMonoid[S: Order, V: Order]: CommutativeMonoid[JoinKeys[S, V] @@ Conjunction] =
new CommutativeMonoid[JoinKeys[S, V] @@ Conjunction] {
val empty = Conjunction(JoinKeys.empty[S, V])
def combine(x: JoinKeys[S, V] @@ Conjunction, y: JoinKeys[S, V] @@ Conjunction) =
Conjunction(x.unwrap ∧ y.unwrap)
}
implicit def disjBoundedSemilattice[S: Order, V: Order]: BoundedSemilattice[JoinKeys[S, V] @@ Disjunction] =
new BoundedSemilattice[JoinKeys[S, V] @@ Disjunction] {
val empty = Disjunction(JoinKeys.empty[S, V])
def combine(x: JoinKeys[S, V] @@ Disjunction, y: JoinKeys[S, V] @@ Disjunction) =
Disjunction(x.unwrap ∨ y.unwrap)
}
implicit def order[S: Order, V: Order]: Order[JoinKeys[S, V]] =
Order.from(_ compare _)
implicit def show[S: Show, V: Show]: Show[JoinKeys[S, V]] =
Show.show(jks => "JoinKeys" + jks.toSortedSet.toIterator.map(_.show).mkString("(", ", ", ")"))
}
| slamdata/quasar | qsu/src/main/scala/quasar/qsu/mra/JoinKeys.scala | Scala | apache-2.0 | 3,358 |
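A sketch of combining JoinKeys values with the combinators defined above; the JoinKey arguments stay abstract since their constructors are not part of this file, and importing JoinKey from the same package is an assumption:
import cats.Order
import quasar.qsu.mra.{JoinKey, JoinKeys}
// Conjoin two single-key products, then disjoin with empty (the identity of ∨).
def bothOf[S: Order, V: Order](a: JoinKey[S, V], b: JoinKey[S, V]): JoinKeys[S, V] =
  (JoinKeys.one(a) ∧ JoinKeys.one(b)) ∨ JoinKeys.empty[S, V]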
package artisanal.pickle.maker
import models._
import parser._
import org.specs2._
import mutable._
import specification._
import scala.reflect.internal.pickling.ByteCodecs
import scala.tools.scalap.scalax.rules.scalasig._
import com.novus.salat.annotations.util._
import scala.reflect.ScalaSignature
class ListListListStringSpec extends mutable.Specification {
"a ScalaSig for case class MyRecord_ListListListString(p: List[List[List[String]]])" should {
"have the correct string" in {
val mySig = new artisanal.pickle.maker.ScalaSig(List("case class"), List("models", "MyRecord_ListListListString"), List(("r", "List[List[List[String]]]")))
val correctParsedSig = SigParserHelper.parseByteCodeFromAnnotation(classOf[MyRecord_ListListListString]).map(ScalaSigAttributeParsers.parse(_)).get
val myParsedSig = SigParserHelper.parseByteCodeFromMySig(mySig).map(ScalaSigAttributeParsers.parse(_)).get
correctParsedSig.toString === myParsedSig.toString
}
}
}
| julianpeeters/artisanal-pickle-maker | src/test/scala/singleValueMember/ListSpecs/ListListListStringSpec.scala | Scala | apache-2.0 | 999 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.catnap
package cancelables
import cats.Applicative
import cats.effect.{CancelToken, Sync}
import monix.catnap.CancelableF
import monix.catnap.CancelableF.Empty
import monix.execution.annotations.UnsafeBecauseImpure
import monix.execution.atomic.Atomic
/**
* Represents a [[CancelableF]] that can be queried for the
* canceled status.
*
* @see [[monix.execution.cancelables.BooleanCancelable]] for
* the simpler, side-effecting version.
*/
trait BooleanCancelableF[F[_]] extends CancelableF[F] {
/**
* @return true in case this cancelable hasn't been canceled,
* or false otherwise.
*/
def isCanceled: F[Boolean]
}
object BooleanCancelableF {
/**
* Builder for [[BooleanCancelableF]] that wraps and protects
* the given cancellation token.
*
* The returned implementation guarantees idempotency by
* ensuring that the given `token` will only be executed once,
* the operation becoming a no-op on subsequent evaluations,
* thus being memoized.
*
* @param token is a value that can be evaluated.
*/
def apply[F[_]](token: CancelToken[F])(implicit F: Sync[F]): F[BooleanCancelableF[F]] =
F.delay(unsafeApply[F](token))
/**
* Unsafe version of [[apply]].
*
* This function is unsafe because creating the returned
* [[BooleanCancelableF]] allocates internal shared mutable
* state, thus breaking referential transparency, which can
* catch users by surprise.
*/
@UnsafeBecauseImpure
def unsafeApply[F[_]](token: CancelToken[F])(implicit F: Sync[F]): BooleanCancelableF[F] =
new Impl[F](token)
/**
* Returns an instance of a [[BooleanCancelableF]] that's
* already canceled.
*/
def alreadyCanceled[F[_]](implicit F: Applicative[F]): BooleanCancelableF[F] with Empty[F] =
new BooleanCancelableF[F] with Empty[F] {
val isCanceled = F.pure(true)
def cancel = F.unit
}
/**
* Returns a [[BooleanCancelableF]] that can never be canceled.
*
* Useful as a low-overhead instance whose `isCanceled` value
* is always `false`, thus similar in spirit with [[alreadyCanceled]].
*/
def dummy[F[_]](implicit F: Applicative[F]): BooleanCancelableF[F] =
new BooleanCancelableF[F] with Empty[F] {
val isCanceled = F.pure(false)
def cancel = F.unit
}
private final class Impl[F[_]](token: CancelToken[F])(implicit F: Sync[F]) extends BooleanCancelableF[F] {
private[this] val canceled = Atomic(false)
private[this] var ref = token
def isCanceled =
F.delay(canceled.get())
def cancel: CancelToken[F] =
F.suspend {
if (!canceled.getAndSet(true)) {
val ref = this.ref
this.ref = null.asInstanceOf[CancelToken[F]]
ref
} else {
F.unit
}
}
}
}
| alexandru/monifu | monix-catnap/shared/src/main/scala/monix/catnap/cancelables/BooleanCancelableF.scala | Scala | apache-2.0 | 3,521 |
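A sketch of the idempotent-cancelation contract described above, assuming a cats-effect 2 environment where SyncIO has a Sync instance and CancelToken[F] is F[Unit]:
import cats.effect.SyncIO
import monix.catnap.cancelables.BooleanCancelableF
// The wrapped token runs at most once; isCanceled flips to true after the first cancel.
val program: SyncIO[Boolean] =
  for {
    c <- BooleanCancelableF[SyncIO](SyncIO(println("releasing resource")))
    _ <- c.cancel
    _ <- c.cancel // no-op: the token has already been executed
    canceled <- c.isCanceled
  } yield canceled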
package fm.common.test.classutil
object TestClass {
}
class TestClass {
def foo: String = "test_class"
}
| frugalmechanic/fm-common | jvm/src/test/scala/fm/common/test/classutil/TestClass.scala | Scala | apache-2.0 | 109 |
package monocle.generic
import monocle.{Iso, Prism}
import monocle.generic.internal.{CoproductToDisjunction, DisjunctionToCoproduct}
import shapeless.{Coproduct, Generic}
import shapeless.ops.coproduct.{CoproductToEither, EitherToCoproduct, Inject, Selector}
import scala.annotation.nowarn
@deprecated("no replacement", since = "3.0.0-M1")
object coproduct extends CoProductInstances
@nowarn
trait CoProductInstances {
@deprecated("no replacement", since = "3.0.0-M1")
def coProductPrism[C <: Coproduct, A](implicit evInject: Inject[C, A], evSelector: Selector[C, A]): Prism[C, A] =
Prism[C, A](evSelector.apply(_))(evInject.apply)
/** An isomorphism between a coproduct `S` and the sum of its parts.
*
* {{{
* type ISB = Int :+: String :+: Boolean :+: CNil
*
* val iso: Iso[ISB, Either[Int, Either[String, Boolean]]] = coProductEitherIso[ISB].apply
* }}}
*/
@deprecated("no replacement", since = "3.0.0-M1")
def coProductEitherIso[S <: Coproduct]: GenCoProductEitherIso[S] = new GenCoProductEitherIso
class GenCoProductEitherIso[S <: Coproduct] {
def apply[L, R](implicit
coproductToEither: CoproductToEither.Aux[S, Either[L, R]],
eitherToCoproduct: EitherToCoproduct.Aux[L, R, S]
): Iso[S, Either[L, R]] =
Iso(coproductToEither.apply)(eitherToCoproduct.apply)
}
/** An isomorphism between a sum type `S` (e.g. a sealed trait) and the sum of its parts.
*
* {{{
* sealed trait S
* case class A(name: String) extends S
* case class B(name: String) extends S
* case class C(otherName: String) extends S
*
* val iso: Iso[S, Either[A, Either[B, C]]] = coProductToEither[S].apply
* }}}
*/
@deprecated("no replacement", since = "3.0.0-M1")
def coProductToEither[S]: GenCoProductToEither[S] = new GenCoProductToEither
class GenCoProductToEither[S] {
def apply[C <: Coproduct, L, R](implicit
ev: Generic.Aux[S, C],
coproductToEither: CoproductToEither.Aux[C, Either[L, R]],
eitherToCoproduct: EitherToCoproduct.Aux[L, R, C]
): Iso[S, Either[L, R]] =
generic.toGeneric[S] composeIso coProductEitherIso.apply
}
/** An isomorphism between a coproduct `S` and the sum of its parts.
*
* {{{
* type ISB = Int :+: String :+: Boolean :+: CNil
*
* val iso: Iso[ISB, Either[Int, Either[String, Boolean]]] = coProductDisjunctionIso[ISB].apply
* }}}
*/
@deprecated("no replacement", since = "3.0.0-M1")
def coProductDisjunctionIso[S <: Coproduct]: GenCoProductDisjunctionIso[S] = new GenCoProductDisjunctionIso
class GenCoProductDisjunctionIso[S <: Coproduct] {
def apply[L, R](implicit
coproductToDisjunction: CoproductToDisjunction.Aux[S, Either[L, R]],
disjunctionToCoproduct: DisjunctionToCoproduct.Aux[L, R, S]
): Iso[S, Either[L, R]] =
Iso(coproductToDisjunction.apply)(disjunctionToCoproduct.apply)
}
/** An isomorphism between a sum type `S` (e.g. a sealed trait) and the sum of its parts.
*
* {{{
* sealed trait S
* case class A(name: String) extends S
* case class B(name: String) extends S
* case class C(otherName: String) extends S
*
* val iso: Iso[S, Either[A, Either[B, C]]] = coProductToDisjunction[S].apply
* }}}
*/
@deprecated("no replacement", since = "3.0.0-M1")
def coProductToDisjunction[S]: GenCoProductToDisjunction[S] = new GenCoProductToDisjunction
class GenCoProductToDisjunction[S] {
def apply[C <: Coproduct, L, R](implicit
ev: Generic.Aux[S, C],
coproductToDisjunction: CoproductToDisjunction.Aux[C, Either[L, R]],
disjunctionToCoproduct: DisjunctionToCoproduct.Aux[L, R, C]
): Iso[S, Either[L, R]] =
generic.toGeneric[S] composeIso coProductDisjunctionIso.apply
}
}
| julien-truffaut/Monocle | generic/src/main/scala/monocle/generic/CoProduct.scala | Scala | mit | 3,816 |
package model.attributes
trait AttributeDescription {
val name: String
val code: String
val desc: String
}
object DexterityDescription extends AttributeDescription {
  override val name = "Ловкость"                           // "Dexterity"
  override val code = "dex"
  override val desc = "Влияет на шанс попадания/уворота"   // "Affects hit/dodge chance"
}
object EnduranceDescription extends AttributeDescription {
  override val name = "Выносливость"                       // "Endurance"
  override val code = "end"
  override val desc = "Влияет на количество очков жизней"  // "Affects the number of hit points"
}
object StrengthDescription extends AttributeDescription {
  override val name = "Сила"                               // "Strength"
  override val code = "str"
  override val desc = "Влияет на наносимые повреждения"    // "Affects damage dealt"
}
| treble-snake/gladiators | src/main/scala/model/attributes/AttributeDescription.scala | Scala | apache-2.0 | 761 |
// Copyright 2014 Commonwealth Bank of Australia
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package au.com.cba.omnia.maestro.example
import org.apache.hadoop.hive.conf.HiveConf.ConfVars._
import com.twitter.scalding.{Config, Execution}
import au.com.cba.omnia.ebenezer.scrooge.hive.Hive
import au.com.cba.omnia.maestro.api._, Maestro._
import au.com.cba.omnia.maestro.example.thrift.Customer
/** Configuration for a customer execution example */
case class CustomerJobConfig(config: Config) {
val maestro = MaestroConfig(
conf = config,
source = "customer",
domain = "customer",
tablename = "customer"
)
val upload = maestro.upload()
val load = maestro.load[Customer](none = "null")
val dateTable = maestro.partitionedHiveTable[Customer, (String, String, String)](
partition = Partition.byDate(Fields[Customer].EffectiveDate),
tablename = "by_date"
)
}
/** Customer file load job with an execution for the main program */
object CustomerJob extends MaestroJob {
def job: Execution[JobStatus] = for {
conf <- Execution.getConfig.map(CustomerJobConfig(_))
uploadInfo <- upload(conf.upload)
sources <- uploadInfo.withSources
(pipe, loadInfo) <- load[Customer](conf.load, uploadInfo.files)
loadSuccess <- loadInfo.withSuccess
count <- viewHive(conf.dateTable, pipe)
if count == loadSuccess.actual
} yield JobFinished
def attemptsExceeded = Execution.from(JobNeverReady) // Elided in the README
}
| toddmowen/maestro | maestro-example/src/main/scala/au/com/cba/omnia/maestro/example/CustomerJob.scala | Scala | apache-2.0 | 2,082 |
/*
* Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software;Designed and Developed mainly by many Chinese
* opensource volunteers. you can redistribute it and/or modify it under the
* terms of the GNU General Public License version 2 only, as published by the
* Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Any questions about this component can be directed to it's project Web address
* https://code.google.com/p/opencloudb/.
*
*/
package turbo.crawler
/**
 * String manipulation adapter.
 * @author mclaren
 */
trait StringAdapter {
  def isEmpty(source: String): Boolean = source == null || source.trim().length() == 0
  def isNotEmpty(source: String): Boolean = !isEmpty(source)
  def equal(one: String, other: String): Boolean = {
    // Guard against nulls before trimming; the original fell through and could NPE on null input.
    if (one == null || other == null) false
    else one.trim().equals(other.trim())
  }
}
| fengshao0907/Mycat-spider | src/main/scala/turbo/crawler/StringAdapter.scala | Scala | apache-2.0 | 1,538 |
package models.connector
import scala.concurrent.Future
import akka.actor._
import akka.util.ByteString
import play.api.libs.iteratee._
import play.api.libs.concurrent.Execution.Implicits._
import rxtxio.Serial._
/** Mock for a serial port. Uses the rxtxio.Serial interface. */
object SerialPortMock {
/** Data that is sent to this port. */
def enumerator = enumeratorFromPort
/** Use to send data as the serial port. */
def channel = channelToPort
def enumeratorOfChannel = enumeratorToPort
def props = Props(new ManagerActor)
private val (enumeratorFromPort, channelFromPort) = Concurrent.broadcast[ByteString]
private val (enumeratorToPort, channelToPort) = Concurrent.broadcast[ByteString]
class ManagerActor extends Actor {
override def receive = {
case ListPorts => sender ! Ports(Vector("SerialPortMock"))
case Open(port, bauds, _, _, _, _) =>
val commander = sender
val operator = context.actorOf(Props(new OperatorActor(commander)))
sender ! Opened(operator, port)
}
}
class OperatorActor(commander: ActorRef) extends Actor {
@volatile var stopped = false
val iteratee = Iteratee.fold2[ByteString, Unit](()) { (_, data) =>
self ! data
Future.successful((), stopped)
}
val encoding = "ASCII"
override def preStart() = {
context watch commander
enumeratorToPort(iteratee)
}
override def postStop() = {
stopped = true
commander ! Closed
}
override def receive = {
case Close =>
if (sender != commander) sender ! Closed
context stop self
case Write(data, ack) =>
channelFromPort.push(data)
if (ack != NoAck) sender ! ack
//Confirm stuff
data.decodeString(encoding).split('\t').toList match {
case "$" :: ">" :: pattern :: rest =>
channel push ByteString(s"$$\t<\t$pattern", encoding)
case _ => ()
}
case data: ByteString =>
commander ! Received(data)
}
}
}
| knittery/knittery-ui | app/models/connector/SerialPortMock.scala | Scala | gpl-2.0 | 2,029 |
/*
* Copyright 2011-2013 The myBatis Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.mybatis.scala.config
import org.apache.ibatis.session.{Configuration => MBConfig}
import org.apache.ibatis.executor.keygen.{Jdbc3KeyGenerator, NoKeyGenerator, SelectKeyGenerator, KeyGenerator => MBKeyGenerator}
import org.apache.ibatis.builder.MapperBuilderAssistant
import org.mybatis.scala.mapping._
import org.mybatis.scala.cache._
import java.util.ArrayList
import org.apache.ibatis.mapping.{ResultMapping => MBResultMapping, SqlSource, SqlCommandType, Discriminator}
import java.util.Properties
//eliminate a feature warning, might be a good idea to define and use traits instead of using structural types
import scala.language.reflectiveCalls
/** Configuration Space (mybatis namespace)
* @constructor Creates an empty configuration space.
* @param configuration myBatis Configuration target
* @param spaceName Space name or namespace
*/
class ConfigurationSpace(configuration : MBConfig, val spaceName : String = "_DEFAULT_") {
// == Start primary constructor code ===
private val builderAssistant = new MapperBuilderAssistant(configuration, spaceName)
builderAssistant.setCurrentNamespace(spaceName)
// == End Primary constructor code ===
// == Start of public API ===
/** Adds a statement to the space */
def += (s : Statement) : this.type = addStatement(s)
/** Adds a sequence of statements to the space */
def ++=(ss : Seq[Statement]) : this.type = {
for (s <- ss) addStatement(s)
this
}
/** Adds a mapper to the space */
def ++=(mapper : { def bind : Seq[Statement] }) : this.type = ++=(mapper.bind)
/** Adds cache support to this space.
* @param impl Cache implementation class
* @param eviction cache eviction policy (LRU,FIFO,WEAK,SOFT)
* @param flushInterval any positive integer in milliseconds.
* The default is not set, thus no flush interval is used and the cache is only flushed by calls to statements.
* @param size max number of objects that can live in the cache. Default is 1024
* @param readWrite A read-only cache will return the same instance of the cached object to all callers.
* Thus such objects should not be modified. This offers a significant performance advantage though.
* A read-write cache will return a copy (via serialization) of the cached object,
* this is slower, but safer, and thus the default is true.
* @param props implementation specific properties.
*/
def cache(
impl : T[_ <: Cache] = DefaultCache,
eviction : T[_ <: Cache] = Eviction.LRU,
flushInterval : Long = -1,
size : Int = -1,
readWrite : Boolean = true,
blocking : Boolean = false,
props : Properties = null) : this.type = {
builderAssistant.useNewCache(
impl.unwrap,
eviction.unwrap,
if (flushInterval > -1) flushInterval else null,
if (size > -1) size else null,
readWrite,
blocking,
props
)
this
}
/** Reference to an external Cache */
def cacheRef(that : ConfigurationSpace) : this.type = {
builderAssistant.useCacheRef(that.spaceName)
this
}
// == End of public API ===
private def addResultMap(rm : ResultMap[_]) : Unit = {
if (rm.fqi == null) {
rm.fqi = ConfigurationSpace.generateFQI(spaceName, rm)
if (rm.parent != null) addResultMap(rm.parent)
val resultMappings = new ArrayList[MBResultMapping]
// Mappings
for (r <- rm.constructor ++ rm.mappings) {
if (r.nestedSelect != null) addStatement(r.nestedSelect)
if (r.nestedResultMap != null) addResultMap(r.nestedResultMap)
resultMappings add
builderAssistant.buildResultMapping(
r.resultTypeClass,
r.property,
r.column,
r.javaTypeClass,
r.jdbcTypeEnum,
resolveFQI(r.nestedSelect),
resolveFQI(r.nestedResultMap),
r.notNullColumn,
r.columnPrefix,
r.typeHandlerClass,
r.flags
)
}
// Discriminator
import java.util.HashMap
var discriminator : Discriminator = null
rm.discr match {
case (column, javaType, jdbcType, typeHandler, cases) =>
val discriminatorMap = new HashMap[String,String]
for (c <- cases) {
addResultMap(c.resultMap)
discriminatorMap.put(c.value, c.resultMap.fqi.resolveIn(spaceName))
}
discriminator = builderAssistant.buildDiscriminator(
rm.resultTypeClass,
column,
if (javaType == null || javaType.isVoid) classOf[String] else javaType.unwrap,
if (jdbcType == null || jdbcType == JdbcType.UNDEFINED) null else jdbcType.unwrap,
if (typeHandler == null) null else typeHandler.unwrap,
discriminatorMap
)
case _ =>
// Skip
}
// Assemble
builderAssistant.addResultMap(
rm.fqi.id,
rm.resultTypeClass,
if (rm.parent != null) rm.parent.fqi.id else null,
discriminator,
resultMappings,
rm.autoMapping.value
)
}
}
private def resolveFQI(r : { def fqi : FQI}) : String = {
if (r == null) null else r.fqi resolveIn spaceName
}
private def addStatement(statement : Statement) : this.type = {
if (statement.fqi == null) {
statement.fqi = ConfigurationSpace.generateFQI(spaceName, statement)
statement match {
case stmt : Select =>
if (stmt.resultMap != null) addResultMap(stmt.resultMap)
builderAssistant.addMappedStatement(
stmt.fqi.resolveIn(spaceName),
buildDynamicSQL(stmt.xsql),
stmt.statementType.unwrap,
SqlCommandType.SELECT,
if (stmt.fetchSize > 0) stmt.fetchSize else null,
if (stmt.timeout > -1) stmt.timeout else null,
null,
stmt.parameterTypeClass,
resolveFQI(stmt.resultMap),
stmt.resultTypeClass,
stmt.resultSetType.unwrap,
stmt.flushCache,
stmt.useCache,
false, // TODO Issue #577
new NoKeyGenerator(),
null,
null,
stmt.databaseId,
DefaultScriptingDriver
)
case stmt : Insert[_] =>
builderAssistant.addMappedStatement(
stmt.fqi.resolveIn(spaceName),
buildDynamicSQL(stmt.xsql),
stmt.statementType.unwrap,
SqlCommandType.INSERT,
null,
if (stmt.timeout > -1) stmt.timeout else null,
null,
stmt.parameterTypeClass,
null,
classOf[Int],
ResultSetType.FORWARD_ONLY.unwrap,
stmt.flushCache,
false,
false, // TODO Issue #577
buildKeyGenerator(stmt.keyGenerator, stmt.parameterTypeClass, stmt.fqi.id, stmt.databaseId),
if (stmt.keyGenerator == null) null else stmt.keyGenerator.keyProperty,
if (stmt.keyGenerator == null) null else stmt.keyGenerator.keyColumn,
stmt.databaseId,
DefaultScriptingDriver
)
case stmt : Update[_] =>
builderAssistant.addMappedStatement(
stmt.fqi.resolveIn(spaceName),
buildDynamicSQL(stmt.xsql),
stmt.statementType.unwrap,
SqlCommandType.UPDATE,
null,
if (stmt.timeout > -1) stmt.timeout else null,
null,
stmt.parameterTypeClass,
null,
classOf[Int],
ResultSetType.FORWARD_ONLY.unwrap,
stmt.flushCache,
false,
false, // TODO Issue #577
new NoKeyGenerator(),
null,
null,
stmt.databaseId,
DefaultScriptingDriver
)
case stmt : Delete[_] =>
builderAssistant.addMappedStatement(
stmt.fqi.resolveIn(spaceName),
buildDynamicSQL(stmt.xsql),
stmt.statementType.unwrap,
SqlCommandType.DELETE,
null,
if (stmt.timeout > -1) stmt.timeout else null,
null,
stmt.parameterTypeClass,
null,
classOf[Int],
ResultSetType.FORWARD_ONLY.unwrap,
stmt.flushCache,
false,
false, // TODO Issue #577
new NoKeyGenerator(),
null,
null,
stmt.databaseId,
DefaultScriptingDriver
)
case stmt : Perform =>
builderAssistant.addMappedStatement(
stmt.fqi.resolveIn(spaceName),
buildDynamicSQL(stmt.xsql),
stmt.statementType.unwrap,
SqlCommandType.UPDATE,
null,
if (stmt.timeout > -1) stmt.timeout else null,
null,
stmt.parameterTypeClass,
null,
classOf[Int],
ResultSetType.FORWARD_ONLY.unwrap,
stmt.flushCache,
false,
false, // TODO Issue #577
new NoKeyGenerator(),
null,
null,
stmt.databaseId,
DefaultScriptingDriver
)
case unsupported =>
throw new ConfigurationException("Unsupported statement type")
//error("Unsupported statement type")
}
}
this
}
private def buildDynamicSQL(xsql : XSQL) : SqlSource
= new DynamicSQLBuilder(configuration, xsql).build
private def buildKeyGenerator(generator : KeyGenerator, parameterTypeClass : Class[_], baseId : String, databaseId : String) : MBKeyGenerator = {
generator match {
case jdbc : JdbcGeneratedKey =>
new Jdbc3KeyGenerator()
case sql : SqlGeneratedKey[_] =>
buildSqlKeyGenerator(sql, parameterTypeClass, baseId, databaseId)
case _ =>
new NoKeyGenerator()
}
}
private def buildSqlKeyGenerator(generator : SqlGeneratedKey[_], parameterTypeClass : Class[_], baseId : String, databaseId : String) : MBKeyGenerator = {
val id = baseId + SelectKeyGenerator.SELECT_KEY_SUFFIX
val useCache = false
val keyGenerator = new NoKeyGenerator()
val fetchSize = null
val timeout = null
val flushCache = false
val parameterMap = null
val resultMap = null
val resultSetTypeEnum = null
val sqlSource = buildDynamicSQL(generator.xsql)
val sqlCommandType = SqlCommandType.SELECT
val statementType = generator.statementType.unwrap
val keyProperty = generator.keyProperty
val keyColumn = generator.keyColumn
val executeBefore = generator.executeBefore
val resultTypeClass = generator.resultTypeClass
builderAssistant.addMappedStatement(
id,
sqlSource,
statementType,
sqlCommandType,
fetchSize,
timeout,
parameterMap,
parameterTypeClass,
resultMap,
resultTypeClass,
resultSetTypeEnum,
flushCache,
useCache,
false, // TODO Issue #577
keyGenerator,
keyProperty,
keyColumn,
databaseId,
DefaultScriptingDriver)
val keyStatement = configuration.getMappedStatement(id, false)
val answer = new SelectKeyGenerator(keyStatement, executeBefore)
configuration.addKeyGenerator(id, answer)
answer
}
}
private object ConfigurationSpace {
private var count : Int = 0
private[config] def generateFQI(spaceId : String, subject : AnyRef) = {
count += 1
FQI(spaceId, subject.getClass.getName.replace('.', '-') + "-" + count)
}
}
| xujianhai/scala-3 | mybatis-scala-core/src/main/scala/org/mybatis/scala/config/ConfigurationSpace.scala | Scala | apache-2.0 | 12,212 |
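A sketch of the cache configuration described in the ConfigurationSpace scaladoc above, assuming a bare myBatis Configuration; the statements normally added via += are omitted:
import org.apache.ibatis.session.{Configuration => MBConfig}
import org.mybatis.scala.config.ConfigurationSpace
val space = new ConfigurationSpace(new MBConfig(), "users")
// Read-only cache (default LRU eviction) of up to 512 objects, flushed every 60 seconds.
space.cache(size = 512, readWrite = false, flushInterval = 60000L)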
package validatr
import org.specs2.mutable._
class ValidationResultSpec extends Specification {
case class Person(name: String, age: Int)
"validationResult.prefix" >> {
ValidationResult("Must not be empty.", path = "bar" :: Nil).prefix("foo") mustEqual
ValidationResult("Must not be empty.", path = "foo" :: "bar" :: Nil)
}
}
| underscoreio/essential-macros | validation/lib/src/test/scala/validatr/ValidationResultSpec.scala | Scala | apache-2.0 | 346 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.utils.tf.loaders
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.tf.Tensorflow.typeAttr
import com.intel.analytics.bigdl.utils.tf.TensorflowSpecHelper
import org.tensorflow.framework.{DataType, NodeDef}
class CeilSpec extends TensorflowSpecHelper {
"Ceil" should "be correct for float tensor" in {
compare(
NodeDef.newBuilder()
.setName("ceil_test")
.putAttr("T", typeAttr(DataType.DT_FLOAT))
.setOp("Ceil"),
Seq(Tensor[Float](4, 32, 32, 3).rand()),
0
)
}
}
| jenniew/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/utils/tf/loaders/CeilSpec.scala | Scala | apache-2.0 | 1,189 |
package com.danielasfregola.twitter4s
import akka.actor.ActorSystem
import com.danielasfregola.twitter4s.entities.{AccessToken, ConsumerToken}
import com.danielasfregola.twitter4s.http.clients.rest.RestClient
import com.danielasfregola.twitter4s.http.clients.rest.v2.tweets.{
TwitterSearchTweetsClient,
TwitterTimelinesClient,
TwitterTweetLookupClient
}
import com.danielasfregola.twitter4s.http.clients.rest.v2.users.TwitterUserLookupClient
import com.danielasfregola.twitter4s.util.Configurations._
import com.danielasfregola.twitter4s.util.SystemShutdown
/** Represents the functionalities offered by the V2 Twitter REST API
*/
class TwitterRestV2Client(val consumerToken: ConsumerToken, val accessToken: AccessToken)(
implicit _system: ActorSystem = ActorSystem("twitter4s-rest-v2"))
extends V2RestClients
with SystemShutdown {
protected val system = _system
protected val restClient = new RestClient(consumerToken, accessToken)
}
trait V2RestClients
extends TwitterTimelinesClient
with TwitterTweetLookupClient
with TwitterSearchTweetsClient
with TwitterUserLookupClient
object TwitterRestV2Client {
def apply(): TwitterRestV2Client = {
val consumerToken = ConsumerToken(key = consumerTokenKey, secret = consumerTokenSecret)
val accessToken = AccessToken(key = accessTokenKey, secret = accessTokenSecret)
apply(consumerToken, accessToken)
}
def apply(consumerToken: ConsumerToken, accessToken: AccessToken): TwitterRestV2Client =
new TwitterRestV2Client(consumerToken, accessToken)
def withActorSystem(system: ActorSystem): TwitterRestV2Client = {
val consumerToken = ConsumerToken(key = consumerTokenKey, secret = consumerTokenSecret)
val accessToken = AccessToken(key = accessTokenKey, secret = accessTokenSecret)
withActorSystem(consumerToken, accessToken)(system)
}
def withActorSystem(consumerToken: ConsumerToken, accessToken: AccessToken)(
system: ActorSystem): TwitterRestV2Client =
new TwitterRestV2Client(consumerToken, accessToken)(system)
}
| DanielaSfregola/twitter4s | src/main/scala/com/danielasfregola/twitter4s/TwitterRestV2Client.scala | Scala | apache-2.0 | 2,060 |
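A sketch of constructing the V2 client shown above; the token values are placeholders, and the no-arg TwitterRestV2Client() would instead read them from the twitter4s configuration:
import com.danielasfregola.twitter4s.TwitterRestV2Client
import com.danielasfregola.twitter4s.entities.{AccessToken, ConsumerToken}
// Explicit tokens (placeholder secrets); an implicit ActorSystem is created by default.
val consumerToken = ConsumerToken(key = "<consumer-key>", secret = "<consumer-secret>")
val accessToken = AccessToken(key = "<access-key>", secret = "<access-secret>")
val client: TwitterRestV2Client = TwitterRestV2Client(consumerToken, accessToken)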
package com.eevolution.context.dictionary.domain.model
import ai.x.play.json.Jsonx
import com.eevolution.context.dictionary.api.{ActiveEnabled, DomainModel, Identifiable, Traceable}
import org.joda.time.DateTime
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: emeris.hernandez@e-evolution.com, http://www.e-evolution.com , http://github.com/e-Evolution
* Created by emeris.hernandez@e-evolution.com , www.e-evolution.com
*/
/**
* Workflow Node Para Entity
* @param workflowNodeParaId Wf Node Para ID
* @param tenantId Tenant ID
* @param organizationId Organization ID
* @param isActive Is Active
* @param created Created
* @param createdBy Created By
* @param updated Updated
* @param updatedBy Updated By
* @param workflowNodeId Wf Node ID
* @param attributeName Attribute Name
* @param processParaId Process Para ID
* @param description Description
* @param attributeValue Attribute Value
* @param entityType Entity Type
* @param uuid UUID
*/
case class WorkflowNodePara(workflowNodeParaId: Int,
tenantId: Int,
organizationId : Int ,
isActive: Boolean = true,
created: DateTime = DateTime.now,
createdBy: Int,
updated: DateTime = DateTime.now,
updatedBy: Int,
workflowNodeId: Int,
attributeName: Option[String],
processParaId: Option[Int],
description: Option[String],
attributeValue: Option[String],
entityType: String ="D",
uuid: String
) extends DomainModel
with ActiveEnabled
with Identifiable
with Traceable {
override type ActiveEnabled = this.type
override type Identifiable = this.type
override type Traceable = this.type
override def Id: Int = workflowNodeParaId
override val entityName: String = "AD_Wf_Node_Para"
override val identifier: String = "AD_Wf_Node_Para_ID"
}
object WorkflowNodePara {
implicit lazy val jsonFormat = Jsonx.formatCaseClass[WorkflowNodePara]
def create(workflowNodeParaId: Int,
tenantId: Int,
organizationId : Int ,
isActive: Boolean,
created: DateTime,
createdBy: Int,
updated: DateTime,
updatedBy: Int,
workflowNodeId: Int,
attributeName: String,
processParaId: Int,
description: String,
attributeValue: String,
entityType: String,
uuid: String) = WorkflowNodePara(workflowNodeParaId, tenantId, organizationId, isActive, created,
createdBy, updated, updatedBy, workflowNodeId, Option(attributeName), Option(processParaId), Option(description), Option(attributeValue), entityType, uuid)
}
| adempiere/ADReactiveSystem | dictionary-api/src/main/scala/com/eevolution/context/dictionary/domain/model/WorkflowNodePara.scala | Scala | gpl-3.0 | 3,636 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.helptosavefrontend.services
import com.google.inject.{ImplementedBy, Inject}
import javax.inject.Singleton
import uk.gov.hmrc.helptosavefrontend.connectors.HelpToSaveReminderConnector
import uk.gov.hmrc.helptosavefrontend.models.reminder.{CancelHtsUserReminder, HtsUserSchedule, UpdateReminderEmail}
import uk.gov.hmrc.helptosavefrontend.util.{Logging, Result}
import uk.gov.hmrc.http.HeaderCarrier
import scala.concurrent.ExecutionContext
@ImplementedBy(classOf[HelpToSaveReminderServiceImpl])
trait HelpToSaveReminderService {
def updateHtsUser(htsUser: HtsUserSchedule)(implicit hc: HeaderCarrier, ec: ExecutionContext): Result[HtsUserSchedule]
def getHtsUser(nino: String)(implicit hc: HeaderCarrier, ec: ExecutionContext): Result[HtsUserSchedule]
def cancelHtsUserReminders(
cancelHtsUserReminder: CancelHtsUserReminder
)(implicit hc: HeaderCarrier, ec: ExecutionContext): Result[Unit]
def updateReminderEmail(
updateReminderEmail: UpdateReminderEmail
)(implicit hc: HeaderCarrier, ec: ExecutionContext): Result[Unit]
}
@Singleton
class HelpToSaveReminderServiceImpl @Inject() (helpToSaveReminderConnector: HelpToSaveReminderConnector)
extends HelpToSaveReminderService with Logging {
def updateHtsUser(htsUserSchedule: HtsUserSchedule)(implicit hc: HeaderCarrier, ec: ExecutionContext): Result[HtsUserSchedule] =
helpToSaveReminderConnector.updateHtsUser(htsUserSchedule)
def getHtsUser(nino: String)(implicit hc: HeaderCarrier, ec: ExecutionContext): Result[HtsUserSchedule] =
helpToSaveReminderConnector.getHtsUser(nino)
def cancelHtsUserReminders(
cancelHtsUserReminder: CancelHtsUserReminder
)(implicit hc: HeaderCarrier, ec: ExecutionContext): Result[Unit] =
helpToSaveReminderConnector.cancelHtsUserReminders(cancelHtsUserReminder)
def updateReminderEmail(
updateReminderEmail: UpdateReminderEmail
)(implicit hc: HeaderCarrier, ec: ExecutionContext): Result[Unit] =
helpToSaveReminderConnector.updateReminderEmail(updateReminderEmail)
}
| hmrc/help-to-save-frontend | app/uk/gov/hmrc/helptosavefrontend/services/HelpToSaveReminderService.scala | Scala | apache-2.0 | 2,644 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.catalyst.FunctionIdentifier
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.execution.QueryExecution
import org.apache.spark.sql.util.QueryExecutionListener
class SessionStateSuite extends SparkFunSuite {
/**
* A shared SparkSession for all tests in this suite. Make sure you reset any changes to this
* session as this is a singleton HiveSparkSession in HiveSessionStateSuite and it's shared
* with all Hive test suites.
*/
protected var activeSession: SparkSession = _
override def beforeAll(): Unit = {
super.beforeAll()
activeSession = SparkSession.builder()
.master("local")
.config("default-config", "default")
.getOrCreate()
}
override def afterAll(): Unit = {
try {
if (activeSession != null) {
activeSession.stop()
activeSession = null
SparkSession.clearActiveSession()
SparkSession.clearDefaultSession()
}
} finally {
super.afterAll()
}
}
test("fork new session and inherit RuntimeConfig options") {
val key = "spark-config-clone"
try {
activeSession.conf.set(key, "active")
// inheritance
val forkedSession = activeSession.cloneSession()
assert(forkedSession ne activeSession)
assert(forkedSession.conf ne activeSession.conf)
assert(forkedSession.conf.get(key) == "active")
// independence
forkedSession.conf.set(key, "forked")
assert(activeSession.conf.get(key) == "active")
activeSession.conf.set(key, "dontcopyme")
assert(forkedSession.conf.get(key) == "forked")
} finally {
activeSession.conf.unset(key)
}
}
test("fork new session and inherit function registry and udf") {
val testFuncName1 = FunctionIdentifier("strlenScala")
val testFuncName2 = FunctionIdentifier("addone")
try {
activeSession.udf.register(testFuncName1.funcName, (_: String).length + (_: Int))
val forkedSession = activeSession.cloneSession()
// inheritance
assert(forkedSession ne activeSession)
assert(forkedSession.sessionState.functionRegistry ne
activeSession.sessionState.functionRegistry)
assert(forkedSession.sessionState.functionRegistry.lookupFunction(testFuncName1).nonEmpty)
// independence
forkedSession.sessionState.functionRegistry.dropFunction(testFuncName1)
assert(activeSession.sessionState.functionRegistry.lookupFunction(testFuncName1).nonEmpty)
activeSession.udf.register(testFuncName2.funcName, (_: Int) + 1)
assert(forkedSession.sessionState.functionRegistry.lookupFunction(testFuncName2).isEmpty)
} finally {
activeSession.sessionState.functionRegistry.dropFunction(testFuncName1)
activeSession.sessionState.functionRegistry.dropFunction(testFuncName2)
}
}
test("fork new session and inherit experimental methods") {
val originalExtraOptimizations = activeSession.experimental.extraOptimizations
val originalExtraStrategies = activeSession.experimental.extraStrategies
try {
object DummyRule1 extends Rule[LogicalPlan] {
def apply(p: LogicalPlan): LogicalPlan = p
}
object DummyRule2 extends Rule[LogicalPlan] {
def apply(p: LogicalPlan): LogicalPlan = p
}
val optimizations = List(DummyRule1, DummyRule2)
activeSession.experimental.extraOptimizations = optimizations
val forkedSession = activeSession.cloneSession()
// inheritance
assert(forkedSession ne activeSession)
assert(forkedSession.experimental ne activeSession.experimental)
assert(forkedSession.experimental.extraOptimizations.toSet ==
activeSession.experimental.extraOptimizations.toSet)
// independence
forkedSession.experimental.extraOptimizations = List(DummyRule2)
assert(activeSession.experimental.extraOptimizations == optimizations)
activeSession.experimental.extraOptimizations = List(DummyRule1)
assert(forkedSession.experimental.extraOptimizations == List(DummyRule2))
} finally {
activeSession.experimental.extraOptimizations = originalExtraOptimizations
activeSession.experimental.extraStrategies = originalExtraStrategies
}
}
test("fork new session and inherit listener manager") {
class CommandCollector extends QueryExecutionListener {
val commands: ArrayBuffer[String] = ArrayBuffer.empty[String]
override def onFailure(funcName: String, qe: QueryExecution, ex: Exception) : Unit = {}
override def onSuccess(funcName: String, qe: QueryExecution, duration: Long): Unit = {
commands += funcName
}
}
val collectorA = new CommandCollector
val collectorB = new CommandCollector
val collectorC = new CommandCollector
try {
def runCollectQueryOn(sparkSession: SparkSession): Unit = {
val tupleEncoder = Encoders.tuple(Encoders.scalaInt, Encoders.STRING)
val df = sparkSession.createDataset(Seq(1 -> "a"))(tupleEncoder).toDF("i", "j")
df.select("i").collect()
}
activeSession.listenerManager.register(collectorA)
val forkedSession = activeSession.cloneSession()
// inheritance
assert(forkedSession ne activeSession)
assert(forkedSession.listenerManager ne activeSession.listenerManager)
runCollectQueryOn(forkedSession)
activeSession.sparkContext.listenerBus.waitUntilEmpty()
assert(collectorA.commands.length == 1) // forked should callback to A
assert(collectorA.commands(0) == "collect")
// independence
// => changes to forked do not affect original
forkedSession.listenerManager.register(collectorB)
runCollectQueryOn(activeSession)
activeSession.sparkContext.listenerBus.waitUntilEmpty()
assert(collectorB.commands.isEmpty) // original should not callback to B
assert(collectorA.commands.length == 2) // original should still callback to A
assert(collectorA.commands(1) == "collect")
// <= changes to original do not affect forked
activeSession.listenerManager.register(collectorC)
runCollectQueryOn(forkedSession)
activeSession.sparkContext.listenerBus.waitUntilEmpty()
assert(collectorC.commands.isEmpty) // forked should not callback to C
assert(collectorA.commands.length == 3) // forked should still callback to A
assert(collectorB.commands.length == 1) // forked should still callback to B
assert(collectorA.commands(2) == "collect")
assert(collectorB.commands(0) == "collect")
} finally {
activeSession.listenerManager.unregister(collectorA)
activeSession.listenerManager.unregister(collectorC)
}
}
test("fork new sessions and run query on inherited table") {
def checkTableExists(sparkSession: SparkSession): Unit = {
QueryTest.checkAnswer(sparkSession.sql(
"""
|SELECT x.str, COUNT(*)
|FROM df x JOIN df y ON x.str = y.str
|GROUP BY x.str
""".stripMargin),
Row("1", 1) :: Row("2", 1) :: Row("3", 1) :: Nil)
}
val spark = activeSession
    // Cannot use `import activeSession.implicits._` due to a compiler limitation.
import spark.implicits._
try {
activeSession
.createDataset[(Int, String)](Seq(1, 2, 3).map(i => (i, i.toString)))
.toDF("int", "str")
.createOrReplaceTempView("df")
checkTableExists(activeSession)
val forkedSession = activeSession.cloneSession()
assert(forkedSession ne activeSession)
assert(forkedSession.sessionState ne activeSession.sessionState)
checkTableExists(forkedSession)
checkTableExists(activeSession.cloneSession()) // ability to clone multiple times
checkTableExists(forkedSession.cloneSession()) // clone of clone
} finally {
activeSession.sql("drop table df")
}
}
test("fork new session and inherit reference to SharedState") {
val forkedSession = activeSession.cloneSession()
assert(activeSession.sharedState eq forkedSession.sharedState)
}
test("SPARK-27253: forked new session should not discard SQLConf overrides") {
val key = "default-config"
try {
// override default config
activeSession.conf.set(key, "active")
val forkedSession = activeSession.cloneSession()
assert(forkedSession ne activeSession)
assert(forkedSession.conf ne activeSession.conf)
// forked new session should not discard SQLConf overrides
assert(forkedSession.conf.get(key) == "active")
} finally {
activeSession.conf.unset(key)
}
}
}
|
maropu/spark
|
sql/core/src/test/scala/org/apache/spark/sql/SessionStateSuite.scala
|
Scala
|
apache-2.0
| 9,595
|
/*
* Copyright © 2014 TU Berlin (emma@dima.tu-berlin.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.emmalanguage
package compiler
import spark.SparkSpecializeSupport
trait SparkCompiler extends Compiler with SparkSpecializeSupport {
override lazy val implicitTypes: Set[u.Type] = API.implicitTypes ++ SparkAPI.implicitTypes
trait NtvAPI extends ModuleAPI {
//@formatter:off
val sym = api.Sym[org.emmalanguage.api.spark.SparkNtv.type].asModule
val select = op("select")
val project = op("project")
val equiJoin = op("equiJoin")
override lazy val ops = Set(select, project, equiJoin)
//@formatter:on
}
trait SparkExpAPI extends ModuleAPI {
//@formatter:off
val Column = api.Type[org.apache.spark.sql.Column]
val sym = api.Sym[org.emmalanguage.api.spark.SparkExp.type].asModule
// projections
val rootProj = op("rootProj")
val nestProj = op("nestProj")
val rootStruct = op("rootStruct")
val nestStruct = op("nestStruct")
// comparisons
val eq = op("eq", List(2, 1))
val ne = op("ne", List(2, 1))
val gt = op("gt")
val lt = op("lt")
val geq = op("geq")
val leq = op("leq")
// boolean
val not = op("not")
val or = op("or")
val and = op("and")
// arithmetic
val plus = op("plus")
val minus = op("minus")
val multiply = op("multiply")
val divide = op("divide")
val mod = op("mod")
// string
val startsWith = op("startsWith")
val projections = Set(rootProj, nestProj, rootStruct, nestStruct)
val comparisons = Set(eq, ne, lt, gt, leq, geq)
val boolean = Set(not, and, or)
val arithmetic = Set(plus, minus, multiply, divide, mod)
val string = Set(startsWith)
val ops = projections ++ comparisons ++ boolean ++ arithmetic ++ string
//@formatter:on
}
trait SparkAPILike extends BackendAPI {
lazy val Encoder = api.Type[org.apache.spark.sql.Encoder[Any]].typeConstructor
lazy val SparkSession = api.Type[org.apache.spark.sql.SparkSession]
lazy val implicitTypes = Set(Encoder, SparkSession)
lazy val MutableBag = new MutableBagAPI(api.Sym[org.emmalanguage.api.SparkMutableBag[Any, Any]].asClass)
lazy val MutableBag$ = new MutableBag$API(api.Sym[org.emmalanguage.api.SparkMutableBag.type].asModule)
lazy val Ops = new OpsAPI(api.Sym[org.emmalanguage.api.spark.SparkOps.type].asModule)
lazy val Ntv = new NtvAPI {}
lazy val Exp = new SparkExpAPI {}
}
object SparkAPI extends SparkAPILike {
lazy val DataBag = new DataBagAPI(api.Sym[org.emmalanguage.api.SparkRDD[Any]].asClass)
lazy val DataBag$ = new DataBag$API(api.Sym[org.emmalanguage.api.SparkRDD.type].asModule)
}
object SparkAPI2 extends SparkAPILike {
lazy val DataBag = new DataBagAPI(api.Sym[org.emmalanguage.api.SparkDataset[Any]].asClass)
lazy val DataBag$ = new DataBag$API(api.Sym[org.emmalanguage.api.SparkDataset.type].asModule)
}
}
|
aalexandrov/emma
|
emma-spark/src/main/scala/org/emmalanguage/compiler/SparkCompiler.scala
|
Scala
|
apache-2.0
| 3,664
|
package es.weso.rbe.interval
import es.weso.rbe._
import es.weso.collection._
import IntOrUnbounded._
import es.weso.rbe.deriv._
case class IntervalChecker[A](rbe: Rbe[A]) extends BagChecker[A] {
type Matched[B] = Either[String,B]
def isOk[B](m: Matched[B]): Boolean = m.isRight
lazy val derivChecker = DerivChecker(rbe)
  def check(bag: Bag[A], open: Boolean): Matched[Bag[A]] = {
if (rbe.containsRepeats) {
derivChecker.check(bag,open)
} else {
      if (!open && extraSymbols(bag).nonEmpty)
Left(s"$rbe doesn't match bag $bag. Open: $open, Extra symbols: ${extraSymbols(bag)}")
else
if (IntervalChecker.interval(rbe,bag).contains(1))
Right(bag)
else
// Question: Check using derivatives to obtain better error message
// TODO: Could it be optimized knowing that it will fail?
derivChecker.check(bag,open)
}
}
private def extraSymbols(bag: Bag[A]): Seq[A] = {
bag.elems.map(_._1).filter(!rbe.symbols.contains(_)).toSeq
}
}
object IntervalChecker {
def interval[A](rbe: Rbe[A], bag: Bag[A]): Interval = {
rbe match {
case Fail(_) => Interval(1,0)
case Empty => Interval(0,Unbounded)
case Symbol(a,n,m) => {
val wa = bag.multiplicity(a)
Interval(divIntLimitUp(wa, m),divIntLimitDown(wa,n))
}
case And(v1,v2) => interval(v1,bag) & interval(v2,bag)
case Or(v1,v2) => interval(v1,bag) + interval(v2,bag)
case Star(v) => {
if (rbe.noSymbolsInBag(bag)) Interval(0,Unbounded)
else {
val ie = interval(v,bag)
if (ie.isEmpty) ie
else Interval(1,Unbounded)
}
}
case Plus(v) => {
if (rbe.noSymbolsInBag(bag)) Interval(0,0)
else {
val ie = interval(v,bag)
if (ie.isEmpty) ie
else Interval(1,ie.m)
}
}
// Adding Repetitions on expressions breaks the single-occurrence bag expression
// This case is handled by detecting repetitions and invoking the derivatives algorithm
case Repeat(v,n,m) =>
        throw new Exception("Intervals algorithm doesn't work with repetitions. RBE expr: " + rbe)
}
}
}
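// Hypothetical sketch (not part of the original file): a small helper showing how the
// interval of a repeat-free regular bag expression can be queried directly. It only
// reuses the `interval` and `contains` calls already exercised by `check` above.
object IntervalCheckerSketch {
  // True iff the bag matches `rbe` exactly once under the interval semantics,
  // i.e. the same test `check` performs for repeat-free expressions.
  def matchesOnce[A](rbe: Rbe[A], bag: Bag[A]): Boolean =
    IntervalChecker.interval(rbe, bag).contains(1)
}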
|
labra/rbe
|
src/main/scala/es/weso/rbe/interval/IntervalChecker.scala
|
Scala
|
mit
| 2,313
|
package beam.integration
import beam.utils.TestConfigUtils.testConfig
import com.typesafe.config.{Config, ConfigFactory, ConfigValueFactory}
trait IntegrationSpecCommon {
private val LAST_ITER_CONF_PATH = "matsim.modules.controler.lastIteration"
protected var totalIterations: Int = 1
val configFileName = "test/input/beamville/beam.conf"
val configLocation = ConfigFactory.parseString("config=" + configFileName)
lazy val baseConfig: Config = testConfig(configFileName)
.resolve()
.withValue("beam.outputs.events.fileOutputFormats", ConfigValueFactory.fromAnyRef("xml"))
.withValue(LAST_ITER_CONF_PATH, ConfigValueFactory.fromAnyRef(totalIterations - 1))
.withFallback(configLocation)
.resolve
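  /**
    * Checks that every pair of consecutive elements of `s` satisfies `cf`,
    * e.g. isOrdered(Seq(1, 2, 3))((a, b) => a <= b) is true.
    */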
def isOrdered[A](s: Seq[A])(cf: (A, A) => Boolean): Boolean = {
val z1 = s.drop(1)
val z2 = s.dropRight(1)
val zip = z2 zip z1
zip.forall { case (a, b) => cf(a, b) }
}
}
|
colinsheppard/beam
|
src/test/scala/beam/integration/IntegrationSpecCommon.scala
|
Scala
|
gpl-3.0
| 924
|
package common.implicits
import scala.concurrent.{ExecutionContext, Future}
object RichFuture {
/**
* Adds various extension methods to the Future[Option] class.
*
* @param futureOfOption a Future[Option] to be extended
* @tparam A a type that was lifted to an inner Option of a Future[Option]
*/
implicit class RichFutureOfOption[A](futureOfOption: Future[Option[A]]) {
/**
* Converts this Future[Option] to a Future.failed with the given error, if the inner Option of this Future is
* not defined (None).
*
* @param error an error to be wrapped into a Future.failed
* @param ec an execution context for the Future
* @return a Future.failed with a given error.
*/
def failOnNone(error: Throwable)(implicit ec: ExecutionContext): Future[A] = futureOfOption.flatMap {
case Some(value) => Future.successful(value)
case _ => Future.failed(error)
}
}
}
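// Hypothetical usage sketch (not part of the original file): shows how failOnNone turns
// an absent value into a failed Future. UserNotFound and findUser are illustrative
// placeholders, not part of the project.
object RichFutureUsageExample {
  import RichFuture._
  import scala.concurrent.ExecutionContext.Implicits.global

  final case class UserNotFound(id: Long) extends RuntimeException(s"User $id not found")

  // Stand-in for a repository lookup that found nothing.
  def findUser(id: Long): Future[Option[String]] = Future.successful(Option.empty[String])

  // Succeeds with the user when present, otherwise fails with UserNotFound.
  def requireUser(id: Long): Future[String] =
    findUser(id).failOnNone(UserNotFound(id))
}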
|
sysgears/apollo-universal-starter-kit
|
modules/core/server-scala/src/main/scala/common/implicits/RichFuture.scala
|
Scala
|
mit
| 949
|
package org.genericConfig.admin.shared.configTree
/**
* Copyright (C) 2016 Gennadi Heimann genaheimann@gmail.com
*
* Created by Gennadi Heimann 02.06.2020
*/
class ConfigTreeItemUserPropDTO {
}
|
gennadij/admin
|
shared/src/main/scala/org/genericConfig/admin/shared/configTree/ConfigTreeItemUserPropDTO.scala
|
Scala
|
apache-2.0
| 200
|
package mesosphere.marathon.plugin.auth
import mesosphere.marathon.plugin.http.HttpResponse
import mesosphere.marathon.plugin.plugin.Plugin
/**
* Base trait for all authorizer implementations.
* An authorizer is able to authorize an action on a resource based on an identity:
*
* [[Identity]] is a custom implementation that represents a person or system that has access to Marathon.
 * [[AuthorizedAction]] is the action to be performed on the resource.
*/
trait Authorizer extends Plugin {
/**
* Decide whether it is allowed for the given principal to perform the given action on the given resource.
* @param principal the identity that tries to access the resource
* @param action the action that the user tries to perform.
* @param resource the resource the user wants to access.
* @tparam Resource the type of the resource for action and resource.
* @return true if the user is authorized to access the resource to do the defined action.
*/
def isAuthorized[Resource](principal: Identity, action: AuthorizedAction[Resource], resource: Resource): Boolean
/**
* This method is called in the case that the identity is not authorized to access the resource.
* The main purpose of this implementation is to customize the http response (e.g. response code, redirect etc.)
* @param principal the identity that has tried to access a resource with a given action.
* @param response the response to customize.
*/
  def handleNotAuthorized(principal: Identity, response: HttpResponse): Unit
}
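// Minimal illustrative sketch (not part of the plugin interface sources): an Authorizer
// that allows every action and leaves the "not authorized" response untouched. A real
// plugin would inspect the principal and action and set an appropriate status on `response`.
class AllowAllAuthorizer extends Authorizer {
  override def isAuthorized[Resource](
    principal: Identity,
    action: AuthorizedAction[Resource],
    resource: Resource): Boolean = true

  override def handleNotAuthorized(principal: Identity, response: HttpResponse): Unit = ()
}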
|
ss75710541/marathon
|
plugin-interface/src/main/scala/mesosphere/marathon/plugin/auth/Authorizer.scala
|
Scala
|
apache-2.0
| 1,518
|
/* Copyright 2017-19, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.tensorflow.api.ops.rnn.cell
import org.platanios.tensorflow.api.core.Shape
import org.platanios.tensorflow.api.core.types.{IsNotQuantized, TF}
import org.platanios.tensorflow.api.implicits.Implicits._
import org.platanios.tensorflow.api.implicits.helpers.OutputToShape
import org.platanios.tensorflow.api.ops.{NN, Op, Output}
import org.platanios.tensorflow.api.ops.basic.Basic
import org.platanios.tensorflow.api.ops.math.Math
/** A basic Long-Short Term Memory (LSTM) cell.
*
* The implementation is based on: ["Recurrent Neural Network Regularization", Zaremba et al](http://arxiv.org/abs/1409.2329).
*
* We add `forgetBias` (which defaults to 1) to the biases of the forget gate in order to reduce the scale of
* forgetting in the beginning of training.
*
* This cell does not allow for cell clipping, a projection layer, or for peep-hole connections. For advanced
* models, please use the full `lstmCell` op.
*
* Input tensors must be two-dimensional.
*
* @group RNNCellOps
* @param kernel Kernel matrix to use.
* @param bias Bias vector to use.
* @param activation Activation function to use.
* @param forgetBias Forget bias added to the forget gate.
* @param name Name scope for the created ops.
*
* @author Emmanouil Antonios Platanios
*/
class BasicLSTMCell[T: TF : IsNotQuantized] protected (
val kernel: Output[T],
val bias: Output[T],
val activation: Output[T] => Output[T],
val forgetBias: Float = 1.0f,
val name: String = "BasicLSTMCell"
) extends RNNCell[Output[T], LSTMState[T], Shape, (Shape, Shape)] {
private val numUnits = bias.shape(0) / 4
override def outputShape: Shape = {
Shape(numUnits)
}
override def stateShape: (Shape, Shape) = {
(Shape(numUnits), Shape(numUnits))
}
@throws[IllegalArgumentException]
override def forward(input: Tuple[Output[T], LSTMState[T]]): Tuple[Output[T], LSTMState[T]] = {
Op.nameScope(name) {
val output = input.output
if (output.rank != 2)
throw new IllegalArgumentException(s"Input must be rank-2 (provided rank-${output.rank}).")
if (output.shape(1) == -1)
throw new IllegalArgumentException(s"Last axis of input shape (${output.shape}) must be known.")
val one = Basic.constant(1)
// Parameters of gates are concatenated into one multiply for efficiency.
val lstmMatrix = NN.addBias(Math.matmul(Basic.concatenate(Seq(output, input.state.m), axis = 1), kernel), bias)
// i = input gate, j = new input, f = forget gate, o = output gate
val lstmMatrixBlocks = Basic.splitEvenly(lstmMatrix, 4, axis = one)
val (i, j, f, o) = (lstmMatrixBlocks(0), lstmMatrixBlocks(1), lstmMatrixBlocks(2), lstmMatrixBlocks(3))
val forgetBiasTensor = Basic.constant(forgetBias).castTo[T]
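      // New cell state: c_t = sigmoid(f + forgetBias) * c_{t-1} + sigmoid(i) * activation(j);
      // new output:     m_t = activation(c_t) * sigmoid(o).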
val c = Math.add(
Math.multiply(input.state.c, Math.sigmoid(f + forgetBiasTensor)),
Math.multiply(Math.sigmoid(i), activation(j)))
val m = Math.multiply(activation(c), Math.sigmoid(o))
LSTMTuple(m, LSTMState(c, m))
}
}
}
object BasicLSTMCell {
def apply[T: TF : IsNotQuantized](
kernel: Output[T],
bias: Output[T],
activation: Output[T] => Output[T],
forgetBias: Float = 1.0f,
name: String = "BasicLSTMCell"
): BasicLSTMCell[T] = {
new BasicLSTMCell(kernel, bias, activation, forgetBias, name)
}
}
|
eaplatanios/tensorflow_scala
|
modules/api/src/main/scala/org/platanios/tensorflow/api/ops/rnn/cell/BasicLSTMCell.scala
|
Scala
|
apache-2.0
| 4,049
|
package io.peregrine
import com.twitter.finagle.http.Cookie
import org.jboss.netty.handler.codec.http.DefaultCookie
class CookieBuilder {
private var secure : Option[Boolean] = None
private var httpOnly: Option[Boolean] = None
private var name : Option[String] = None
private var value : Option[String] = None
private var path : Option[String] = None
def secure(secure: Boolean): CookieBuilder = {
this.secure = Option(secure)
this
}
def httpOnly(httpOnly: Boolean): CookieBuilder = {
this.httpOnly = Option(httpOnly)
this
}
def name(cookieName: String): CookieBuilder = {
this.name = Option(cookieName)
this
}
def value(value: String): CookieBuilder = {
this.value = Option(value)
this
}
def path(path: String): CookieBuilder = {
this.path = Option(path)
this
}
def build(): Cookie = {
if (name.isEmpty) throw new Exception("name cannot be empty")
val cookie = new DefaultCookie(name.getOrElse(""), value.getOrElse(""))
cookie.setHttpOnly(httpOnly.getOrElse(false))
cookie.setSecure(secure.getOrElse(false))
cookie.setPath(path.getOrElse("/"))
new Cookie(cookie)
}
}
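// Hypothetical usage sketch (not part of the original file): builds a secure, HTTP-only
// session cookie scoped to "/account". Names and values are illustrative.
object CookieBuilderExample {
  val sessionCookie: Cookie = new CookieBuilder()
    .name("session")
    .value("abc123")
    .path("/account")
    .secure(secure = true)
    .httpOnly(httpOnly = true)
    .build()
}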
|
dvarelap/stilt
|
src/main/scala/io/peregrine/CookieBuilder.scala
|
Scala
|
apache-2.0
| 1,187
|
package org.trustedanalytics.sparktk.frame.internal.rdd
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row
import scala.reflect.ClassTag
//implicit conversion for PairRDD
import org.apache.spark.SparkContext._
/**
*
 * This is a wrapper to encapsulate methods that may need to be serialized to be executed on Spark worker nodes.
 * If you don't know what this means, please read about the Closure Mishap
* [[http://ampcamp.berkeley.edu/wp-content/uploads/2012/06/matei-zaharia-part-1-amp-camp-2012-spark-intro.pdf]]
* and Task Serialization
* [[http://stackoverflow.com/questions/22592811/scala-spark-task-not-serializable-java-io-notserializableexceptionon-when]]
*/
object MiscFrameFunctions extends Serializable {
/**
   * Take an input RDD and return another RDD containing a subset of the original contents
* @param rdd input RDD
* @param offset rows to be skipped before including rows in the new RDD
* @param count total rows to be included in the new RDD
* @param limit limit on number of rows to be included in the new RDD
*/
def getPagedRdd[T: ClassTag](rdd: RDD[T], offset: Long, count: Long, limit: Int): RDD[T] = {
val sumsAndCounts = MiscFrameFunctions.getPerPartitionCountAndAccumulatedSum(rdd)
val capped = limit match {
case -1 => count
case _ => Math.min(count, limit)
}
//Start getting rows. We use the sums and counts to figure out which
//partitions we need to read from and which to just ignore
val pagedRdd: RDD[T] = rdd.mapPartitionsWithIndex((i, rows) => {
val (ct: Long, sum: Long) = sumsAndCounts(i)
val thisPartStart = sum - ct
if (sum < offset || thisPartStart >= offset + capped) {
//println("skipping partition " + i)
Iterator.empty
}
else {
val start = Math.max(offset - thisPartStart, 0)
val numToTake = Math.min((capped + offset) - thisPartStart, ct) - start
//println(s"partition $i: starting at $start and taking $numToTake")
rows.slice(start.toInt, start.toInt + numToTake.toInt)
}
})
pagedRdd
}
/**
   * Take an input RDD and return a subset of the original contents
* @param rdd input RDD
* @param offset rows to be skipped before including rows in the result
* @param count total rows to be included in the result
* @param limit limit on number of rows to be included in the result
*/
def getRows[T: ClassTag](rdd: RDD[T], offset: Long, count: Int, limit: Int): Seq[T] = {
val pagedRdd = getPagedRdd(rdd, offset, count, limit)
val rows: Seq[T] = pagedRdd.collect()
rows
}
/**
* Return the count and accumulated sum of rows in each partition
*/
def getPerPartitionCountAndAccumulatedSum[T](rdd: RDD[T]): Map[Int, (Long, Long)] = {
//Count the rows in each partition, then order the counts by partition number
val counts = rdd.mapPartitionsWithIndex(
(i: Int, rows: Iterator[T]) => Iterator.single((i, rows.size.toLong)))
.collect()
.sortBy(_._1)
//Create cumulative sums of row counts by partition, e.g. 1 -> 200, 2-> 400, 3-> 412
//if there were 412 rows divided into two 200 row partitions and one 12 row partition
val sums = counts.scanLeft((0L, 0L)) {
(t1, t2) => (t2._1, t1._2 + t2._2)
}
.drop(1) //first one is (0,0), drop that
.toMap
//Put the per-partition counts and cumulative counts together
val sumsAndCounts = counts.map {
case (part, count) => (part, (count, sums(part)))
}.toMap
sumsAndCounts
}
/**
* Remove duplicate rows identified by the key
* @param pairRdd rdd which has (key, value) structure in each row
*/
def removeDuplicatesByKey(pairRdd: RDD[(List[Any], Row)]): RDD[Row] = {
pairRdd.reduceByKey((x, y) => x).map(x => x._2)
}
}
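// Hypothetical usage sketch (not part of the original file): fetches the second page of
// 100 rows from an RDD of Rows, capping the overall result at 1000 rows.
object MiscFrameFunctionsExample {
  def secondPage(rows: RDD[Row]): Seq[Row] =
    MiscFrameFunctions.getRows(rows, offset = 100L, count = 100, limit = 1000)
}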
|
shibanis1/spark-tk
|
core/src/main/scala/org/trustedanalytics/sparktk/frame/internal/rdd/MiscFrameFunctions.scala
|
Scala
|
apache-2.0
| 3,815
|
/*
* Distributed as part of Scalala, a linear algebra library.
*
* Copyright (C) 2008- Daniel Ramage
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110 USA
*/
package scalala;
package tensor;
import domain.{IterableDomain,Domain1};
/**
* Implementation trait for a Tensor1 view of a slice of keys from a Tensor.
*
* @author dramage
*/
trait Tensor1SliceLike
[@specialized(Int,Long) K1, +D1<:IterableDomain[K1],
@specialized(Int,Long) K2, +D2<:Domain1[K2],
@specialized(Int,Long,Float,Double,Boolean) V, +Coll<:Tensor[K1,V],
+This<:Tensor1Slice[K1,K2,V,Coll]]
extends TensorSliceLike[K1, D1, K2, D2, V, Coll, This]
with Tensor1Like[K2, V, D2, This];
/**
* A Tensor1 view of a slice of keys from a Tensor.
*
* @author dramage
*/
trait Tensor1Slice
[@specialized(Int,Long) K1, @specialized(Int,Long) K2,
@specialized(Int,Long,Float,Double,Boolean) V, +Coll<:Tensor[K1,V]]
extends TensorSlice[K1,K2,V,Coll] with Tensor1[K2,V]
with Tensor1SliceLike[K1, IterableDomain[K1], K2, Domain1[K2], V, Coll, Tensor1Slice[K1, K2, V, Coll]];
|
scalala/Scalala
|
src/main/scala/scalala/tensor/Tensor1Slice.scala
|
Scala
|
lgpl-2.1
| 1,728
|
package org.jetbrains.plugins.scala
package lang
package psi
package impl
package base
package types
import com.intellij.lang.ASTNode
import com.intellij.psi._
import com.intellij.psi.scope.PsiScopeProcessor
import org.jetbrains.plugins.scala.lang.psi.api.ScalaElementVisitor
import org.jetbrains.plugins.scala.lang.psi.api.base.ScConstructor
import org.jetbrains.plugins.scala.lang.psi.api.base.types._
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScTypeAliasDefinition
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScTypeParametersOwner
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.synthetic.ScSyntheticClass
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.psi.types.result.{Failure, Success, TypeResult, TypingContext}
import org.jetbrains.plugins.scala.lang.resolve.ScalaResolveResult
import org.jetbrains.plugins.scala.macroAnnotations.{Cached, ModCount}
import scala.annotation.tailrec
/**
* @author Alexander Podkhalyuzin, ilyas
*/
class ScParameterizedTypeElementImpl(node: ASTNode) extends ScalaPsiElementImpl(node) with ScParameterizedTypeElement {
override def toString: String = "ParametrizedTypeElement: " + getText
def typeArgList = findChildByClass(classOf[ScTypeArgs])
def typeElement = findChildByClass(classOf[ScTypeElement])
def findConstructor = {
getContext match {
case constr: ScConstructor => Some(constr)
case _ => None
}
}
private var desugarizedTypeModCount: Long = 0L
private var desugarizedType: Option[ScTypeElement] = null
//computes desugarized type either for existential type or one of kind projector types
@Cached(synchronized = true, ModCount.getModificationCount, getManager)
def computeDesugarizedType: Option[ScTypeElement] = {
val inlineSyntaxIds = Set("?", "+?", "-?")
def kindProjectorFunctionSyntax(elem: ScTypeElement): Option[ScTypeElement] = {
def convertParameterized(param: ScParameterizedTypeElement): String = {
param.typeElement.getText match {
case v@("+" | "-") => //λ[(-[A], +[B]) => Function2[A, Int, B]]
param.typeArgList.typeArgs match {
case Seq(simple) => v ++ simple.getText
case _ => "" //should have only one type arg
}
case _ => param.getText //it's a higher kind type
}
}
def convertSimpleType(simple: ScSimpleTypeElement) = simple.getText.replaceAll("`", "")
elem match {
case fun: ScFunctionalTypeElement =>
fun.returnTypeElement match {
case Some(ret) =>
val typeName = "Λ$"
val paramText = fun.paramTypeElement match {
case tuple: ScTupleTypeElement =>
val paramList = tuple.components.map {
case parameterized: ScParameterizedTypeElement => convertParameterized(parameterized)
case simple: ScSimpleTypeElement => convertSimpleType(simple)
case _ => return None //something went terribly wrong
}
paramList.mkString(sep = ", ")
case simple: ScSimpleTypeElement => simple.getText.replaceAll("`", "")
case parameterized: ScParameterizedTypeElement => convertParameterized(parameterized)
case _ => return None
}
val lambdaText = s"({type $typeName[$paramText] = ${ret.getText}})#$typeName"
val newTE = ScalaPsiElementFactory.createTypeElementFromText(lambdaText, getContext, this)
Option(newTE)
case _ => None
}
case _ => None
}
}
def kindProjectorInlineSyntax(e: PsiElement): Option[ScTypeElement] = {
def generateName(i: Int): String = { //kind projector generates names the same way
val res = ('α' + (i % 25)).toChar.toString
if (i < 25) res
else res + (i / 25)
}
val (paramOpt: Seq[Option[String]], body: Seq[String]) = typeArgList.typeArgs.zipWithIndex.map {
case (simple: ScSimpleTypeElement, i) if inlineSyntaxIds.contains(simple.getText) =>
val name = generateName(i)
(Some(simple.getText.replace("?", name)), name)
case (param: ScParameterizedTypeElement, i) if inlineSyntaxIds.contains(param.typeElement.getText) =>
val name = generateName(i)
(Some(param.getText.replace("?", name)), name)
case (a, _) => (None, a.getText)
}.unzip
val paramText = paramOpt.flatten.mkString(start = "[", sep = ", ", end = "]")
val bodyText = body.mkString(start = "[", sep = ", ", end = "]")
val typeName = "Λ$"
val inlineText = s"({type $typeName$paramText = ${typeElement.getText}$bodyText})#$typeName"
val newTE = ScalaPsiElementFactory.createTypeElementFromText(inlineText, getContext, this)
Option(newTE)
}
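    // For reference, the inline syntax `Either[String, ?]` desugars to the type lambda
    // `({type Λ$[α] = Either[String, α]})#Λ$`, matching what the kind-projector plugin generates.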
def existentialType: Option[ScTypeElement] = {
val forSomeBuilder = new StringBuilder
var count = 1
forSomeBuilder.append(" forSome {")
val typeElements = typeArgList.typeArgs.map {
case w: ScWildcardTypeElement =>
forSomeBuilder.append("type _" + "$" + count +
w.lowerTypeElement.fold("")(te => s" >: ${te.getText}") +
w.upperTypeElement.fold("")(te => s" <: ${te.getText}"))
forSomeBuilder.append("; ")
val res = s"_$$$count"
count += 1
res
case t => t.getText
}
forSomeBuilder.delete(forSomeBuilder.length - 2, forSomeBuilder.length)
forSomeBuilder.append("}")
val newTypeText = s"(${typeElement.getText}${typeElements.mkString("[", ", ", "]")} ${forSomeBuilder.toString()})"
val newTypeElement = ScalaPsiElementFactory.createTypeElementFromText(newTypeText, getContext, this)
Option(newTypeElement)
}
val kindProjectorEnabled = ScalaPsiUtil.kindProjectorPluginEnabled(this)
def isKindProjectorFunctionSyntax(element: PsiElement): Boolean = {
typeElement.getText match {
case "Lambda" | "λ" if kindProjectorEnabled => true
case _ => false
}
}
@tailrec
def isKindProjectorInlineSyntax(element: PsiElement): Boolean = {
element match {
case simple: ScSimpleTypeElement if kindProjectorEnabled && inlineSyntaxIds.contains(simple.getText) => true
case parametrized: ScParameterizedTypeElement if kindProjectorEnabled =>
isKindProjectorInlineSyntax(parametrized.typeElement)
case _ => false
}
}
typeArgList.typeArgs.find {
case e: ScFunctionalTypeElement if isKindProjectorFunctionSyntax(e) => true
case e if isKindProjectorInlineSyntax(e) => true
case e: ScWildcardTypeElementImpl => true
case _ => false
} match {
case Some(fun) if isKindProjectorFunctionSyntax(fun) => kindProjectorFunctionSyntax(fun)
case Some(e) if isKindProjectorInlineSyntax(e) => kindProjectorInlineSyntax(e)
case Some(_) => existentialType
case _ => None
}
}
protected def innerType(ctx: TypingContext): TypeResult[ScType] = {
computeDesugarizedType match {
case Some(typeElement) =>
return typeElement.getType(TypingContext.empty)
case _ =>
}
val tr = typeElement.getType(ctx)
val res = tr.getOrElse(return tr)
//todo: possible refactoring to remove parameterized type inference in simple type
typeElement match {
case s: ScSimpleTypeElement =>
s.reference match {
case Some(ref) =>
if (ref.isConstructorReference) {
ref.resolveNoConstructor match {
case Array(ScalaResolveResult(to: ScTypeParametersOwner, subst: ScSubstitutor))
if to.isInstanceOf[PsiNamedElement] =>
return tr //all things were done in ScSimpleTypeElementImpl.innerType
case Array(ScalaResolveResult(to: PsiTypeParameterListOwner, subst: ScSubstitutor))
if to.isInstanceOf[PsiNamedElement] =>
return tr //all things were done in ScSimpleTypeElementImpl.innerType
case _ =>
}
}
ref.bind() match {
case Some(ScalaResolveResult(e: PsiMethod, _)) =>
return tr //all things were done in ScSimpleTypeElementImpl.innerType
case _ =>
}
case _ =>
}
case _ =>
}
val args: scala.Seq[ScTypeElement] = typeArgList.typeArgs
if (args.isEmpty) return tr
val argTypesWrapped = args.map {_.getType(ctx)}
    val argTypesOrAny = argTypesWrapped.map(_.getOrAny)
def fails(t: ScType) = (for (f@Failure(_, _) <- argTypesWrapped) yield f).foldLeft(Success(t, Some(this)))(_.apply(_))
//Find cyclic type references
argTypesWrapped.find(_.isCyclic) match {
      case Some(_) => fails(ScParameterizedType(res, argTypesOrAny))
case None =>
val typeArgs = args.map(_.getType(ctx))
val result = ScParameterizedType(res, typeArgs.map(_.getOrAny))
(for (f@Failure(_, _) <- typeArgs) yield f).foldLeft(Success(result, Some(this)))(_.apply(_))
}
}
override def accept(visitor: ScalaElementVisitor) {
visitor.visitParameterizedTypeElement(this)
}
override def accept(visitor: PsiElementVisitor) {
visitor match {
case s: ScalaElementVisitor => s.visitParameterizedTypeElement(this)
case _ => super.accept(visitor)
}
}
override def processDeclarations(processor: PsiScopeProcessor,
state: ResolveState,
lastParent: PsiElement,
place: PsiElement): Boolean = {
if (ScalaPsiUtil.kindProjectorPluginEnabled(this)) {
computeDesugarizedType match {
case Some(projection: ScTypeProjection) =>
projection.typeElement match {
case paren: ScParenthesisedTypeElement => paren.typeElement match {
case Some(compound: ScCompoundTypeElement) =>
compound.refinement match {
case Some(ref) => ref.types match {
case Seq(alias: ScTypeAliasDefinition) =>
for (tp <- alias.typeParameters) {
val text = tp.getText
val lowerBound = text.indexOf(">:")
val upperBound = text.indexOf("<:")
//we have to call processor execute so both `+A` and A resolve: Lambda[`+A` => (A, A)]
processor.execute(tp, state)
processor.execute(new ScSyntheticClass(getManager, s"`$text`", Any), state)
if (lowerBound < 0 && upperBound > 0) {
processor.execute(new ScSyntheticClass(getManager, text.substring(0, upperBound), Any), state)
} else if (upperBound < 0 && lowerBound > 0) {
processor.execute(new ScSyntheticClass(getManager, text.substring(0, lowerBound), Any), state)
} else if (upperBound > 0 && lowerBound > 0) {
val actualText = text.substring(0, math.min(lowerBound, upperBound))
processor.execute(new ScSyntheticClass(getManager, actualText, Any), state)
}
}
case _ =>
}
case _ =>
}
case _ =>
}
case _ =>
}
val manager = getManager
processor.execute(new ScSyntheticClass(manager, "+", Any), state)
processor.execute(new ScSyntheticClass(manager, "-", Any), state)
case _ =>
}
}
super.processDeclarations(processor, state, lastParent, place)
}
}
|
LPTK/intellij-scala
|
src/org/jetbrains/plugins/scala/lang/psi/impl/base/types/ScParameterizedTypeElementImpl.scala
|
Scala
|
apache-2.0
| 11,912
|
/*
* Copyright (c) 2015 Robert Conrad - All Rights Reserved.
* Unauthorized copying of this file, via any medium is strictly prohibited.
* This file is proprietary and confidential.
* Last modified by rconrad, 1/3/15 4:19 PM
*/
package base.entity.kv.impl
import base.entity.kv.{ Key, KvTest }
/**
 * Base test for [[KeyFactoryImpl]] implementations: subclasses supply a factory and a
 * way to create keys, and this suite verifies that deleting those keys through the
 * factory removes every one of them.
* @author rconrad
*/
abstract class KeyFactoryImplTest[T <: Key] extends KvTest {
def factory: KeyFactoryImpl
def create(): Iterable[Key]
test("del") {
val keys = create()
assert(keys.size > 1)
keys.foreach(k => assert(k.exists().await()))
assert(factory.del(keys).await() == keys.size)
keys.foreach(k => assert(!k.exists().await()))
}
}
|
robconrad/base-api
|
project-entity/src/test/scala/base/entity/kv/impl/KeyFactoryImplTest.scala
|
Scala
|
mit
| 823
|
package org.apache.spot.dns.model
import org.apache.log4j.Logger
import org.apache.spark.SparkContext
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, Row, SQLContext}
import org.apache.spot.SuspiciousConnectsArgumentParser.SuspiciousConnectsConfig
import org.apache.spot.dns.DNSSchema._
import org.apache.spot.dns.DNSWordCreation
import org.apache.spot.lda.SpotLDAWrapper
import org.apache.spot.lda.SpotLDAWrapper.{SpotLDAInput, SpotLDAOutput}
import org.apache.spot.utilities.DomainProcessor.DomainInfo
import org.apache.spot.utilities.data.validation.InvalidDataHandler
import org.apache.spot.utilities.{CountryCodes, DomainProcessor, Quantiles, TopDomains}
import scala.util.{Failure, Success, Try}
/**
* A probabilistic model of the DNS queries issued by each client IP.
*
* The model uses a topic-modelling approach that:
* 1. Simplifies DNS log entries into words.
 * 2. Treats the DNS queries of each client as a collection of words.
* 3. Decomposes common query behaviors using a collection of "topics" that represent common profiles
* of query behavior. These "topics" are probability distributions on words.
* 4. Each client IP has a mix of topics corresponding to its behavior.
 * 5. The probability of a query issued by an IP is estimated by simplifying the query into a word
 * and combining the per-topic word probabilities using that IP's topic mix.
*
* Create these models using the factory in the companion object.
*
* @param inTopicCount Number of topics to use in the topic model.
* @param inIpToTopicMix Per-IP topic mix.
* @param inWordToPerTopicProb Per-word, an array of probability of word given topic per topic.
* @param inTimeCuts Quantile cut-offs for discretizing the time of day in word construction.
* @param inFrameLengthCuts Quantile cut-offs for discretizing the frame length in word construction.
* @param inSubdomainLengthCuts Quantile cut-offs for discretizing subdomain length in word construction.
* @param inNumberPeriodsCuts Quantile cut-offs for discretizing domain number-of-periods count in word construction.
* @param inEntropyCuts Quantile cut-offs for discretizing the subdomain entropy in word construction.
*/
class DNSSuspiciousConnectsModel(inTopicCount: Int,
inIpToTopicMix: Map[String, Array[Double]],
inWordToPerTopicProb: Map[String, Array[Double]],
inTimeCuts: Array[Double],
inFrameLengthCuts: Array[Double],
inSubdomainLengthCuts: Array[Double],
inNumberPeriodsCuts: Array[Double],
inEntropyCuts: Array[Double]) {
val topicCount = inTopicCount
val ipToTopicMix = inIpToTopicMix
val wordToPerTopicProb = inWordToPerTopicProb
val timeCuts = inTimeCuts
val frameLengthCuts = inFrameLengthCuts
val subdomainLengthCuts = inSubdomainLengthCuts
val numberPeriodsCuts = inNumberPeriodsCuts
val entropyCuts = inEntropyCuts
/**
* Use a suspicious connects model to assign estimated probabilities to a dataframe of
* DNS log events.
*
* @param sc Spark Context
* @param sqlContext Spark SQL context
* @param inDF Dataframe of DNS log events, containing at least the columns of [[DNSSuspiciousConnectsModel.ModelSchema]]
* @param userDomain Domain associated to network data (ex: 'intel')
* @return Dataframe with a column named [[org.apache.spot.dns.DNSSchema.Score]] that contains the
* probability estimated for the network event at that row
*/
def score(sc: SparkContext, sqlContext: SQLContext, inDF: DataFrame, userDomain: String): DataFrame = {
val topDomainsBC = sc.broadcast(TopDomains.TopDomains)
val ipToTopicMixBC = sc.broadcast(ipToTopicMix)
val wordToPerTopicProbBC = sc.broadcast(wordToPerTopicProb)
val scoreFunction =
new DNSScoreFunction(frameLengthCuts,
timeCuts,
subdomainLengthCuts,
entropyCuts,
numberPeriodsCuts,
topicCount,
ipToTopicMixBC,
wordToPerTopicProbBC,
topDomainsBC,
userDomain)
val scoringUDF = udf((timeStamp: String,
unixTimeStamp: Long,
frameLength: Int,
clientIP: String,
queryName: String,
queryClass: String,
queryType: Int,
queryResponseCode: Int) =>
scoreFunction.score(timeStamp,
unixTimeStamp,
frameLength,
clientIP,
queryName,
queryClass,
queryType,
queryResponseCode))
inDF.withColumn(Score, scoringUDF(DNSSuspiciousConnectsModel.modelColumns: _*))
}
}
/**
* Contains dataframe schema information as well as the train-from-dataframe routine
* (which is a kind of factory routine) for [[DNSSuspiciousConnectsModel]] instances.
*
*/
object DNSSuspiciousConnectsModel {
val ModelSchema = StructType(List(TimestampField,
UnixTimestampField,
FrameLengthField,
ClientIPField,
QueryNameField,
QueryClassField,
QueryTypeField,
QueryResponseCodeField))
val modelColumns = ModelSchema.fieldNames.toList.map(col)
val DomainStatsSchema = StructType(List(TopDomainField, SubdomainLengthField, SubdomainEntropyField, NumPeriodsField))
/**
* Create a new DNS Suspicious Connects model by training it on a data frame and a feedback file.
*
   * @param sparkContext Spark context.
   * @param sqlContext Spark SQL context.
   * @param logger Logger used to report training progress.
* @param config Analysis configuration object containing CLI parameters.
* Contains the path to the feedback file in config.scoresFile
* @param inputRecords Data used to train the model.
* @param topicCount Number of topics (traffic profiles) used to build the model.
* @return A new [[DNSSuspiciousConnectsModel]] instance trained on the dataframe and feedback file.
*/
def trainNewModel(sparkContext: SparkContext,
sqlContext: SQLContext,
logger: Logger,
config: SuspiciousConnectsConfig,
inputRecords: DataFrame,
topicCount: Int): DNSSuspiciousConnectsModel = {
logger.info("Training DNS suspicious connects model from " + config.inputPath)
val selectedRecords = inputRecords.select(modelColumns: _*)
val totalRecords = selectedRecords.unionAll(DNSFeedback.loadFeedbackDF(sparkContext,
sqlContext,
config.feedbackFile,
config.duplicationFactor))
val countryCodesBC = sparkContext.broadcast(CountryCodes.CountryCodes)
val topDomainsBC = sparkContext.broadcast(TopDomains.TopDomains)
val userDomain = config.userDomain
// create quantile cut-offs
val timeCuts =
Quantiles.computeDeciles(totalRecords
.select(UnixTimestamp)
.rdd
.flatMap({ case Row(unixTimeStamp: Long) =>
Try {
unixTimeStamp.toDouble
} match {
case Failure(_) => Seq()
case Success(timestamp) => Seq(timestamp)
}
}))
val frameLengthCuts =
Quantiles.computeDeciles(totalRecords
.select(FrameLength)
.rdd
.flatMap({ case Row(frameLen: Int) =>
Try {
frameLen.toDouble
} match {
case Failure(_) => Seq()
case Success(frameLength) => Seq(frameLength)
}
}))
val domainStatsRecords = createDomainStatsDF(sparkContext, sqlContext, countryCodesBC, topDomainsBC, userDomain, totalRecords)
val subdomainLengthCuts =
Quantiles.computeQuintiles(domainStatsRecords
.filter(domainStatsRecords(SubdomainLength).gt(0))
.select(SubdomainLength)
.rdd
.flatMap({ case Row(subdomainLength: Int) =>
Try {
subdomainLength.toDouble
} match {
case Failure(_) => Seq()
case Success(subdomainLength) => Seq(subdomainLength)
}
}))
val entropyCuts =
Quantiles.computeQuintiles(domainStatsRecords
.filter(domainStatsRecords(SubdomainEntropy).gt(0))
.select(SubdomainEntropy)
.rdd
.flatMap({ case Row(subdomainEntropy: Double) =>
Try {
subdomainEntropy.toDouble
} match {
case Failure(_) => Seq()
case Success(subdomainEntropy) => Seq(subdomainEntropy)
}
}))
val numberPeriodsCuts =
Quantiles.computeQuintiles(domainStatsRecords
.filter(domainStatsRecords(NumPeriods).gt(0))
.select(NumPeriods)
.rdd
.flatMap({ case Row(numberPeriods: Int) =>
Try {
numberPeriods.toDouble
} match {
case Failure(_) => Seq()
case Success(numberPeriods) => Seq(numberPeriods)
}
}))
// simplify DNS log entries into "words"
val dnsWordCreator = new DNSWordCreation(frameLengthCuts,
timeCuts,
subdomainLengthCuts,
entropyCuts,
numberPeriodsCuts,
topDomainsBC,
userDomain)
val dataWithWord = totalRecords.withColumn(Word, dnsWordCreator.wordCreationUDF(modelColumns: _*))
// aggregate per-word counts at each IP
val ipDstWordCounts =
dataWithWord
.select(ClientIP, Word)
.filter(dataWithWord(Word).notEqual(InvalidDataHandler.WordError))
.map({ case Row(destIP: String, word: String) => (destIP, word) -> 1 })
.reduceByKey(_ + _)
.map({ case ((ipDst, word), count) => SpotLDAInput(ipDst, word, count) })
val SpotLDAOutput(ipToTopicMixDF, wordToPerTopicProb) = SpotLDAWrapper.runLDA(sparkContext,
sqlContext,
ipDstWordCounts,
config.topicCount,
logger,
config.ldaPRGSeed,
config.ldaAlpha,
config.ldaBeta,
config.ldaMaxiterations)
    // Since DNS still broadcasts the IP-to-topic mix, we need to convert the data frame to a Map[String, Array[Double]]
val ipToTopicMix = ipToTopicMixDF
.rdd
.map({ case (ipToTopicMixRow: Row) => ipToTopicMixRow.toSeq.toArray })
.map({
case (ipToTopicMixSeq) => (ipToTopicMixSeq(0).asInstanceOf[String], ipToTopicMixSeq(1).asInstanceOf[Seq[Double]]
.toArray)
})
.collectAsMap
.toMap
new DNSSuspiciousConnectsModel(topicCount,
ipToTopicMix,
wordToPerTopicProb,
timeCuts,
frameLengthCuts,
subdomainLengthCuts,
numberPeriodsCuts,
entropyCuts)
}
/**
* Add domain statistics fields to a data frame.
*
* @param sparkContext Spark context.
* @param sqlContext Spark SQL context.
* @param countryCodesBC Broadcast of the country codes set.
* @param topDomainsBC Broadcast of the most-popular domains set.
* @param userDomain Domain associated to network data (ex: 'intel')
* @param inDF Incoming dataframe. Schema is expected to provide the field [[QueryName]]
* @return A new dataframe with the new columns added. The new columns have the schema [[DomainStatsSchema]]
*/
def createDomainStatsDF(sparkContext: SparkContext,
sqlContext: SQLContext,
countryCodesBC: Broadcast[Set[String]],
topDomainsBC: Broadcast[Set[String]],
userDomain: String,
inDF: DataFrame): DataFrame = {
val queryNameIndex = inDF.schema.fieldNames.indexOf(QueryName)
val domainStatsRDD: RDD[Row] = inDF.rdd.map(row =>
Row.fromTuple(createTempFields(countryCodesBC, topDomainsBC, userDomain, row.getString(queryNameIndex))))
sqlContext.createDataFrame(domainStatsRDD, DomainStatsSchema)
}
case class TempFields(topDomainClass: Int, subdomainLength: Integer, subdomainEntropy: Double, numPeriods: Integer)
/**
*
* @param countryCodesBC Broadcast of the country codes set.
* @param topDomainsBC Broadcast of the most-popular domains set.
* @param userDomain Domain associated to network data (ex: 'intel')
   * @param url URL string to analyze for domain and subdomain information.
* @return [[TempFields]]
*/
def createTempFields(countryCodesBC: Broadcast[Set[String]],
topDomainsBC: Broadcast[Set[String]],
userDomain: String,
url: String): TempFields = {
val DomainInfo(_, topDomainClass, subdomain, subdomainLength, subdomainEntropy, numPeriods) =
DomainProcessor.extractDomainInfo(url, topDomainsBC, userDomain)
TempFields(topDomainClass = topDomainClass,
subdomainLength = subdomainLength,
subdomainEntropy = subdomainEntropy,
numPeriods = numPeriods)
}
}
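// Hypothetical usage sketch (not part of the original file), assuming a SparkContext `sc`,
// SQLContext `sqlContext`, Logger `logger`, SuspiciousConnectsConfig `config` and a DataFrame
// `dnsRecords` with the ModelSchema columns are already in scope:
//
//   val model = DNSSuspiciousConnectsModel.trainNewModel(
//     sc, sqlContext, logger, config, dnsRecords, config.topicCount)
//   val scored = model.score(sc, sqlContext, dnsRecords, config.userDomain)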
|
kpeiruza/incubator-spot
|
spot-ml/src/main/scala/org/apache/spot/dns/model/DNSSuspiciousConnectsModel.scala
|
Scala
|
apache-2.0
| 13,135
|
package gapt.expr.formula
import gapt.expr.Expr
object Iff {
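  // Encodes the biconditional a ↔ b as the conjunction (a → b) ∧ (b → a); unapply recovers
  // the two sides only when the conjunct implications mirror each other exactly.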
def apply( a: Expr, b: Expr ): Formula = And( Imp( a, b ), Imp( b, a ) )
def unapply( f: Formula ): Option[( Formula, Formula )] =
f match {
case And( Imp( a, b ), Imp( b_, a_ ) ) if a == a_ && b == b_ => Some( ( a, b ) )
case _ => None
}
}
|
gapt/gapt
|
core/src/main/scala/gapt/expr/formula/Iff.scala
|
Scala
|
gpl-3.0
| 328
|
package com.nyavro.manythanks.ws.auth
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.model.{HttpEntity, MediaTypes, StatusCodes}
import akka.http.scaladsl.testkit.ScalatestRouteTest
import com.nyavro.manythanks.ws.user.{User, UserFormat}
import org.scalamock.scalatest.MockFactory
import org.scalatest.{Matchers, WordSpec}
import spray.json.{JsValue, _}
import scala.concurrent.Future
class AuthRouteTest extends WordSpec with Matchers with ScalatestRouteTest with TokenFormat with UserFormat with MockFactory {
"AuthRoute" should {
"Create user and return user's token" in {
val newUser = User(Some(3L), "26 10 30", "Mars", "test1")
val mockToken = Token(Some(123L), Some(1L), "123token321")
case class UserToken(userId:Long, token:String)
implicit val userTokenFormat = jsonFormat2(UserToken)
val authService = stub[AuthService]
authService.signUp _ when newUser returning Future(Some(mockToken))
val authRoute = new AuthRoute(authService)
Post(
"/auth/signUp",
HttpEntity(MediaTypes.`application/json`, newUser.toJson.toString())
) ~> authRoute.route ~> check {
response.status should be(StatusCodes.Created)
UserToken(1L, "123token321") should be(userTokenFormat.read(responseAs[JsValue]))
}
}
"Retrieve token of existing user" in {
val mockToken = Token(Some(234L), Some(2L), "555tkntkn731")
val login = "test"
val password = "pwd"
val authRoute = new AuthRoute(
new AuthService {
override def authenticate(token: String): Future[Option[User]] = ???
override def signUp(user: User): Future[Option[Token]] = ???
override def signIn(lgn: String, pwd: String) = Future(
if (lgn == login && pwd == password) Some(mockToken)
else None
)
}
)
Post(
"/auth/signIn",
HttpEntity(MediaTypes.`application/json`, s"""{"login":"$login","password":"$password"}""")
) ~> authRoute.route ~> check {
response.status should be(StatusCodes.OK)
mockToken should be (tokensFormat.read(responseAs[JsValue]))
}
Post(
"/auth/signIn",
HttpEntity(MediaTypes.`application/json`, s"""{"login":"$login","password":"invalid"}""")
) ~> authRoute.route ~> check {
response.status should be(StatusCodes.OK)
}
}
}
}
|
nyavro/manythanks
|
webService/src/test/scala/com/nyavro/manythanks/ws/auth/AuthRouteTest.scala
|
Scala
|
apache-2.0
| 2,450
|
package com.sksamuel.elastic4s.requests.snapshots
import com.sksamuel.elastic4s.{Index, Indexes}
import com.sksamuel.elastic4s.ext.OptionImplicits._
case class RestoreSnapshotRequest(snapshotName: String,
repositoryName: String,
indices: Indexes = Indexes.Empty,
ignoreUnavailable: Option[Boolean] = None,
includeGlobalState: Option[Boolean] = None,
renamePattern: Option[String] = None,
renameReplacement: Option[String] = None,
partial: Option[Boolean] = None,
includeAliases: Option[Boolean] = None,
waitForCompletion: Option[Boolean] = None) {
require(snapshotName.nonEmpty, "snapshot name must not be null or empty")
require(repositoryName.nonEmpty, "repo must not be null or empty")
def partial(partial: Boolean): RestoreSnapshotRequest = copy(partial = partial.some)
def includeAliases(includeAliases: Boolean): RestoreSnapshotRequest = copy(includeAliases = includeAliases.some)
def includeGlobalState(global: Boolean): RestoreSnapshotRequest = copy(includeGlobalState = global.some)
def waitForCompletion(wait: Boolean): RestoreSnapshotRequest = copy(waitForCompletion = wait.some)
def renamePattern(pattern: String): RestoreSnapshotRequest = copy(renamePattern = pattern.some)
def renameReplacement(str: String): RestoreSnapshotRequest = copy(renameReplacement = str.some)
def index(index: Index): RestoreSnapshotRequest = copy(indices = index.toIndexes)
def indices(indices: Indexes): RestoreSnapshotRequest = copy(indices = indices)
}
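// Hypothetical usage sketch (not part of the original file): restores a single index from
// a snapshot, renaming it on the way in and waiting for the restore to finish. The
// snapshot, repository and index names are illustrative.
object RestoreSnapshotRequestExample {
  val restoreLogs: RestoreSnapshotRequest =
    RestoreSnapshotRequest("snap-2023-01-01", "backups")
      .index(Index("logs-2023"))
      .renamePattern("(.+)")
      .renameReplacement("restored_$1")
      .waitForCompletion(true)
}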
|
sksamuel/elastic4s
|
elastic4s-domain/src/main/scala/com/sksamuel/elastic4s/requests/snapshots/RestoreSnapshotRequest.scala
|
Scala
|
apache-2.0
| 1,777
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import org.scalatest.BeforeAndAfterAll
import org.apache.spark.sql.catalyst.expressions.codegen.{CodeFormatter, CodeGenerator}
import org.apache.spark.sql.catalyst.rules.RuleExecutor
import org.apache.spark.sql.execution.{SparkPlan, WholeStageCodegenExec}
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.util.Utils
abstract class BenchmarkQueryTest extends QueryTest with SharedSQLContext with BeforeAndAfterAll {
// When Utils.isTesting is true, the RuleExecutor will issue an exception when hitting
// the max iteration of analyzer/optimizer batches.
assert(Utils.isTesting, "spark.testing is not set to true")
/**
* Drop all the tables
*/
protected override def afterAll(): Unit = {
try {
// For debugging dump some statistics about how much time was spent in various optimizer rules
logWarning(RuleExecutor.dumpTimeSpent())
spark.sessionState.catalog.reset()
} finally {
super.afterAll()
}
}
override def beforeAll() {
super.beforeAll()
RuleExecutor.resetMetrics()
}
protected def checkGeneratedCode(plan: SparkPlan): Unit = {
val codegenSubtrees = new collection.mutable.HashSet[WholeStageCodegenExec]()
plan foreach {
case s: WholeStageCodegenExec =>
codegenSubtrees += s
case _ =>
}
codegenSubtrees.toSeq.foreach { subtree =>
val code = subtree.doCodeGen()._2
try {
// Just check the generated code can be properly compiled
CodeGenerator.compile(code)
} catch {
case e: Exception =>
val msg =
s"""
|failed to compile:
|Subtree:
|$subtree
|Generated code:
|${CodeFormatter.format(code)}
""".stripMargin
throw new Exception(msg, e)
}
}
}
}
|
michalsenkyr/spark
|
sql/core/src/test/scala/org/apache/spark/sql/BenchmarkQueryTest.scala
|
Scala
|
apache-2.0
| 2,687
|
package avrohugger
package format
package specific
package converters
import SchemaAccessors._
import matchers.TypeMatcher
import stores.ClassStore
import types._
import treehugger.forest._
import definitions._
import treehuggerDSL._
import org.apache.avro.{LogicalTypes, Schema}
import scala.language.postfixOps
import scala.collection.JavaConverters._
object ScalaConverter {
def asScalaIteratorConverter(partialVersion: String): String =
partialVersion match {
case "2.11" => "scala.collection.JavaConverters.asScalaIteratorConverter"
case "2.12" => "scala.collection.JavaConverters.asScalaIteratorConverter"
case "2.13" => "scala.jdk.CollectionConverters.IteratorHasAsScala"
case _ => "scala.jdk.CollectionConverters.IteratorHasAsScala"
}
def mapAsScalaMapConverter(partialVersion: String): String =
partialVersion match {
case "2.11" => "scala.collection.JavaConverters.mapAsScalaMapConverter"
case "2.12" => "scala.collection.JavaConverters.mapAsScalaMapConverter"
case "2.13" => "scala.jdk.CollectionConverters.MapHasAsScala"
case _ => "scala.jdk.CollectionConverters.MapHasAsScala"
}
def asScalaBufferConverter(partialVersion: String): String = {
partialVersion match {
case "2.11" => "scala.collection.JavaConverters.asScalaBufferConverter"
case "2.12" => "scala.collection.JavaConverters.asScalaBufferConverter"
case "2.13" => "scala.jdk.CollectionConverters.ListHasAsScala"
case _ => "scala.jdk.CollectionConverters.ListHasAsScala"
}
}
def checkCustomArrayType(
arrayType: AvroScalaArrayType,
elementType: Type,
seqArgs: Typed,
defaultConversion: Tree) = {
val classTagIdent = REF(s"scala.reflect.ClassTag(classOf[$elementType])")
val arrayConversion = ARRAY(seqArgs).APPLY(classTagIdent).AS(TYPE_ARRAY(elementType))
arrayType match {
case ScalaArray => arrayConversion
case ScalaList => LIST(seqArgs)
case ScalaSeq => SEQ(seqArgs)
case ScalaVector => VECTOR(seqArgs)
}
}
val CharSequenceClass = definitions.getClass("java.lang.CharSequence")
// takes as args a REF wrapped according to field Type
def convertFromJava(
classStore: ClassStore,
namespace: Option[String],
schema: Schema,
schemaAccessor: Tree,
isUnionMember: Boolean,
tree: Tree,
typeMatcher: TypeMatcher,
classSymbol: ClassSymbol,
targetScalaPartialVersion: String): Tree = {
schema.getType match {
case Schema.Type.ARRAY => {
val elementSchema = schema.getElementType
val elementType = typeMatcher.toScalaType(classStore, namespace, elementSchema)
val JavaList = RootClass.newClass("java.util.List[_]")
val applyParam = REF("array") DOT("iterator")
val elementConversion = convertFromJava(
classStore,
namespace,
elementSchema,
if (isUnionMember) arrayAccessor(unionAccessor(schemaAccessor, schema.getFullName, asScalaBufferConverter(targetScalaPartialVersion))) else arrayAccessor(schemaAccessor),
false,
REF("x"),
typeMatcher,
classSymbol,
targetScalaPartialVersion)
val seqArgs = {
SEQARG(
REF(asScalaIteratorConverter(targetScalaPartialVersion)).APPLY(applyParam).DOT("asScala").DOT("toSeq")
.MAP(LAMBDA(PARAM("x")) ==> BLOCK(elementConversion))
)
}
val arrayType = typeMatcher.avroScalaTypes.array
val resultExpr = BLOCK(
checkCustomArrayType(arrayType, elementType, seqArgs, LIST(seqArgs))
)
val arrayConversion = CASE(ID("array") withType(JavaList)) ==> resultExpr
val errorMessage = INTERP("s", LIT(s"expected array with type $JavaList, found "), LIT("array"))
val errorExpr = NEW("org.apache.avro.AvroRuntimeException", errorMessage)
val conversionCases = List(arrayConversion)
val arrayMatchError = CASE(WILDCARD) ==> errorExpr
tree MATCH(conversionCases:_*)
}
case Schema.Type.STRING =>
LogicalType.foldLogicalTypes(
schema = schema,
default = tree TOSTRING) {
case UUID =>
typeMatcher.avroScalaTypes.uuid match {
case JavaUuid => {
val UuidClass = RootClass.newClass("java.util.UUID")
val resultExpr = BLOCK(UuidClass.DOT("fromString").APPLY(REF("chars").TOSTRING))
val charSequenceConversion = CASE(ID("chars") withType CharSequenceClass) ==> resultExpr
tree MATCH charSequenceConversion
}
}
}
case Schema.Type.MAP => {
val JavaMap = RootClass.newClass("java.util.Map[_,_]")
val resultExpr = {
BLOCK(
REF(mapAsScalaMapConverter(targetScalaPartialVersion))
.APPLY(REF("map"))
.DOT("asScala")
.DOT("toMap")
.MAP(LAMBDA(PARAM("kvp")) ==> BLOCK(
VAL("key") := REF("kvp._1").DOT("toString"),
VAL("value") := REF("kvp._2"),
PAREN(REF("key"), convertFromJava(
classStore,
namespace,
schema.getValueType,
if (isUnionMember) mapAccessor(unionAccessor(schemaAccessor, schema.getFullName, asScalaBufferConverter(targetScalaPartialVersion))) else mapAccessor(schemaAccessor),
false,
REF("value"),
typeMatcher,
classSymbol,
targetScalaPartialVersion)))
)
)
}
val mapConversion = CASE(ID("map") withType(JavaMap)) ==> resultExpr
tree MATCH(mapConversion)
}
case Schema.Type.FIXED => tree
case Schema.Type.BYTES => {
val JavaBuffer = RootClass.newClass("java.nio.ByteBuffer")
val resultExpr = schema.getLogicalType match {
case decimal: LogicalTypes.Decimal => {
val Decimal = RootClass.newClass("org.apache.avro.LogicalTypes.Decimal")
Block(
VAL("schema") := {if (isUnionMember) unionAccessor(schemaAccessor, schema.getFullName, asScalaBufferConverter(targetScalaPartialVersion)) else schemaAccessor},
VAL("decimalType") := REF("schema").DOT("getLogicalType").APPLY().AS(Decimal),
REF("BigDecimal").APPLY(classSymbol.DOT("decimalConversion").DOT("fromBytes").APPLY(REF("buffer"),REF("schema"),REF("decimalType")))
)
}
case _ => Block(
VAL("dup") := REF("buffer").DOT("duplicate").APPLY(),
VAL("array") := NEW("Array[Byte]", REF("dup").DOT("remaining")),
REF("dup") DOT "get" APPLY(REF("array")),
REF("array")
)
}
val bufferConversion = CASE(ID("buffer") withType (JavaBuffer)) ==> resultExpr
tree MATCH bufferConversion
}
case Schema.Type.UNION => {
val types = schema.getTypes().asScala.toList
// check if it's the kind of union that we support (i.e. nullable fields)
if (types.length != 2 ||
!types.map(x => x.getType).contains(Schema.Type.NULL) ||
types.filterNot(x => x.getType == Schema.Type.NULL).length != 1) {
sys.error("Unions beyond nullable fields are not supported")
}
// the union represents a nullable field, the kind of union supported in avrohugger
else {
val typeParamSchema = types.find(x => x.getType != Schema.Type.NULL).get
val nullConversion = CASE(NULL) ==> NONE
val someExpr = SOME(convertFromJava(
classStore,
namespace,
typeParamSchema,
schemaAccessor,
true,
tree,
typeMatcher,
classSymbol,
targetScalaPartialVersion))
val someConversion = CASE(WILDCARD) ==> someExpr
val conversionCases = List(nullConversion, someConversion)
tree MATCH(conversionCases:_*)
}
}
case Schema.Type.ENUM => {
typeMatcher.avroScalaTypes.enum match {
case EnumAsScalaString => tree TOSTRING
case JavaEnum | ScalaEnumeration | ScalaCaseObjectEnum => tree
}
}
case Schema.Type.LONG => {
Option(schema.getLogicalType()) match {
case Some(logicalType) => {
if (logicalType.getName == "timestamp-millis") {
typeMatcher.avroScalaTypes.timestampMillis match {
case JavaSqlTimestamp => {
val TimestampClass = RootClass.newClass("java.sql.Timestamp")
val resultExpr = BLOCK(NEW(TimestampClass, REF("l")))
val longConversion = CASE(ID("l") withType (LongClass)) ==> resultExpr
tree MATCH longConversion
}
case JavaTimeInstant => {
val InstantClass = RootClass.newClass("java.time.Instant")
val resultExpr = BLOCK(InstantClass.DOT("ofEpochMilli").APPLY(REF("l")))
val longConversion = CASE(ID("l") withType (LongClass)) ==> resultExpr
tree MATCH longConversion
}
}
}
else tree
}
case None => tree
}
}
case Schema.Type.INT => {
Option(schema.getLogicalType()) match {
case Some(logicalType) => {
if (logicalType.getName == "date") {
typeMatcher.avroScalaTypes.date match {
case JavaSqlDate => {
val IntegerClass = RootClass.newClass("Integer")
val SqlDateClass = RootClass.newClass("java.sql.Date")
val resultExpr = BLOCK(NEW(SqlDateClass, REF("i").DOT("toLong").DOT("*").APPLY(LIT(86400000L))))
val integerConversion = CASE(ID("i") withType (IntegerClass)) ==> resultExpr
tree MATCH integerConversion
}
case JavaTimeLocalDate => {
val IntegerClass = RootClass.newClass("Integer")
val LocalDateClass = RootClass.newClass("java.time.LocalDate")
val resultExpr = BLOCK(LocalDateClass.DOT("ofEpochDay").APPLY(REF("i").DOT("toInt")))
val integerConversion = CASE(ID("i") withType (IntegerClass)) ==> resultExpr
tree MATCH integerConversion
}
}
}
else tree
}
case None => tree
}
}
case _ => tree
}
}
}
|
julianpeeters/avrohugger
|
avrohugger-core/src/main/scala/format/specific/converters/ScalaConverter.scala
|
Scala
|
apache-2.0
| 10,633
|
/**
* Copyright 2015 Otto (GmbH & Co KG)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package schedoscope.example.osm.datamart
import org.scalatest.{ FlatSpec, Matchers }
import org.schedoscope.dsl.Field._
import org.schedoscope.test.{ rows, test }
import schedoscope.example.osm.datahub.{ Restaurants, Shops, Trainstations }
case class ShopProfilesTest() extends FlatSpec
with Matchers {
val shops = new Shops() with rows {
set(v(id, "122546"),
v(shopName, "Netto"),
v(shopType, "supermarket"),
v(area, "t1y87ki"))
set(v(id, "274850441"),
v(shopName, "Schanzenbaeckerei"),
v(shopType, "bakery"),
v(area, "t1y87ki"))
set(v(id, "279023080"),
v(shopName, "Edeka Linow"),
v(shopType, "supermarket"),
v(area, "t1y77d8"))
}
val restaurants = new Restaurants() with rows {
set(v(id, "267622930"),
v(restaurantName, "Cuore Mio"),
v(restaurantType, "italian"),
v(area, "t1y06x1"))
set(v(id, "288858596"),
v(restaurantName, "Jam Jam"),
v(restaurantType, "japanese"),
v(area, "t1y87ki"))
set(v(id, "302281521"),
v(restaurantName, "Walddoerfer Croque Cafe"),
v(restaurantType, "burger"),
v(area, "t1y17m9"))
}
val trainstations = new Trainstations() with rows {
set(v(id, "122317"),
v(stationName, "Hagenbecks Tierpark"),
v(area, "t1y140d"))
set(v(id, "122317"),
v(stationName, "Boenningstedt"),
v(area, "t1y87ki"))
}
"datamart.ShopProfiles" should "load correctly from datahub.shops, datahub.restaurants, datahub.trainstations" in {
new ShopProfiles() with test {
basedOn(shops, restaurants, trainstations)
then()
numRows shouldBe 3
row(v(id) shouldBe "122546",
v(shopName) shouldBe "Netto",
v(shopType) shouldBe "supermarket",
v(area) shouldBe "t1y87ki",
v(cntCompetitors) shouldBe 1,
v(cntRestaurants) shouldBe 1,
v(cntTrainstations) shouldBe 1)
}
}
}
|
hpzorn/schedoscope
|
schedoscope-tutorial/src/test/scala/schedoscope/example/osm/datamart/ShopProfilesTest.scala
|
Scala
|
apache-2.0
| 2,528
|
package ru.reajames
import org.scalatest._
import ReajamesStreamTests._
import org.scalatest.concurrent.ScalaFutures
import java.util.concurrent.atomic.AtomicInteger
import scala.concurrent.{ExecutionContext, Future}
import org.scalatest.time.{Milliseconds, Seconds, Span}
import java.util.concurrent.{ExecutorService, Executors, ThreadFactory}
import javax.jms.{Message, Session, TextMessage, Destination => JDestination}
/**
 * Common tests to be run in a stream environment.
*/
trait ReajamesStreamTests extends FlatSpecLike with Matchers with BeforeAndAfterAll with ScalaFutures
with FfmqConnectionFactoryAware {
val connectionHolder = new ConnectionHolder(connectionFactory, Some("embedded-amq"))
implicit def executionContext: ExecutionContext
def receiveMessages(receiver: JmsReceiver, messagesToReceive: Int = 1): Future[List[String]]
def receiveMessages[T](receiver: JmsReceiver, messagesToReceive: Int, extractor: PartialFunction[Message, T]): Future[List[T]]
def sendMessages(sender: JmsSender[String], messages: List[String]): Unit
def pipeline[T](receiver: JmsReceiver, messageAmount: Int, sender: JmsSender[T])(mapper: PartialFunction[Message, T]): Unit
def sendThroughProcessor(messages: Iterator[String], sender: JmsSender[String]): Future[Unit]
def delayFor100ms: Future[_]
"Jms connectors" should "send and receive messages processing it within a stream" in {
val queue = Queue("send-and-receive" + counter.getAndIncrement())
val messagesToSend = (1 to 10).map(_.toString).toList
val received = receiveMessages(new JmsReceiver(connectionHolder, queue), messagesToSend.size)
sendMessages(new JmsSender[String](connectionHolder, permanentDestination(queue)(string2textMessage)), messagesToSend)
whenReady(received, timeout(Span(30, Seconds))) {
_ == messagesToSend
}
}
"JmsSender" should "allow to send to an appropriate JMS destination" in {
val messagesToSend = List.fill(3)("jms-destination-routing-" + counter.getAndIncrement())
val received = Future.sequence(
messagesToSend.map(q => receiveMessages(new JmsReceiver(connectionHolder, Queue(q))))
)
    // Use each message's text as the name of the queue to send that message to
val sendMessagesToDifferentQueues: DestinationAwareMessageFactory[String] =
(session, elem) => (session.createTextMessage(elem), Queue(elem)(session))
sendMessages(new JmsSender[String](connectionHolder, sendMessagesToDifferentQueues), messagesToSend)
whenReady(received, timeout(Span(30, Seconds))) {
_ == messagesToSend
}
}
"Jms pipeline" should "respond to destination specified in the JMSReplyTo header" in {
val messagesToSend = List.fill(3)("reply-to-" + counter.getAndIncrement())
val messagesReceivedByClients = Future.sequence(
messagesToSend.map(q => receiveMessages(new JmsReceiver(connectionHolder, Queue(q))))
)
val serverIn = Queue("in-" + counter.getAndIncrement())
// Main pipeline
val mainReceiver = new JmsReceiver(connectionHolder, serverIn)
val mainSender = new JmsSender[(String, JDestination)](connectionHolder, replyTo(string2textMessage))
pipeline(mainReceiver, messagesToSend.size, mainSender)(extractTextAndJMSReplyTo)
// just enriches a newly created text message with JMSReplyTo
val replyToElemName = (session: Session, elem: String) =>
mutate(session.createTextMessage(elem))(_.setJMSReplyTo(Queue(elem)(session)))
sendMessages(new JmsSender(connectionHolder, serverIn, replyToElemName), messagesToSend)
whenReady(messagesReceivedByClients, timeout(Span(30, Seconds))) {
_ == messagesToSend
}
}
"Jms components" should "create a channel through a temporary queue" in {
val messagesToSend = List("message 1", "message 2", "message 3")
val serverIn = Queue("server-in-" + counter.getAndIncrement())
val clientIn = TemporaryQueue(Some("client-in"))
val result =
receiveMessages(new JmsReceiver(connectionHolder, clientIn), messagesToSend.size)
pipeline(new JmsReceiver(connectionHolder, serverIn), messagesToSend.size,
new JmsSender[(String, JDestination)](connectionHolder, replyTo(string2textMessage)))(extractTextAndJMSReplyTo)
val clientRequests = new JmsSender[String](connectionHolder, serverIn, enrichReplyTo(clientIn)(string2textMessage))
sendMessages(clientRequests, messagesToSend)
whenReady(result, timeout(Span(30, Seconds))) { _ == messagesToSend }
}
"ReajamesStreamTests" should "delay future completion for 100 millis" in {
for (i <- 1 to 2) {
val delayedFor100ms = delayFor100ms
whenReady(delayedFor100ms, timeout = timeout(Span(300, Milliseconds)))( _ => ())
}
}
"JmsSender" should "allow to detect a failure to recreate a sending stream" in {
val queue = Queue("recreate-sending-stream-" + counter.getAndIncrement())
def createSendingStream(messages: Iterator[String]): Future[Unit] = {
val mf: MessageFactory[String] =
(session, element) =>
if (element != "1") session.createTextMessage(element)
else throw new IllegalArgumentException(s"Could not send $element due to the test case!")
sendThroughProcessor(messages, new JmsSender(connectionHolder, queue, mf)).recoverWith {
case th => delayFor100ms.flatMap(_ => createSendingStream(messages))
}
}
val sent = createSendingStream(Iterator("1", "2"))
whenReady(sent, timeout(Span(30, Seconds)))(_ should equal(()))
whenReady(receiveMessages(new JmsReceiver(connectionHolder, queue)),
timeout(Span(30, Seconds)))(_ == List("2"))
}
"JmsReceiver" should "allow to detect a failure to recreate a receiving stream" in {
val queue = Queue("recreate-receiving-stream-" + counter.getAndIncrement())
def createReceivingStream(messagesToReceive: Int): Future[Seq[String]] = {
receiveMessages(new JmsReceiver(connectionHolder, queue), messagesToReceive, {
case text: TextMessage =>
if (text.getText != "1") text.getText
else throw new IllegalArgumentException(s"Could not process ${text.getText} due to the test case!")
}).recoverWith {
case th => delayFor100ms.flatMap(_ => createReceivingStream(messagesToReceive))
}
}
sendMessages(new JmsSender(connectionHolder, queue, string2textMessage), List("1"))
val received = createReceivingStream(1)
    Thread.sleep(300) // give the failure time to propagate through the stream
sendMessages(new JmsSender(connectionHolder, queue, string2textMessage), List("2"))
whenReady(received, timeout(Span(30, Seconds)))(_ == List("2"))
}
override protected def afterAll(): Unit = stopBroker()
/**
* Helper function for extracting text and JMSReplyTo header.
*
* @return payload and JMSReplyTo
*/
def extractTextAndJMSReplyTo: PartialFunction[Message, (String, JDestination)] = {
case msg: TextMessage => (msg.getText, msg.getJMSReplyTo)
}
/**
* Helper function for extracting payload of a message.
* @return text
*/
def extractText: PartialFunction[Message, String] = {
case msg: TextMessage => msg.getText
}
}
object ReajamesStreamTests {
private[reajames] val counter = new AtomicInteger(0)
private val executor: ExecutorService = Executors.newCachedThreadPool(createDaemon)
implicit val executionContext = ExecutionContext.fromExecutorService(executor)
private def createDaemon = new ThreadFactory {
def newThread(r: Runnable): Thread = {
val thread = new Thread(r)
thread.setDaemon(true)
thread
}
}
}
|
dobrynya/reajames
|
src/test/scala/ru/reajames/ReajamesStreamTests.scala
|
Scala
|
apache-2.0
| 7,603
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.graphframes.examples
import org.graphframes.{GraphFrameTestSparkContext, SparkFunSuite}
class GraphsSuite extends SparkFunSuite with GraphFrameTestSparkContext {
test("empty graph") {
for (empty <- Seq(Graphs.empty[Int], Graphs.empty[Long], Graphs.empty[String])) {
assert(empty.vertices.count() === 0L)
assert(empty.edges.count() === 0L)
}
}
test("chain graph") {
val sqlContext = this.sqlContext
import sqlContext.implicits._
val chain0 = Graphs.chain(0L)
assert(chain0.vertices.count() === 0L)
assert(chain0.edges.count() === 0L)
val chain1 = Graphs.chain(1L)
assert(chain1.vertices.as[Long].collect() === Array(0L))
assert(chain1.edges.count() === 0L)
val chain2 = Graphs.chain(2L)
assert(chain2.vertices.as[Long].collect().toSet === Set(0L, 1L))
assert(chain2.edges.as[(Long, Long)].collect() === Array((0L, 1L)))
val chain3 = Graphs.chain(3L)
assert(chain3.vertices.as[Long].collect().toSet === Set(0L, 1L, 2L))
assert(chain3.edges.as[(Long, Long)].collect().toSet === Set((0L, 1L), (1L, 2L)))
withClue("Constructing a large chain graph shouldn't OOM the driver.") {
Graphs.chain(1e10.toLong)
}
}
}
|
graphframes/graphframes
|
src/test/scala/org/graphframes/examples/GraphsSuite.scala
|
Scala
|
apache-2.0
| 2,027
|
/*
* Copyright (c) 2014 Paul Bernard
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Spectrum Finance is based in part on:
* QuantLib. http://quantlib.org/
*
*/
package org.quantintel.ql.math
/**
* @author Paul Bernard
*/
abstract class AbstractSolver1D[F <: Double => Double]{
val MAX_FUNCTION_EVALUATIONS = 100
private var maxEvaluations : Int = MAX_FUNCTION_EVALUATIONS
private var lowerBoundEnforced : Boolean = false
private var upperBoundEnforced : Boolean = false
protected var root, xMin, xMax, fxMin, fxMax : Double = 0.0
protected var evaulationNumber: Int = 0
protected var lowerBound, upperBound: Double = 0.0
  def this(maxEvaluations: Int, lowerBoundEnforced: Boolean,
           upperBoundEnforced: Boolean) {
    this()
    setMaxEvaluation(maxEvaluations)
    this.lowerBoundEnforced = lowerBoundEnforced
    this.upperBoundEnforced = upperBoundEnforced
  }
def setMaxEvaluation(evaluations: Int): Unit = {
this.maxEvaluations = math.max(1, evaluations)
}
def getMaxEvaluations : Int = {
this.maxEvaluations
}
def setLowerBound(lowerBound: Double): Unit = {
this.lowerBound = lowerBound
this.lowerBoundEnforced = true
}
  def setUpperBound(upperBound: Double): Unit = {
    this.upperBound = upperBound
    this.upperBoundEnforced = true
  }
}
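// --- Editor's illustrative sketch (not part of the original Spectrum source) ---
// A minimal concrete solver built on the base class above, assuming a hypothetical
// `solveImpl` entry point; the real QuantLib-style solve methods are not shown in
// this excerpt. Included only to show how the bounds and evaluation budget are
// meant to be used.
private object AbstractSolver1DSketch {
  class BisectionSolver extends AbstractSolver1D[Double => Double] {
    def solveImpl(f: Double => Double, accuracy: Double): Double = {
      var lo = lowerBound
      var hi = upperBound
      var evaluations = 0
      while (hi - lo > accuracy && evaluations < getMaxEvaluations) {
        val mid = (lo + hi) / 2
        // keep the root bracketed: if the sign changes on [lo, mid], shrink hi, else lo
        if (f(lo) * f(mid) <= 0) hi = mid else lo = mid
        evaluations += 1
      }
      (lo + hi) / 2
    }
  }
  // Usage: approximate sqrt(2) as the root of x^2 - 2 on [0, 2]
  def demo(): Double = {
    val solver = new BisectionSolver
    solver.setMaxEvaluation(200)
    solver.setLowerBound(0.0)
    solver.setUpperBound(2.0)
    solver.solveImpl(x => x * x - 2, 1e-9)
  }
}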
|
quantintel/spectrum
|
financial/src/main/scala/org/quantintel/ql/math/AbstractSolver1D.scala
|
Scala
|
apache-2.0
| 1,775
|
/*
* Copyright (C) 2016 Christopher Batey and Dogan Narinc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scassandra.server.priming.routes
import com.typesafe.scalalogging.LazyLogging
import org.scassandra.server.priming.ActivityLog
import org.scassandra.server.priming.cors.CorsSupport
import org.scassandra.server.priming.json.PrimingJsonImplicits
import spray.http.StatusCodes
import spray.routing.HttpService
trait ActivityVerificationRoute extends HttpService with LazyLogging with CorsSupport {
import PrimingJsonImplicits._
implicit val activityLog: ActivityLog
val activityVerificationRoute =
cors {
path("connection") {
get {
complete {
activityLog.retrieveConnections()
}
} ~
delete {
complete {
logger.debug("Deleting all recorded connections")
activityLog.clearConnections()
StatusCodes.OK
}
}
} ~
path("query") {
get {
complete {
logger.debug("Request for recorded queries")
activityLog.retrieveQueries()
}
} ~
delete {
complete {
logger.debug("Deleting all recorded queries")
activityLog.clearQueries()
StatusCodes.OK
}
}
} ~
path("prepared-statement-preparation") {
get {
complete {
logger.debug("Request for recorded prepared statement preparations")
activityLog.retrievePreparedStatementPreparations()
}
} ~
delete {
complete {
logger.debug("Deleting all recorded prepared statement preparations")
activityLog.clearPreparedStatementPreparations()
StatusCodes.OK
}
}
} ~
path("prepared-statement-execution") {
get {
complete {
logger.debug("Request for recorded prepared statement executions")
activityLog.retrievePreparedStatementExecutions()
}
} ~
delete {
complete {
logger.debug("Deleting all recorded prepared statement executions")
activityLog.clearPreparedStatementExecutions()
StatusCodes.OK
}
}
} ~
path("batch-execution") {
get {
complete {
logger.debug("Request for recorded batch executions")
activityLog.retrieveBatchExecutions()
}
} ~
delete {
complete {
logger.debug("Deleting all recorded batch executions")
activityLog.clearBatchExecutions()
StatusCodes.OK
}
}
}
}
}
|
mikefero/cpp-driver
|
gtests/src/integration/scassandra/server/server/src/main/scala/org/scassandra/server/priming/routes/ActivityVerificationRoute.scala
|
Scala
|
apache-2.0
| 3,251
|
package reaktor.scct.report
import java.io.File
import io.Source
import xml.{Text, Node, NodeSeq}
import reaktor.scct._
class HtmlReporter(project: ProjectData, writer: HtmlReportWriter) extends HtmlHelper {
val data = project.coverage
object files {
val packages = "packages.html"
val summary = "summary.html"
}
def report = {
summaryReport
packageListReport
packageReports
sourceFileReports
resources
}
def summaryReport {
writer.write(files.summary, projectSummaryReport ++ packageSummaryReport)
}
def projectSummaryReport = {
val projects = data.forProjects
if (projects.size > 1) {
val header = headerRow("Total", data.percentage)
val items = for ((name, projectData) <- projects) yield
headerRow(name, projectData.percentage)
table(header, items.toList)
} else {
val header = headerRow(projects.head._1, data.percentage)
table(header, NodeSeq.Empty)
}
}
def packageSummaryReport = {
val items = for ((name, packageData) <- data.forPackages) yield
itemRow(name, packageData.percentage, packageReportFileName(name))
table(NodeSeq.Empty, items.toList)
}
def packageListReport {
val html =
<div class="content">
<div class="pkgRow header">
<a href={files.summary}>Summary { format(data.percentage) }</a>
</div>
{
for ((pkg, packageData) <- data.forPackages) yield {
<div class="pkgRow pkgLink">
<a href={packageReportFileName(pkg)}>
{ pkg } { format(packageData.percentage) }
</a>
</div> ++
<div class="pkgRow pkgContent">
{ for ((clazz, classData) <- packageData.forClasses) yield
<div class="pkgRow">
<a href={ classHref(clazz) }>
<span class="className">{ classNameHeader(clazz) }</span> { format(classData.percentage) }
</a>
</div>
}
</div>
}
}
</div>
writer.write(files.packages, html)
}
def packageReports {
for ((pkg, packageData) <- data.forPackages) {
val header = headerRow(pkg, packageData.percentage)
val items = classItemRows(packageData)
writer.write(packageReportFileName(pkg), table(header, items))
}
}
def resources {
val rs = List("class.png", "object.png", "package.png", "trait.png", "filter_box_left.png", "filter_box_right.png",
"jquery-1.6.1.min.js", "jquery-ui-1.8.4.custom.min.js", "style.css", "main.js", "index.html")
rs.foreach { name =>
writer.write(name, IO.readResourceBytes("/html-reporting/"+name))
}
}
def sourceFileReports {
for ((sourceFile, sourceData) <- data.forSourceFiles) {
val report = SourceFileHtmlReporter.report(sourceFile, sourceData, project)
writer.write(sourceReportFileName(sourceFile), report)
}
}
}
|
mtkopone/scct
|
src/main/scala/reaktor/scct/report/HtmlReporter.scala
|
Scala
|
apache-2.0
| 2,983
|
/**
* Copyright 2011-2017 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.core.check.extractor.regex
import io.gatling.core.check.DefaultMultipleFindCheckBuilder
import io.gatling.core.check.extractor.Extractor
import io.gatling.core.session.{ Expression, RichExpression }
trait RegexCheckType
trait RegexOfType { self: RegexCheckBuilder[String] =>
def ofType[X: GroupExtractor] = new RegexCheckBuilder[X](pattern, patterns)
}
object RegexCheckBuilder {
def regex(pattern: Expression[String], patterns: Patterns) =
new RegexCheckBuilder[String](pattern, patterns) with RegexOfType
}
class RegexCheckBuilder[X: GroupExtractor](
private[regex] val pattern: Expression[String],
private[regex] val patterns: Patterns
)
extends DefaultMultipleFindCheckBuilder[RegexCheckType, CharSequence, X] {
import RegexExtractorFactory._
override def findExtractor(occurrence: Int): Expression[Extractor[CharSequence, X]] = pattern.map(newRegexSingleExtractor[X](_, occurrence, patterns))
override def findAllExtractor: Expression[Extractor[CharSequence, Seq[X]]] = pattern.map(newRegexMultipleExtractor[X](_, patterns))
override def countExtractor: Expression[Extractor[CharSequence, Int]] = pattern.map(newRegexCountExtractor(_, patterns))
}
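// --- Editor's illustrative sketch (not part of the original Gatling source) ---
// Rough sketch of how this builder surfaces in the user-facing check DSL:
// `regex(...)` yields a RegexCheckBuilder[String], and `ofType` switches the
// extraction target when the pattern has several capture groups. Imports and the
// surrounding scenario setup are omitted; this is an illustration, not
// authoritative Gatling documentation.
//
//   http("home").get("/").check(
//     regex("""user=(\w+)""").findAll.saveAs("users"),
//     regex("""(\w+)=(\w+)""").ofType[(String, String)].find.exists
//   )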
|
timve/gatling
|
gatling-core/src/main/scala/io/gatling/core/check/extractor/regex/RegexCheckBuilder.scala
|
Scala
|
apache-2.0
| 1,824
|
/*
* Copyright 2011-2017 Chris de Vreeze
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package eu.cdevreeze.yaidom.integrationtest
import eu.cdevreeze.yaidom.core.EName
import eu.cdevreeze.yaidom.parse.DocumentParserUsingSax
import org.scalatest.BeforeAndAfterAll
import org.scalatest.funsuite.AnyFunSuite
/**
* CD store test case, using yaidom instead of XPath.
*
* Acknowledgments: The example comes from http://www.java-only.com/LoadTutorial.javaonly?id=60
*
* The original example uses Java, DOM and the standard Java XPath API. The corresponding yaidom code is far more
* verbose in its expressions that replace the XPath expressions. Yet in total the yaidom examples are far more concise,
* the yaidom Elem expressions are very easy to understand semantically, the yaidom Elems are immutable and thread-safe,
* and the yaidom examples are straightforward and contain far less cruft (like setting up factory objects, compiling expressions,
* obtaining and iterating over result sets, etc.).
*
* @author Chris de Vreeze
*/
class CdStoreTest extends AnyFunSuite with BeforeAndAfterAll {
test("testQueryArtistElems") {
val parser = DocumentParserUsingSax.newInstance()
val doc = parser.parse(classOf[CdStoreTest].getResourceAsStream("cdstore.xml"))
// Instead of XPath: //cd[@genre='metal']/artist
val artistElms =
for {
cdElm <- doc.documentElement \\ { e => e.localName == "cd" && e.attributeOption(EName("genre")).contains("metal") }
artistElm <- cdElm \ (_.localName == "artist")
} yield artistElm
val artists = artistElms map {
_.text
}
assertResult(List("An other artist")) {
artists
}
// The same for-comprehension written in a slightly different way
val artistElms2 =
for {
cdElm <- (doc.documentElement \\ (_.localName == "cd")) filter {
_.attributeOption(EName("genre")).contains("metal")
}
artistElm <- cdElm \ (_.localName == "artist")
} yield artistElm
val artists2 = artistElms2 map {
_.text
}
assertResult(List("An other artist")) {
artists2
}
}
test("testQueryArtistAsText") {
val parser = DocumentParserUsingSax.newInstance()
val doc = parser.parse(classOf[CdStoreTest].getResourceAsStream("cdstore.xml"))
// Instead of XPath: //cd[@genre='metal']/artist/text()
val artists =
for {
cdElm <- doc.documentElement \\ { e => e.localName == "cd" && e.attributeOption(EName("genre")).contains("metal") }
artistElm <- cdElm \ (_.localName == "artist")
} yield artistElm.text
val artistsConcatenated = artists.mkString
assertResult("An other artist") {
artistsConcatenated
}
// The same for-comprehension written in a slightly different way
val artists2 =
for {
cdElm <- (doc.documentElement \\ (_.localName == "cd")) filter {
_.attributeOption(EName("genre")).contains("metal")
}
artistElm <- cdElm \ (_.localName == "artist")
} yield artistElm.text
val artistsConcatenated2 = artists2.mkString
assertResult("An other artist") {
artistsConcatenated2
}
// Or, knowing that there is precisely one such artist
val artists3 =
for {
cdElm <- (doc.documentElement \\ (_.localName == "cd")) filter {
_.attributeOption(EName("genre")).contains("metal")
}
artistElm <- cdElm \ (_.localName == "artist")
} yield artistElm.text
val firstArtist = artists3.headOption.getOrElse("")
assertResult("An other artist") {
firstArtist
}
}
test("testQueryPrice") {
val parser = DocumentParserUsingSax.newInstance()
val doc = parser.parse(classOf[CdStoreTest].getResourceAsStream("cdstore.xml"))
// Instead of XPath: //cd[@genre='metal']/price/text()
val prices =
for {
cdElm <- doc.documentElement \\ { e => e.localName == "cd" && e.attributeOption(EName("genre")).contains("metal") }
artistElm <- cdElm \ (_.localName == "price")
} yield artistElm.text
val price = prices.headOption.getOrElse(sys.error("Expected price")).toDouble
assertResult(10) {
price.toInt
}
}
}
|
dvreeze/yaidom
|
jvm/src/test/scala/eu/cdevreeze/yaidom/integrationtest/CdStoreTest.scala
|
Scala
|
apache-2.0
| 4,752
|
package ornicar.scalalib
trait OrnicarOption {
implicit final def toOrnicarOption[A](o: Option[A]) = new OrnicarOptionWrapper(o)
}
final class OrnicarOptionWrapper[A](private val self: Option[A]) extends AnyVal {
def ??[B: Zero](f: A => B): B = self.fold(Zero[B].zero)(f)
def ifTrue(b: Boolean): Option[A] = self filter (_ => b)
def ifFalse(b: Boolean): Option[A] = self filter (_ => !b)
// typesafe getOrElse
def |(default: => A): A = self getOrElse default
def unary_~(implicit z: Zero[A]): A = self getOrElse z.zero
}
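// --- Editor's illustrative sketch (not part of the original scalalib source) ---
// What the extension operators buy you, assuming the implicit conversion from
// OrnicarOption is in scope and a Zero[Int] instance (defined elsewhere in
// scalalib) whose zero is 0:
//
//   val some: Option[Int] = Some(3)
//   val none: Option[Int] = None
//
//   some ?? (_ * 2)    // 6     map-or-zero: fold(Zero[Int].zero)(f)
//   none ?? (_ * 2)    // 0
//   some ifTrue false  // None  keep the value only when the flag holds
//   some | 7           // 3     type-safe getOrElse
//   none | 7           // 7
//   ~none              // 0     getOrElse Zero[Int].zero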
|
ornicar/scalalib
|
src/main/scala/Option.scala
|
Scala
|
mit
| 544
|
/**
* (c) Copyright 2012 WibiData, Inc.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kiji.schema.shell.ddl
import scala.collection.JavaConversions._
import org.specs2.mutable._
import org.kiji.schema.KConstants
import org.kiji.schema.shell.DDLException
import org.kiji.schema.shell.DDLParser
class TestUseDbCommands extends CommandTestCase {
"UseInstanceCommand" should {
"require the db exists" in {
val usecmd = new UseInstanceCommand(env, "missing-instance")
usecmd.exec() must throwA[DDLException]
}
"select the database we request" in {
val usecmd = new UseInstanceCommand(env, "foo")
val env2 = usecmd.exec()
env2.instanceURI.getInstance() mustEqual "foo"
}
}
"Parsed USE statements" should {
"select the foo instance and then the default instance" in {
val parser1: DDLParser = new DDLParser(env)
val res1 = parser1.parseAll(parser1.statement, "USE foo;")
res1.successful mustEqual true
val env2 = res1.get.exec()
env2.instanceURI.getInstance() mustEqual "foo"
val parser2: DDLParser = new DDLParser(env2)
val res2 = parser2.parseAll(parser2.statement, "USE DEFAULT INSTANCE;")
res2.successful mustEqual true
val env3 = res2.get.exec()
env3.instanceURI.getInstance() mustEqual KConstants.DEFAULT_INSTANCE_NAME
}
}
}
|
alexandre-normand/kiji-schema-shell
|
src/test/scala/org/kiji/schema/shell/ddl/TestUseDbCommands.scala
|
Scala
|
apache-2.0
| 1,997
|
package com.twitter.mycollector
import com.twitter.finagle.Http
import com.twitter.finagle.stats.StatsReceiver
import com.twitter.server.{Closer, TwitterServer}
import com.twitter.util.{Await, Closable, Future}
import com.twitter.zipkin.anormdb.AnormDBSpanStoreFactory
import com.twitter.zipkin.collector.SpanReceiver
import com.twitter.zipkin.common.Span
import com.twitter.zipkin.receiver.scribe.ScribeSpanReceiverFactory
import com.twitter.zipkin.zookeeper.ZooKeeperClientFactory
import com.twitter.zipkin.web.ZipkinWebFactory
import com.twitter.zipkin.query.ThriftQueryService
import com.twitter.zipkin.query.constants.DefaultAdjusters
import com.twitter.zipkin.tracegen.ZipkinSpanGenerator
object Main extends TwitterServer with Closer
with ZooKeeperClientFactory
with ScribeSpanReceiverFactory
with ZipkinWebFactory
with AnormDBSpanStoreFactory
with ZipkinSpanGenerator
{
val genSampleTraces = flag("genSampleTraces", false, "Generate sample traces")
def main() {
val store = newAnormSpanStore()
if (genSampleTraces())
Await.result(generateTraces(store))
val receiver = newScribeSpanReceiver(store, statsReceiver.scope("scribeSpanReceiver"))
val query = new ThriftQueryService(store, adjusters = DefaultAdjusters)
val webService = newWebServer(query, statsReceiver.scope("web"))
val web = Http.serve(webServerPort(), webService)
val closer = Closable.sequence(web, receiver, store)
closeOnExit(closer)
println("running and ready")
Await.all(web, receiver, store)
}
}
|
jeffreywugz/zipkin
|
zipkin-example/src/main/scala/com/twitter/zipkin/example/Main.scala
|
Scala
|
apache-2.0
| 1,541
|
/* Copyright 2009-2021 EPFL, Lausanne */
object MyTuple2 {
abstract class A
case class B(i: Int) extends A
case class C(a: A) extends A
def foo(): Int = {
val t = (B(2), C(B(3)))
t match {
case (B(x), C(y)) => x
}
} ensuring( _ == 3)
}
|
epfl-lara/stainless
|
frontends/benchmarks/verification/invalid/MyTuple2.scala
|
Scala
|
apache-2.0
| 268
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming
import java.io.{IOException, ObjectInputStream, ObjectOutputStream}
import java.util.concurrent.ConcurrentHashMap
import scala.collection.JavaConversions._
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.reflect.ClassTag
import org.apache.spark.internal.Logging
import org.apache.spark.streaming.dstream.{DStream, InputDStream, ReceiverInputDStream}
import org.apache.spark.streaming.event._
import org.apache.spark.streaming.scheduler.{Job, StreamingListenerEventSourceStarted}
import org.apache.spark.util.Utils
final private[streaming] class DStreamGraph extends Serializable with Logging {
private val inputStreams = new ArrayBuffer[InputDStream[_]]()
val eventSourceToBoundStreams =
new ConcurrentHashMap[EventSource, mutable.LinkedHashSet[DStream[_]]]
var rememberDuration: Duration = null
var checkpointInProgress = false
var zeroTime: Time = null
var startTime: Time = null
var batchDuration: Duration = null
var defaultTimer: Option[TimerEventSource] = None
private val defaultTimerStreams = new ArrayBuffer[DStream[_]]()
private val defaultTimerListeners = new ArrayBuffer[EventListener]()
def eventSources = {
eventSourceToBoundStreams.keySet().toSet
}
def start(time: Time, ssc: StreamingContext) {
this.synchronized {
require(zeroTime == null, "DStream graph computation already started")
zeroTime = time
startTime = time
// initialize the default timer
val timer = ssc.timer(time + batchDuration, Time(Long.MaxValue),
batchDuration, "DefaultTimer")
defaultTimer = Some(timer)
// Output streams that are not bound to any other
// event source should be bound to the default timer
defaultTimerStreams
.filter(_.boundEventSources.isEmpty)
.foreach(_.bind(timer))
defaultTimerStreams.clear()
defaultTimerListeners.foreach(timer.addListener)
defaultTimerListeners.clear()
val outputStreams = getOutputStreams()
outputStreams.foreach(_.initialize(zeroTime))
outputStreams.foreach(_.remember(rememberDuration))
outputStreams.foreach(_.validateAtStart)
inputStreams.par.foreach(_.start())
eventSources.par.foreach { eventSource =>
eventSource.start()
ssc.scheduler.listenerBus.post(StreamingListenerEventSourceStarted(eventSource))
}
}
}
def restart(time: Time, ssc: StreamingContext) {
this.synchronized {
startTime = time
eventSources.par.foreach { eventSource =>
eventSource.restart()
ssc.scheduler.listenerBus.post(StreamingListenerEventSourceStarted(eventSource))
}
}
}
def stop(ssc: StreamingContext) {
this.synchronized {
eventSources.par.foreach { eventSource =>
eventSource.stop()
ssc.scheduler.listenerBus.post(StreamingListenerEventSourceStarted(eventSource))
}
inputStreams.par.foreach(_.stop())
}
}
def setContext(ssc: StreamingContext) {
this.synchronized {
eventSources.foreach(_.setContext(ssc))
getOutputStreams().foreach(_.setContext(ssc))
}
}
def setBatchDuration(duration: Duration) {
this.synchronized {
require(batchDuration == null,
s"Batch duration already set as $batchDuration. Cannot set it again.")
batchDuration = duration
}
}
def remember(duration: Duration) {
this.synchronized {
require(rememberDuration == null,
s"Remember duration already set as $rememberDuration. Cannot set it again.")
rememberDuration = duration
}
}
def addInputStream(inputStream: InputDStream[_]) {
this.synchronized {
inputStream.setGraph(this)
inputStreams += inputStream
}
}
def addEventListener(listener: EventListener) {
eventSources.foreach(_.addListener(listener))
if (defaultTimer.isEmpty) {
this.synchronized {
defaultTimerListeners += listener
}
}
}
def removeEventListener[T <: EventListener : ClassTag]() {
eventSources.foreach(_.removeListeners[T]())
if (defaultTimer.isEmpty) {
this.synchronized {
val c = implicitly[ClassTag[T]].runtimeClass
val listenersToDrop = defaultTimerListeners.filter(l => c.isAssignableFrom(l.getClass))
defaultTimerListeners --= listenersToDrop
}
}
}
def bind(stream: DStream[_], eventSource: EventSource) {
val boundStreams = Option(eventSourceToBoundStreams.get(eventSource))
.getOrElse {
val emptySet = mutable.LinkedHashSet.empty[DStream[_]]
eventSourceToBoundStreams.put(eventSource, emptySet)
emptySet
}
boundStreams += stream
if (stream.graph == null) {
stream.setGraph(this)
}
}
def bind(stream: DStream[_]) {
defaultTimer match {
case Some(timer) => bind(stream, timer)
case _ => this.synchronized {
defaultTimerStreams += stream
if (stream.graph == null) {
stream.setGraph(this)
}
}
}
}
def getInputStreams(): Array[InputDStream[_]] = this.synchronized { inputStreams.toArray }
def getOutputStreams(): Array[DStream[_]] = {
(eventSourceToBoundStreams.values.flatten.toArray ++ defaultTimerStreams).distinct
}
def getReceiverInputStreams(): Array[ReceiverInputDStream[_]] = this.synchronized {
inputStreams.filter(_.isInstanceOf[ReceiverInputDStream[_]])
.map(_.asInstanceOf[ReceiverInputDStream[_]])
.toArray
}
def getBoundStreams(eventSource: EventSource): Seq[DStream[_]] = {
eventSourceToBoundStreams.getOrDefault(eventSource, mutable.LinkedHashSet.empty).toSeq
}
def getInputStreamName(streamId: Int): Option[String] = synchronized {
inputStreams.find(_.id == streamId).map(_.name)
}
def deleteEvents(events: Seq[Event]) {
this.synchronized {
events.foreach(e =>
getBoundStreams(e.eventSource)
.foreach(_.deleteEvent(e)))
}
}
def generateJobs(event: Event): Seq[Job] = {
logDebug(s"Generating jobs for event $event")
val jobs = this.synchronized {
getBoundStreams(event.eventSource)
.flatMap { stream =>
val jobOption = stream.generateJob(event)
jobOption.foreach(_.setCallSite(stream.creationSite))
jobOption
}
}
logDebug(s"Generated ${jobs.length} jobs for event $event")
jobs
}
def clearMetadata(event: Event) {
logDebug("Clearing metadata for event " + event)
this.synchronized {
getBoundStreams(event.eventSource).foreach(_.clearMetadata(event))
}
logDebug("Cleared old metadata for event " + event)
}
def updateCheckpointData(event: Event) {
logInfo("Updating checkpoint data for event " + event)
this.synchronized {
getBoundStreams(event.eventSource).foreach(_.updateCheckpointData(event))
}
logInfo("Updated checkpoint data for event " + event)
}
def clearCheckpointData(event: Event) {
logInfo("Clearing checkpoint data for event " + event)
this.synchronized {
getBoundStreams(event.eventSource).foreach(_.clearCheckpointData(event))
}
logInfo("Cleared checkpoint data for event " + event)
}
def restoreCheckpointData() {
logInfo("Restoring checkpoint data")
this.synchronized {
getOutputStreams().foreach(_.restoreCheckpointData())
}
logInfo("Restored checkpoint data")
}
def validate() {
this.synchronized {
require(batchDuration != null, "Batch duration has not been set")
// assert(batchDuration >= Milliseconds(100), "Batch duration of " + batchDuration +
// " is very low")
require((getOutputStreams() ++ defaultTimerStreams).nonEmpty,
"No output operations registered, so nothing to execute")
}
}
/**
* Get the maximum remember duration across all the input streams. This is a conservative but
* safe remember duration which can be used to perform cleanup operations.
*/
def getMaxInputStreamRememberDuration(): Duration = {
    // If an InputDStream is not used, its `rememberDuration` will be null and we can ignore it
inputStreams.map(_.rememberDuration).filter(_ != null).maxBy(_.milliseconds)
}
/**
* Get the maximum remember duration across all the input streams considering events up to the
* given event and ignoring events past it. This is a conservative but safe remember duration
* which can be used to perform cleanup operations.
*/
def getMaxInputStreamRememberDuration(event: Event): Duration = {
    // If an InputDStream is not used, its `rememberDuration` will be null and we can ignore it
inputStreams.map(_.rememberDuration(event)).filter(_ != null).maxBy(_.milliseconds)
}
@throws(classOf[IOException])
private def writeObject(oos: ObjectOutputStream): Unit = Utils.tryOrIOException {
logDebug("DStreamGraph.writeObject used")
this.synchronized {
checkpointInProgress = true
logDebug("Enabled checkpoint mode")
oos.defaultWriteObject()
checkpointInProgress = false
logDebug("Disabled checkpoint mode")
}
}
@throws(classOf[IOException])
private def readObject(ois: ObjectInputStream): Unit = Utils.tryOrIOException {
logDebug("DStreamGraph.readObject used")
this.synchronized {
checkpointInProgress = true
ois.defaultReadObject()
checkpointInProgress = false
}
}
}
|
mashin-io/rich-spark
|
streaming/src/main/scala/org/apache/spark/streaming/DStreamGraph.scala
|
Scala
|
apache-2.0
| 10,236
|
package org.receiver2d.engine.ecs.components
import org.receiver2d.engine.Receiver2D
import org.receiver2d.engine.math.Vec2
import org.receiver2d.engine.physics.Mechanics
/**
* A built-in component that enables physics support for all entities that use
* it.
*
* @param density this affects the calculated mass of the body
* @param restitution the elastic quality of collisions with this body
* @param friction friction coefficient
* @param kinematic Whether or not our body responds to forces, or simply exerts them
* (setting this option to false allows violating Newton's Third Law for the body)
* @param gravity Whether or not the body is affected by gravity
* @param velocity the starting velocity of the body
*/
class Rigidbody(var density: Float = 1f,
var restitution: Float = 0f,
var friction: Float = 0f,
var kinematic: Boolean = true,
var gravity: Boolean = true,
var velocity: Vec2 = Vec2.ZERO,
var angularVelocity: Float = 0f) extends Component {
val affectsChildren = true
override val logic = false
private var forces: Vec2 = Vec2.ZERO
private var lastUpdate: Double = -1
/**
* Update the rigidbody with the physics engine.
* @param args a list of arguments
*/
def tick(args: Any*) = this.synchronized {
val now = Receiver2D.now
val dT: Float = (now - lastUpdate).toFloat
if (lastUpdate > 0) {
if (gravity) forces += Mechanics.gravityForce
val accel = forces * invMass
entity.transform.synchronized {
entity.transform translate ((velocity + accel * dT / 2) * dT)
entity.transform rotate (angularVelocity * dT)
}
velocity += accel * dT
}
forces = Vec2.ZERO
lastUpdate = now
}
def applyForce(force: Vec2): Unit = this.synchronized {
forces += force
}
def mass: Float = entity.mesh.area * density
def invMass = if (kinematic) 1 / mass else 0f
/**
* see: http://www.efunda.com/math/areas/MomentOfInertia.cfm?Moment=z
* @return the polar moment of inertia
*/
def momentOfInertia: Float =
mass * entity.mesh.tris.map(tri => {
val a = tri(0) distance tri(1)
val b = tri(1) distance tri(2)
val c = tri(2) distance tri(0)
val s = (a + b + c) / 2
val h_a = 2 * Math.sqrt(s*(s-a)*(s-b)*(s-c)) / a
val h_b = 2 * Math.sqrt(s*(s-a)*(s-b)*(s-c)) / b
val area = a * h_a / 2
val I_a = a*h_a*h_a*h_a / 36
val I_b = b*h_b*h_b*h_b / 36
// adding I_a + I_b produces I along z-axis
(I_a + I_b) +
(area / entity.mesh.area) *
((tri.fold(Vec2.ZERO)(_ + _) / tri.length) distance2 entity.mesh.com)
}).sum.toFloat
}
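// --- Editor's illustrative sketch (not part of the original Receiver2D source) ---
// Rough usage sketch, assuming the surrounding ECS has attached the component to
// an entity (so `entity`, `entity.transform` and `entity.mesh` are populated) and
// calls `tick()` once per frame; the attachment API itself is not shown here, and
// the Vec2 factory below is assumed to take (x, y) components.
//
//   val body = new Rigidbody(density = 2f, restitution = 0.3f, gravity = true)
//   body.applyForce(Vec2(10f, 0f))  // accumulate a horizontal push for this frame
//   body.tick()                     // integrates x += (v + a*dT/2)*dT, then v += a*dT
//
// Note that `invMass` is 0 for non-kinematic bodies, so applied forces become
// no-ops for them, matching the @param kinematic description above.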
|
Prince781/Receiver2D
|
src/main/scala/org/receiver2d/engine/ecs/components/Rigidbody.scala
|
Scala
|
gpl-2.0
| 2,767
|
/*
* Copyright (C) 2011 Romain Reuillon
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openmole.plugin.sampling.combine
import org.openmole.core.dsl._
import org.openmole.core.dsl.extension._
import org.openmole.core.workflow.domain.DiscreteFromContextDomain
import org.openmole.plugin.domain.modifier.CanGetName
object ZipWithNameSampling {
implicit def isSampling[D, T]: IsSampling[ZipWithNameSampling[D, T]] = s ⇒ {
def validate: Validate = s.discrete(s.factor.domain).validate
def inputs: PrototypeSet = Seq(s.factor.value)
def outputs: Iterable[Val[_]] = List(s.factor.value, s.name)
def apply: FromContext[Iterator[Iterable[Variable[_]]]] = FromContext { p ⇒
import p._
for {
v ← s.discrete(s.factor.domain).domain.from(context)
} yield List(Variable(s.factor.value, v), Variable(s.name, s.getName(v)))
}
Sampling(
apply,
outputs,
inputs = inputs,
validate = validate
)
}
}
case class ZipWithNameSampling[D, T](factor: Factor[D, T], name: Val[String])(implicit val discrete: DiscreteFromContextDomain[D, T], val getName: CanGetName[T])
|
openmole/openmole
|
openmole/plugins/org.openmole.plugin.sampling.combine/src/main/scala/org/openmole/plugin/sampling/combine/ZipWithNameSampling.scala
|
Scala
|
agpl-3.0
| 1,756
|
/*
* Copyright 2013-2015 Websudos, Limited.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Explicit consent must be obtained from the copyright owner, Outworkers Limited before any redistribution is made.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.websudos.phantom.reactivestreams.suites.iteratee
import com.websudos.phantom.dsl._
import com.websudos.phantom.reactivestreams._
import com.websudos.phantom.tables.{JodaRow, TestDatabase}
import com.websudos.util.testing._
import org.scalameter.api.{Gen => MeterGen, gen => _, _}
import org.scalatest.time.SpanSugar._
import scala.concurrent.{Await, Future}
class IterateeBenchmarkPerformanceTest extends PerformanceTest.Quickbenchmark with TestDatabase.connector.Connector {
TestDatabase.primitivesJoda.insertSchema()
val limit = 10000
val sampleGenLimit = 30000
val fs = for {
step <- 1 to 3
rows = Iterator.fill(limit)(gen[JodaRow])
batch = rows.foldLeft(Batch.unlogged)((b, row) => {
val statement = TestDatabase.primitivesJoda.insert
.value(_.pkey, row.pkey)
.value(_.intColumn, row.int)
.value(_.timestamp, row.bi)
b.add(statement)
})
w = batch.future()
f = w map (_ => println(s"step $step was completed successfully"))
r = Await.result(f, 200 seconds)
} yield f map (_ => r)
Await.ready(Future.sequence(fs), 20 seconds)
val sizes: MeterGen[Int] = MeterGen.range("size")(limit, sampleGenLimit, limit)
performance of "Enumerator" in {
measure method "enumerator" in {
using(sizes) in {
size => Await.ready(TestDatabase.primitivesJoda.select.limit(size).fetchEnumerator run Iteratee.forEach { r => }, 10 seconds)
}
}
}
}
|
levinson/phantom
|
phantom-reactivestreams/src/test/scala/com/websudos/phantom/reactivestreams/suites/iteratee/IterateeBenchmarkPerformanceTest.scala
|
Scala
|
bsd-2-clause
| 2,953
|
package es.weso.wiFetcher.dao
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.BeforeAndAfter
import org.scalatest.Matchers
import org.scalatest.FunSuite
import java.io.FileNotFoundException
import es.weso.wiFetcher.entities.Dataset
import es.weso.wiFetcher.dao.entity.DatasetDAOImpl
import scala.collection.mutable.ListBuffer
import es.weso.wiFetcher.entities.Indicator
import es.weso.wiFetcher.entities.IndicatorType
import es.weso.wiFetcher.entities.IndicatorHighLow
import es.weso.wiFetcher.entities.Provider
import scala.collection.mutable.HashMap
@RunWith(classOf[JUnitRunner])
class DatasetDAOImplSuite extends FunSuite with BeforeAndAfter
with Matchers{
val indicators : ListBuffer[Indicator] = {
val list :ListBuffer[Indicator] = ListBuffer.empty
list += Indicator("A",
IndicatorType.Primary,
HashMap("en" -> "test indicator"),
HashMap("en" -> "test indicator description"),
0,
0,
0,
0.5,
IndicatorHighLow.High,
"",
null,
ListBuffer(Provider("", "", "", "")), false)
list += Indicator("B",
IndicatorType.Primary,
HashMap("en" -> "test indicator 2"),
HashMap("en" -> "test indicator description 2"),
0,
0,
0,
0.5,
IndicatorHighLow.High,
"",
null,
ListBuffer(Provider("", "", "", "")), false)
list += Indicator("C",
IndicatorType.Primary,
HashMap("en" -> "test indicator 3"),
HashMap("en" -> "test indicator description 3"),
0,
0,
0,
0.5,
IndicatorHighLow.High,
"",
null,
ListBuffer(Provider("", "", "", "")), false)
list
}
  test("Correctly load all datasets and verify that all data is loaded") {
val datasetDao : DatasetDAOImpl = new DatasetDAOImpl(indicators.toList)
val datasets : List[Dataset] = datasetDao.getDatasets
datasets.size should be (6)
indicators.foreach(indicator => {
datasets.contains(Dataset(indicator.id + "-Ordered")) should be (true)
datasets.contains(Dataset(indicator.id + "-Imputed")) should be (true)
})
}
}
|
weso/wiFetcher
|
test/es/weso/wiFetcher/dao/DatasetDAOImplSuite.scala
|
Scala
|
apache-2.0
| 2,225
|
package com.tierline.scala.activemodel
import org.squeryl._
import org.squeryl.PrimitiveTypeMode._
trait TableSupport[K, T] {
}
|
tierline/scala-activemodel
|
src/main/scala/com/tierline/scala/activemodel/TableSupport.scala
|
Scala
|
mit
| 131
|
package de.fuberlin.wiwiss.silk.preprocessing.transformer
import scala.xml.Node
/**
* Represents a Transformer
*
*/
trait Transformer{
def apply(values: List[String]): List[String]
}
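// --- Editor's illustrative sketch (not part of the original Silk source) ---
// A minimal concrete Transformer, added only to make the contract above concrete:
// it receives the raw values of a field and returns the transformed values. The
// lowercase transformer below is invented for this example.
object LowerCaseTransformer extends Transformer {
  def apply(values: List[String]): List[String] = values.map(_.toLowerCase)
}
// e.g. LowerCaseTransformer(List("Foo", "BAR")) == List("foo", "bar")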
|
fusepoolP3/p3-silk
|
silk-tools/silk-freetext-preprocessing/src/main/scala/de/fuberlin/wiwiss/silk/preprocessing/transformer/Transformer.scala
|
Scala
|
apache-2.0
| 191
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.ml.classification
import org.apache.flink.ml.common.LabeledVector
import org.apache.flink.ml.math.DenseVector
object Classification {
/** Centered data of fisheriris data set
*
*/
val trainingData = Seq[LabeledVector](
LabeledVector(1.0000, DenseVector(-0.2060, -0.2760)),
LabeledVector(1.0000, DenseVector(-0.4060, -0.1760)),
LabeledVector(1.0000, DenseVector(-0.0060, -0.1760)),
LabeledVector(1.0000, DenseVector(-0.9060, -0.3760)),
LabeledVector(1.0000, DenseVector(-0.3060, -0.1760)),
LabeledVector(1.0000, DenseVector(-0.4060, -0.3760)),
LabeledVector(1.0000, DenseVector(-0.2060, -0.0760)),
LabeledVector(1.0000, DenseVector(-1.6060, -0.6760)),
LabeledVector(1.0000, DenseVector(-0.3060, -0.3760)),
LabeledVector(1.0000, DenseVector(-1.0060, -0.2760)),
LabeledVector(1.0000, DenseVector(-1.4060, -0.6760)),
LabeledVector(1.0000, DenseVector(-0.7060, -0.1760)),
LabeledVector(1.0000, DenseVector(-0.9060, -0.6760)),
LabeledVector(1.0000, DenseVector(-0.2060, -0.2760)),
LabeledVector(1.0000, DenseVector(-1.3060, -0.3760)),
LabeledVector(1.0000, DenseVector(-0.5060, -0.2760)),
LabeledVector(1.0000, DenseVector(-0.4060, -0.1760)),
LabeledVector(1.0000, DenseVector(-0.8060, -0.6760)),
LabeledVector(1.0000, DenseVector(-0.4060, -0.1760)),
LabeledVector(1.0000, DenseVector(-1.0060, -0.5760)),
LabeledVector(1.0000, DenseVector(-0.1060, 0.1240)),
LabeledVector(1.0000, DenseVector(-0.9060, -0.3760)),
LabeledVector(1.0000, DenseVector(-0.0060, -0.1760)),
LabeledVector(1.0000, DenseVector(-0.2060, -0.4760)),
LabeledVector(1.0000, DenseVector(-0.6060, -0.3760)),
LabeledVector(1.0000, DenseVector(-0.5060, -0.2760)),
LabeledVector(1.0000, DenseVector(-0.1060, -0.2760)),
LabeledVector(1.0000, DenseVector(0.0940, 0.0240)),
LabeledVector(1.0000, DenseVector(-0.4060, -0.1760)),
LabeledVector(1.0000, DenseVector(-1.4060, -0.6760)),
LabeledVector(1.0000, DenseVector(-1.1060, -0.5760)),
LabeledVector(1.0000, DenseVector(-1.2060, -0.6760)),
LabeledVector(1.0000, DenseVector(-1.0060, -0.4760)),
LabeledVector(1.0000, DenseVector(0.1940, -0.0760)),
LabeledVector(1.0000, DenseVector(-0.4060, -0.1760)),
LabeledVector(1.0000, DenseVector(-0.4060, -0.0760)),
LabeledVector(1.0000, DenseVector(-0.2060, -0.1760)),
LabeledVector(1.0000, DenseVector(-0.5060, -0.3760)),
LabeledVector(1.0000, DenseVector(-0.8060, -0.3760)),
LabeledVector(1.0000, DenseVector(-0.9060, -0.3760)),
LabeledVector(1.0000, DenseVector(-0.5060, -0.4760)),
LabeledVector(1.0000, DenseVector(-0.3060, -0.2760)),
LabeledVector(1.0000, DenseVector(-0.9060, -0.4760)),
LabeledVector(1.0000, DenseVector(-1.6060, -0.6760)),
LabeledVector(1.0000, DenseVector(-0.7060, -0.3760)),
LabeledVector(1.0000, DenseVector(-0.7060, -0.4760)),
LabeledVector(1.0000, DenseVector(-0.7060, -0.3760)),
LabeledVector(1.0000, DenseVector(-0.6060, -0.3760)),
LabeledVector(1.0000, DenseVector(-1.9060, -0.5760)),
LabeledVector(1.0000, DenseVector(-0.8060, -0.3760)),
LabeledVector(-1.0000, DenseVector(1.0940, 0.8240)),
LabeledVector(-1.0000, DenseVector(0.1940, 0.2240)),
LabeledVector(-1.0000, DenseVector(0.9940, 0.4240)),
LabeledVector(-1.0000, DenseVector(0.6940, 0.1240)),
LabeledVector(-1.0000, DenseVector(0.8940, 0.5240)),
LabeledVector(-1.0000, DenseVector(1.6940, 0.4240)),
LabeledVector(-1.0000, DenseVector(-0.4060, 0.0240)),
LabeledVector(-1.0000, DenseVector(1.3940, 0.1240)),
LabeledVector(-1.0000, DenseVector(0.8940, 0.1240)),
LabeledVector(-1.0000, DenseVector(1.1940, 0.8240)),
LabeledVector(-1.0000, DenseVector(0.1940, 0.3240)),
LabeledVector(-1.0000, DenseVector(0.3940, 0.2240)),
LabeledVector(-1.0000, DenseVector(0.5940, 0.4240)),
LabeledVector(-1.0000, DenseVector(0.0940, 0.3240)),
LabeledVector(-1.0000, DenseVector(0.1940, 0.7240)),
LabeledVector(-1.0000, DenseVector(0.3940, 0.6240)),
LabeledVector(-1.0000, DenseVector(0.5940, 0.1240)),
LabeledVector(-1.0000, DenseVector(1.7940, 0.5240)),
LabeledVector(-1.0000, DenseVector(1.9940, 0.6240)),
LabeledVector(-1.0000, DenseVector(0.0940, -0.1760)),
LabeledVector(-1.0000, DenseVector(0.7940, 0.6240)),
LabeledVector(-1.0000, DenseVector(-0.0060, 0.3240)),
LabeledVector(-1.0000, DenseVector(1.7940, 0.3240)),
LabeledVector(-1.0000, DenseVector(-0.0060, 0.1240)),
LabeledVector(-1.0000, DenseVector(0.7940, 0.4240)),
LabeledVector(-1.0000, DenseVector(1.0940, 0.1240)),
LabeledVector(-1.0000, DenseVector(-0.1060, 0.1240)),
LabeledVector(-1.0000, DenseVector(-0.0060, 0.1240)),
LabeledVector(-1.0000, DenseVector(0.6940, 0.4240)),
LabeledVector(-1.0000, DenseVector(0.8940, -0.0760)),
LabeledVector(-1.0000, DenseVector(1.1940, 0.2240)),
LabeledVector(-1.0000, DenseVector(1.4940, 0.3240)),
LabeledVector(-1.0000, DenseVector(0.6940, 0.5240)),
LabeledVector(-1.0000, DenseVector(0.1940, -0.1760)),
LabeledVector(-1.0000, DenseVector(0.6940, -0.2760)),
LabeledVector(-1.0000, DenseVector(1.1940, 0.6240)),
LabeledVector(-1.0000, DenseVector(0.6940, 0.7240)),
LabeledVector(-1.0000, DenseVector(0.5940, 0.1240)),
LabeledVector(-1.0000, DenseVector(-0.1060, 0.1240)),
LabeledVector(-1.0000, DenseVector(0.4940, 0.4240)),
LabeledVector(-1.0000, DenseVector(0.6940, 0.7240)),
LabeledVector(-1.0000, DenseVector(0.1940, 0.6240)),
LabeledVector(-1.0000, DenseVector(0.1940, 0.2240)),
LabeledVector(-1.0000, DenseVector(0.9940, 0.6240)),
LabeledVector(-1.0000, DenseVector(0.7940, 0.8240)),
LabeledVector(-1.0000, DenseVector(0.2940, 0.6240)),
LabeledVector(-1.0000, DenseVector(0.0940, 0.2240)),
LabeledVector(-1.0000, DenseVector(0.2940, 0.3240)),
LabeledVector(-1.0000, DenseVector(0.4940, 0.6240)),
LabeledVector(-1.0000, DenseVector(0.1940, 0.1240))
)
val expectedWeightVector = DenseVector(-1.95, -3.45)
}
|
yew1eb/flink
|
flink-libraries/flink-ml/src/test/scala/org/apache/flink/ml/classification/Classification.scala
|
Scala
|
apache-2.0
| 6,883
|
import sbt._
import sbt.Keys._
import sbt.plugins.JvmPlugin
import com.typesafe.sbt.packager.MappingsHelper.contentOf
/**
* SBT plugin to build the frontend through yarn.
*
*/
object YarnPlugin extends AutoPlugin {
override val requires: Plugins = JvmPlugin
override val trigger: PluginTrigger = NoTrigger
object Commands {
val install = Seq("yarn", "install")
val setup = Seq("yarn", "setup")
val formatValidate = Seq("yarn", "format:validate")
val test = Seq("yarn", "test")
val dist = Seq("yarn", "dist", "--")
}
object autoImport {
/**
* Install Javascript packages with yarn.
*/
val yarnInstall: TaskKey[Unit] = taskKey[Unit](s"execute: ${Commands.install}")
/**
* Setup the project.
*
* In our case, install Elm packages.
*/
val yarnSetup: TaskKey[Unit] = taskKey[Unit](s"execute: ${Commands.setup}")
/**
* Check Elm formatting.
*/
val yarnFormatValidate: TaskKey[Unit] = taskKey[Unit](s"execute: ${Commands.formatValidate}")
/**
* Run frontend tests.
*/
val yarnTest: TaskKey[Unit] = taskKey[Unit](s"execute: ${Commands.test}")
/**
* Build the webpack bundles through yarn dist.
*/
val yarnDist: TaskKey[Seq[File]] = taskKey[Seq[File]](s"execute: ${Commands.dist}")
}
import autoImport._
override def projectSettings: Seq[_root_.sbt.Def.Setting[_]] = Seq(
cleanFiles ++= Seq(
baseDirectory.value / "dist", // The webpack output
baseDirectory.value / "node_modules", // The node modules
baseDirectory.value / "elm-stuff", // Elm packages
baseDirectory.value / "tests" / "elm-stuff" // Elm packages for webui tests
),
yarnSetup := {
val base = baseDirectory.value
val setup = FileFunction.cached(
streams.value.cacheDirectory / "yarn-setup",
inStyle = FilesInfo.hash,
outStyle = FilesInfo.exists
) { _: Set[File] =>
execute(Commands.setup, base, streams.value.log)
(base / "elm-stuff" / "packages").get.toSet ++ (base / "tests" / "elm-stuff" / "packages").get.toSet
}
setup((base / "elm-package.json").get.toSet)
},
yarnInstall := {
val base = baseDirectory.value
val install = FileFunction.cached(
streams.value.cacheDirectory / "yarn-install",
inStyle = FilesInfo.hash,
outStyle = FilesInfo.exists
) { _: Set[File] =>
execute(Commands.install, base, streams.value.log)
(base / "node_modules").get.toSet
}
install((base * ("package.json" || "yarn.lock")).get.toSet)
},
yarnFormatValidate := {
execute(Commands.formatValidate, baseDirectory.value, streams.value.log)
},
yarnTest := {
execute(Commands.test, baseDirectory.value, streams.value.log)
},
yarnDist := {
val log = streams.value.log
log.info("Running webpack resource generator")
val managedResources = (resourceManaged in Compile).value
val yarnDist: Set[File] => Set[File] = FileFunction.cached(
streams.value.cacheDirectory / "yarn-dist",
inStyle = FilesInfo.hash,
outStyle = FilesInfo.exists
) {
_: Set[File] =>
// Make sure that no assets from the previous run remain here
val targetDirectory = managedResources / "public"
IO.delete(targetDirectory)
// Make webpack output to the managed resource directory.
execute(Commands.dist ++ Seq("--output-path", targetDirectory.absolutePath),
baseDirectory.value,
streams.value.log)
val generatedFiles = targetDirectory.***.get
// show the generated files to the user to ease debugging
generatedFiles.foreach(t => log.info(s"webpack generated $t"))
generatedFiles.toSet
}
// Track all source files that affect the build result
val sources = sourceDirectory.value.*** +++
baseDirectory.value * ("package.json" || "yarn.lock" || "elm-package.json" || "webpack.config.*.js")
yarnDist(sources.get.toSet).toSeq
},
yarnSetup := yarnSetup.dependsOn(yarnInstall).value,
yarnDist := yarnDist.dependsOn(yarnSetup).value,
yarnTest := yarnTest.dependsOn(yarnSetup).value,
yarnFormatValidate := yarnFormatValidate.dependsOn(yarnSetup).value,
test in Test := {
yarnTest.value
},
resourceGenerators in Compile += yarnDist.taskValue
)
private def execute(cmd: Seq[String], workingDirectory: File, log: Logger): Unit = {
val desc = s"executing: ${workingDirectory.toString}> ${cmd.mkString(" ")}"
log.info(desc)
val exitValue = Process(cmd, workingDirectory) ! log
assert(exitValue == 0, s"Nonzero exit value '$exitValue', while $desc")
}
}
|
FRosner/cluster-broccoli
|
project/YarnPlugin.scala
|
Scala
|
apache-2.0
| 4,802
|
package test
object Test extends App {
println("SUCCESS!")
}
|
benmccann/sbt-native-packager
|
src/sbt-test/windows/java-app-archetype/src/main/scala/test/Test.scala
|
Scala
|
bsd-2-clause
| 64
|
import annotation.showAsInfix
// This version of Tuple requires full retyping of untyped trees on inlining
object typelevel {
erased def erasedValue[T]: T = compiletime.erasedValue
class Typed[T](val value: T) { type Type = T }
}
sealed trait Tuple
object Empty extends Tuple
@showAsInfix
final case class *: [H, T <: Tuple](hd: H, tl: T) extends Tuple
object Tuple {
import typelevel.*
type Empty = Empty.type
class TupleOps(val xs: Tuple) extends AnyVal {
inline def *: [H] (x: H): Tuple = new *:(x, xs)
inline def size: Int = inline xs match {
case Empty => 0
case _ *: xs1 => xs1.size + 1
}
inline def apply(n: Int): Any = inline xs match {
case x *: _ if n == 0 => x
case _ *: xs1 if n > 0 => xs1.apply(n - 1)
}
inline def **: (ys: Tuple): Tuple = inline ys match {
case Empty => xs
case y *: ys1 => y *: (ys1 **: xs)
}
inline def head = inline xs match {
case x *: _ => x
}
inline def tail = inline xs match {
case _ *: xs => xs
}
}
val emptyArray = Array[Object]()
inline def toObj(t: Any) = t.asInstanceOf[Object]
inline def toArray(t: Tuple): Array[Object] = inline t.size match {
case 0 => emptyArray
case 1 => Array(toObj(t(0)))
case 2 => Array(toObj(t(0)), toObj(t(1)))
case 3 => Array(toObj(t(0)), toObj(t(1)), toObj(t(2)))
case 4 => Array(toObj(t(0)), toObj(t(1)), toObj(t(2)), toObj(t(3)))
}
inline implicit def tupleDeco(xs: Tuple): TupleOps = new TupleOps(xs)
inline def apply(): Tuple = Empty
inline def apply(x1: Any): Tuple = x1 *: Empty
inline def apply(x1: Any, x2: Any) = x1 *: x2 *: Empty
inline def apply(x1: Any, x2: Any, x3: Any) = x1 *: x2 *: x3 *: Empty
val xs0 = Tuple()
val xs1 = Tuple(2)
val xs2 = Tuple(2, "a")
val xs3 = Tuple(true, 1, 2.0)
inline val s0 = xs0.size; val s0c: 0 = s0
inline val s1 = xs1.size; val s1c: 1 = s1
inline val s2 = xs2.size; val s2c: 2 = s2
inline val s3 = xs3.size; val s3c: 3 = s3
val e0 = xs3(0); val e0c: Boolean = e0
val e1 = xs3(1); val e1c: Int = e1
val e2 = xs3(2); val e2c: Double = e2
val conc0 = xs0 **: xs3
val conc1 = xs3 **: xs0
val conc2 = xs2 **: xs3
val e3c: Int = conc0(1)
val e4c: Int = conc1(1)
val e5c: Int = conc2(0)
val e6c: Double = conc2(4)
}
object Test extends App
|
dotty-staging/dotty
|
tests/invalid/run/Tuple.scala
|
Scala
|
apache-2.0
| 2,347
|
/*
* This software is licensed under the GNU Affero General Public License, quoted below.
*
* This file is a part of PowerAPI.
*
* Copyright (C) 2011-2016 Inria, University of Lille 1.
*
* PowerAPI is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of
* the License, or (at your option) any later version.
*
* PowerAPI is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with PowerAPI.
*
* If not, please consult http://www.gnu.org/licenses/agpl-3.0.html.
*/
package org.powerapi.daemon
import akka.util.Timeout
import org.powerapi.UnitTest
import scala.concurrent.duration.DurationInt
class DaemonConfigurationSuite extends UnitTest {
override def afterAll() = {
system.terminate()
}
val timeout = Timeout(1.seconds)
"The DaemonConfiguration" should "read correctly the values from a resource file" in {
val configuration = new DaemonConfiguration {}
configuration.powerMeters should equal(
List(
(
Set("procfs-cpu-simple","libpfm-core-process"),
List(
(false,Set(),Set("firefox"),Set(),1000.milliseconds,"SUM","console"),
(false,Set(),Set("compiz"),Set(),2000.milliseconds,"AVG","chart")
)
),
(
Set("rapl"),
List(
(true,Set(),Set(),Set(),3000.milliseconds,"SUM","file=>out.papi")
)
)
)
)
}
}
|
Spirals-Team/powerapi
|
powerapi-daemon/src/test/scala/org/powerapi/daemon/DaemonConfigurationSuite.scala
|
Scala
|
agpl-3.0
| 1,785
|
/*
* TCPTransmitterImpl.scala
* (ScalaOSC)
*
* Copyright (c) 2008-2021 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU Lesser General Public License v2.1+
*
*
* For further information, please contact Hanns Holger Rutz at
* contact@sciss.de
*/
package de.sciss.osc
package impl
import java.io.IOException
import java.net.SocketAddress
import java.nio.Buffer
import java.nio.channels.SocketChannel
private[osc] final class TCPTransmitterImpl(val channel: SocketChannel,
protected val target: SocketAddress,
protected val config: TCP.Config)
extends TransmitterImpl with SingleChannelDirectImpl with TCPSingleChannelImpl with Channel.Directed.Output {
override def toString: String = s"${TCP.name}.Transmitter($target)@${hashCode().toHexString}"
override def close(): Unit = closeChannel()
def isConnected: Boolean = channel.isConnected
@throws(classOf[IOException])
def connect(): Unit = connectChannel()
@throws(classOf[IOException])
def ! (p: Packet): Unit = bufSync.synchronized {
(buf: Buffer).clear()
(buf: Buffer).position(4)
p.encode(codec, buf)
val len = buf.position() - 4
(buf: Buffer).flip()
buf.putInt(0, len)
dumpPacket(p)
channel.write(buf)
}
}
|
Sciss/ScalaOSC
|
shared/src/main/scala/de/sciss/osc/impl/TCPTransmitterImpl.scala
|
Scala
|
lgpl-2.1
| 1,352
|
package de.htwg.zeta.common.format.project.gdsl.shape.geoModel
import de.htwg.zeta.common.models.project.gdsl.shape.geomodel.Compartement
import de.htwg.zeta.common.models.project.gdsl.shape.geomodel.Position
import de.htwg.zeta.common.models.project.gdsl.shape.geomodel.Size
import de.htwg.zeta.common.models.project.gdsl.style.Style
import org.scalatest.matchers.should.Matchers
import org.scalatest.freespec.AnyFreeSpec
import play.api.libs.json.JsSuccess
import play.api.libs.json.Json
//noinspection ScalaStyle
class CompartementFormatTest extends AnyFreeSpec with Matchers {
"A CompartementFormat should" - {
"write an object" in {
val result = CompartementFormat(GeoModelFormat.geoModelFormatProvider)
.writes(Compartement(
size = Size.default,
position = Position.default,
childGeoModels = List(),
style = Style.defaultStyle))
result.toString() shouldBe
"""{"type":"compartement","size":{"width":1,"height":1},"position":{"x":0,"y":0},"childGeoElements":[],"style":{"name":"default","description":"default","background":{"color":{"r":0,"g":0,"b":0,"a":1,"rgb":"rgb(0,0,0)","rgba":"rgba(0,0,0,1.0)","hex":"#000000"}},"font":{"name":"Arial","bold":false,"color":{"r":0,"g":0,"b":0,"a":1,"rgb":"rgb(0,0,0)","rgba":"rgba(0,0,0,1.0)","hex":"#000000"},"italic":false,"size":10},"line":{"color":{"r":0,"g":0,"b":0,"a":1,"rgb":"rgb(0,0,0)","rgba":"rgba(0,0,0,1.0)","hex":"#000000"},"style":"solid","width":1},"transparency":1}}"""
}
"read an object" in {
val result = CompartementFormat(GeoModelFormat.geoModelFormatProvider)
.reads(Json.parse(
"""{"type":"compartement",
|"size":{"width":1,"height":1},
|"position":{"x":0,"y":0},
|"childGeoElements":[],
|"style":{
| "name":"default",
| "description":"default",
| "background":{"color":"rgba(0,0,0,1.0)"},
| "font":{"name":"Arial","bold":false,"color":"rgba(0,0,0,1.0)","italic":false,"size":10},
| "line":{"color":"rgba(0,0,0,1.0)","style":"solid","width":1},
| "transparency":1}
|}""".stripMargin
))
result shouldBe JsSuccess(Compartement(
size = Size.default,
position = Position.default,
childGeoModels = List(),
style = Style.defaultStyle))
}
"fail in reading an invalid input" in {
val result = CompartementFormat(GeoModelFormat.geoModelFormatProvider)
.reads(Json.parse(
"""{"invalid":{"r":23}}"""
))
result.isSuccess shouldBe false
}
}
}
|
Zeta-Project/zeta
|
api/common/src/test/scala/de/htwg/zeta/common/format/project/gdsl/shape/geoModel/CompartementFormatTest.scala
|
Scala
|
bsd-2-clause
| 2,632
|
package picasso.frontend.compilerPlugin.utils
import scala.tools.nsc.Global
import picasso.utils.{LogCritical, LogError, LogWarning, LogNotice, LogInfo, LogDebug, Logger}
import picasso.utils.Namer
trait PatternUtils {
self: TypeUtils =>
val global: Global
import global._
import global.definitions._
  object RemoveBindings extends Transformer {
override def transform(tree: Tree): Tree = tree match {
case Bind(name, body) => super.transform(body)
case _ => super.transform(tree)
}
}
/** Removes the Bind node from a tree (pattern). */
  def removeBindings(tree: Tree): Tree = RemoveBindings.transform(tree)
def bindingsIn(tree: Tree): List[Symbol] = {
val finder = new FilterTreeTraverser({case Bind(_,_) => true; case _ => false})
finder.traverse(tree)
finder.hits.toList.map(_.symbol)
}
  /** Takes a pattern with bindings and returns the list of assignments that correspond to the bindings
   * @param enclosingSymbol the method in which the pattern is located. This is needed to create new local values
   * @param matched the value that is matched (as Ident or Select), used to create the assignments
* @param pattern the pattern that the value is supposed to match
*/
def assignmentsOfBindings(enclosingSymbol: Symbol, matched: Tree, pattern: Tree): List[Tree] = {
val stack = new scala.collection.mutable.Stack[(Type, Name)] //nth arg of Type
val assignments = new scala.collection.mutable.ListBuffer[(List[(Type, Name)], Symbol)]
def explorePattern(pattern: Tree): Unit = pattern match {
case Bind(name, body) =>
assignments += (stack.toList -> pattern.symbol)
explorePattern(body)
case Apply(id, args) =>
val currentType = pattern.tpe
val constructorSym = currentType.members find (_.isPrimaryConstructor) get
val constructorType = constructorSym.tpe
assert(constructorType.typeParams == Nil) //not a polytype (hopefully)
        val paramsName = constructorType.params map (_.name) //better to get the name and later find the corresponding accessor
(paramsName zip args) foreach { case (name, argPattern) =>
stack push (currentType -> name)
explorePattern(argPattern)
stack.pop
}
case Alternative(lst) => //no bindings inside an alternative
      case Star(stared) => //no bindings inside a star pattern
case Ident(id) => //nothing to do: normally if not already WILDCARD it was transformed into (id @ _)
case Literal(lit) => //nothing to do
case Typed(e,tpt) => explorePattern(e)
case TypeTree() => //nothing to do
case err => Logger.logAndThrow("Plugin", LogError, "PatternUtils, assignmentsOfBindings: "+err)
}
//from the trace, build an unfolded list of assignments
//TODO cache prefix
def makeAssigns(trace: (List[(Type, Name)], Symbol)): List[Tree] = {
val fromRoot = trace._1.reverse
val lastSym = trace._2
val tempValues = fromRoot map (enclosingSymbol.newValue(Namer(enclosingSymbol.name + "$pattern"), pattern.pos) setInfo _._1)
val lhsSyms = tempValues ::: List(lastSym)
val firstRhs = if (lhsSyms.head.tpe =:= matched.tpe) matched else Apply(TypeApply(Select(matched, Any_asInstanceOf), List(TypeTree(lhsSyms.head.tpe))), List()) //casting if needed
val tailRhs = (tempValues zip fromRoot) map {case (sym, (tpe, name)) =>
val methodSym = getAccessorFor(tpe, name) getOrElse (Logger.logAndThrow("Plugin", LogError, tpe + " has no member called " + name))
val returnType = methodSym.tpe.resultType
Apply(Select(Ident(sym), methodSym), List()) setType returnType setPos pattern.pos
} //TODO this might also need some casting
val rhss = firstRhs :: tailRhs
val assigns = (lhsSyms zip rhss) map {case (sym, rhs) => ValDef(sym, rhs) setPos pattern.pos}
assigns
}
//
explorePattern(pattern)
assignments.toList flatMap makeAssigns
}
/** expr match pattern (needed for exception in control flow) */
def exprMatchPattern(expr: Tree, pattern: Tree): Boolean = {
    //a simple version that just checks the type: that should be enough for most of the cases (at the beginning)
expr.tpe matchesPattern pattern.tpe
//TODO the real stuff
}
/** Checks if two patterns are unifiable (for disjointness). */
def isUnifiable(p1: Tree, p2: Tree): Boolean = (p1,p2) match {
case (Bind(_, p1), p2) => isUnifiable(p1,p2)
case (p1, Bind(_, p2)) => isUnifiable(p1,p2)
case (Alternative(lst), p2) => lst.exists( p1 => isUnifiable(p1,p2) )
case (p1, Alternative(lst)) => lst.exists( p2 => isUnifiable(p1,p2) )
    case (Star(p1), Star(p2)) => true //if there are no elements then both match
    case (p1, p2 @ Star(_)) => isUnifiable(p2,p1)
    case (Star(p1), p2) => isUnifiable(p1, p2) //can match if exactly one element on the left side
case (p1 @ Ident(nme.WILDCARD), p2) => p1.tpe <:< p2.tpe || p2.tpe <:< p1.tpe
case (p1, p2 @ Ident(nme.WILDCARD)) => p1.tpe <:< p2.tpe || p2.tpe <:< p1.tpe
case (Ident(i1), Ident(i2)) => i1 == i2
case (Literal(v1),Literal(v2)) => v1 == v2
case (Apply(id1, args1), Apply(id2, args2)) =>
args1.length == args2.length &&
isUnifiable(id1, id2) &&
args1.zip(args2).forall{ case (p1,p2) => isUnifiable(p1,p2) }
case (Typed(e1,tpt1), e2) => isUnifiable(e1,e2) //TODO can really drop the 'Typed' ??
case (e1, Typed(e2,tpt2)) => isUnifiable(e1,e2) //TODO can really drop the 'Typed' ??
case (tt1 @ TypeTree(), tt2 @ TypeTree()) => tt1.tpe <:< tt2.tpe || tt2.tpe <:< tt1.tpe
//cases that do not match
case (Ident(_), _) | (_, Ident(_)) => false
    case (Literal(_), _) | (_, Literal(_)) => false
case (Apply(_, _), _) | (_, Apply(_, _)) => false
case (TypeTree(), _) | (_ , TypeTree()) => false
//TODO what about the UnApply
case (err1,err2) => Logger.logAndThrow("Plugin", LogError, "PatternUtils, isUnifiable:("+err1+","+err2+")")
}
def allDisjointPatterns(cases: List[CaseDef]): Boolean =
cases forall (cd1 => ! (cases exists (cd2 => cd1 != cd2 && isUnifiable(cd1.pat, cd2.pat))))
}
|
dzufferey/picasso
|
frontend/compilerPlugin/src/main/scala/picasso/frontend/compilerPlugin/utils/PatternUtils.scala
|
Scala
|
bsd-2-clause
| 6,164
|
import sbt._
import Keys._
object FPInScalaBuild extends Build {
val opts = Project.defaultSettings ++ Seq(
scalaVersion := "2.11.6",
resolvers += "Typesafe Repository" at "http://repo.typesafe.com/typesafe/releases/"
)
lazy val root =
Project(id = "fpinscala",
base = file("."),
settings = opts ++ Seq(
onLoadMessage ~= (_ + nio2check())
)) aggregate (chapterCode, exercises, answers)
lazy val chapterCode =
Project(id = "chapter-code",
base = file("chaptercode"),
settings = opts)
lazy val exercises =
Project(id = "exercises",
base = file("exercises"),
settings = opts)
lazy val answers =
Project(id = "answers",
base = file("answers"),
settings = opts)
def nio2check(): String = {
val cls = "java.nio.channels.AsynchronousFileChannel"
try {Class.forName(cls); ""}
catch {case _: ClassNotFoundException =>
("\\nWARNING: JSR-203 \\"NIO.2\\" (" + cls + ") not found.\\n" +
"You are probably running Java < 1.7; answers will not compile.\\n" +
"You seem to be running " + System.getProperty("java.version") + ".\\n" +
"Try `project exercises' before compile, or upgrading your JDK.")
}
}
}
|
geoquant/fpinscala
|
project/Build.scala
|
Scala
|
mit
| 1,292
|
package akka.event
import language.existentials
object Logging {
/**
   * Wrapper for the numeric log level; being a value class, it erases to Int.
*/
case class LogLevel(asInt: Int) extends AnyVal {
@inline final def >=(other: LogLevel): Boolean = asInt >= other.asInt
@inline final def <=(other: LogLevel): Boolean = asInt <= other.asInt
@inline final def >(other: LogLevel): Boolean = asInt > other.asInt
@inline final def <(other: LogLevel): Boolean = asInt < other.asInt
}
/**
* Log level in numeric form, used when deciding whether a certain log
* statement should generate a log event. Predefined levels are ErrorLevel (1)
* to DebugLevel (4). In case you want to add more levels, loggers need to
* be subscribed to their event bus channels manually.
*/
final val ErrorLevel = LogLevel(1)
final val WarningLevel = LogLevel(2)
final val InfoLevel = LogLevel(3)
final val DebugLevel = LogLevel(4)
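  // Hedged illustrative sketch (added for exposition; not part of the upstream file): the numeric
  // ordering drives filtering, so a subscriber configured at WarningLevel accepts Error and
  // Warning events but not Info or Debug ones. The val name is hypothetical.
  private val exampleWarningThresholdChecks: (Boolean, Boolean) =
    (ErrorLevel <= WarningLevel, InfoLevel <= WarningLevel) // (true, false)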
/**
* Base type of LogEvents
*/
sealed trait LogEvent {
/**
* When this LogEvent was created according to System.currentTimeMillis
*/
val timestamp: Long = System.currentTimeMillis
/**
* The LogLevel of this LogEvent
*/
def level: LogLevel
/**
* The source of this event
*/
def logSource: String
/**
* The class of the source of this event
*/
def logClass: Class[_]
/**
* The message, may be any object or null.
*/
def message: Any
}
/**
* For ERROR Logging
*/
case class Error(cause: Throwable, logSource: String, logClass: Class[_], message: Any = "") extends LogEvent {
def this(logSource: String, logClass: Class[_], message: Any) = this(Error.NoCause, logSource, logClass, message)
override def level = ErrorLevel
}
object Error {
def apply(logSource: String, logClass: Class[_], message: Any) =
new Error(NoCause, logSource, logClass, message)
/** Null Object used for errors without cause Throwable */
object NoCause extends Throwable
}
def noCause = Error.NoCause
/**
* For WARNING Logging
*/
case class Warning(logSource: String, logClass: Class[_], message: Any = "") extends LogEvent {
override def level = WarningLevel
}
/**
* For INFO Logging
*/
case class Info(logSource: String, logClass: Class[_], message: Any = "") extends LogEvent {
override def level = InfoLevel
}
/**
* For DEBUG Logging
*/
case class Debug(logSource: String, logClass: Class[_], message: Any = "") extends LogEvent {
override def level = DebugLevel
}
}
|
sjrd/scala-js-actors
|
actors/src/main/scala/akka/event/Logging.scala
|
Scala
|
bsd-3-clause
| 2,602
|
//
// Transformation.scala -- Scala traits NamedASTFunction and NamedASTTransform and object EmptyFunction
// Project OrcScala
//
// $Id: NamedASTTransform.scala 3099 2012-07-21 02:33:18Z laurenyew $
//
// Created by dkitchin on Jul 12, 2010.
//
// Copyright (c) 2011 The University of Texas at Austin. All rights reserved.
//
// Use and redistribution of this file is governed by the license terms in
// the LICENSE file found in the project's top-level directory and also found at
// URL: http://orc.csres.utexas.edu/license.shtml .
//
package orc.ast.oil.named
/** @author dkitchin
*/
trait NamedASTFunction {
def apply(a: Argument): Argument
def apply(e: Expression): Expression
def apply(t: Type): Type
def apply(d: Def): Def
def apply(ast: NamedAST): NamedAST = {
ast match {
case a: Argument => this(a)
case e: Expression => this(e)
case t: Type => this(t)
case d: Def => this(d)
}
}
def andThen(g: NamedASTFunction): NamedASTFunction = {
val f = this
new NamedASTFunction {
def apply(a: Argument): Argument = g(f(a))
def apply(e: Expression): Expression = g(f(e))
def apply(t: Type): Type = g(f(t))
def apply(d: Def): Def = g(f(d))
}
}
}
object EmptyFunction extends PartialFunction[Any, Nothing] {
def isDefinedAt(x: Any): Boolean = false
def apply(x: Any): Nothing = throw new AssertionError("EmptyFunction is undefined for all inputs.")
}
trait NamedASTTransform extends NamedASTFunction {
def apply(a: Argument): Argument = transform(a, Nil)
def apply(e: Expression): Expression = transform(e, Nil, Nil)
def apply(t: Type): Type = transform(t, Nil)
def apply(d: Def): Def = transform(d, Nil, Nil)
def onExpression(context: List[BoundVar], typecontext: List[BoundTypevar]): PartialFunction[Expression, Expression] = EmptyFunction
def onArgument(context: List[BoundVar]): PartialFunction[Argument, Argument] = EmptyFunction
def onType(typecontext: List[BoundTypevar]): PartialFunction[Type, Type] = EmptyFunction
def onDef(context: List[BoundVar], typecontext: List[BoundTypevar]): PartialFunction[Def, Def] = EmptyFunction
def recurseWithContext(context: List[BoundVar], typecontext: List[BoundTypevar]) =
new NamedASTFunction {
def apply(a: Argument) = transform(a, context)
def apply(e: Expression) = transform(e, context, typecontext)
def apply(t: Type) = transform(t, typecontext)
def apply(d: Def) = transform(d, context, typecontext)
}
def transform(a: Argument, context: List[BoundVar]): Argument = {
val pf = onArgument(context)
if (pf isDefinedAt a) { a -> pf } else a
}
//Transform from Named to Extended AST to AST type
def transform(e: Expression, context: List[BoundVar], typecontext: List[BoundTypevar]): Expression = {
val pf = onExpression(context, typecontext)
if (pf isDefinedAt e) {
e -> pf
} else {
val recurse = recurseWithContext(context, typecontext)
e -> {
case Stop() => Stop()
case a: Argument => recurse(a)
case Call(target, args, typeargs) => {
val newtarget = recurse(target)
val newargs = args map { recurse(_) }
val newtypeargs = typeargs map { _ map { recurse(_) } }
Call(newtarget, newargs, newtypeargs)
}
case left || right => recurse(left) || recurse(right)
case left > x > right => recurse(left) > x > transform(right, x :: context, typecontext)
case left < x < right => transform(left, x :: context, typecontext) < x < recurse(right)
case left ow right => recurse(left) ow recurse(right)
case DeclareDefs(defs, body) => {
val defnames = defs map { _.name }
val newdefs = defs map { transform(_, defnames ::: context, typecontext) }
val newbody = transform(body, defnames ::: context, typecontext)
DeclareDefs(newdefs, newbody)
}
case DeclareType(u, t, body) => {
val newt = transform(t, u :: typecontext)
val newbody = transform(body, context, u :: typecontext)
DeclareType(u, newt, newbody)
}
case HasType(body, expectedType) => HasType(recurse(body), recurse(expectedType))
        //recurse on the body's type. We don't have types for Security Level
case DeclareSecurityLevel(name, parents, children, body) => DeclareSecurityLevel(name,parents,children, recurse(body))
case HasSecurityLevel(body, level) => HasSecurityLevel(recurse(body), level)
}
}
}
def transform(t: Type, typecontext: List[BoundTypevar]): Type = {
val pf = onType(typecontext)
if (pf isDefinedAt t) {
t -> pf
} else {
def recurse(t: Type) = transform(t, typecontext)
t -> {
case Bot() => Bot()
case Top() => Top()
case ImportedType(cl) => ImportedType(cl)
case ClassType(cl) => ClassType(cl)
case u: Typevar => u
case TupleType(elements) => TupleType(elements map recurse)
case RecordType(entries) => {
val newEntries = entries map { case (s, t) => (s, recurse(t)) }
RecordType(newEntries)
}
case TypeApplication(tycon, typeactuals) => {
TypeApplication(recurse(tycon), typeactuals map recurse)
}
case AssertedType(assertedType) => AssertedType(recurse(assertedType))
case FunctionType(typeformals, argtypes, returntype) => {
val newtypecontext = typeformals ::: typecontext
val newargtypes = argtypes map { transform(_, newtypecontext) }
val newreturntype = transform(returntype, newtypecontext)
FunctionType(typeformals, newargtypes, newreturntype)
}
case TypeAbstraction(typeformals, t) => {
TypeAbstraction(typeformals, transform(t, typeformals ::: typecontext))
}
case VariantType(self, typeformals, variants) => {
val newTypeContext = self :: typeformals ::: typecontext
val newVariants =
for ((name, variant) <- variants) yield {
(name, variant map { transform(_, newTypeContext) })
}
VariantType(self, typeformals, newVariants)
}
}
}
}
def transform(d: Def, context: List[BoundVar], typecontext: List[BoundTypevar]): Def = {
val pf = onDef(context, typecontext)
if (pf isDefinedAt d) {
d -> pf
} else {
d -> {
case Def(name, formals, body, typeformals, argtypes, returntype) => {
val newcontext = formals ::: context
val newtypecontext = typeformals ::: typecontext
val newbody = transform(body, newcontext, newtypecontext)
val newargtypes = argtypes map { _ map { transform(_, newtypecontext) } }
val newreturntype = returntype map { transform(_, newtypecontext) }
Def(name, formals, newbody, typeformals, newargtypes, newreturntype)
}
}
}
}
}
|
laurenyew/cOrcS
|
src/orc/ast/oil/named/NamedASTTransform.scala
|
Scala
|
bsd-3-clause
| 6,973
|
package com.giyeok.gitexplorer.model
import com.giyeok.gitexplorer.Util._
import java.io.File
trait GitObjects {
this: GitRepository =>
case class GitUser(name: String, email: String, date: String) {
override def toString = s"$name <$email>"
}
object GitUser {
def fromString(spec: String) = {
val (lt, gt) = (spec.indexOf('<'), spec.lastIndexOf('>'))
GitUser(spec.substring(0, lt).trim, spec.substring(lt + 1, gt).trim, spec.substring(gt + 1).trim)
}
}
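    // Hedged illustrative example (added for exposition; not part of the upstream file): shows the
    // "name <email> rest" shape GitUser.fromString expects; it yields
    // GitUser("Ada Lovelace", "ada@example.com", "1400000000 +0000"). The method name and sample
    // data are hypothetical.
    private def gitUserExample: GitUser =
        GitUser.fromString("Ada Lovelace <ada@example.com> 1400000000 +0000")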
trait GitVirtualObject {
val id: GitId
lazy val content: Array[Byte] = actualContent
protected def actualContent: Array[Byte]
// TODO make it abstract and implement it on inherited classes
def verify: Boolean = true
}
sealed abstract class GitObject extends GitVirtualObject {
val objectType: GitObject.Types
}
object GitObject {
type Types = Types.Value
object Types extends Enumeration {
val BLOB, TREE, COMMIT, TAG = Value
def unapply(name: String): Option[Types.Value] = name.toLowerCase match {
case "blob" => Some(BLOB)
case "tree" => Some(TREE)
case "commit" => Some(COMMIT)
case "tag" => Some(TAG)
case _ => None
}
}
def fromTypes(id: GitId, objType: Types, actualContent: () => Array[Byte]) = {
import Types._
objType match {
case BLOB => new GitBlobExisting(id, actualContent)
case TREE => new GitTreeExisting(id, actualContent)
case COMMIT => new GitCommitExisting(id, actualContent)
case TAG => new GitTagExisting(id, actualContent)
}
}
def fromFile(id: GitId, file: File): GitObject = {
import Types._
inflate(readFile(file)) match {
case NullSplittedByteArray(objType, content) =>
// println(objType.length, objType.toContent, content.length)
objType.toContent match {
case SpaceSplittedString(Types(objType), length) if length.toInt == content.length =>
GitObject.fromTypes(id, objType, () => content)
case _ =>
throw InvalidFormat(s"Invalid object file: $id")
}
case _ =>
throw InvalidFormat(s"Invalid object file: $id")
}
}
}
sealed trait GitIdCalculator extends GitObject {
val objectType: GitObject.Types
lazy val objectContent: Array[Byte] =
(objectType.toString.toLowerCase + " " + content.length + "\\\\0").getBytes() ++ content
lazy val id = {
// TODO implement this
new GitSHA1("")
}
}
object GitObjects extends GitObjectStore {
def hasObject(id: GitId): Boolean = {
allObjectIds contains id
}
def getObject(id: GitId): Option[GitObject] = {
if (allObjectIds contains id) {
_allObjects get id match {
case Some(o) => Some(o)
case None =>
val ids = id.toString.splitAt(2)
Some(GitObject.fromFile(id, new File(path, Seq("objects", ids._1, ids._2) mkString "/")))
}
} else None
}
lazy val allObjectIds = {
val objects = new File(path, "objects")
val prefix = "^[0-9a-f]{2}$".r
(objects.list flatMap {
case subpath if prefix matches subpath =>
val subdirs = new File(objects, subpath)
subdirs.list map { obj =>
new GitSHA1(subpath + obj)
}
case "pack" => List()
case "info" => List()
}).toSet
}
private val _allObjects = scala.collection.mutable.Map[GitId, GitObject]()
lazy val allObjects = {
(allObjectIds map { id => (id, getObject(id).get) }).toMap
}
}
abstract class GitBlob extends GitObject {
val objectType = GitObject.Types.BLOB
}
class GitBlobExisting(val id: GitId, _actualContent: () => Array[Byte]) extends GitBlob {
def this(id: GitId, _actualContent: Array[Byte]) = this(id, () => _actualContent)
def actualContent = _actualContent()
}
class GitBlobNew(_content: Array[Byte]) extends GitBlob with GitIdCalculator {
def actualContent = _content
}
object GitTree {
case class Entry(octalMode: String, name: String, objId: GitId) {
def this(title: String, objId: GitId) = this(title.substring(0, title.indexOf(' ')), title.substring(title.indexOf(' ') + 1), objId)
}
}
abstract class GitTree extends GitObject {
val objectType = GitObject.Types.TREE
val entries: List[GitTree.Entry]
}
class GitTreeExisting(val id: GitId, _actualContent: () => Array[Byte]) extends GitTree {
def this(id: GitId, _actualContent: Array[Byte]) = this(id, () => _actualContent)
def actualContent = _actualContent()
lazy val entries: List[GitTree.Entry] = {
val reader = new Object {
var pointer = 0
def hasNext = pointer < content.length
def nextTitle = {
val line = content drop pointer takeWhile (_ != '\\0') map { _.toChar }
pointer += line.length + 1
new String(line)
}
def nextSHA1 = {
val sha1 = content slice (pointer, pointer + 20)
pointer += 20
GitSHA1(sha1)
}
}
var entries = List[GitTree.Entry]()
while (reader hasNext) {
val title = reader.nextTitle
val sha1 = reader.nextSHA1
entries +:= new GitTree.Entry(title, sha1)
}
entries.reverse
}
}
class GitTreeNew(val entries: List[GitTree.Entry]) extends GitTree with GitIdCalculator {
def actualContent = {
// TODO generate from entries
new Array[Byte](0)
}
}
abstract class GitCommit extends GitObject {
val objectType = GitObject.Types.COMMIT
// println(s"=========== commit $id =============")
// println(new String(content map { _.toChar }))
// println(tree, parents, author, committer, new String(content drop messageFrom map { _.toChar }))
val tree: GitId
val parents: List[GitId]
val author: Option[GitUser]
val committer: Option[GitUser]
val message: Array[Byte]
}
class GitCommitExisting(val id: GitId, _actualContent: () => Array[Byte]) extends GitCommit {
def this(id: GitId, _actualContent: Array[Byte]) = this(id, () => _actualContent)
def actualContent = _actualContent()
lazy val (tree: GitId, parents: List[GitId], author: Option[GitUser], committer: Option[GitUser], messageFrom: Int) = retrieveInfo
lazy val message = content drop messageFrom
private def retrieveInfo = {
val lines = new LineIterator(content)
val treePattern = "^tree ([0-9a-f]{40})$".r
val tree = lines.next match {
case treePattern(treeId) => GitSHA1(treeId)
case _ => throw InvalidFormat("Invalid commit content")
}
val parentPattern = "^parent ([0-9a-f]{40})$".r
val parents = (lines processWhile {
case parentPattern(parentId) => Some(GitSHA1(parentId))
case _ => None
}).toList
val author = lines.process {
case SpaceSplittedString("author", author) => (true, Some(GitUser.fromString(author)))
case _ => (false, None)
}
val committer = lines.process {
case SpaceSplittedString("committer", committer) => (true, Some(GitUser.fromString(committer)))
case _ => (false, None)
}
(tree, parents, author, committer, lines.pointer)
}
override def verify = {
try {
retrieveInfo
true
} catch {
case _: Throwable => false
}
}
}
class GitCommitNew(val tree: GitId, val parents: List[GitId], val author: Option[GitUser], val committer: Option[GitUser],
val message: Array[Byte]) extends GitCommit with GitIdCalculator {
def actualContent = {
// TODO generate from data
new Array[Byte](0)
}
}
abstract class GitTag extends GitObject {
val objectType = GitObject.Types.TAG
val objId: GitId
val objType: String
val tagName: String
val tagger: Option[GitUser]
val message: Array[Byte]
}
class GitTagExisting(val id: GitId, _actualContent: () => Array[Byte]) extends GitTag {
def this(id: GitId, _actualContent: Array[Byte]) = this(id, () => _actualContent)
def actualContent = _actualContent()
lazy val (objId: GitId, objType: String, tagName: String, tagger: Option[GitUser], messageFrom: Int) = retrieveInfo
lazy val message = content drop messageFrom
private def retrieveInfo = {
val lines = new LineIterator(content)
val objectPattern = "^object ([0-9a-f]{40})$".r
val objId = lines.next match {
case objectPattern(objId) => GitSHA1(objId)
case _ => throw InvalidFormat("Invalid tag content - object?")
}
val objType = lines.next match {
case SpaceSplittedString("type", objType) => objType
case _ => throw InvalidFormat("Invalid tag content - type?")
}
val tagName = lines.next match {
case SpaceSplittedString("tag", tagName) => tagName
case _ => throw InvalidFormat("Invalid tag content - name?")
}
val tagger = lines.process {
case SpaceSplittedString("tagger", tagger) => (true, Some(GitUser.fromString(tagger)))
case _ => (false, None)
}
(objId, objType, tagName, tagger, lines.pointer)
}
}
class GitTagNew(val objId: GitId, val objType: String, val tagName: String, val tagger: Option[GitUser],
val message: Array[Byte]) extends GitTag with GitIdCalculator {
def actualContent = {
// TODO generate from data
new Array[Byte](0)
}
}
}
|
Joonsoo/gitexplorer
|
src/main/scala/com/giyeok/gitexplorer/model/GitObjects.scala
|
Scala
|
mit
| 10,892
|
import sbt._
import Keys._
import play.Project._
object ApplicationBuild extends Build {
val appName = "lifelog"
val appVersion = "0.0"
val commonDeps = Seq(
jdbc,
anorm,
cache,
"org.fluentd" % "fluent-logger" % "0.2.11"
)
val adminDeps = Seq(
)
val websiteDeps = Seq(
)
val batchDeps = Seq(
)
val commonProj = play.Project(
appName + "-common",
appVersion,
commonDeps,
path = file(appName + "-common")
)
val adminProj = play.Project(
appName + "-admin",
appVersion,
adminDeps,
path = file(appName + "-admin")
).dependsOn(commonProj)
val websiteProj = play.Project(
appName + "-website",
appVersion,
websiteDeps,
path = file(appName + "-website")
).dependsOn(commonProj)
val batchProj = play.Project(
appName + "-batch",
appVersion,
batchDeps,
path = file(appName + "-batch")
).dependsOn(commonProj)
val main = play.Project(
appName,
appVersion
).dependsOn(
adminProj,
websiteProj,
batchProj
).aggregate(
adminProj,
websiteProj,
batchProj
)
}
|
agwlvssainokuni/lifelog
|
project/Build.scala
|
Scala
|
apache-2.0
| 1,124
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.iota.fey.performer
case class UnknownException(message: String) extends Exception(message)
|
barbaragomes/incubator-iota
|
performers/zmq/src/main/scala/org/apache/iota/fey/performer/UnknownException.scala
|
Scala
|
apache-2.0
| 912
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.oap.filecache
import java.util.concurrent.ConcurrentHashMap
import java.util.function
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import com.google.common.base.Throwables
import org.apache.spark.internal.Logging
import org.apache.spark.scheduler.TaskLocation
import org.apache.spark.sql.execution.datasources.oap.filecache.FiberSensor.HostFiberCache
import org.apache.spark.sql.execution.datasources.oap.utils.CacheStatusSerDe
import org.apache.spark.sql.internal.oap.OapConf
import org.apache.spark.sql.oap.OapRuntime
import org.apache.spark.sql.oap.listener.SparkListenerCustomInfoUpdate
import org.apache.spark.util.collection.OapBitSet
private[oap] case class FiberCacheStatus(
file: String,
bitmask: OapBitSet,
groupCount: Int,
fieldCount: Int) {
val cachedFiberCount = bitmask.cardinality()
}
// FiberSensor is the FiberCache info recorder on the Driver; it contains a file cache location mapping
// (for cache locality) and metrics info
private[sql] class FiberSensor(
private[filecache] val fileToHosts: ConcurrentHashMap[String, ArrayBuffer[HostFiberCache]]
) extends Logging {
  private[filecache] def updateRecordingMap(fromHost: String, comingStatus: FiberCacheStatus) =
    synchronized {
      val currentHostsForFile = if (fileToHosts.containsKey(comingStatus.file)) {
        fileToHosts.get(comingStatus.file)
      } else {
        new ArrayBuffer[HostFiberCache](0)
      }
      val (_, theRest) = currentHostsForFile.partition(_.host == fromHost)
      val newHostsForFile = (HostFiberCache(fromHost, comingStatus) +: theRest)
        .sorted.takeRight(FiberSensor.MAX_HOSTS_MAINTAINED)
      fileToHosts.put(comingStatus.file, newHostsForFile)
    }
private[filecache] def discardOutdatedInfo(host: String) = synchronized {
for ((k: String, v: ArrayBuffer[HostFiberCache]) <- fileToHosts.asScala) {
val(_, kept) = v.partition(_.host == host)
if (kept.size == 0) {
fileToHosts.remove(k)
} else {
fileToHosts.put(k, kept)
}
}
}
def updateLocations(fiberInfo: SparkListenerCustomInfoUpdate): Unit = {
val updateExecId = fiberInfo.executorId
val updateHostName = fiberInfo.hostName
// When the file location has a form like executor_[hostname]_[executorid], DAGScheduler will
    // recognize it as an ExecutorCacheTaskLocation and the corresponding task could be PROCESS_LOCAL
val host = s"${TaskLocation.executorLocationTag}${updateHostName}_$updateExecId"
val fiberCacheStatus = CacheStatusSerDe.deserialize(fiberInfo.customizedInfo)
logDebug(s"Got updated fiber info from host: $updateHostName, executorId: $updateExecId," +
s"host is $host, info array len is ${fiberCacheStatus.size}")
    // Incoming information from a certain executor requires discarding its previous records so as to
// reflect Fiber eviction
discardOutdatedInfo(host)
fiberCacheStatus.foreach(updateRecordingMap(host, _))
}
// TODO: define a function to wrap this and make it private
private[sql] val executorToCacheManager = new ConcurrentHashMap[String, CacheStats]()
def updateMetrics(fiberInfo: SparkListenerCustomInfoUpdate): Unit = {
if (fiberInfo.customizedInfo.nonEmpty) {
try {
val cacheMetrics = CacheStats(fiberInfo.customizedInfo)
executorToCacheManager.put(fiberInfo.executorId, cacheMetrics)
logDebug(s"execID:${fiberInfo.executorId}, host:${fiberInfo.hostName}," +
s" ${cacheMetrics.toDebugString}")
} catch {
case t: Throwable =>
val stack = Throwables.getStackTraceAsString(t)
logError(s"FiberSensor parse json failed, $stack")
}
}
}
def getExecutorToCacheManager(): ConcurrentHashMap[String, CacheStats] = {
executorToCacheManager
}
/**
   * Get the hosts that have fibers cached for the given fiber file.
   * The current implementation only returns one host, but the plural API name [[getHosts]] is kept.
* @param filePath fiber file's path
* @return
*/
def getHosts(filePath: String): Seq[String] = {
lazy val newHostFiberArray = new function.Function[String, ArrayBuffer[HostFiberCache]]() {
override def apply(t: String): ArrayBuffer[HostFiberCache] = {
new ArrayBuffer[HostFiberCache](0)
}
}
val hosts = fileToHosts.computeIfAbsent(filePath, newHostFiberArray)
hosts.sorted.takeRight(FiberSensor.NUM_GET_HOSTS).reverse.map(_.host)
}
}
private[sql] object FiberSensor {
case class HostFiberCache(host: String, status: FiberCacheStatus)
extends Ordered[HostFiberCache] {
override def compare(another: HostFiberCache): Int = {
status.cachedFiberCount - another.status.cachedFiberCount
}
}
private def conf = OapRuntime.getOrCreate.sparkSession.conf
val NUM_GET_HOSTS = conf.get(
OapConf.OAP_CACHE_FIBERSENSOR_GETHOSTS_NUM,
OapConf.OAP_CACHE_FIBERSENSOR_GETHOSTS_NUM.defaultValue.get)
val MAX_HOSTS_MAINTAINED = conf.get(
OapConf.OAP_CACHE_FIBERSENSOR_MAXHOSTSMAINTAINED_NUM,
OapConf.OAP_CACHE_FIBERSENSOR_MAXHOSTSMAINTAINED_NUM.defaultValue.get)
}
|
Intel-bigdata/OAP
|
oap-cache/oap/src/main/scala/org/apache/spark/sql/execution/datasources/oap/filecache/FiberSensor.scala
|
Scala
|
apache-2.0
| 5,945
|
package lila.ublog
import akka.stream.scaladsl._
import cats.implicits._
import com.softwaremill.tagging._
import org.joda.time.DateTime
import play.api.i18n.Lang
import reactivemongo.akkastream.cursorProducer
import reactivemongo.api._
import scala.concurrent.ExecutionContext
import lila.db.dsl._
import lila.hub.actorApi.timeline.{ Propagate, UblogPostLike }
import lila.memo.SettingStore
import lila.user.User
final class UblogRank(
colls: UblogColls,
timeline: lila.hub.actors.Timeline,
factor: SettingStore[Float] @@ UblogRankFactor
)(implicit ec: ExecutionContext, mat: akka.stream.Materializer) {
import UblogBsonHandlers._
private def selectLiker(userId: User.ID) = $doc("likers" -> userId)
def liked(post: UblogPost)(user: User): Fu[Boolean] =
colls.post.exists($id(post.id) ++ selectLiker(user.id))
def like(postId: UblogPost.Id, user: User, v: Boolean): Fu[UblogPost.Likes] =
colls.post
.aggregateOne() { framework =>
import framework._
Match($id(postId)) -> List(
PipelineOperator($lookup.simple(from = colls.blog, as = "blog", local = "blog", foreign = "_id")),
UnwindField("blog"),
Project(
$doc(
"tier" -> "$blog.tier",
"likes" -> $doc("$size" -> "$likers"),
"topics" -> "$topics",
"at" -> "$lived.at",
"language" -> true,
"title" -> true
)
)
)
}
.map { docOption =>
for {
doc <- docOption
id <- doc.getAsOpt[UblogPost.Id]("_id")
likes <- doc.getAsOpt[UblogPost.Likes]("likes")
topics <- doc.getAsOpt[List[UblogTopic]]("topics")
liveAt <- doc.getAsOpt[DateTime]("at")
tier <- doc int "tier"
language <- doc.getAsOpt[Lang]("language")
title <- doc string "title"
} yield (id, topics, likes, liveAt, tier, language, title)
}
.flatMap {
_.fold(fuccess(UblogPost.Likes(v ?? 1))) {
case (id, topics, prevLikes, liveAt, tier, language, title) =>
val likes = UblogPost.Likes(prevLikes.value + (if (v) 1 else -1))
colls.post.update.one(
$id(postId),
$set(
"likes" -> likes,
"rank" -> computeRank(topics, likes, liveAt, language, tier)
) ++ {
if (v) $addToSet("likers" -> user.id) else $pull("likers" -> user.id)
}
) >>- {
if (v && tier >= UblogBlog.Tier.NORMAL)
timeline ! (Propagate(UblogPostLike(user.id, id.value, title)) toFollowersOf user.id)
} inject likes
}
}
def recomputeRankOfAllPostsOfBlog(blogId: UblogBlog.Id): Funit =
colls.blog.byId[UblogBlog](blogId.full) flatMap {
_ ?? recomputeRankOfAllPostsOfBlog
}
def recomputeRankOfAllPostsOfBlog(blog: UblogBlog): Funit =
colls.post
.find(
$doc("blog" -> blog.id),
$doc("topics" -> true, "likes" -> true, "lived" -> true, "language" -> true).some
)
.cursor[Bdoc](ReadPreference.secondaryPreferred)
.list() flatMap { docs =>
lila.common.Future.applySequentially(docs) { doc =>
(
doc.string("_id"),
doc.getAsOpt[List[UblogTopic]]("topics"),
doc.getAsOpt[UblogPost.Likes]("likes"),
doc.getAsOpt[UblogPost.Recorded]("lived"),
doc.getAsOpt[Lang]("language")
).tupled ?? { case (id, topics, likes, lived, language) =>
colls.post
.updateField($id(id), "rank", computeRank(topics, likes, lived.at, language, blog.tier))
.void
}
}
}
def recomputeRankOfAllPosts: Funit =
colls.blog
.find($empty)
.sort($sort desc "tier")
.cursor[UblogBlog](ReadPreference.secondaryPreferred)
.documentSource()
.mapAsyncUnordered(4)(recomputeRankOfAllPostsOfBlog)
.toMat(lila.common.LilaStream.sinkCount)(Keep.right)
.run()
.map(nb => println(s"Recomputed rank of $nb blogs"))
def computeRank(blog: UblogBlog, post: UblogPost): Option[UblogPost.Rank] =
post.lived map { lived =>
computeRank(post.topics, post.likes, lived.at, post.language, blog.tier)
}
private def computeRank(
topics: List[UblogTopic],
likes: UblogPost.Likes,
liveAt: DateTime,
language: Lang,
tier: UblogBlog.Tier
) = UblogPost.Rank {
if (tier < UblogBlog.Tier.LOW) liveAt minusMonths 1
else
liveAt plusHours {
val boostedLikes = likes.value.toFloat + ((tier - 2) * 15).atLeast(0) // initial boost
val baseHours =
if (boostedLikes < 1) 0
else (3 * math.log(boostedLikes) + 1).toFloat.atMost(boostedLikes)
val topicsMultiplier = topics.count(t => UblogTopic.chessExists(t.value)) match {
case 0 => 0.2
case 1 => 1
case _ => 1.2
}
val langMultiplier = if (language.language == lila.i18n.defaultLang.language) 1 else 0.5
val tierMultiplier = tier match {
case UblogBlog.Tier.LOW => 0.2
case UblogBlog.Tier.NORMAL => 3
case UblogBlog.Tier.HIGH => 6
case UblogBlog.Tier.BEST => 8
case _ => 0
}
(baseHours * topicsMultiplier * langMultiplier * tierMultiplier * factor.get()).toInt
}
}
}
|
luanlv/lila
|
modules/ublog/src/main/UblogRank.scala
|
Scala
|
mit
| 5,493
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.adaptive
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.MapOutputStatistics
import org.apache.spark.internal.Logging
import org.apache.spark.sql.execution.{CoalescedPartitionSpec, ShufflePartitionSpec}
object ShufflePartitionsUtil extends Logging {
final val SMALL_PARTITION_FACTOR = 0.2
final val MERGED_PARTITION_FACTOR = 1.2
/**
* Coalesce the partitions from multiple shuffles. This method assumes that all the shuffles
* have the same number of partitions, and the partitions of same index will be read together
* by one task.
*
* The strategy used to determine the number of coalesced partitions is described as follows.
* To determine the number of coalesced partitions, we have a target size for a coalesced
* partition. Once we have size statistics of all shuffle partitions, we will do
* a pass of those statistics and pack shuffle partitions with continuous indices to a single
* coalesced partition until adding another shuffle partition would cause the size of a
* coalesced partition to be greater than the target size.
*
* For example, we have two shuffles with the following partition size statistics:
* - shuffle 1 (5 partitions): [100 MiB, 20 MiB, 100 MiB, 10MiB, 30 MiB]
* - shuffle 2 (5 partitions): [10 MiB, 10 MiB, 70 MiB, 5 MiB, 5 MiB]
* Assuming the target size is 128 MiB, we will have 4 coalesced partitions, which are:
* - coalesced partition 0: shuffle partition 0 (size 110 MiB)
* - coalesced partition 1: shuffle partition 1 (size 30 MiB)
* - coalesced partition 2: shuffle partition 2 (size 170 MiB)
* - coalesced partition 3: shuffle partition 3 and 4 (size 50 MiB)
*
* @return A sequence of [[CoalescedPartitionSpec]]s. For example, if partitions [0, 1, 2, 3, 4]
* split at indices [0, 2, 3], the returned partition specs will be:
* CoalescedPartitionSpec(0, 2), CoalescedPartitionSpec(2, 3) and
* CoalescedPartitionSpec(3, 5).
*/
def coalescePartitions(
mapOutputStatistics: Array[MapOutputStatistics],
advisoryTargetSize: Long,
minNumPartitions: Int): Seq[ShufflePartitionSpec] = {
// If `minNumPartitions` is very large, it is possible that we need to use a value less than
// `advisoryTargetSize` as the target size of a coalesced task.
val totalPostShuffleInputSize = mapOutputStatistics.map(_.bytesByPartitionId.sum).sum
// The max at here is to make sure that when we have an empty table, we only have a single
// coalesced partition.
// There is no particular reason that we pick 16. We just need a number to prevent
// `maxTargetSize` from being set to 0.
val maxTargetSize = math.max(
math.ceil(totalPostShuffleInputSize / minNumPartitions.toDouble).toLong, 16)
val targetSize = math.min(maxTargetSize, advisoryTargetSize)
logInfo(s"advisory target size: $advisoryTargetSize, actual target size $targetSize.")
// Make sure these shuffles have the same number of partitions.
val distinctNumShufflePartitions =
mapOutputStatistics.map(stats => stats.bytesByPartitionId.length).distinct
// The reason that we are expecting a single value of the number of shuffle partitions
// is that when we add Exchanges, we set the number of shuffle partitions
// (i.e. map output partitions) using a static setting, which is the value of
// `spark.sql.shuffle.partitions`. Even if two input RDDs are having different
// number of partitions, they will have the same number of shuffle partitions
// (i.e. map output partitions).
assert(
distinctNumShufflePartitions.length == 1,
"There should be only one distinct value of the number of shuffle partitions " +
"among registered Exchange operators.")
val numPartitions = distinctNumShufflePartitions.head
val partitionSpecs = ArrayBuffer[CoalescedPartitionSpec]()
var latestSplitPoint = 0
var coalescedSize = 0L
var i = 0
while (i < numPartitions) {
      // We calculate the total size of the i-th shuffle partition across all shuffles.
var totalSizeOfCurrentPartition = 0L
var j = 0
while (j < mapOutputStatistics.length) {
totalSizeOfCurrentPartition += mapOutputStatistics(j).bytesByPartitionId(i)
j += 1
}
// If including the `totalSizeOfCurrentPartition` would exceed the target size, then start a
// new coalesced partition.
if (i > latestSplitPoint && coalescedSize + totalSizeOfCurrentPartition > targetSize) {
partitionSpecs += CoalescedPartitionSpec(latestSplitPoint, i)
latestSplitPoint = i
        // Reset the size of the new coalesced partition.
coalescedSize = totalSizeOfCurrentPartition
} else {
coalescedSize += totalSizeOfCurrentPartition
}
i += 1
}
partitionSpecs += CoalescedPartitionSpec(latestSplitPoint, numPartitions)
partitionSpecs
}
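  // A hypothetical invocation sketch (the names `stats1` and `stats2` are placeholders, not
  // part of this file): given map output statistics for two shuffles with the same partition
  // count,
  //   coalescePartitions(Array(stats1, stats2), advisoryTargetSize = 64L * 1024 * 1024,
  //     minNumPartitions = 1)
  // returns CoalescedPartitionSpec ranges that together cover [0, numPartitions).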
/**
   * Given a list of sizes, return an array of indices to split the list into multiple partitions,
* so that the size sum of each partition is close to the target size. Each index indicates the
* start of a partition.
*/
def splitSizeListByTargetSize(sizes: Seq[Long], targetSize: Long): Array[Int] = {
val partitionStartIndices = ArrayBuffer[Int]()
partitionStartIndices += 0
var i = 0
var currentPartitionSize = 0L
var lastPartitionSize = -1L
def tryMergePartitions() = {
// When we are going to start a new partition, it's possible that the current partition or
// the previous partition is very small and it's better to merge the current partition into
// the previous partition.
val shouldMergePartitions = lastPartitionSize > -1 &&
((currentPartitionSize + lastPartitionSize) < targetSize * MERGED_PARTITION_FACTOR ||
(currentPartitionSize < targetSize * SMALL_PARTITION_FACTOR ||
lastPartitionSize < targetSize * SMALL_PARTITION_FACTOR))
if (shouldMergePartitions) {
// We decide to merge the current partition into the previous one, so the start index of
// the current partition should be removed.
partitionStartIndices.remove(partitionStartIndices.length - 1)
lastPartitionSize += currentPartitionSize
} else {
lastPartitionSize = currentPartitionSize
}
}
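    // Worked example of the merge heuristic (hypothetical numbers): with targetSize = 100,
    // a previous partition of size 10 and a current partition of size 90 are merged, since
    // 10 < 100 * SMALL_PARTITION_FACTOR = 20 and the combined size 100 is below
    // 100 * MERGED_PARTITION_FACTOR = 120; partitions of size 90 and 80 stay separate, since
    // their combined size 170 is not below 120 and neither side is below 20.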
while (i < sizes.length) {
      // If including the next size in the current partition would exceed the target size, close
      // the current partition and start a new one.
if (i > 0 && currentPartitionSize + sizes(i) > targetSize) {
tryMergePartitions()
partitionStartIndices += i
currentPartitionSize = sizes(i)
} else {
currentPartitionSize += sizes(i)
}
i += 1
}
tryMergePartitions()
partitionStartIndices.toArray
}
}
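// The sketch below is not part of the original file; it is a minimal, hypothetical example
// (the object name `SplitSizeExample` and all values are made up) showing the split indices
// that splitSizeListByTargetSize produces for a hand-written size list. It assumes Spark SQL
// is on the classpath and that this object lives in the same package as ShufflePartitionsUtil.
object SplitSizeExample {
  def main(args: Array[String]): Unit = {
    // Sizes in bytes with a target of 100: the list splits at indices 0, 2 and 3,
    // giving partitions [60, 30] = 90, [80] = 80 and [50, 40] = 90.
    val sizes = Seq(60L, 30L, 80L, 50L, 40L)
    val indices = ShufflePartitionsUtil.splitSizeListByTargetSize(sizes, targetSize = 100L)
    println(indices.mkString(", ")) // expected output: 0, 2, 3
  }
}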
|
matthewfranglen/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/adaptive/ShufflePartitionsUtil.scala
|
Scala
|
mit
| 7,698
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.execution
import java.io.File
import java.net.URI
import org.apache.hadoop.fs.Path
import org.apache.parquet.format.converter.ParquetMetadataConverter.NO_FILTER
import org.apache.parquet.hadoop.ParquetFileReader
import org.scalatest.BeforeAndAfterEach
import org.apache.spark.SparkException
import org.apache.spark.sql.{AnalysisException, QueryTest, Row, SaveMode}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.{NoSuchPartitionException, TableAlreadyExistsException}
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.parser.ParseException
import org.apache.spark.sql.connector.catalog.CatalogManager
import org.apache.spark.sql.connector.catalog.SupportsNamespaces.PROP_OWNER
import org.apache.spark.sql.execution.command.{DDLSuite, DDLUtils}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.hive.HiveExternalCatalog
import org.apache.spark.sql.hive.HiveUtils.{CONVERT_METASTORE_ORC, CONVERT_METASTORE_PARQUET}
import org.apache.spark.sql.hive.orc.OrcFileOperator
import org.apache.spark.sql.hive.test.TestHiveSingleton
import org.apache.spark.sql.internal.{HiveSerDe, SQLConf}
import org.apache.spark.sql.internal.SQLConf.ORC_IMPLEMENTATION
import org.apache.spark.sql.internal.StaticSQLConf.CATALOG_IMPLEMENTATION
import org.apache.spark.sql.test.SQLTestUtils
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
// TODO(gatorsmile): combine HiveCatalogedDDLSuite and HiveDDLSuite
class HiveCatalogedDDLSuite extends DDLSuite with TestHiveSingleton with BeforeAndAfterEach {
override def afterEach(): Unit = {
try {
// drop all databases, tables and functions after each test
spark.sessionState.catalog.reset()
} finally {
super.afterEach()
}
}
protected override def generateTable(
catalog: SessionCatalog,
name: TableIdentifier,
isDataSource: Boolean,
partitionCols: Seq[String] = Seq("a", "b")): CatalogTable = {
val storage =
if (isDataSource) {
val serde = HiveSerDe.sourceToSerDe("parquet")
assert(serde.isDefined, "The default format is not Hive compatible")
CatalogStorageFormat(
locationUri = Some(catalog.defaultTablePath(name)),
inputFormat = serde.get.inputFormat,
outputFormat = serde.get.outputFormat,
serde = serde.get.serde,
compressed = false,
properties = Map.empty)
} else {
CatalogStorageFormat(
locationUri = Some(catalog.defaultTablePath(name)),
inputFormat = Some("org.apache.hadoop.mapred.SequenceFileInputFormat"),
outputFormat = Some("org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat"),
serde = Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"),
compressed = false,
properties = Map("serialization.format" -> "1"))
}
val metadata = new MetadataBuilder()
.putString("key", "value")
.build()
val schema = new StructType()
.add("col1", "int", nullable = true, metadata = metadata)
.add("col2", "string")
CatalogTable(
identifier = name,
tableType = CatalogTableType.EXTERNAL,
storage = storage,
schema = schema.copy(
fields = schema.fields ++ partitionCols.map(StructField(_, IntegerType))),
provider = if (isDataSource) Some("parquet") else Some("hive"),
partitionColumnNames = partitionCols,
createTime = 0L,
createVersion = org.apache.spark.SPARK_VERSION,
tracksPartitionsInCatalog = true)
}
protected override def normalizeCatalogTable(table: CatalogTable): CatalogTable = {
val nondeterministicProps = Set(
"CreateTime",
"transient_lastDdlTime",
"grantTime",
"lastUpdateTime",
"last_modified_by",
"last_modified_time",
"Owner:",
"COLUMN_STATS_ACCURATE",
      // The following are Hive-specific table parameters that we do not need to match exactly.
"numFiles",
"numRows",
"rawDataSize",
"totalSize",
"totalNumberFiles",
"maxFileSize",
"minFileSize"
)
table.copy(
createTime = 0L,
lastAccessTime = 0L,
owner = "",
properties = table.properties.filterKeys(!nondeterministicProps.contains(_)),
// View texts are checked separately
viewText = None
)
}
test("alter table: set location") {
testSetLocation(isDatasourceTable = false)
}
test("alter table: set properties") {
testSetProperties(isDatasourceTable = false)
}
test("alter table: unset properties") {
testUnsetProperties(isDatasourceTable = false)
}
test("alter table: set serde") {
testSetSerde(isDatasourceTable = false)
}
test("alter table: set serde partition") {
testSetSerdePartition(isDatasourceTable = false)
}
test("alter table: change column") {
testChangeColumn(isDatasourceTable = false)
}
test("alter table: rename partition") {
testRenamePartitions(isDatasourceTable = false)
}
test("alter table: drop partition") {
testDropPartitions(isDatasourceTable = false)
}
test("alter table: add partition") {
testAddPartitions(isDatasourceTable = false)
}
test("drop table") {
testDropTable(isDatasourceTable = false)
}
test("alter datasource table add columns - orc") {
testAddColumn("orc")
}
test("alter datasource table add columns - partitioned - orc") {
testAddColumnPartitioned("orc")
}
test("SPARK-22431: illegal nested type") {
val queries = Seq(
"CREATE TABLE t USING hive AS SELECT STRUCT('a' AS `$a`, 1 AS b) q",
"CREATE TABLE t(q STRUCT<`$a`:INT, col2:STRING>, i1 INT) USING hive",
"CREATE VIEW t AS SELECT STRUCT('a' AS `$a`, 1 AS b) q")
queries.foreach(query => {
val err = intercept[SparkException] {
spark.sql(query)
}.getMessage
assert(err.contains("Cannot recognize hive type string"))
})
withView("v") {
spark.sql("CREATE VIEW v AS SELECT STRUCT('a' AS `a`, 1 AS b) q")
checkAnswer(sql("SELECT q.`a`, q.b FROM v"), Row("a", 1) :: Nil)
val err = intercept[SparkException] {
spark.sql("ALTER VIEW v AS SELECT STRUCT('a' AS `$a`, 1 AS b) q")
}.getMessage
assert(err.contains("Cannot recognize hive type string"))
}
}
test("SPARK-22431: table with nested type") {
withTable("t", "x") {
spark.sql("CREATE TABLE t(q STRUCT<`$a`:INT, col2:STRING>, i1 INT) USING PARQUET")
checkAnswer(spark.table("t"), Nil)
spark.sql("CREATE TABLE x (q STRUCT<col1:INT, col2:STRING>, i1 INT)")
checkAnswer(spark.table("x"), Nil)
}
}
test("SPARK-22431: view with nested type") {
withView("v") {
spark.sql("CREATE VIEW v AS SELECT STRUCT('a' AS `a`, 1 AS b) q")
checkAnswer(spark.table("v"), Row(Row("a", 1)) :: Nil)
spark.sql("ALTER VIEW v AS SELECT STRUCT('a' AS `b`, 1 AS b) q1")
val df = spark.table("v")
assert("q1".equals(df.schema.fields(0).name))
checkAnswer(df, Row(Row("a", 1)) :: Nil)
}
}
test("SPARK-22431: alter table tests with nested types") {
withTable("t1", "t2", "t3") {
spark.sql("CREATE TABLE t1 (q STRUCT<col1:INT, col2:STRING>, i1 INT)")
spark.sql("ALTER TABLE t1 ADD COLUMNS (newcol1 STRUCT<`col1`:STRING, col2:Int>)")
val newcol = spark.sql("SELECT * FROM t1").schema.fields(2).name
assert("newcol1".equals(newcol))
spark.sql("CREATE TABLE t2(q STRUCT<`a`:INT, col2:STRING>, i1 INT) USING PARQUET")
spark.sql("ALTER TABLE t2 ADD COLUMNS (newcol1 STRUCT<`$col1`:STRING, col2:Int>)")
spark.sql("ALTER TABLE t2 ADD COLUMNS (newcol2 STRUCT<`col1`:STRING, col2:Int>)")
val df2 = spark.table("t2")
checkAnswer(df2, Nil)
assert("newcol1".equals(df2.schema.fields(2).name))
assert("newcol2".equals(df2.schema.fields(3).name))
spark.sql("CREATE TABLE t3(q STRUCT<`$a`:INT, col2:STRING>, i1 INT) USING PARQUET")
spark.sql("ALTER TABLE t3 ADD COLUMNS (newcol1 STRUCT<`$col1`:STRING, col2:Int>)")
spark.sql("ALTER TABLE t3 ADD COLUMNS (newcol2 STRUCT<`col1`:STRING, col2:Int>)")
val df3 = spark.table("t3")
checkAnswer(df3, Nil)
assert("newcol1".equals(df3.schema.fields(2).name))
assert("newcol2".equals(df3.schema.fields(3).name))
}
}
test("SPARK-22431: negative alter table tests with nested types") {
withTable("t1") {
spark.sql("CREATE TABLE t1 (q STRUCT<col1:INT, col2:STRING>, i1 INT) USING hive")
val err = intercept[SparkException] {
spark.sql("ALTER TABLE t1 ADD COLUMNS (newcol1 STRUCT<`$col1`:STRING, col2:Int>)")
}.getMessage
assert(err.contains("Cannot recognize hive type string:"))
}
}
test("SPARK-26630: table with old input format and without partitioned will use HadoopRDD") {
withTable("table_old", "table_ctas_old") {
sql(
"""
|CREATE TABLE table_old (col1 LONG, col2 STRING, col3 DOUBLE, col4 BOOLEAN)
|STORED AS
|INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat'
|OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
""".stripMargin)
sql(
"""
|INSERT INTO table_old
|VALUES (2147483648, 'AAA', 3.14, false), (2147483649, 'BBB', 3.142, true)
""".stripMargin)
checkAnswer(
sql("SELECT col1, col2, col3, col4 FROM table_old"),
Row(2147483648L, "AAA", 3.14, false) :: Row(2147483649L, "BBB", 3.142, true) :: Nil)
sql("CREATE TABLE table_ctas_old AS SELECT col1, col2, col3, col4 FROM table_old")
checkAnswer(
sql("SELECT col1, col2, col3, col4 from table_ctas_old"),
Row(2147483648L, "AAA", 3.14, false) :: Row(2147483649L, "BBB", 3.142, true) :: Nil)
}
}
test("SPARK-26630: table with old input format and partitioned will use HadoopRDD") {
withTable("table_pt_old", "table_ctas_pt_old") {
sql(
"""
|CREATE TABLE table_pt_old (col1 LONG, col2 STRING, col3 DOUBLE, col4 BOOLEAN)
|PARTITIONED BY (pt INT)
|STORED AS
|INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat'
|OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
""".stripMargin)
sql(
"""
|INSERT INTO table_pt_old PARTITION (pt = 1)
|VALUES (2147483648, 'AAA', 3.14, false), (2147483649, 'BBB', 3.142, true)
""".stripMargin)
checkAnswer(
sql("SELECT col1, col2, col3, col4 FROM table_pt_old WHERE pt = 1"),
Row(2147483648L, "AAA", 3.14, false) :: Row(2147483649L, "BBB", 3.142, true) :: Nil)
sql("CREATE TABLE table_ctas_pt_old AS SELECT col1, col2, col3, col4 FROM table_pt_old")
checkAnswer(
sql("SELECT col1, col2, col3, col4 from table_ctas_pt_old"),
Row(2147483648L, "AAA", 3.14, false) :: Row(2147483649L, "BBB", 3.142, true) :: Nil)
}
}
test("SPARK-26630: table with new input format and without partitioned will use NewHadoopRDD") {
withTable("table_new", "table_ctas_new") {
sql(
"""
|CREATE TABLE table_new (col1 LONG, col2 STRING, col3 DOUBLE, col4 BOOLEAN)
|STORED AS
|INPUTFORMAT 'org.apache.hadoop.mapreduce.lib.input.TextInputFormat'
|OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
""".stripMargin)
sql(
"""
|INSERT INTO table_new
|VALUES (2147483648, 'AAA', 3.14, false), (2147483649, 'BBB', 3.142, true)
""".stripMargin)
checkAnswer(
sql("SELECT col1, col2, col3, col4 FROM table_new"),
Row(2147483648L, "AAA", 3.14, false) :: Row(2147483649L, "BBB", 3.142, true) :: Nil)
sql("CREATE TABLE table_ctas_new AS SELECT col1, col2, col3, col4 FROM table_new")
checkAnswer(
sql("SELECT col1, col2, col3, col4 from table_ctas_new"),
Row(2147483648L, "AAA", 3.14, false) :: Row(2147483649L, "BBB", 3.142, true) :: Nil)
}
}
test("SPARK-26630: table with new input format and partitioned will use NewHadoopRDD") {
withTable("table_pt_new", "table_ctas_pt_new") {
sql(
"""
|CREATE TABLE table_pt_new (col1 LONG, col2 STRING, col3 DOUBLE, col4 BOOLEAN)
|PARTITIONED BY (pt INT)
|STORED AS
|INPUTFORMAT 'org.apache.hadoop.mapreduce.lib.input.TextInputFormat'
|OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
""".stripMargin)
sql(
"""
|INSERT INTO table_pt_new PARTITION (pt = 1)
|VALUES (2147483648, 'AAA', 3.14, false), (2147483649, 'BBB', 3.142, true)
""".stripMargin)
checkAnswer(
sql("SELECT col1, col2, col3, col4 FROM table_pt_new WHERE pt = 1"),
Row(2147483648L, "AAA", 3.14, false) :: Row(2147483649L, "BBB", 3.142, true) :: Nil)
sql("CREATE TABLE table_ctas_pt_new AS SELECT col1, col2, col3, col4 FROM table_pt_new")
checkAnswer(
sql("SELECT col1, col2, col3, col4 from table_ctas_pt_new"),
Row(2147483648L, "AAA", 3.14, false) :: Row(2147483649L, "BBB", 3.142, true) :: Nil)
}
}
test("Create Table LIKE USING Hive built-in ORC in Hive catalog") {
val catalog = spark.sessionState.catalog
withTable("s", "t") {
sql("CREATE TABLE s(a INT, b INT) USING parquet")
val source = catalog.getTableMetadata(TableIdentifier("s"))
assert(source.provider == Some("parquet"))
sql("CREATE TABLE t LIKE s USING org.apache.spark.sql.hive.orc")
val table = catalog.getTableMetadata(TableIdentifier("t"))
assert(table.provider == Some("org.apache.spark.sql.hive.orc"))
}
}
test("Database Ownership") {
val catalog = spark.sessionState.catalog
try {
val db = "spark_29425_1"
sql(s"CREATE DATABASE $db")
assert(sql(s"DESCRIBE DATABASE EXTENDED $db")
.where("database_description_item='Owner'")
.collect().head.getString(1) === Utils.getCurrentUserName())
sql(s"ALTER DATABASE $db SET DBPROPERTIES('abc'='xyz')")
assert(sql(s"DESCRIBE DATABASE EXTENDED $db")
.where("database_description_item='Owner'")
.collect().head.getString(1) === Utils.getCurrentUserName())
} finally {
catalog.reset()
}
}
test("Table Ownership") {
val catalog = spark.sessionState.catalog
try {
sql(s"CREATE TABLE spark_30019(k int)")
assert(sql(s"DESCRIBE TABLE EXTENDED spark_30019").where("col_name='Owner'")
.collect().head.getString(1) === Utils.getCurrentUserName())
} finally {
catalog.reset()
}
}
}
class HiveDDLSuite
extends QueryTest with SQLTestUtils with TestHiveSingleton with BeforeAndAfterEach {
import testImplicits._
val hiveFormats = Seq("PARQUET", "ORC", "TEXTFILE", "SEQUENCEFILE", "RCFILE", "AVRO")
override def afterEach(): Unit = {
try {
// drop all databases, tables and functions after each test
spark.sessionState.catalog.reset()
} finally {
super.afterEach()
}
}
  // Check whether the directory that stores the table's data exists.
private def tableDirectoryExists(
tableIdentifier: TableIdentifier,
dbPath: Option[String] = None): Boolean = {
val expectedTablePath =
if (dbPath.isEmpty) {
hiveContext.sessionState.catalog.defaultTablePath(tableIdentifier)
} else {
new Path(new Path(dbPath.get), tableIdentifier.table).toUri
}
val filesystemPath = new Path(expectedTablePath.toString)
val fs = filesystemPath.getFileSystem(spark.sessionState.newHadoopConf())
fs.exists(filesystemPath)
}
test("drop tables") {
withTable("tab1") {
val tabName = "tab1"
assert(!tableDirectoryExists(TableIdentifier(tabName)))
sql(s"CREATE TABLE $tabName(c1 int)")
assert(tableDirectoryExists(TableIdentifier(tabName)))
sql(s"DROP TABLE $tabName")
assert(!tableDirectoryExists(TableIdentifier(tabName)))
sql(s"DROP TABLE IF EXISTS $tabName")
sql(s"DROP VIEW IF EXISTS $tabName")
}
}
test("create a hive table without schema") {
import testImplicits._
withTempPath { tempDir =>
withTable("tab1", "tab2") {
(("a", "b") :: Nil).toDF().write.json(tempDir.getCanonicalPath)
var e = intercept[AnalysisException] { sql("CREATE TABLE tab1 USING hive") }.getMessage
assert(e.contains("Unable to infer the schema. The schema specification is required to " +
"create the table `default`.`tab1`"))
e = intercept[AnalysisException] {
sql(s"CREATE TABLE tab2 USING hive location '${tempDir.getCanonicalPath}'")
}.getMessage
assert(e.contains("Unable to infer the schema. The schema specification is required to " +
"create the table `default`.`tab2`"))
}
}
}
test("drop external tables in default database") {
withTempDir { tmpDir =>
val tabName = "tab1"
withTable(tabName) {
assert(tmpDir.listFiles.isEmpty)
sql(
s"""
|create table $tabName
|stored as parquet
|location '${tmpDir.toURI}'
|as select 1, '3'
""".stripMargin)
val hiveTable =
spark.sessionState.catalog.getTableMetadata(TableIdentifier(tabName, Some("default")))
assert(hiveTable.tableType == CatalogTableType.EXTERNAL)
assert(tmpDir.listFiles.nonEmpty)
sql(s"DROP TABLE $tabName")
assert(tmpDir.listFiles.nonEmpty)
}
}
}
test("drop external data source table in default database") {
withTempDir { tmpDir =>
val tabName = "tab1"
withTable(tabName) {
assert(tmpDir.listFiles.isEmpty)
withSQLConf(SQLConf.PARQUET_WRITE_LEGACY_FORMAT.key -> "true") {
Seq(1 -> "a").toDF("i", "j")
.write
.mode(SaveMode.Overwrite)
.format("parquet")
.option("path", tmpDir.toString)
.saveAsTable(tabName)
}
val hiveTable =
spark.sessionState.catalog.getTableMetadata(TableIdentifier(tabName, Some("default")))
        // This data source table is an external table
assert(hiveTable.tableType == CatalogTableType.EXTERNAL)
assert(tmpDir.listFiles.nonEmpty)
sql(s"DROP TABLE $tabName")
// The data are not deleted since the table type is EXTERNAL
assert(tmpDir.listFiles.nonEmpty)
}
}
}
test("create table and view with comment") {
val catalog = spark.sessionState.catalog
val tabName = "tab1"
withTable(tabName) {
sql(s"CREATE TABLE $tabName(c1 int) COMMENT 'BLABLA'")
val viewName = "view1"
withView(viewName) {
sql(s"CREATE VIEW $viewName COMMENT 'no comment' AS SELECT * FROM $tabName")
val tableMetadata = catalog.getTableMetadata(TableIdentifier(tabName, Some("default")))
val viewMetadata = catalog.getTableMetadata(TableIdentifier(viewName, Some("default")))
assert(tableMetadata.comment == Option("BLABLA"))
assert(viewMetadata.comment == Option("no comment"))
        // Ensure that `comment` is removed from the table properties
assert(tableMetadata.properties.get("comment").isEmpty)
assert(viewMetadata.properties.get("comment").isEmpty)
}
}
}
test("create Hive-serde table and view with unicode columns and comment") {
val catalog = spark.sessionState.catalog
val tabName = "tab1"
val viewName = "view1"
// scalastyle:off
    // Non-ASCII characters are not allowed in the source code, so we disable scalastyle here.
val colName1 = "和"
val colName2 = "尼"
val comment = "庙"
// scalastyle:on
withTable(tabName) {
sql(s"""
|CREATE TABLE $tabName(`$colName1` int COMMENT '$comment')
|COMMENT '$comment'
|PARTITIONED BY (`$colName2` int)
""".stripMargin)
sql(s"INSERT OVERWRITE TABLE $tabName partition (`$colName2`=2) SELECT 1")
withView(viewName) {
sql(
s"""
|CREATE VIEW $viewName(`$colName1` COMMENT '$comment', `$colName2`)
|COMMENT '$comment'
|AS SELECT `$colName1`, `$colName2` FROM $tabName
""".stripMargin)
val tableMetadata = catalog.getTableMetadata(TableIdentifier(tabName, Some("default")))
val viewMetadata = catalog.getTableMetadata(TableIdentifier(viewName, Some("default")))
assert(tableMetadata.comment == Option(comment))
assert(viewMetadata.comment == Option(comment))
assert(tableMetadata.schema.fields.length == 2 && viewMetadata.schema.fields.length == 2)
val column1InTable = tableMetadata.schema.fields.head
val column1InView = viewMetadata.schema.fields.head
assert(column1InTable.name == colName1 && column1InView.name == colName1)
assert(column1InTable.getComment() == Option(comment))
assert(column1InView.getComment() == Option(comment))
assert(tableMetadata.schema.fields(1).name == colName2 &&
viewMetadata.schema.fields(1).name == colName2)
checkAnswer(sql(s"SELECT `$colName1`, `$colName2` FROM $tabName"), Row(1, 2) :: Nil)
checkAnswer(sql(s"SELECT `$colName1`, `$colName2` FROM $viewName"), Row(1, 2) :: Nil)
}
}
}
test("create table: partition column names exist in table definition") {
val e = intercept[AnalysisException] {
sql("CREATE TABLE tbl(a int) PARTITIONED BY (a string)")
}
assert(e.message == "Found duplicate column(s) in the table definition of `default`.`tbl`: `a`")
}
test("create partitioned table without specifying data type for the partition columns") {
val e = intercept[AnalysisException] {
sql("CREATE TABLE tbl(a int) PARTITIONED BY (b) STORED AS parquet")
}
assert(e.message.contains("Must specify a data type for each partition column while creating " +
"Hive partitioned table."))
}
test("add/drop partition with location - managed table") {
val tab = "tab_with_partitions"
withTempDir { tmpDir =>
val basePath = new File(tmpDir.getCanonicalPath)
val part1Path = new File(basePath + "/part1")
val part2Path = new File(basePath + "/part2")
val dirSet = part1Path :: part2Path :: Nil
      // Before data insertion, all the directories are empty
assert(dirSet.forall(dir => dir.listFiles == null || dir.listFiles.isEmpty))
withTable(tab) {
sql(
s"""
|CREATE TABLE $tab (key INT, value STRING)
|PARTITIONED BY (ds STRING, hr STRING)
""".stripMargin)
sql(
s"""
|ALTER TABLE $tab ADD
|PARTITION (ds='2008-04-08', hr=11) LOCATION '${part1Path.toURI}'
|PARTITION (ds='2008-04-08', hr=12) LOCATION '${part2Path.toURI}'
""".stripMargin)
assert(dirSet.forall(dir => dir.listFiles == null || dir.listFiles.isEmpty))
sql(s"INSERT OVERWRITE TABLE $tab partition (ds='2008-04-08', hr=11) SELECT 1, 'a'")
sql(s"INSERT OVERWRITE TABLE $tab partition (ds='2008-04-08', hr=12) SELECT 2, 'b'")
// add partition will not delete the data
assert(dirSet.forall(dir => dir.listFiles.nonEmpty))
checkAnswer(
spark.table(tab),
Row(1, "a", "2008-04-08", "11") :: Row(2, "b", "2008-04-08", "12") :: Nil
)
sql(s"ALTER TABLE $tab DROP PARTITION (ds='2008-04-08', hr=11)")
// drop partition will delete the data
assert(part1Path.listFiles == null || part1Path.listFiles.isEmpty)
assert(part2Path.listFiles.nonEmpty)
sql(s"DROP TABLE $tab")
// drop table will delete the data of the managed table
assert(dirSet.forall(dir => dir.listFiles == null || dir.listFiles.isEmpty))
}
}
}
test("SPARK-19129: drop partition with a empty string will drop the whole table") {
val df = spark.createDataFrame(Seq((0, "a"), (1, "b"))).toDF("partCol1", "name")
df.write.mode("overwrite").partitionBy("partCol1").saveAsTable("partitionedTable")
val e = intercept[AnalysisException] {
spark.sql("alter table partitionedTable drop partition(partCol1='')")
}.getMessage
assert(e.contains("Partition spec is invalid. The spec ([partCol1=]) contains an empty " +
"partition column value"))
}
test("add/drop partitions - external table") {
val catalog = spark.sessionState.catalog
withTempDir { tmpDir =>
val basePath = tmpDir.getCanonicalPath
val partitionPath_1stCol_part1 = new File(basePath + "/ds=2008-04-08")
val partitionPath_1stCol_part2 = new File(basePath + "/ds=2008-04-09")
val partitionPath_part1 = new File(basePath + "/ds=2008-04-08/hr=11")
val partitionPath_part2 = new File(basePath + "/ds=2008-04-09/hr=11")
val partitionPath_part3 = new File(basePath + "/ds=2008-04-08/hr=12")
val partitionPath_part4 = new File(basePath + "/ds=2008-04-09/hr=12")
val dirSet =
tmpDir :: partitionPath_1stCol_part1 :: partitionPath_1stCol_part2 ::
partitionPath_part1 :: partitionPath_part2 :: partitionPath_part3 ::
partitionPath_part4 :: Nil
val externalTab = "extTable_with_partitions"
withTable(externalTab) {
assert(tmpDir.listFiles.isEmpty)
sql(
s"""
|CREATE EXTERNAL TABLE $externalTab (key INT, value STRING)
|PARTITIONED BY (ds STRING, hr STRING)
|LOCATION '${tmpDir.toURI}'
""".stripMargin)
        // Before data insertion, all the directories are empty
assert(dirSet.forall(dir => dir.listFiles == null || dir.listFiles.isEmpty))
for (ds <- Seq("2008-04-08", "2008-04-09"); hr <- Seq("11", "12")) {
sql(
s"""
|INSERT OVERWRITE TABLE $externalTab
|partition (ds='$ds',hr='$hr')
|SELECT 1, 'a'
""".stripMargin)
}
val hiveTable = catalog.getTableMetadata(TableIdentifier(externalTab, Some("default")))
assert(hiveTable.tableType == CatalogTableType.EXTERNAL)
        // After data insertion, all the directories are non-empty
assert(dirSet.forall(dir => dir.listFiles.nonEmpty))
val message = intercept[AnalysisException] {
sql(s"ALTER TABLE $externalTab DROP PARTITION (ds='2008-04-09', unknownCol='12')")
}
assert(message.getMessage.contains("unknownCol is not a valid partition column in table " +
"`default`.`exttable_with_partitions`"))
sql(
s"""
|ALTER TABLE $externalTab DROP PARTITION (ds='2008-04-08'),
|PARTITION (hr='12')
""".stripMargin)
assert(catalog.listPartitions(TableIdentifier(externalTab)).map(_.spec).toSet ==
Set(Map("ds" -> "2008-04-09", "hr" -> "11")))
        // drop partition will not delete the data of an external table
assert(dirSet.forall(dir => dir.listFiles.nonEmpty))
sql(
s"""
|ALTER TABLE $externalTab ADD PARTITION (ds='2008-04-08', hr='12')
|PARTITION (ds='2008-04-08', hr=11)
""".stripMargin)
assert(catalog.listPartitions(TableIdentifier(externalTab)).map(_.spec).toSet ==
Set(Map("ds" -> "2008-04-08", "hr" -> "11"),
Map("ds" -> "2008-04-08", "hr" -> "12"),
Map("ds" -> "2008-04-09", "hr" -> "11")))
// add partition will not delete the data
assert(dirSet.forall(dir => dir.listFiles.nonEmpty))
sql(s"DROP TABLE $externalTab")
        // drop table will not delete the data of an external table
assert(dirSet.forall(dir => dir.listFiles.nonEmpty))
}
}
}
test("drop views") {
withTable("tab1") {
val tabName = "tab1"
spark.range(10).write.saveAsTable("tab1")
withView("view1") {
val viewName = "view1"
assert(tableDirectoryExists(TableIdentifier(tabName)))
assert(!tableDirectoryExists(TableIdentifier(viewName)))
sql(s"CREATE VIEW $viewName AS SELECT * FROM tab1")
assert(tableDirectoryExists(TableIdentifier(tabName)))
assert(!tableDirectoryExists(TableIdentifier(viewName)))
sql(s"DROP VIEW $viewName")
assert(tableDirectoryExists(TableIdentifier(tabName)))
sql(s"DROP VIEW IF EXISTS $viewName")
}
}
}
test("alter views - rename") {
val tabName = "tab1"
withTable(tabName) {
spark.range(10).write.saveAsTable(tabName)
val oldViewName = "view1"
val newViewName = "view2"
withView(oldViewName, newViewName) {
val catalog = spark.sessionState.catalog
sql(s"CREATE VIEW $oldViewName AS SELECT * FROM $tabName")
assert(catalog.tableExists(TableIdentifier(oldViewName)))
assert(!catalog.tableExists(TableIdentifier(newViewName)))
sql(s"ALTER VIEW $oldViewName RENAME TO $newViewName")
assert(!catalog.tableExists(TableIdentifier(oldViewName)))
assert(catalog.tableExists(TableIdentifier(newViewName)))
}
}
}
test("alter views - set/unset tblproperties") {
val tabName = "tab1"
withTable(tabName) {
spark.range(10).write.saveAsTable(tabName)
val viewName = "view1"
withView(viewName) {
def checkProperties(expected: Map[String, String]): Boolean = {
val properties = spark.sessionState.catalog.getTableMetadata(TableIdentifier(viewName))
.properties
properties.filterNot { case (key, value) =>
Seq("transient_lastDdlTime", CatalogTable.VIEW_DEFAULT_DATABASE).contains(key) ||
key.startsWith(CatalogTable.VIEW_QUERY_OUTPUT_PREFIX)
} == expected
}
sql(s"CREATE VIEW $viewName AS SELECT * FROM $tabName")
checkProperties(Map())
sql(s"ALTER VIEW $viewName SET TBLPROPERTIES ('p' = 'an')")
checkProperties(Map("p" -> "an"))
// no exception or message will be issued if we set it again
sql(s"ALTER VIEW $viewName SET TBLPROPERTIES ('p' = 'an')")
checkProperties(Map("p" -> "an"))
// the value will be updated if we set the same key to a different value
sql(s"ALTER VIEW $viewName SET TBLPROPERTIES ('p' = 'b')")
checkProperties(Map("p" -> "b"))
sql(s"ALTER VIEW $viewName UNSET TBLPROPERTIES ('p')")
checkProperties(Map())
val message = intercept[AnalysisException] {
sql(s"ALTER VIEW $viewName UNSET TBLPROPERTIES ('p')")
}.getMessage
assert(message.contains(
"Attempted to unset non-existent property 'p' in table '`default`.`view1`'"))
}
}
}
private def assertErrorForAlterTableOnView(sqlText: String): Unit = {
val message = intercept[AnalysisException](sql(sqlText)).getMessage
assert(message.contains("Cannot alter a view with ALTER TABLE. Please use ALTER VIEW instead"))
}
private def assertErrorForAlterViewOnTable(sqlText: String): Unit = {
val message = intercept[AnalysisException](sql(sqlText)).getMessage
assert(message.contains("Cannot alter a table with ALTER VIEW. Please use ALTER TABLE instead"))
}
test("create table - SET TBLPROPERTIES EXTERNAL to TRUE") {
val tabName = "tab1"
withTable(tabName) {
val message = intercept[AnalysisException] {
sql(s"CREATE TABLE $tabName (height INT, length INT) TBLPROPERTIES('EXTERNAL'='TRUE')")
}.getMessage
assert(message.contains("Cannot set or change the preserved property key: 'EXTERNAL'"))
}
}
test("alter table - SET TBLPROPERTIES EXTERNAL to TRUE") {
val tabName = "tab1"
withTable(tabName) {
val catalog = spark.sessionState.catalog
sql(s"CREATE TABLE $tabName (height INT, length INT)")
assert(
catalog.getTableMetadata(TableIdentifier(tabName)).tableType == CatalogTableType.MANAGED)
val message = intercept[AnalysisException] {
sql(s"ALTER TABLE $tabName SET TBLPROPERTIES ('EXTERNAL' = 'TRUE')")
}.getMessage
assert(message.contains("Cannot set or change the preserved property key: 'EXTERNAL'"))
// The table type is not changed to external
assert(
catalog.getTableMetadata(TableIdentifier(tabName)).tableType == CatalogTableType.MANAGED)
      // Table property keys are case sensitive, so setting the lowercase key 'external' is allowed
sql(s"ALTER TABLE $tabName SET TBLPROPERTIES ('external' = 'TRUE')")
// The table type is not changed to external
assert(
catalog.getTableMetadata(TableIdentifier(tabName)).tableType == CatalogTableType.MANAGED)
}
}
test("alter views and alter table - misuse") {
val tabName = "tab1"
withTable(tabName) {
spark.range(10).write.saveAsTable(tabName)
val oldViewName = "view1"
val newViewName = "view2"
withView(oldViewName, newViewName) {
val catalog = spark.sessionState.catalog
sql(s"CREATE VIEW $oldViewName AS SELECT * FROM $tabName")
assert(catalog.tableExists(TableIdentifier(tabName)))
assert(catalog.tableExists(TableIdentifier(oldViewName)))
assert(!catalog.tableExists(TableIdentifier(newViewName)))
assertErrorForAlterViewOnTable(s"ALTER VIEW $tabName RENAME TO $newViewName")
assertErrorForAlterTableOnView(s"ALTER TABLE $oldViewName RENAME TO $newViewName")
assertErrorForAlterViewOnTable(s"ALTER VIEW $tabName SET TBLPROPERTIES ('p' = 'an')")
assertErrorForAlterTableOnView(s"ALTER TABLE $oldViewName SET TBLPROPERTIES ('p' = 'an')")
assertErrorForAlterViewOnTable(s"ALTER VIEW $tabName UNSET TBLPROPERTIES ('p')")
assertErrorForAlterTableOnView(s"ALTER TABLE $oldViewName UNSET TBLPROPERTIES ('p')")
assertErrorForAlterTableOnView(s"ALTER TABLE $oldViewName SET LOCATION '/path/to/home'")
assertErrorForAlterTableOnView(s"ALTER TABLE $oldViewName SET SERDE 'whatever'")
assertErrorForAlterTableOnView(s"ALTER TABLE $oldViewName SET SERDEPROPERTIES ('x' = 'y')")
assertErrorForAlterTableOnView(
s"ALTER TABLE $oldViewName PARTITION (a=1, b=2) SET SERDEPROPERTIES ('x' = 'y')")
assertErrorForAlterTableOnView(
s"ALTER TABLE $oldViewName ADD IF NOT EXISTS PARTITION (a='4', b='8')")
assertErrorForAlterTableOnView(s"ALTER TABLE $oldViewName DROP IF EXISTS PARTITION (a='2')")
assertErrorForAlterTableOnView(s"ALTER TABLE $oldViewName RECOVER PARTITIONS")
assertErrorForAlterTableOnView(
s"ALTER TABLE $oldViewName PARTITION (a='1') RENAME TO PARTITION (a='100')")
assert(catalog.tableExists(TableIdentifier(tabName)))
assert(catalog.tableExists(TableIdentifier(oldViewName)))
assert(!catalog.tableExists(TableIdentifier(newViewName)))
}
}
}
test("Insert overwrite Hive table should output correct schema") {
withSQLConf(CONVERT_METASTORE_PARQUET.key -> "false") {
withTable("tbl", "tbl2") {
withView("view1") {
spark.sql("CREATE TABLE tbl(id long)")
spark.sql("INSERT OVERWRITE TABLE tbl VALUES 4")
spark.sql("CREATE VIEW view1 AS SELECT id FROM tbl")
withTempPath { path =>
sql(
s"""
|CREATE TABLE tbl2(ID long) USING hive
|OPTIONS(fileFormat 'parquet')
|LOCATION '${path.toURI}'
""".stripMargin)
spark.sql("INSERT OVERWRITE TABLE tbl2 SELECT ID FROM view1")
val expectedSchema = StructType(Seq(StructField("ID", LongType, true)))
assert(spark.read.parquet(path.toString).schema == expectedSchema)
checkAnswer(spark.table("tbl2"), Seq(Row(4)))
}
}
}
}
}
test("Create Hive table as select should output correct schema") {
withSQLConf(CONVERT_METASTORE_PARQUET.key -> "false") {
withTable("tbl", "tbl2") {
withView("view1") {
spark.sql("CREATE TABLE tbl(id long)")
spark.sql("INSERT OVERWRITE TABLE tbl VALUES 4")
spark.sql("CREATE VIEW view1 AS SELECT id FROM tbl")
withTempPath { path =>
sql(
s"""
|CREATE TABLE tbl2 USING hive
|OPTIONS(fileFormat 'parquet')
|LOCATION '${path.toURI}'
|AS SELECT ID FROM view1
""".stripMargin)
val expectedSchema = StructType(Seq(StructField("ID", LongType, true)))
assert(spark.read.parquet(path.toString).schema == expectedSchema)
checkAnswer(spark.table("tbl2"), Seq(Row(4)))
}
}
}
}
}
test("SPARK-25313 Insert overwrite directory should output correct schema") {
withSQLConf(CONVERT_METASTORE_PARQUET.key -> "false") {
withTable("tbl") {
withView("view1") {
spark.sql("CREATE TABLE tbl(id long)")
spark.sql("INSERT OVERWRITE TABLE tbl VALUES 4")
spark.sql("CREATE VIEW view1 AS SELECT id FROM tbl")
withTempPath { path =>
spark.sql(s"INSERT OVERWRITE LOCAL DIRECTORY '${path.getCanonicalPath}' " +
"STORED AS PARQUET SELECT ID FROM view1")
val expectedSchema = StructType(Seq(StructField("ID", LongType, true)))
assert(spark.read.parquet(path.toString).schema == expectedSchema)
checkAnswer(spark.read.parquet(path.toString), Seq(Row(4)))
}
}
}
}
}
test("alter table partition - storage information") {
sql("CREATE TABLE boxes (height INT, length INT) PARTITIONED BY (width INT)")
sql("INSERT OVERWRITE TABLE boxes PARTITION (width=4) SELECT 4, 4")
val catalog = spark.sessionState.catalog
val expectedSerde = "com.sparkbricks.serde.ColumnarSerDe"
val expectedSerdeProps = Map("compress" -> "true")
val expectedSerdePropsString =
expectedSerdeProps.map { case (k, v) => s"'$k'='$v'" }.mkString(", ")
val oldPart = catalog.getPartition(TableIdentifier("boxes"), Map("width" -> "4"))
assert(oldPart.storage.serde != Some(expectedSerde), "bad test: serde was already set")
assert(oldPart.storage.properties.filterKeys(expectedSerdeProps.contains) !=
expectedSerdeProps, "bad test: serde properties were already set")
sql(s"""ALTER TABLE boxes PARTITION (width=4)
| SET SERDE '$expectedSerde'
| WITH SERDEPROPERTIES ($expectedSerdePropsString)
|""".stripMargin)
val newPart = catalog.getPartition(TableIdentifier("boxes"), Map("width" -> "4"))
assert(newPart.storage.serde == Some(expectedSerde))
assert(newPart.storage.properties.filterKeys(expectedSerdeProps.contains) ==
expectedSerdeProps)
}
test("MSCK REPAIR RABLE") {
val catalog = spark.sessionState.catalog
val tableIdent = TableIdentifier("tab1")
sql("CREATE TABLE tab1 (height INT, length INT) PARTITIONED BY (a INT, b INT)")
val part1 = Map("a" -> "1", "b" -> "5")
val part2 = Map("a" -> "2", "b" -> "6")
val root = new Path(catalog.getTableMetadata(tableIdent).location)
val fs = root.getFileSystem(spark.sessionState.newHadoopConf())
// valid
fs.mkdirs(new Path(new Path(root, "a=1"), "b=5"))
fs.createNewFile(new Path(new Path(root, "a=1/b=5"), "a.csv")) // file
fs.createNewFile(new Path(new Path(root, "a=1/b=5"), "_SUCCESS")) // file
fs.mkdirs(new Path(new Path(root, "A=2"), "B=6"))
fs.createNewFile(new Path(new Path(root, "A=2/B=6"), "b.csv")) // file
fs.createNewFile(new Path(new Path(root, "A=2/B=6"), "c.csv")) // file
fs.createNewFile(new Path(new Path(root, "A=2/B=6"), ".hiddenFile")) // file
fs.mkdirs(new Path(new Path(root, "A=2/B=6"), "_temporary"))
// invalid
fs.mkdirs(new Path(new Path(root, "a"), "b")) // bad name
fs.mkdirs(new Path(new Path(root, "b=1"), "a=1")) // wrong order
fs.mkdirs(new Path(root, "a=4")) // not enough columns
fs.createNewFile(new Path(new Path(root, "a=1"), "b=4")) // file
fs.createNewFile(new Path(new Path(root, "a=1"), "_SUCCESS")) // _SUCCESS
fs.mkdirs(new Path(new Path(root, "a=1"), "_temporary")) // _temporary
fs.mkdirs(new Path(new Path(root, "a=1"), ".b=4")) // start with .
try {
sql("MSCK REPAIR TABLE tab1")
assert(catalog.listPartitions(tableIdent).map(_.spec).toSet ==
Set(part1, part2))
assert(catalog.getPartition(tableIdent, part1).parameters("numFiles") == "1")
assert(catalog.getPartition(tableIdent, part2).parameters("numFiles") == "2")
} finally {
fs.delete(root, true)
}
}
test("drop table using drop view") {
withTable("tab1") {
sql("CREATE TABLE tab1(c1 int)")
val message = intercept[AnalysisException] {
sql("DROP VIEW tab1")
}.getMessage
assert(message.contains("Cannot drop a table with DROP VIEW. Please use DROP TABLE instead"))
}
}
test("drop view using drop table") {
withTable("tab1") {
spark.range(10).write.saveAsTable("tab1")
withView("view1") {
sql("CREATE VIEW view1 AS SELECT * FROM tab1")
val message = intercept[AnalysisException] {
sql("DROP TABLE view1")
}.getMessage
assert(message.contains("Cannot drop a view with DROP TABLE. Please use DROP VIEW instead"))
}
}
}
test("create view with mismatched schema") {
withTable("tab1") {
spark.range(10).write.saveAsTable("tab1")
withView("view1") {
val e = intercept[AnalysisException] {
sql("CREATE VIEW view1 (col1, col3) AS SELECT * FROM tab1")
}.getMessage
assert(e.contains("the SELECT clause (num: `1`) does not match")
&& e.contains("CREATE VIEW (num: `2`)"))
}
}
}
test("create view with specified schema") {
withView("view1") {
sql("CREATE VIEW view1 (col1, col2) AS SELECT 1, 2")
checkAnswer(
sql("SELECT * FROM view1"),
Row(1, 2) :: Nil
)
}
}
test("desc table for Hive table - partitioned table") {
withTable("tbl") {
sql("CREATE TABLE tbl(a int) PARTITIONED BY (b int)")
assert(sql("DESC tbl").collect().containsSlice(
Seq(
Row("a", "int", null),
Row("b", "int", null),
Row("# Partition Information", "", ""),
Row("# col_name", "data_type", "comment"),
Row("b", "int", null)
)
))
}
}
test("desc table for Hive table - bucketed + sorted table") {
withTable("tbl") {
sql(
s"""
|CREATE TABLE tbl (id int, name string)
|CLUSTERED BY(id)
|SORTED BY(id, name) INTO 1024 BUCKETS
|PARTITIONED BY (ds string)
""".stripMargin)
val x = sql("DESC FORMATTED tbl").collect()
assert(x.containsSlice(
Seq(
Row("Num Buckets", "1024", ""),
Row("Bucket Columns", "[`id`]", ""),
Row("Sort Columns", "[`id`, `name`]", "")
)
))
}
}
test("desc table for data source table using Hive Metastore") {
assume(spark.sparkContext.conf.get(CATALOG_IMPLEMENTATION) == "hive")
val tabName = "tab1"
withTable(tabName) {
sql(s"CREATE TABLE $tabName(a int comment 'test') USING parquet ")
checkAnswer(
sql(s"DESC $tabName").select("col_name", "data_type", "comment"),
Row("a", "int", "test") :: Nil
)
}
}
private def createDatabaseWithLocation(tmpDir: File, dirExists: Boolean): Unit = {
val catalog = spark.sessionState.catalog
val dbName = "db1"
val tabName = "tab1"
val fs = new Path(tmpDir.toString).getFileSystem(spark.sessionState.newHadoopConf())
withTable(tabName) {
if (dirExists) {
assert(tmpDir.listFiles.isEmpty)
} else {
assert(!fs.exists(new Path(tmpDir.toString)))
}
sql(s"CREATE DATABASE $dbName Location '${tmpDir.toURI.getPath.stripSuffix("/")}'")
val db1 = catalog.getDatabaseMetadata(dbName)
val dbPath = new URI(tmpDir.toURI.toString.stripSuffix("/"))
assert(db1.copy(properties = db1.properties -- Seq(PROP_OWNER)) ===
CatalogDatabase(dbName, "", dbPath, Map.empty))
sql("USE db1")
sql(s"CREATE TABLE $tabName as SELECT 1")
assert(tableDirectoryExists(TableIdentifier(tabName), Option(tmpDir.toString)))
assert(tmpDir.listFiles.nonEmpty)
sql(s"DROP TABLE $tabName")
assert(tmpDir.listFiles.isEmpty)
sql("USE default")
sql(s"DROP DATABASE $dbName")
assert(!fs.exists(new Path(tmpDir.toString)))
}
}
test("create/drop database - location without pre-created directory") {
withTempPath { tmpDir =>
createDatabaseWithLocation(tmpDir, dirExists = false)
}
}
test("create/drop database - location with pre-created directory") {
withTempDir { tmpDir =>
createDatabaseWithLocation(tmpDir, dirExists = true)
}
}
private def dropDatabase(cascade: Boolean, tableExists: Boolean): Unit = {
val dbName = "db1"
val dbPath = new Path(spark.sessionState.conf.warehousePath)
val fs = dbPath.getFileSystem(spark.sessionState.newHadoopConf())
sql(s"CREATE DATABASE $dbName")
val catalog = spark.sessionState.catalog
val expectedDBLocation = s"file:${dbPath.toUri.getPath.stripSuffix("/")}/$dbName.db"
val expectedDBUri = CatalogUtils.stringToURI(expectedDBLocation)
val db1 = catalog.getDatabaseMetadata(dbName)
assert(db1.copy(properties = db1.properties -- Seq(PROP_OWNER)) ==
CatalogDatabase(
dbName,
"",
expectedDBUri,
Map.empty))
// the database directory was created
assert(fs.exists(dbPath) && fs.isDirectory(dbPath))
sql(s"USE $dbName")
val tabName = "tab1"
assert(!tableDirectoryExists(TableIdentifier(tabName), Option(expectedDBLocation)))
sql(s"CREATE TABLE $tabName as SELECT 1")
assert(tableDirectoryExists(TableIdentifier(tabName), Option(expectedDBLocation)))
if (!tableExists) {
sql(s"DROP TABLE $tabName")
assert(!tableDirectoryExists(TableIdentifier(tabName), Option(expectedDBLocation)))
}
sql(s"USE default")
val sqlDropDatabase = s"DROP DATABASE $dbName ${if (cascade) "CASCADE" else "RESTRICT"}"
if (tableExists && !cascade) {
val message = intercept[AnalysisException] {
sql(sqlDropDatabase)
}.getMessage
assert(message.contains(s"Database $dbName is not empty. One or more tables exist."))
// the database directory was not removed
assert(fs.exists(new Path(expectedDBLocation)))
} else {
sql(sqlDropDatabase)
      // the database directory was removed, together with the table directories inside it
assert(!fs.exists(new Path(expectedDBLocation)))
}
}
test("drop database containing tables - CASCADE") {
dropDatabase(cascade = true, tableExists = true)
}
test("drop an empty database - CASCADE") {
dropDatabase(cascade = true, tableExists = false)
}
test("drop database containing tables - RESTRICT") {
dropDatabase(cascade = false, tableExists = true)
}
test("drop an empty database - RESTRICT") {
dropDatabase(cascade = false, tableExists = false)
}
test("drop default database") {
Seq("true", "false").foreach { caseSensitive =>
withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive) {
var message = intercept[AnalysisException] {
sql("DROP DATABASE default")
}.getMessage
assert(message.contains("Can not drop default database"))
// SQLConf.CASE_SENSITIVE does not affect the result
// because the Hive metastore is not case sensitive.
message = intercept[AnalysisException] {
sql("DROP DATABASE DeFault")
}.getMessage
assert(message.contains("Can not drop default database"))
}
}
}
test("Create Cataloged Table As Select - Drop Table After Runtime Exception") {
withTable("tab") {
intercept[SparkException] {
sql(
"""
|CREATE TABLE tab
|STORED AS TEXTFILE
|SELECT 1 AS a, (SELECT a FROM (SELECT 1 AS a UNION ALL SELECT 2 AS a) t) AS b
""".stripMargin)
}
      // After hitting a runtime exception, the created table should be dropped.
assert(!spark.sessionState.catalog.tableExists(TableIdentifier("tab")))
}
}
test("CREATE TABLE LIKE a temporary view") {
Seq(None, Some("parquet"), Some("orc"), Some("hive")) foreach { provider =>
// CREATE TABLE LIKE a temporary view.
withCreateTableLikeTempView(location = None, provider)
// CREATE TABLE LIKE a temporary view location ...
withTempDir { tmpDir =>
withCreateTableLikeTempView(Some(tmpDir.toURI.toString), provider)
}
}
}
private def withCreateTableLikeTempView(
location : Option[String], provider: Option[String]): Unit = {
val sourceViewName = "tab1"
val targetTabName = "tab2"
val tableType = if (location.isDefined) CatalogTableType.EXTERNAL else CatalogTableType.MANAGED
withTempView(sourceViewName) {
withTable(targetTabName) {
spark.range(10).select($"id" as "a", $"id" as "b", $"id" as "c", $"id" as "d")
.createTempView(sourceViewName)
val locationClause = if (location.nonEmpty) s"LOCATION '${location.getOrElse("")}'" else ""
val providerClause = if (provider.nonEmpty) s"USING ${provider.get}" else ""
sql(s"CREATE TABLE $targetTabName LIKE $sourceViewName $providerClause $locationClause")
val sourceTable = spark.sessionState.catalog.getTempViewOrPermanentTableMetadata(
TableIdentifier(sourceViewName))
val targetTable = spark.sessionState.catalog.getTableMetadata(
TableIdentifier(targetTabName, Some("default")))
checkCreateTableLike(sourceTable, targetTable, tableType, provider)
}
}
}
test("CREATE TABLE LIKE a data source table") {
Seq(None, Some("parquet"), Some("orc"), Some("hive")) foreach { provider =>
// CREATE TABLE LIKE a data source table.
withCreateTableLikeDSTable(location = None, provider)
// CREATE TABLE LIKE a data source table location ...
withTempDir { tmpDir =>
withCreateTableLikeDSTable(Some(tmpDir.toURI.toString), provider)
}
}
}
private def withCreateTableLikeDSTable(
location : Option[String], provider: Option[String]): Unit = {
val sourceTabName = "tab1"
val targetTabName = "tab2"
val tableType = if (location.isDefined) CatalogTableType.EXTERNAL else CatalogTableType.MANAGED
withTable(sourceTabName, targetTabName) {
spark.range(10).select($"id" as "a", $"id" as "b", $"id" as "c", $"id" as "d")
.write.format("json").saveAsTable(sourceTabName)
val locationClause = if (location.nonEmpty) s"LOCATION '${location.getOrElse("")}'" else ""
val providerClause = if (provider.nonEmpty) s"USING ${provider.get}" else ""
sql(s"CREATE TABLE $targetTabName LIKE $sourceTabName $providerClause $locationClause")
val sourceTable =
spark.sessionState.catalog.getTableMetadata(
TableIdentifier(sourceTabName, Some("default")))
val targetTable =
spark.sessionState.catalog.getTableMetadata(
TableIdentifier(targetTabName, Some("default")))
      // The source table should be a managed data source table
assert(DDLUtils.isDatasourceTable(sourceTable))
assert(sourceTable.tableType == CatalogTableType.MANAGED)
checkCreateTableLike(sourceTable, targetTable, tableType, provider)
}
}
test("CREATE TABLE LIKE an external data source table") {
Seq(None, Some("parquet"), Some("orc"), Some("hive")) foreach { provider =>
// CREATE TABLE LIKE an external data source table.
withCreateTableLikeExtDSTable(location = None, provider)
// CREATE TABLE LIKE an external data source table location ...
withTempDir { tmpDir =>
withCreateTableLikeExtDSTable(Some(tmpDir.toURI.toString), provider)
}
}
}
private def withCreateTableLikeExtDSTable(
location : Option[String], provider: Option[String]): Unit = {
val sourceTabName = "tab1"
val targetTabName = "tab2"
val tableType = if (location.isDefined) CatalogTableType.EXTERNAL else CatalogTableType.MANAGED
withTable(sourceTabName, targetTabName) {
withTempPath { dir =>
val path = dir.getCanonicalPath
spark.range(10).select($"id" as "a", $"id" as "b", $"id" as "c", $"id" as "d")
.write.format("parquet").save(path)
sql(s"CREATE TABLE $sourceTabName USING parquet OPTIONS (PATH '${dir.toURI}')")
val locationClause = if (location.nonEmpty) s"LOCATION '${location.getOrElse("")}'" else ""
val providerClause = if (provider.nonEmpty) s"USING ${provider.get}" else ""
sql(s"CREATE TABLE $targetTabName LIKE $sourceTabName $providerClause $locationClause")
// The source table should be an external data source table
val sourceTable = spark.sessionState.catalog.getTableMetadata(
TableIdentifier(sourceTabName, Some("default")))
val targetTable = spark.sessionState.catalog.getTableMetadata(
TableIdentifier(targetTabName, Some("default")))
        // The source table should be a data source table whose table type is EXTERNAL
assert(DDLUtils.isDatasourceTable(sourceTable))
assert(sourceTable.tableType == CatalogTableType.EXTERNAL)
checkCreateTableLike(sourceTable, targetTable, tableType, provider)
}
}
}
test("CREATE TABLE LIKE a managed Hive serde table") {
Seq(None, Some("parquet"), Some("orc"), Some("hive")) foreach { provider =>
// CREATE TABLE LIKE a managed Hive serde table.
withCreateTableLikeManagedHiveTable(location = None, provider)
// CREATE TABLE LIKE a managed Hive serde table location ...
withTempDir { tmpDir =>
withCreateTableLikeManagedHiveTable(Some(tmpDir.toURI.toString), provider)
}
}
}
private def withCreateTableLikeManagedHiveTable(
location : Option[String], provider: Option[String]): Unit = {
val sourceTabName = "tab1"
val targetTabName = "tab2"
val tableType = if (location.isDefined) CatalogTableType.EXTERNAL else CatalogTableType.MANAGED
val catalog = spark.sessionState.catalog
withTable(sourceTabName, targetTabName) {
sql(s"CREATE TABLE $sourceTabName TBLPROPERTIES('prop1'='value1') AS SELECT 1 key, 'a'")
val locationClause = if (location.nonEmpty) s"LOCATION '${location.getOrElse("")}'" else ""
val providerClause = if (provider.nonEmpty) s"USING ${provider.get}" else ""
sql(s"CREATE TABLE $targetTabName LIKE $sourceTabName $providerClause $locationClause")
val sourceTable = catalog.getTableMetadata(
TableIdentifier(sourceTabName, Some("default")))
assert(sourceTable.tableType == CatalogTableType.MANAGED)
assert(sourceTable.properties.get("prop1").nonEmpty)
val targetTable = catalog.getTableMetadata(
TableIdentifier(targetTabName, Some("default")))
checkCreateTableLike(sourceTable, targetTable, tableType, provider)
}
}
test("CREATE TABLE LIKE an external Hive serde table") {
Seq(None, Some("parquet"), Some("orc"), Some("hive")) foreach { provider =>
// CREATE TABLE LIKE an external Hive serde table.
withCreateTableLikeExtHiveTable(location = None, provider)
// CREATE TABLE LIKE an external Hive serde table location ...
withTempDir { tmpDir =>
withCreateTableLikeExtHiveTable(Some(tmpDir.toURI.toString), provider)
}
}
}
private def withCreateTableLikeExtHiveTable(
location : Option[String], provider: Option[String]): Unit = {
val catalog = spark.sessionState.catalog
val tableType = if (location.isDefined) CatalogTableType.EXTERNAL else CatalogTableType.MANAGED
withTempDir { tmpDir =>
val basePath = tmpDir.toURI
val sourceTabName = "tab1"
val targetTabName = "tab2"
withTable(sourceTabName, targetTabName) {
assert(tmpDir.listFiles.isEmpty)
sql(
s"""
|CREATE EXTERNAL TABLE $sourceTabName (key INT comment 'test', value STRING)
|COMMENT 'Apache Spark'
|PARTITIONED BY (ds STRING, hr STRING)
|LOCATION '$basePath'
""".stripMargin)
for (ds <- Seq("2008-04-08", "2008-04-09"); hr <- Seq("11", "12")) {
sql(
s"""
|INSERT OVERWRITE TABLE $sourceTabName
|partition (ds='$ds',hr='$hr')
|SELECT 1, 'a'
""".stripMargin)
}
val locationClause = if (location.nonEmpty) s"LOCATION '${location.getOrElse("")}'" else ""
val providerClause = if (provider.nonEmpty) s"USING ${provider.get}" else ""
sql(s"CREATE TABLE $targetTabName LIKE $sourceTabName $providerClause $locationClause")
val sourceTable = catalog.getTableMetadata(
TableIdentifier(sourceTabName, Some("default")))
assert(sourceTable.tableType == CatalogTableType.EXTERNAL)
assert(sourceTable.comment == Option("Apache Spark"))
val targetTable = catalog.getTableMetadata(
TableIdentifier(targetTabName, Some("default")))
checkCreateTableLike(sourceTable, targetTable, tableType, provider)
}
}
}
test("CREATE TABLE LIKE a view") {
Seq(None, Some("parquet"), Some("orc"), Some("hive")) foreach { provider =>
// CREATE TABLE LIKE a view.
withCreateTableLikeView(location = None, provider)
// CREATE TABLE LIKE a view location ...
withTempDir { tmpDir =>
withCreateTableLikeView(Some(tmpDir.toURI.toString), provider)
}
}
}
private def withCreateTableLikeView(
location : Option[String], provider: Option[String]): Unit = {
val sourceTabName = "tab1"
val sourceViewName = "view"
val targetTabName = "tab2"
val tableType = if (location.isDefined) CatalogTableType.EXTERNAL else CatalogTableType.MANAGED
withTable(sourceTabName, targetTabName) {
withView(sourceViewName) {
spark.range(10).select($"id" as "a", $"id" as "b", $"id" as "c", $"id" as "d")
.write.format("json").saveAsTable(sourceTabName)
sql(s"CREATE VIEW $sourceViewName AS SELECT * FROM $sourceTabName")
val locationClause = if (location.nonEmpty) s"LOCATION '${location.getOrElse("")}'" else ""
val providerClause = if (provider.nonEmpty) s"USING ${provider.get}" else ""
sql(s"CREATE TABLE $targetTabName LIKE $sourceViewName $providerClause $locationClause")
val sourceView = spark.sessionState.catalog.getTableMetadata(
TableIdentifier(sourceViewName, Some("default")))
// The original source should be a VIEW with an empty path
assert(sourceView.tableType == CatalogTableType.VIEW)
assert(sourceView.viewText.nonEmpty)
assert(sourceView.viewCatalogAndNamespace ==
Seq(CatalogManager.SESSION_CATALOG_NAME, "default"))
assert(sourceView.viewQueryColumnNames == Seq("a", "b", "c", "d"))
val targetTable = spark.sessionState.catalog.getTableMetadata(
TableIdentifier(targetTabName, Some("default")))
checkCreateTableLike(sourceView, targetTable, tableType, provider)
}
}
}
private def checkCreateTableLike(
sourceTable: CatalogTable,
targetTable: CatalogTable,
tableType: CatalogTableType,
provider: Option[String]): Unit = {
// The created table should be a MANAGED table or EXTERNAL table with empty view text
// and original text.
assert(targetTable.tableType == tableType,
s"the created table must be a/an ${tableType.name} table")
assert(targetTable.viewText.isEmpty,
"the view text in the created table must be empty")
assert(targetTable.viewCatalogAndNamespace.isEmpty,
"the view catalog and namespace in the created table must be empty")
assert(targetTable.viewQueryColumnNames.isEmpty,
"the view query output columns in the created table must be empty")
assert(targetTable.comment.isEmpty,
"the comment in the created table must be empty")
assert(targetTable.unsupportedFeatures.isEmpty,
"the unsupportedFeatures in the create table must be empty")
val metastoreGeneratedProperties = Seq(
"CreateTime",
"transient_lastDdlTime",
"grantTime",
"lastUpdateTime",
"last_modified_by",
"last_modified_time",
"Owner:",
"totalNumberFiles",
"maxFileSize",
"minFileSize"
)
assert(targetTable.properties.filterKeys(!metastoreGeneratedProperties.contains(_)).isEmpty,
"the table properties of source tables should not be copied in the created table")
provider match {
case Some(_) =>
assert(targetTable.provider == provider)
if (DDLUtils.isHiveTable(provider)) {
assert(DDLUtils.isHiveTable(targetTable),
"the target table should be a hive table if provider is hive")
}
case None =>
if (sourceTable.tableType == CatalogTableType.VIEW) {
// Source table is a temporary/permanent view, which does not have a provider.
// The created target table uses the default data source format
assert(targetTable.provider == Option(spark.sessionState.conf.defaultDataSourceName))
} else {
assert(targetTable.provider == sourceTable.provider)
}
if (DDLUtils.isDatasourceTable(sourceTable) ||
sourceTable.tableType == CatalogTableType.VIEW) {
assert(DDLUtils.isDatasourceTable(targetTable),
"the target table should be a data source table")
} else {
assert(!DDLUtils.isDatasourceTable(targetTable),
"the target table should be a Hive serde table")
}
}
assert(targetTable.storage.locationUri.nonEmpty, "target table path should not be empty")
    // When creating an external table, the user-specified location and the source table's
    // location may be the same or different, so we skip this check in that case.
if (tableType != CatalogTableType.EXTERNAL) {
assert(sourceTable.storage.locationUri != targetTable.storage.locationUri,
"source table/view path should be different from target table path")
}
if (DDLUtils.isHiveTable(targetTable)) {
assert(targetTable.tracksPartitionsInCatalog)
} else {
assert(targetTable.tracksPartitionsInCatalog == sourceTable.tracksPartitionsInCatalog)
}
    // The source table's contents should not be visible in the target table.
assert(spark.table(sourceTable.identifier).count() != 0, "the source table should be nonempty")
assert(spark.table(targetTable.identifier).count() == 0, "the target table should be empty")
// Their schema should be identical
checkAnswer(
sql(s"DESC ${sourceTable.identifier}"),
sql(s"DESC ${targetTable.identifier}"))
withSQLConf("hive.exec.dynamic.partition.mode" -> "nonstrict") {
// Check whether the new table can be inserted using the data from the original table
sql(s"INSERT INTO TABLE ${targetTable.identifier} SELECT * FROM ${sourceTable.identifier}")
}
// After insertion, the data should be identical
checkAnswer(
sql(s"SELECT * FROM ${sourceTable.identifier}"),
sql(s"SELECT * FROM ${targetTable.identifier}"))
}
test("create table with the same name as an index table") {
val tabName = "tab1"
val indexName = tabName + "_index"
withTable(tabName) {
// Spark SQL does not support creating index. Thus, we have to use Hive client.
val client =
spark.sharedState.externalCatalog.unwrapped.asInstanceOf[HiveExternalCatalog].client
sql(s"CREATE TABLE $tabName(a int)")
try {
client.runSqlHive(
s"CREATE INDEX $indexName ON TABLE $tabName (a) AS 'COMPACT' WITH DEFERRED REBUILD")
val indexTabName =
spark.sessionState.catalog.listTables("default", s"*$indexName*").head.table
// Even if index tables exist, listTables and getTable APIs should still work
checkAnswer(
spark.catalog.listTables().toDF(),
Row(indexTabName, "default", null, null, false) ::
Row(tabName, "default", null, "MANAGED", false) :: Nil)
assert(spark.catalog.getTable("default", indexTabName).name === indexTabName)
intercept[TableAlreadyExistsException] {
sql(s"CREATE TABLE $indexTabName(b int) USING hive")
}
intercept[TableAlreadyExistsException] {
sql(s"ALTER TABLE $tabName RENAME TO $indexTabName")
}
        // Even when tableExists is not invoked, we can still get an AnalysisException
val e = intercept[AnalysisException] {
sql(s"DESCRIBE $indexTabName")
}.getMessage
assert(e.contains("Hive index table is not supported."))
} finally {
client.runSqlHive(s"DROP INDEX IF EXISTS $indexName ON $tabName")
}
}
}
test("insert skewed table") {
val tabName = "tab1"
withTable(tabName) {
// Spark SQL does not support creating skewed table. Thus, we have to use Hive client.
val client =
spark.sharedState.externalCatalog.unwrapped.asInstanceOf[HiveExternalCatalog].client
client.runSqlHive(
s"""
|CREATE Table $tabName(col1 int, col2 int)
|PARTITIONED BY (part1 string, part2 string)
|SKEWED BY (col1) ON (3, 4) STORED AS DIRECTORIES
""".stripMargin)
val hiveTable =
spark.sessionState.catalog.getTableMetadata(TableIdentifier(tabName, Some("default")))
assert(hiveTable.unsupportedFeatures.contains("skewed columns"))
      // Call loadDynamicPartitions against a skewed table with list bucketing enabled
sql(
s"""
|INSERT OVERWRITE TABLE $tabName
|PARTITION (part1='a', part2)
|SELECT 3, 4, 'b'
""".stripMargin)
      // Call loadPartitions against a skewed table with list bucketing enabled
sql(
s"""
|INSERT INTO TABLE $tabName
|PARTITION (part1='a', part2='b')
|SELECT 1, 2
""".stripMargin)
checkAnswer(
sql(s"SELECT * from $tabName"),
Row(3, 4, "a", "b") :: Row(1, 2, "a", "b") :: Nil)
}
}
test("desc table for data source table - no user-defined schema") {
Seq("parquet", "json", "orc").foreach { fileFormat =>
withTable("t1") {
withTempPath { dir =>
val path = dir.toURI.toString
spark.range(1).write.format(fileFormat).save(path)
sql(s"CREATE TABLE t1 USING $fileFormat OPTIONS (PATH '$path')")
val desc = sql("DESC FORMATTED t1").collect().toSeq
assert(desc.contains(Row("id", "bigint", null)))
}
}
}
}
test("datasource and statistics table property keys are not allowed") {
import org.apache.spark.sql.hive.HiveExternalCatalog.DATASOURCE_PREFIX
import org.apache.spark.sql.hive.HiveExternalCatalog.STATISTICS_PREFIX
withTable("tbl") {
sql("CREATE TABLE tbl(a INT) STORED AS parquet")
Seq(DATASOURCE_PREFIX, STATISTICS_PREFIX).foreach { forbiddenPrefix =>
val e = intercept[AnalysisException] {
sql(s"ALTER TABLE tbl SET TBLPROPERTIES ('${forbiddenPrefix}foo' = 'loser')")
}
assert(e.getMessage.contains(forbiddenPrefix + "foo"))
val e2 = intercept[AnalysisException] {
sql(s"ALTER TABLE tbl UNSET TBLPROPERTIES ('${forbiddenPrefix}foo')")
}
assert(e2.getMessage.contains(forbiddenPrefix + "foo"))
val e3 = intercept[AnalysisException] {
sql(s"CREATE TABLE tbl2 (a INT) TBLPROPERTIES ('${forbiddenPrefix}foo'='anything')")
}
assert(e3.getMessage.contains(forbiddenPrefix + "foo"))
}
}
}
test("truncate table - datasource table") {
import testImplicits._
val data = (1 to 10).map { i => (i, i) }.toDF("width", "length")
// Test both a Hive compatible and incompatible code path.
Seq("json", "parquet").foreach { format =>
withTable("rectangles") {
data.write.format(format).saveAsTable("rectangles")
assert(spark.table("rectangles").collect().nonEmpty,
"bad test; table was empty to begin with")
sql("TRUNCATE TABLE rectangles")
assert(spark.table("rectangles").collect().isEmpty)
// not supported since the table is not partitioned
val e = intercept[AnalysisException] {
sql("TRUNCATE TABLE rectangles PARTITION (width=1)")
}
assert(e.message.contains("Operation not allowed"))
}
}
}
test("truncate partitioned table - datasource table") {
import testImplicits._
val data = (1 to 10).map { i => (i % 3, i % 5, i) }.toDF("width", "length", "height")
withTable("partTable") {
data.write.partitionBy("width", "length").saveAsTable("partTable")
// supported since partitions are stored in the metastore
sql("TRUNCATE TABLE partTable PARTITION (width=1, length=1)")
assert(spark.table("partTable").filter($"width" === 1).collect().nonEmpty)
assert(spark.table("partTable").filter($"width" === 1 && $"length" === 1).collect().isEmpty)
}
withTable("partTable") {
data.write.partitionBy("width", "length").saveAsTable("partTable")
// support partial partition spec
sql("TRUNCATE TABLE partTable PARTITION (width=1)")
assert(spark.table("partTable").collect().nonEmpty)
assert(spark.table("partTable").filter($"width" === 1).collect().isEmpty)
}
withTable("partTable") {
data.write.partitionBy("width", "length").saveAsTable("partTable")
// do nothing if no partition is matched for the given partial partition spec
sql("TRUNCATE TABLE partTable PARTITION (width=100)")
assert(spark.table("partTable").count() == data.count())
// throw exception if no partition is matched for the given non-partial partition spec.
intercept[NoSuchPartitionException] {
sql("TRUNCATE TABLE partTable PARTITION (width=100, length=100)")
}
// throw exception if the column in partition spec is not a partition column.
val e = intercept[AnalysisException] {
sql("TRUNCATE TABLE partTable PARTITION (unknown=1)")
}
assert(e.message.contains("unknown is not a valid partition column"))
}
}
test("create hive serde table with new syntax") {
withTable("t", "t2", "t3") {
withTempPath { path =>
sql(
s"""
|CREATE TABLE t(id int) USING hive
|OPTIONS(fileFormat 'orc', compression 'Zlib')
|LOCATION '${path.toURI}'
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(DDLUtils.isHiveTable(table))
assert(table.storage.serde == Some("org.apache.hadoop.hive.ql.io.orc.OrcSerde"))
assert(table.storage.properties.get("compression") == Some("Zlib"))
assert(spark.table("t").collect().isEmpty)
sql("INSERT INTO t SELECT 1")
checkAnswer(spark.table("t"), Row(1))
// Check if this is compressed as ZLIB.
val maybeOrcFile = path.listFiles().find(_.getName.startsWith("part"))
assertCompression(maybeOrcFile, "orc", "ZLIB")
sql("CREATE TABLE t2 USING HIVE AS SELECT 1 AS c1, 'a' AS c2")
val table2 = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t2"))
assert(DDLUtils.isHiveTable(table2))
assert(table2.storage.serde == Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"))
checkAnswer(spark.table("t2"), Row(1, "a"))
sql("CREATE TABLE t3(a int, p int) USING hive PARTITIONED BY (p)")
sql("INSERT INTO t3 PARTITION(p=1) SELECT 0")
checkAnswer(spark.table("t3"), Row(0, 1))
}
}
}
test("create hive serde table with Catalog") {
withTable("t") {
withTempDir { dir =>
val df = spark.catalog.createTable(
"t",
"hive",
new StructType().add("i", "int"),
Map("path" -> dir.getCanonicalPath, "fileFormat" -> "parquet"))
assert(df.collect().isEmpty)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(DDLUtils.isHiveTable(table))
assert(table.storage.inputFormat ==
Some("org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"))
assert(table.storage.outputFormat ==
Some("org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"))
assert(table.storage.serde ==
Some("org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"))
sql("INSERT INTO t SELECT 1")
checkAnswer(spark.table("t"), Row(1))
}
}
}
test("create hive serde table with DataFrameWriter.saveAsTable") {
withTable("t", "t1") {
Seq(1 -> "a").toDF("i", "j")
.write.format("hive").option("fileFormat", "avro").saveAsTable("t")
checkAnswer(spark.table("t"), Row(1, "a"))
Seq("c" -> 1).toDF("i", "j").write.format("hive")
.mode(SaveMode.Overwrite).option("fileFormat", "parquet").saveAsTable("t")
checkAnswer(spark.table("t"), Row("c", 1))
var table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(DDLUtils.isHiveTable(table))
assert(table.storage.inputFormat ==
Some("org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"))
assert(table.storage.outputFormat ==
Some("org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"))
assert(table.storage.serde ==
Some("org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"))
Seq(9 -> "x").toDF("i", "j")
.write.format("hive").mode(SaveMode.Overwrite).option("fileFormat", "avro").saveAsTable("t")
checkAnswer(spark.table("t"), Row(9, "x"))
table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(DDLUtils.isHiveTable(table))
assert(table.storage.inputFormat ==
Some("org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat"))
assert(table.storage.outputFormat ==
Some("org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat"))
assert(table.storage.serde ==
Some("org.apache.hadoop.hive.serde2.avro.AvroSerDe"))
val e2 = intercept[AnalysisException] {
Seq(1 -> "a").toDF("i", "j").write.format("hive").bucketBy(4, "i").saveAsTable("t1")
}
assert(e2.message.contains("Creating bucketed Hive serde table is not supported yet"))
val e3 = intercept[AnalysisException] {
spark.table("t").write.format("hive").mode("overwrite").saveAsTable("t")
}
assert(e3.message.contains("Cannot overwrite table default.t that is also being read from"))
}
}
test("append data to hive serde table") {
withTable("t", "t1") {
Seq(1 -> "a").toDF("i", "j")
.write.format("hive").option("fileFormat", "avro").saveAsTable("t")
checkAnswer(spark.table("t"), Row(1, "a"))
sql("INSERT INTO t SELECT 2, 'b'")
checkAnswer(spark.table("t"), Row(1, "a") :: Row(2, "b") :: Nil)
Seq(3 -> "c").toDF("i", "j")
.write.format("hive").mode("append").saveAsTable("t")
checkAnswer(spark.table("t"), Row(1, "a") :: Row(2, "b") :: Row(3, "c") :: Nil)
Seq(3.5 -> 3).toDF("i", "j")
.write.format("hive").mode("append").saveAsTable("t")
checkAnswer(spark.table("t"), Row(1, "a") :: Row(2, "b") :: Row(3, "c")
:: Row(3, "3") :: Nil)
Seq(4 -> "d").toDF("i", "j").write.saveAsTable("t1")
val e = intercept[AnalysisException] {
Seq(5 -> "e").toDF("i", "j")
.write.format("hive").mode("append").saveAsTable("t1")
}
assert(e.message.contains("The format of the existing table default.t1 is "))
assert(e.message.contains("It doesn't match the specified format `HiveFileFormat`."))
}
}
test("create partitioned hive serde table as select") {
withTable("t", "t1") {
withSQLConf("hive.exec.dynamic.partition.mode" -> "nonstrict") {
Seq(10 -> "y").toDF("i", "j").write.format("hive").partitionBy("i").saveAsTable("t")
checkAnswer(spark.table("t"), Row("y", 10) :: Nil)
Seq((1, 2, 3)).toDF("i", "j", "k").write.mode("overwrite").format("hive")
.partitionBy("j", "k").saveAsTable("t")
checkAnswer(spark.table("t"), Row(1, 2, 3) :: Nil)
spark.sql("create table t1 using hive partitioned by (i) as select 1 as i, 'a' as j")
checkAnswer(spark.table("t1"), Row("a", 1) :: Nil)
}
}
}
test("read/write files with hive data source is not allowed") {
withTempDir { dir =>
val e = intercept[AnalysisException] {
spark.read.format("hive").load(dir.getAbsolutePath)
}
assert(e.message.contains("Hive data source can only be used with tables"))
val e2 = intercept[AnalysisException] {
Seq(1 -> "a").toDF("i", "j").write.format("hive").save(dir.getAbsolutePath)
}
assert(e2.message.contains("Hive data source can only be used with tables"))
val e3 = intercept[AnalysisException] {
spark.readStream.format("hive").load(dir.getAbsolutePath)
}
assert(e3.message.contains("Hive data source can only be used with tables"))
val e4 = intercept[AnalysisException] {
spark.readStream.schema(new StructType()).parquet(dir.getAbsolutePath)
.writeStream.format("hive").start(dir.getAbsolutePath)
}
assert(e4.message.contains("Hive data source can only be used with tables"))
}
}
test("partitioned table should always put partition columns at the end of table schema") {
def getTableColumns(tblName: String): Seq[String] = {
spark.sessionState.catalog.getTableMetadata(TableIdentifier(tblName)).schema.map(_.name)
}
val provider = spark.sessionState.conf.defaultDataSourceName
withTable("t", "t1", "t2", "t3", "t4", "t5", "t6") {
sql(s"CREATE TABLE t(a int, b int, c int, d int) USING $provider PARTITIONED BY (d, b)")
assert(getTableColumns("t") == Seq("a", "c", "d", "b"))
sql(s"CREATE TABLE t1 USING $provider PARTITIONED BY (d, b) AS SELECT 1 a, 1 b, 1 c, 1 d")
assert(getTableColumns("t1") == Seq("a", "c", "d", "b"))
Seq((1, 1, 1, 1)).toDF("a", "b", "c", "d").write.partitionBy("d", "b").saveAsTable("t2")
assert(getTableColumns("t2") == Seq("a", "c", "d", "b"))
withTempPath { path =>
val dataPath = new File(new File(path, "d=1"), "b=1").getCanonicalPath
Seq(1 -> 1).toDF("a", "c").write.save(dataPath)
sql(s"CREATE TABLE t3 USING $provider LOCATION '${path.toURI}'")
assert(getTableColumns("t3") == Seq("a", "c", "d", "b"))
}
sql("CREATE TABLE t4(a int, b int, c int, d int) USING hive PARTITIONED BY (d, b)")
assert(getTableColumns("t4") == Seq("a", "c", "d", "b"))
withSQLConf("hive.exec.dynamic.partition.mode" -> "nonstrict") {
sql("CREATE TABLE t5 USING hive PARTITIONED BY (d, b) AS SELECT 1 a, 1 b, 1 c, 1 d")
assert(getTableColumns("t5") == Seq("a", "c", "d", "b"))
Seq((1, 1, 1, 1)).toDF("a", "b", "c", "d").write.format("hive")
.partitionBy("d", "b").saveAsTable("t6")
assert(getTableColumns("t6") == Seq("a", "c", "d", "b"))
}
}
}
test("create hive table with a non-existing location") {
withTable("t", "t1") {
withTempPath { dir =>
spark.sql(s"CREATE TABLE t(a int, b int) USING hive LOCATION '${dir.toURI}'")
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(table.location == makeQualifiedPath(dir.getAbsolutePath))
spark.sql("INSERT INTO TABLE t SELECT 1, 2")
assert(dir.exists())
checkAnswer(spark.table("t"), Row(1, 2))
}
// partition table
withTempPath { dir =>
spark.sql(
s"""
|CREATE TABLE t1(a int, b int)
|USING hive
|PARTITIONED BY(a)
|LOCATION '${dir.toURI}'
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t1"))
assert(table.location == makeQualifiedPath(dir.getAbsolutePath))
spark.sql("INSERT INTO TABLE t1 PARTITION(a=1) SELECT 2")
val partDir = new File(dir, "a=1")
assert(partDir.exists())
checkAnswer(spark.table("t1"), Row(2, 1))
}
}
}
Seq(true, false).foreach { shouldDelete =>
val tcName = if (shouldDelete) "non-existing" else "existed"
test(s"CTAS for external hive table with a $tcName location") {
withTable("t", "t1") {
withSQLConf("hive.exec.dynamic.partition.mode" -> "nonstrict") {
withTempDir { dir =>
if (shouldDelete) dir.delete()
spark.sql(
s"""
|CREATE TABLE t
|USING hive
|LOCATION '${dir.toURI}'
|AS SELECT 3 as a, 4 as b, 1 as c, 2 as d
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(table.location == makeQualifiedPath(dir.getAbsolutePath))
checkAnswer(spark.table("t"), Row(3, 4, 1, 2))
}
// partition table
withTempDir { dir =>
if (shouldDelete) dir.delete()
spark.sql(
s"""
|CREATE TABLE t1
|USING hive
|PARTITIONED BY(a, b)
|LOCATION '${dir.toURI}'
|AS SELECT 3 as a, 4 as b, 1 as c, 2 as d
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t1"))
assert(table.location == makeQualifiedPath(dir.getAbsolutePath))
val partDir = new File(dir, "a=3")
assert(partDir.exists())
checkAnswer(spark.table("t1"), Row(1, 2, 3, 4))
}
}
}
}
}
Seq("parquet", "hive").foreach { datasource =>
Seq("a b", "a:b", "a%b", "a,b").foreach { specialChars =>
test(s"partition column name of $datasource table containing $specialChars") {
withTable("t") {
withTempDir { dir =>
spark.sql(
s"""
|CREATE TABLE t(a string, `$specialChars` string)
|USING $datasource
|PARTITIONED BY(`$specialChars`)
|LOCATION '${dir.toURI}'
""".stripMargin)
assert(dir.listFiles().isEmpty)
spark.sql(s"INSERT INTO TABLE t PARTITION(`$specialChars`=2) SELECT 1")
val partEscaped = s"${ExternalCatalogUtils.escapePathName(specialChars)}=2"
val partFile = new File(dir, partEscaped)
assert(partFile.listFiles().nonEmpty)
checkAnswer(spark.table("t"), Row("1", "2") :: Nil)
withSQLConf("hive.exec.dynamic.partition.mode" -> "nonstrict") {
spark.sql(s"INSERT INTO TABLE t PARTITION(`$specialChars`) SELECT 3, 4")
val partEscaped1 = s"${ExternalCatalogUtils.escapePathName(specialChars)}=4"
val partFile1 = new File(dir, partEscaped1)
assert(partFile1.listFiles().nonEmpty)
checkAnswer(spark.table("t"), Row("1", "2") :: Row("3", "4") :: Nil)
}
}
}
}
}
}
Seq("a b", "a:b", "a%b").foreach { specialChars =>
test(s"hive table: location uri contains $specialChars") {
    // On Windows, a colon in the file name appears to be illegal by default. See
    // https://support.microsoft.com/en-us/help/289627
assume(!Utils.isWindows || specialChars != "a:b")
withTable("t") {
withTempDir { dir =>
val loc = new File(dir, specialChars)
loc.mkdir()
        // The parser does not recognize backslashes on Windows as-is, so they currently
        // have to be escaped.
val escapedLoc = loc.getAbsolutePath.replace("\\\\", "\\\\\\\\")
spark.sql(
s"""
|CREATE TABLE t(a string)
|USING hive
|LOCATION '$escapedLoc'
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(table.location == makeQualifiedPath(loc.getAbsolutePath))
assert(new Path(table.location).toString.contains(specialChars))
assert(loc.listFiles().isEmpty)
if (specialChars != "a:b") {
spark.sql("INSERT INTO TABLE t SELECT 1")
assert(loc.listFiles().length >= 1)
checkAnswer(spark.table("t"), Row("1") :: Nil)
} else {
val e = intercept[AnalysisException] {
spark.sql("INSERT INTO TABLE t SELECT 1")
}.getMessage
assert(e.contains("java.net.URISyntaxException: Relative path in absolute URI: a:b"))
}
}
withTempDir { dir =>
val loc = new File(dir, specialChars)
loc.mkdir()
val escapedLoc = loc.getAbsolutePath.replace("\\\\", "\\\\\\\\")
spark.sql(
s"""
|CREATE TABLE t1(a string, b string)
|USING hive
|PARTITIONED BY(b)
|LOCATION '$escapedLoc'
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t1"))
assert(table.location == makeQualifiedPath(loc.getAbsolutePath))
assert(new Path(table.location).toString.contains(specialChars))
assert(loc.listFiles().isEmpty)
if (specialChars != "a:b") {
spark.sql("INSERT INTO TABLE t1 PARTITION(b=2) SELECT 1")
val partFile = new File(loc, "b=2")
assert(partFile.listFiles().nonEmpty)
checkAnswer(spark.table("t1"), Row("1", "2") :: Nil)
spark.sql("INSERT INTO TABLE t1 PARTITION(b='2017-03-03 12:13%3A14') SELECT 1")
val partFile1 = new File(loc, "b=2017-03-03 12:13%3A14")
assert(!partFile1.exists())
if (!Utils.isWindows) {
// Actual path becomes "b=2017-03-03%2012%3A13%253A14" on Windows.
val partFile2 = new File(loc, "b=2017-03-03 12%3A13%253A14")
assert(partFile2.listFiles().nonEmpty)
checkAnswer(spark.table("t1"),
Row("1", "2") :: Row("1", "2017-03-03 12:13%3A14") :: Nil)
}
} else {
val e = intercept[AnalysisException] {
spark.sql("INSERT INTO TABLE t1 PARTITION(b=2) SELECT 1")
}.getMessage
assert(e.contains("java.net.URISyntaxException: Relative path in absolute URI: a:b"))
val e1 = intercept[AnalysisException] {
spark.sql("INSERT INTO TABLE t1 PARTITION(b='2017-03-03 12:13%3A14') SELECT 1")
}.getMessage
assert(e1.contains("java.net.URISyntaxException: Relative path in absolute URI: a:b"))
}
}
}
}
}
test("SPARK-19905: Hive SerDe table input paths") {
withTable("spark_19905") {
withTempView("spark_19905_view") {
spark.range(10).createOrReplaceTempView("spark_19905_view")
sql("CREATE TABLE spark_19905 STORED AS RCFILE AS SELECT * FROM spark_19905_view")
assert(spark.table("spark_19905").inputFiles.nonEmpty)
assert(sql("SELECT input_file_name() FROM spark_19905").count() > 0)
}
}
}
hiveFormats.foreach { tableType =>
test(s"alter hive serde table add columns -- partitioned - $tableType") {
withTable("tab") {
sql(
s"""
|CREATE TABLE tab (c1 int, c2 int)
|PARTITIONED BY (c3 int) STORED AS $tableType
""".stripMargin)
sql("INSERT INTO tab PARTITION (c3=1) VALUES (1, 2)")
sql("ALTER TABLE tab ADD COLUMNS (c4 int)")
checkAnswer(
sql("SELECT * FROM tab WHERE c3 = 1"),
Seq(Row(1, 2, null, 1))
)
assert(spark.table("tab").schema
.contains(StructField("c4", IntegerType)))
sql("INSERT INTO tab PARTITION (c3=2) VALUES (2, 3, 4)")
checkAnswer(
spark.table("tab"),
Seq(Row(1, 2, null, 1), Row(2, 3, 4, 2))
)
checkAnswer(
sql("SELECT * FROM tab WHERE c3 = 2 AND c4 IS NOT NULL"),
Seq(Row(2, 3, 4, 2))
)
sql("ALTER TABLE tab ADD COLUMNS (c5 char(10))")
assert(spark.table("tab").schema.find(_.name == "c5")
.get.metadata.getString("HIVE_TYPE_STRING") == "char(10)")
}
}
}
hiveFormats.foreach { tableType =>
test(s"alter hive serde table add columns -- with predicate - $tableType ") {
withTable("tab") {
sql(s"CREATE TABLE tab (c1 int, c2 int) STORED AS $tableType")
sql("INSERT INTO tab VALUES (1, 2)")
sql("ALTER TABLE tab ADD COLUMNS (c4 int)")
checkAnswer(
sql("SELECT * FROM tab WHERE c4 IS NULL"),
Seq(Row(1, 2, null))
)
assert(spark.table("tab").schema
.contains(StructField("c4", IntegerType)))
sql("INSERT INTO tab VALUES (2, 3, 4)")
checkAnswer(
sql("SELECT * FROM tab WHERE c4 = 4 "),
Seq(Row(2, 3, 4))
)
checkAnswer(
spark.table("tab"),
Seq(Row(1, 2, null), Row(2, 3, 4))
)
}
}
}
Seq(true, false).foreach { caseSensitive =>
test(s"alter add columns with existing column name - caseSensitive $caseSensitive") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> s"$caseSensitive") {
withTable("tab") {
sql("CREATE TABLE tab (c1 int) PARTITIONED BY (c2 int) STORED AS PARQUET")
if (!caseSensitive) {
// duplicating partitioning column name
val e1 = intercept[AnalysisException] {
sql("ALTER TABLE tab ADD COLUMNS (C2 string)")
}.getMessage
assert(e1.contains("Found duplicate column(s)"))
// duplicating data column name
val e2 = intercept[AnalysisException] {
sql("ALTER TABLE tab ADD COLUMNS (C1 string)")
}.getMessage
assert(e2.contains("Found duplicate column(s)"))
} else {
          // The Hive catalog still complains about the duplicate column name because Hive
          // identifiers are case-insensitive.
val e1 = intercept[AnalysisException] {
sql("ALTER TABLE tab ADD COLUMNS (C2 string)")
}.getMessage
assert(e1.contains("HiveException"))
          // The Hive catalog still complains about the duplicate column name because Hive
          // identifiers are case-insensitive.
val e2 = intercept[AnalysisException] {
sql("ALTER TABLE tab ADD COLUMNS (C1 string)")
}.getMessage
assert(e2.contains("HiveException"))
}
}
}
}
}
test("SPARK-21216: join with a streaming DataFrame") {
import org.apache.spark.sql.execution.streaming.MemoryStream
import testImplicits._
implicit val _sqlContext = spark.sqlContext
withTempView("t1") {
Seq((1, "one"), (2, "two"), (4, "four")).toDF("number", "word").createOrReplaceTempView("t1")
// Make a table and ensure it will be broadcast.
sql("""CREATE TABLE smallTable(word string, number int)
|ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
|STORED AS TEXTFILE
""".stripMargin)
sql(
"""INSERT INTO smallTable
|SELECT word, number from t1
""".stripMargin)
val inputData = MemoryStream[Int]
val joined = inputData.toDS().toDF()
.join(spark.table("smallTable"), $"value" === $"number")
val sq = joined.writeStream
.format("memory")
.queryName("t2")
.start()
try {
inputData.addData(1, 2)
sq.processAllAvailable()
checkAnswer(
spark.table("t2"),
Seq(Row(1, "one", 1), Row(2, "two", 2))
)
} finally {
sq.stop()
}
}
}
test("table name with schema") {
// regression test for SPARK-11778
withDatabase("usrdb") {
spark.sql("create schema usrdb")
withTable("usrdb.test") {
spark.sql("create table usrdb.test(c int)")
spark.read.table("usrdb.test")
}
}
}
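  // Inspects the file that was just written and asserts which compression codec was actually
  // used: through OrcFileOperator for ORC files and through the Parquet footer for Parquet files.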
private def assertCompression(maybeFile: Option[File], format: String, compression: String) = {
assert(maybeFile.isDefined)
val actualCompression = format match {
case "orc" =>
OrcFileOperator.getFileReader(maybeFile.get.toPath.toString).get.getCompression.name
case "parquet" =>
val footer = ParquetFileReader.readFooter(
sparkContext.hadoopConfiguration, new Path(maybeFile.get.getPath), NO_FILTER)
footer.getBlocks.get(0).getColumns.get(0).getCodec.toString
}
assert(compression === actualCompression)
}
Seq(("orc", "ZLIB"), ("parquet", "GZIP")).foreach { case (fileFormat, compression) =>
test(s"SPARK-22158 convertMetastore should not ignore table property - $fileFormat") {
withSQLConf(CONVERT_METASTORE_ORC.key -> "true", CONVERT_METASTORE_PARQUET.key -> "true") {
withTable("t") {
withTempPath { path =>
sql(
s"""
|CREATE TABLE t(id int) USING hive
|OPTIONS(fileFormat '$fileFormat', compression '$compression')
|LOCATION '${path.toURI}'
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(DDLUtils.isHiveTable(table))
assert(table.storage.serde.get.contains(fileFormat))
assert(table.storage.properties.get("compression") == Some(compression))
assert(spark.table("t").collect().isEmpty)
sql("INSERT INTO t SELECT 1")
checkAnswer(spark.table("t"), Row(1))
val maybeFile = path.listFiles().find(_.getName.startsWith("part"))
assertCompression(maybeFile, fileFormat, compression)
}
}
}
}
}
private def getReader(path: String): org.apache.orc.Reader = {
val conf = spark.sessionState.newHadoopConf()
val files = org.apache.spark.sql.execution.datasources.orc.OrcUtils.listOrcFiles(path, conf)
assert(files.length == 1)
val file = files.head
val fs = file.getFileSystem(conf)
val readerOptions = org.apache.orc.OrcFile.readerOptions(conf).filesystem(fs)
org.apache.orc.OrcFile.createReader(file, readerOptions)
}
test("SPARK-23355 convertMetastoreOrc should not ignore table properties - STORED AS") {
Seq("native", "hive").foreach { orcImpl =>
withSQLConf(ORC_IMPLEMENTATION.key -> orcImpl, CONVERT_METASTORE_ORC.key -> "true") {
withTable("t") {
withTempPath { path =>
sql(
s"""
|CREATE TABLE t(id int) STORED AS ORC
|TBLPROPERTIES (
| orc.compress 'ZLIB',
| orc.compress.size '1001',
| orc.row.index.stride '2002',
| hive.exec.orc.default.block.size '3003',
| hive.exec.orc.compression.strategy 'COMPRESSION')
|LOCATION '${path.toURI}'
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(DDLUtils.isHiveTable(table))
assert(table.storage.serde.get.contains("orc"))
val properties = table.properties
assert(properties.get("orc.compress") == Some("ZLIB"))
assert(properties.get("orc.compress.size") == Some("1001"))
assert(properties.get("orc.row.index.stride") == Some("2002"))
assert(properties.get("hive.exec.orc.default.block.size") == Some("3003"))
assert(properties.get("hive.exec.orc.compression.strategy") == Some("COMPRESSION"))
assert(spark.table("t").collect().isEmpty)
sql("INSERT INTO t SELECT 1")
checkAnswer(spark.table("t"), Row(1))
val maybeFile = path.listFiles().find(_.getName.startsWith("part"))
Utils.tryWithResource(getReader(maybeFile.head.getCanonicalPath)) { reader =>
assert(reader.getCompressionKind.name === "ZLIB")
assert(reader.getCompressionSize == 1001)
assert(reader.getRowIndexStride == 2002)
}
}
}
}
}
}
test("SPARK-23355 convertMetastoreParquet should not ignore table properties - STORED AS") {
withSQLConf(CONVERT_METASTORE_PARQUET.key -> "true") {
withTable("t") {
withTempPath { path =>
sql(
s"""
|CREATE TABLE t(id int) STORED AS PARQUET
|TBLPROPERTIES (
| parquet.compression 'GZIP'
|)
|LOCATION '${path.toURI}'
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(DDLUtils.isHiveTable(table))
assert(table.storage.serde.get.contains("parquet"))
val properties = table.properties
assert(properties.get("parquet.compression") == Some("GZIP"))
assert(spark.table("t").collect().isEmpty)
sql("INSERT INTO t SELECT 1")
checkAnswer(spark.table("t"), Row(1))
val maybeFile = path.listFiles().find(_.getName.startsWith("part"))
assertCompression(maybeFile, "parquet", "GZIP")
}
}
}
}
test("load command for non local invalid path validation") {
withTable("tbl") {
sql("CREATE TABLE tbl(i INT, j STRING) USING hive")
val e = intercept[AnalysisException](
sql("load data inpath '/doesnotexist.csv' into table tbl"))
assert(e.message.contains("LOAD DATA input path does not exist"))
}
}
test("SPARK-22252: FileFormatWriter should respect the input query schema in HIVE") {
withTable("t1", "t2", "t3", "t4") {
spark.range(1).select($"id" as "col1", $"id" as "col2").write.saveAsTable("t1")
spark.sql("select COL1, COL2 from t1").write.format("hive").saveAsTable("t2")
checkAnswer(spark.table("t2"), Row(0, 0))
// Test picking part of the columns when writing.
spark.range(1).select($"id", $"id" as "col1", $"id" as "col2").write.saveAsTable("t3")
spark.sql("select COL1, COL2 from t3").write.format("hive").saveAsTable("t4")
checkAnswer(spark.table("t4"), Row(0, 0))
}
}
test("SPARK-24812: desc formatted table for last access verification") {
withTable("t1") {
sql(
"CREATE TABLE IF NOT EXISTS t1 (c1_int INT, c2_string STRING, c3_float FLOAT)")
val desc = sql("DESC FORMATTED t1").filter($"col_name".startsWith("Last Access"))
.select("data_type")
      // Check that the last access time is not reported as the default 1970 epoch date,
      // since that would be a bogus access time.
assert((desc.first.toString.contains("UNKNOWN")))
}
}
test("SPARK-24681 checks if nested column names do not include ',', ':', and ';'") {
val expectedMsg = "Cannot create a table having a nested column whose name contains invalid " +
"characters (',', ':', ';') in Hive metastore."
Seq("nested,column", "nested:column", "nested;column").foreach { nestedColumnName =>
withTable("t") {
val e = intercept[AnalysisException] {
spark.range(1)
.select(struct(lit(0).as(nestedColumnName)).as("toplevel"))
.write
.format("hive")
.saveAsTable("t")
}.getMessage
assert(e.contains(expectedMsg))
}
}
}
test("desc formatted table should also show viewOriginalText for views") {
withView("v1", "v2") {
sql("CREATE VIEW v1 AS SELECT 1 AS value")
assert(sql("DESC FORMATTED v1").collect().containsSlice(
Seq(
Row("Type", "VIEW", ""),
Row("View Text", "SELECT 1 AS value", ""),
Row("View Original Text", "SELECT 1 AS value", "")
)
))
hiveClient.runSqlHive("CREATE VIEW v2 AS SELECT * FROM (SELECT 1) T")
assert(sql("DESC FORMATTED v2").collect().containsSlice(
Seq(
Row("Type", "VIEW", ""),
Row("View Text", "SELECT `t`.`_c0` FROM (SELECT 1) `T`", ""),
Row("View Original Text", "SELECT * FROM (SELECT 1) T", "")
)
))
}
}
test("Hive CTAS can't create partitioned table by specifying schema") {
val err1 = intercept[ParseException] {
spark.sql(
s"""
|CREATE TABLE t (a int)
|PARTITIONED BY (b string)
|STORED AS parquet
|AS SELECT 1 as a, "a" as b
""".stripMargin)
}.getMessage
assert(err1.contains("Schema may not be specified in a Create Table As Select " +
"(CTAS) statement"))
val err2 = intercept[ParseException] {
spark.sql(
s"""
|CREATE TABLE t
|PARTITIONED BY (b string)
|STORED AS parquet
|AS SELECT 1 as a, "a" as b
""".stripMargin)
}.getMessage
assert(err2.contains("Create Partitioned Table As Select cannot specify data type for " +
"the partition columns of the target table"))
}
test("Hive CTAS with dynamic partition") {
Seq("orc", "parquet").foreach { format =>
withTable("t") {
withSQLConf("hive.exec.dynamic.partition.mode" -> "nonstrict") {
spark.sql(
s"""
|CREATE TABLE t
|PARTITIONED BY (b)
|STORED AS $format
|AS SELECT 1 as a, "a" as b
""".stripMargin)
checkAnswer(spark.table("t"), Row(1, "a"))
assert(spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
.partitionColumnNames === Seq("b"))
}
}
}
}
test("Create Table LIKE STORED AS Hive Format") {
val catalog = spark.sessionState.catalog
withTable("s") {
sql("CREATE TABLE s(a INT, b INT) STORED AS ORC")
hiveFormats.foreach { tableType =>
val expectedSerde = HiveSerDe.sourceToSerDe(tableType)
withTable("t") {
sql(s"CREATE TABLE t LIKE s STORED AS $tableType")
val table = catalog.getTableMetadata(TableIdentifier("t"))
assert(table.provider == Some("hive"))
assert(table.storage.serde == expectedSerde.get.serde)
assert(table.storage.inputFormat == expectedSerde.get.inputFormat)
assert(table.storage.outputFormat == expectedSerde.get.outputFormat)
}
}
}
}
test("Create Table LIKE with specified TBLPROPERTIES") {
val catalog = spark.sessionState.catalog
withTable("s", "t") {
sql("CREATE TABLE s(a INT, b INT) USING hive TBLPROPERTIES('a'='apple')")
val source = catalog.getTableMetadata(TableIdentifier("s"))
assert(source.properties("a") == "apple")
sql("CREATE TABLE t LIKE s STORED AS parquet TBLPROPERTIES('f'='foo', 'b'='bar')")
val table = catalog.getTableMetadata(TableIdentifier("t"))
assert(table.properties.get("a") === None)
assert(table.properties("f") == "foo")
assert(table.properties("b") == "bar")
}
}
test("Create Table LIKE with row format") {
val catalog = spark.sessionState.catalog
withTable("sourceHiveTable", "sourceDsTable", "targetHiveTable1", "targetHiveTable2") {
sql("CREATE TABLE sourceHiveTable(a INT, b INT) STORED AS PARQUET")
sql("CREATE TABLE sourceDsTable(a INT, b INT) USING PARQUET")
// row format doesn't work in create targetDsTable
var e = intercept[AnalysisException] {
spark.sql(
"""
|CREATE TABLE targetDsTable LIKE sourceHiveTable USING PARQUET
|ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
""".stripMargin)
}.getMessage
assert(e.contains("'ROW FORMAT' must be used with 'STORED AS'"))
// row format doesn't work with provider hive
e = intercept[AnalysisException] {
spark.sql(
"""
|CREATE TABLE targetHiveTable LIKE sourceHiveTable USING hive
|ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
|WITH SERDEPROPERTIES ('test' = 'test')
""".stripMargin)
}.getMessage
assert(e.contains("'ROW FORMAT' must be used with 'STORED AS'"))
// row format doesn't work without 'STORED AS'
e = intercept[AnalysisException] {
spark.sql(
"""
|CREATE TABLE targetDsTable LIKE sourceDsTable
|ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
|WITH SERDEPROPERTIES ('test' = 'test')
""".stripMargin)
}.getMessage
assert(e.contains("'ROW FORMAT' must be used with 'STORED AS'"))
// row format works with STORED AS hive format (from hive table)
spark.sql(
"""
|CREATE TABLE targetHiveTable1 LIKE sourceHiveTable STORED AS PARQUET
|ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
|WITH SERDEPROPERTIES ('test' = 'test')
""".stripMargin)
var table = catalog.getTableMetadata(TableIdentifier("targetHiveTable1"))
assert(table.provider === Some("hive"))
assert(table.storage.inputFormat ===
Some("org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"))
assert(table.storage.serde === Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"))
assert(table.storage.properties("test") == "test")
// row format works with STORED AS hive format (from datasource table)
spark.sql(
"""
|CREATE TABLE targetHiveTable2 LIKE sourceDsTable STORED AS PARQUET
|ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
|WITH SERDEPROPERTIES ('test' = 'test')
""".stripMargin)
table = catalog.getTableMetadata(TableIdentifier("targetHiveTable2"))
assert(table.provider === Some("hive"))
assert(table.storage.inputFormat ===
Some("org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"))
assert(table.storage.serde === Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"))
assert(table.storage.properties("test") == "test")
}
}
test("SPARK-30098: create table without provider should " +
"use default data source under non-legacy mode") {
val catalog = spark.sessionState.catalog
withSQLConf(
SQLConf.LEGACY_CREATE_HIVE_TABLE_BY_DEFAULT_ENABLED.key -> "false") {
withTable("s") {
val defaultProvider = conf.defaultDataSourceName
sql("CREATE TABLE s(a INT, b INT)")
val table = catalog.getTableMetadata(TableIdentifier("s"))
assert(table.provider === Some(defaultProvider))
}
}
}
test("SPARK-30098: create table without provider should " +
"use hive under legacy mode") {
val catalog = spark.sessionState.catalog
withSQLConf(
SQLConf.LEGACY_CREATE_HIVE_TABLE_BY_DEFAULT_ENABLED.key -> "true") {
withTable("s") {
sql("CREATE TABLE s(a INT, b INT)")
val table = catalog.getTableMetadata(TableIdentifier("s"))
assert(table.provider === Some("hive"))
}
}
}
test("SPARK-30785: create table like a partitioned table") {
val catalog = spark.sessionState.catalog
withTable("sc_part", "ta_part") {
sql("CREATE TABLE sc_part (key string, ts int) USING parquet PARTITIONED BY (ts)")
sql("CREATE TABLE ta_part like sc_part")
val sourceTable = catalog.getTableMetadata(TableIdentifier("sc_part", Some("default")))
val targetTable = catalog.getTableMetadata(TableIdentifier("ta_part", Some("default")))
assert(sourceTable.tracksPartitionsInCatalog)
assert(targetTable.tracksPartitionsInCatalog)
assert(targetTable.partitionColumnNames == Seq("ts"))
sql("ALTER TABLE ta_part ADD PARTITION (ts=10)") // no exception
checkAnswer(sql("SHOW PARTITIONS ta_part"), Row("ts=10") :: Nil)
}
}
}
|
zuotingbing/spark
|
sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala
|
Scala
|
apache-2.0
| 109,723
|
object Hello {
  def main(args: Array[String]): Unit = {
    println("Hello World!")
  }
}
|
Com-Mean/Scala_for_the_Impatient
|
chapter5/hello.scala
|
Scala
|
gpl-3.0
| 94
|
package com.eevolution.context.dictionary.domain.api.service
import com.eevolution.context.dictionary.api
import com.eevolution.context.dictionary.domain.model.MigrationStep
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: emeris.hernandez@e-evolution.com, http://www.e-evolution.com , http://github.com/EmerisScala
* Created by emeris.hernandez@e-evolution.com , www.e-evolution.com on 10/11/17.
*/
/**
* Migration Step Service
*/
trait MigrationStepService extends api.Service[MigrationStep, Int] {
//Definition
}
|
adempiere/ADReactiveSystem
|
dictionary-api/src/main/scala/com/eevolution/context/dictionary/domain/api/service/MigrationStepService.scala
|
Scala
|
gpl-3.0
| 1,233
|
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.models.maskrcnn
import com.intel.analytics.bigdl.dllib.models.resnet.Utils.{TestParams, _}
import com.intel.analytics.bigdl.dllib.feature.transform.vision.image._
import com.intel.analytics.bigdl.dllib.feature.transform.vision.image.augmentation._
import com.intel.analytics.bigdl.dllib.utils.{T, Table}
import com.intel.analytics.bigdl.dllib.utils.{Engine, OptimizerV1, OptimizerV2}
import scopt.OptionParser
import com.intel.analytics.bigdl.dllib.feature.dataset.{DataSet, MiniBatch, segmentation}
import com.intel.analytics.bigdl.dllib.nn.Module
import com.intel.analytics.bigdl.dllib.optim.MeanAveragePrecision
import org.apache.spark.{SparkContext, rdd}
object Test {
case class TestParams(
folder: String = "./",
model: String = "",
batchSize: Int = 2,
partitionNum: Int = -1
)
val testParser = new OptionParser[TestParams]("BigDL Mask-RCNN on COCO Test Example") {
opt[String]('f', "folder")
.text("the location of COCO dataset")
.action((x, c) => c.copy(folder = x))
opt[String]('m', "model")
.text("the location of model snapshot")
.action((x, c) => c.copy(model = x))
opt[Int]('b', "batchSize")
.text("total batch size")
.action((x, c) => c.copy(batchSize = x))
opt[Int]('p', "partitionNum")
.text("partition number")
.action((x, c) => c.copy(partitionNum = x))
}
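  // Evaluation flow: read the COCO sequence files into ROI image features, resize, normalize and
  // batch them, load the saved Mask-RCNN module, and report COCO bounding-box and segmentation
  // mean average precision.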
def main(args: Array[String]): Unit = {
testParser.parse(args, TestParams()).foreach { param => {
val conf = Engine.createSparkConf().setAppName("Test MaskRCNN on COCO")
.set("spark.akka.frameSize", 64.toString)
.set("spark.task.maxFailures", "1")
val sc = new SparkContext(conf)
Engine.init
val partitionNum = if (param.partitionNum > 0) param.partitionNum
else Engine.nodeNumber() * Engine.coreNumber()
val rddData = DataSet.SeqFileFolder.filesToRoiImageFeatures(param.folder,
sc, Some(partitionNum))
.toDistributed().data(train = false)
val transformer = RoiImageFeatureToBatch.withResize(
sizeDivisible = 32,
batchSize = param.batchSize / Engine.nodeNumber(),
transformer =
PixelBytesToMat() ->
ScaleResize(minSize = 800, maxSize = 1333) ->
ChannelNormalize(122.7717f, 115.9465f, 102.9801f) ->
MatToTensor[Float](),
toRGB = false
)
val evaluationSet = transformer(rddData)
val model = Module.loadModule[Float](param.model)
val result = model.evaluate(evaluationSet,
Array(MeanAveragePrecision.cocoBBox(81), MeanAveragePrecision.cocoSegmentation(81)))
result.foreach(r => println(s"${r._2} is ${r._1}"))
sc.stop()
}}
}
}
|
intel-analytics/BigDL
|
scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/models/maskrcnn/Test.scala
|
Scala
|
apache-2.0
| 3,361
|
class Test {
// Any
override def getClass(): Class[_] = ??? // error
override def ==(that: Any): Boolean = ??? // error
override def != (that: Any): Boolean = ??? // error
override def ##(): Int = ??? // error
override def isInstanceOf[T0]: Boolean = ??? // error
override def asInstanceOf[T0]: T0 = ??? // error
// AnyRef
override def eq(that: AnyRef): Boolean = ??? // error
override def ne(that: AnyRef): Boolean = ??? // error
override def notify(): Unit = ??? // error
override def notifyAll(): Unit = ??? // error
override def wait(): Unit = ??? // error
override def wait(timeout: Long, nanos: Int): Unit = ??? // error
override def wait(timeout: Long): Unit = ??? // error
}
|
som-snytt/dotty
|
tests/neg/i3442.scala
|
Scala
|
apache-2.0
| 714
|
package edu.colorado.plv.cuanto.jsy
package mutation {
/** @group Abstract Syntax Nodes */
case object Assign extends Bop
/** @group Abstract Syntax Nodes */
case object Null extends Val
/** @group Intermediate AST Nodes */
case object Deref extends Uop
/** Address.
*
* @group Intermediate AST Nodes
*/
case class A private (a: Int) extends Val
}
/**
* @author Bor-Yuh Evan Chang
*/
package object mutation
|
cuplv/cuanto
|
src/main/scala/edu/colorado/plv/cuanto/jsy/mutation/package.scala
|
Scala
|
apache-2.0
| 448
|
package scalaz.stream.async.mutable
import scalaz.stream.Cause._
import scalaz.concurrent.{Actor, Strategy, Task}
import scalaz.stream.Process.{Cont, Halt}
import scalaz.stream._
import scalaz.stream.async.immutable
import scalaz.{-\\/, \\/, \\/-}
/**
 * Like a `Topic`, but additionally takes a `Writer1` that writes the state `W` or produces `O`
 * values from the `I` messages arriving at this topic.
*
*/
trait WriterTopic[W, I, O] {
/**
   * Gets a publisher to this writer topic. There may be multiple publishers to this writer topic.
*/
def publish: Sink[Task, I]
/**
   * Gets a subscriber to this writer topic. There may be multiple subscribers. A subscriber
   * subscribes when it is run and un-subscribes when it terminates.
   *
   * If this writer topic has a `W` written, that `W` will be the first value emitted to the
   * subscriber, followed by any `O` or `W` produced afterwards.
   * @return
*/
def subscribe: Writer[Task, W, O]
/**
* Subscribes to `O` values from this writer topic only.
*/
def subscribeO: Process[Task, O]
/** Subscribes to `W` values only from this Writer topic */
def subscribeW: Process[Task, W]
/**
   * Provides a signal of `W` values as they are emitted by the `Writer1` of this writer topic.
*/
def signal: scalaz.stream.async.immutable.Signal[W]
/**
* Publishes single `I` to this writer topic.
*/
def publishOne(i: I): Task[Unit]
/**
* Will `close` this writer topic. Once `closed` all publishers and subscribers are halted via `End`.
*
* @return
*/
def close: Task[Unit] = failWithCause(End)
/**
* Kills the writer topic. All subscribers and publishers are immediately terminated with `Kill` cause.
* @return
*/
def kill: Task[Unit] = failWithCause(Kill)
/**
* Will `fail` this writer topic. All subscribers and publishers are immediately terminated with `Error` cause.
*
   * The resulting task completes _after_ all publishers and subscribers have finished.
*
*/
def fail(err: Throwable): Task[Unit] = failWithCause(Error(err))
private[stream] def failWithCause(c:Cause): Task[Unit]
}
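/**
 * Implementation note: the factory below routes publishers, subscribers and the upstream `source`
 * through a single `Actor[M]`, which serialises every state change (the current `Writer1`, the
 * last `W`, and the set of subscriptions), so no further synchronisation is required.
 */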
private[stream] object WriterTopic {
def apply[W, I, O](writer: Writer1[W, I, O])(source: Process[Task, I], haltOnSource: Boolean)(implicit S: Strategy): WriterTopic[W, I, O] = {
import scalaz.stream.Util._
sealed trait M
case class Subscribe(sub: Subscription, cb: (Throwable \\/ Unit) => Unit) extends M
case class Ready(sub: Subscription, cb: (Throwable \\/ Seq[W \\/ O]) => Unit) extends M
case class UnSubscribe(sub: Subscription, cb: (Throwable \\/ Unit) => Unit) extends M
case class Upstream(result: Cause \\/ (Seq[I], Cont[Task,I])) extends M
case class Publish(is: Seq[I], cb: Throwable \\/ Unit => Unit) extends M
case class Fail(cause: Cause, cb: Throwable \\/ Unit => Unit) extends M
case class Get(cb:(Throwable \\/ Seq[W]) => Unit) extends M
class Subscription(var state: (Vector[W \\/ O]) \\/ ((Throwable \\/ Seq[W \\/ O]) => Unit)) {
def getOrAwait(cb: ((Throwable \\/ Seq[W \\/ O]) => Unit)): Unit = state match {
case -\\/(v) if v.isEmpty => state = \\/-(cb)
case -\\/(v) => state = -\\/(Vector.empty); S(cb(\\/-(v)))
case \\/-(_) => state = \\/-(cb) //impossible
}
def publish(wo: Seq[W \\/ O]): Unit = {
val ns = state
state match {
case -\\/(v) => state = -\\/(v fast_++ wo)
case \\/-(cb) => state = -\\/(Vector.empty); S(cb(\\/-(wo)))
}
}
def close(cause: Cause): Unit = state match {
case \\/-(cb) =>
state = -\\/(Vector.empty)
S(cb(-\\/(Terminated(cause))))
case _ => // no-op
}
def flushOrClose(cause: Cause, cb: (Throwable \\/ Seq[W \\/ O]) => Unit): Unit = state match {
case -\\/(v) if v.isEmpty => state = \\/-(cb); close(cause)
case -\\/(v) => getOrAwait(cb)
case \\/-(cb) => //impossible
}
}
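    // Each Subscription is either buffering not-yet-delivered values (Left) or holding a pending
    // callback that is waiting for the next chunk (Right).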
///////////////////////
// subscriptions to W \\/ O
var subscriptions: Vector[Subscription] = Vector.empty
    // callbacks for a single `W` that cannot be completed yet because no `W` has been produced
var awaitingW: Vector[(Throwable \\/ Seq[W]) => Unit] = Vector.empty
//last memorized `W`
var lastW: Option[W] = None
    // contains the reason for termination once the topic has been closed or failed
var closed: Option[Cause] = None
    // State of the upstream source: Right holds the interrupt callback while upstream is running,
    // Left holds the cause once upstream has stopped.
var upState: Option[Cause \\/ (EarlyCause => Unit)] = None
var w: Writer1[W, I, O] = writer
def fail(cause: Cause): Unit = {
closed = Some(cause)
upState.collect { case \\/-(interrupt) => interrupt(Kill)}
val (wos,next) = w.disconnect(cause.kill).unemit
w = next
if (wos.nonEmpty) subscriptions.foreach(_.publish(wos))
subscriptions.foreach(_.close(cause))
subscriptions = Vector.empty
awaitingW.foreach(cb=>S(cb(-\\/(cause.asThrowable))))
awaitingW = Vector.empty
}
def publish(is: Seq[I]) = {
process1.feed(is)(w).unemit match {
case (wos, next) =>
w = next
lastW = wos.collect({ case -\\/(w) => w }).lastOption orElse lastW
if (awaitingW.nonEmpty ) lastW.foreach { w =>
awaitingW.foreach(cb => S(cb(\\/-(Seq(w)))))
awaitingW = Vector.empty
}
if (wos.nonEmpty) subscriptions.foreach(_.publish(wos))
next match {
case hlt@Halt(rsn) => fail(rsn)
case _ => //no-op
}
}
}
var actor: Actor[M] = null
def getNext(p: Process[Task, I]) = {
upState= Some(\\/-(
p.stepAsync({ actor ! Upstream(_) })(S)
))
}
def startIfNotYet =
if (upState.isEmpty) {
publish(Nil) // causes un-emit of first `w`
getNext(source)
}
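    // All mutations of the mutable state above go through this single actor. While the topic is
    // open it handles Subscribe/UnSubscribe, Ready (a subscriber requesting the next chunk),
    // Get (signal reads of the last `W`), Upstream results, Publish and Fail; once `closed` is
    // set, the second handler answers callbacks based on the termination cause instead.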
actor = Actor[M](m => {
closed.fold(m match {
case Subscribe(sub, cb) =>
subscriptions = subscriptions :+ sub
lastW.foreach(w => sub.publish(Seq(-\\/(w))))
S(cb(\\/-(())))
startIfNotYet
case UnSubscribe(sub, cb) =>
subscriptions = subscriptions.filterNot(_ == sub)
S(cb(\\/-(())))
case Ready(sub, cb) =>
sub.getOrAwait(cb)
case Get(cb) =>
startIfNotYet
lastW match {
case Some(w) => S(cb(\\/-(Seq(w))))
case None => awaitingW = awaitingW :+ cb
}
case Upstream(-\\/(rsn)) =>
if (haltOnSource || rsn != End) fail(rsn)
upState = Some(-\\/(rsn))
case Upstream(\\/-((is, cont))) =>
publish(is)
getNext(Util.Try(cont.continue))
case Publish(is, cb) =>
publish(is)
S(cb(\\/-(())))
case Fail(rsn, cb) =>
fail(rsn)
upState.collect { case \\/-(interrupt) => interrupt(Kill) }
S(cb(\\/-(())))
})(rsn => m match {
case Subscribe(_, cb) => S(cb(-\\/(rsn.asThrowable)))
case UnSubscribe(_, cb) => S(cb(\\/-(())))
case Ready(sub, cb) => sub.flushOrClose(rsn,cb)
case Get(cb) => S(cb(-\\/(rsn.asThrowable)))
case Publish(_, cb) => S(cb(-\\/(rsn.asThrowable)))
case Fail(_, cb) => S(cb(\\/-(())))
case Upstream(-\\/(_)) => //no-op
case Upstream(\\/-((_, cont))) => S((Halt(Kill) +: cont) stepAsync { _ => () })
})
})(S)
new WriterTopic[W, I, O] {
def publish: Sink[Task, I] = Process.constant(publishOne _)
def publishOne(i: I): Task[Unit] = Task.async { cb => actor ! Publish(Seq(i), cb) }
def failWithCause(cause: Cause): Task[Unit] = Task.async { cb => actor ! Fail(cause, cb) }
def subscribe: Writer[Task, W, O] = Process.suspend {
val subscription = new Subscription(-\\/(Vector.empty[W \\/ O]))
val register = Task.async[Unit] { cb => actor ! Subscribe(subscription, cb) }
val unRegister = Task.async[Unit] { cb => actor ! UnSubscribe(subscription, cb) }
val oneChunk = Task.async[Seq[W \\/ O]] { cb => actor ! Ready(subscription, cb) }
(Process.eval_(register) ++ Process.repeatEval(oneChunk).flatMap(Process.emitAll)).onHalt(_.asHalt)
.onComplete(Process.eval_(unRegister))
}
def subscribeO: Process[Task, O] = subscribe.collect { case \\/-(o) => o }
def subscribeW: Process[Task, W] = subscribe.collect { case -\\/(w) => w }
def signal: immutable.Signal[W] = new immutable.Signal[W] {
def changes: Process[Task, Unit] = discrete.map(_=>())
def continuous: Process[Task, W] =
Process.repeatEval(Task.async[Seq[W]](cb => actor ! Get(cb))).onHalt(_.asHalt)
.flatMap(Process.emitAll)
def discrete: Process[Task, W] = subscribeW
def changed: Process[Task, Boolean] =
discrete.map(_ => true)
.wye(Process.repeatEval(Task.now(false)))(wye.mergeHaltL)
}
}
}
}
|
shawjef3/scalaz-stream
|
src/main/scala/scalaz/stream/async/mutable/WriterTopic.scala
|
Scala
|
mit
| 9,023
|
/*
* Copyright 2015 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600a.v3
import uk.gov.hmrc.ct.box.{Calculated, CtBoxIdentifier, CtOptionalInteger}
import uk.gov.hmrc.ct.ct600.v3.calculations.LoansToParticipatorsCalculator
import uk.gov.hmrc.ct.ct600a.v3.retriever.CT600ABoxRetriever
case class A75(value: Option[Int]) extends CtBoxIdentifier(name = "A75 - Total of all loans outstanding at end of return period - including all loans outstanding at the end of the return period, whether they were made in this period or an earlier one")
with CtOptionalInteger
object A75 extends Calculated[A75, CT600ABoxRetriever] with LoansToParticipatorsCalculator {
override def calculate(fieldValueRetriever: CT600ABoxRetriever): A75 = {
calculateA75(fieldValueRetriever.retrieveA15(), fieldValueRetriever.retrieveLP04(),
fieldValueRetriever.retrieveA40(), fieldValueRetriever.retrieveA65())
}
}
|
keithhall/ct-calculations
|
src/main/scala/uk/gov/hmrc/ct/ct600a/v3/A75.scala
|
Scala
|
apache-2.0
| 1,460
|
package uima.cpe
import java.io.{BufferedReader, File, InputStreamReader}
import modules.converter.QALabDataset2MultiLingualQACorpusConverter
import modules.ir.fulltext.indri.en.{EnglishIndriContentWordLevelIndexer, EnglishIndriTokenLevelIndexer}
import modules.ir.fulltext.indri.ja.{JapaneseIndriCharactereLevelIndexer, JapaneseIndriContentWordLevelIndexer}
import modules.text.vector.wordembedding.fastText.en.EnglishFastTextVectorGenerator
import modules.text.vector.wordembedding.fastText.ja.JapaneseFastTextVectorGenerator
import org.apache.uima.UIMAFramework
import org.apache.uima.cas.CAS
import org.apache.uima.collection._
import org.apache.uima.collection.metadata.CpeDescription
import org.apache.uima.util.XMLInputSource
import org.kohsuke.args4j.{CmdLineException, CmdLineParser}
import uima.fc._
import us.feliscat.text.StringOption
import util.Config
import scala.util.control.Breaks
/**
* @author K.Sakamoto
* Created on 15/10/30
*/
object CPERunner extends Thread {
private var cpeOption = Option.empty[CollectionProcessingEngine]
private var startTimeOption = Option.empty[Long]
private var initCompleteTimeOption = Option.empty[Long]
private def startPoint(code: StringOption): IntermediatePoint = {
IntermediatePoint.get(code, IntermediatePoint.QuestionReader)
}
private def endPoint(code: StringOption): IntermediatePoint = {
IntermediatePoint.get(code, IntermediatePoint.AnswerEvaluator)
}
private def preProcess(option: CPERunnerOption): Unit = {
if (option.getDoCharacterLevelIndriIndexInJapanese) {
Config.doCharacterLevelIndriIndexAsPreprocessInJapanese = true
}
if (option.getDoContentWordLevelIndriIndexInJapanese) {
Config.doContentWordLevelIndriIndexAsPreprocessInJapanese = true
}
if (option.getDoTokenLevelIndriIndexInEnglish) {
Config.doTokenLevelIndriIndexAsPreprocessInEnglish = true
}
if (option.getDoContentWordLevelIndriIndexInEnglish) {
Config.doContentWordLevelIndriIndexAsPreprocessInEnglish = true
}
if (option.getDoFastText) {
Config.doFastTestAsPreprocess = true
}
if (option.wantToOutputForQALabExtractionSubtask) {
Config.wantToOutputForQALabExtractionSubtask = true
}
if (option.wantToOutputForQALabSummarizationSubtask) {
Config.wantToOutputForQALabSummarizationSubtask = true
}
if (option.wantToOutputForQALabEvaluationMethodSubtask) {
Config.wantToOutputForQALabEvaluationMethodSubtask = true
}
if (Config.doCharacterLevelIndriIndexAsPreprocessInJapanese) {
JapaneseIndriCharactereLevelIndexer.run()
}
if (Config.doContentWordLevelIndriIndexAsPreprocessInJapanese) {
JapaneseIndriContentWordLevelIndexer.run()
}
if (Config.doTokenLevelIndriIndexAsPreprocessInEnglish) {
EnglishIndriTokenLevelIndexer.run()
}
if (Config.doContentWordLevelIndriIndexAsPreprocessInEnglish) {
EnglishIndriContentWordLevelIndexer.run()
}
if (Config.doFastTestAsPreprocess) {
EnglishFastTextVectorGenerator.main(Array.empty[String])
JapaneseFastTextVectorGenerator.main(Array.empty[String])
}
QALabDataset2MultiLingualQACorpusConverter.convert()
}
@throws[Exception]
def main(args: Array[String]): Unit = {
val option = new CPERunnerOption()
if (args.nonEmpty) {
val parser = new CmdLineParser(option)
try {
parser.parseArgument(args: _*)
} catch {
case e: CmdLineException =>
parser.printUsage(System.out)
e.printStackTrace()
System.exit(0)
}
}
preProcess(option)
startTimeOption = Option(System.nanoTime)
println(">> Collection Processing Engine Processing")
val startPointOpt = StringOption(option.getStartPoint)
val startPointValue: IntermediatePoint = startPoint(startPointOpt)
val endPointOpt = StringOption(option.getEndPoint)
val endPointValue: IntermediatePoint = endPoint(endPointOpt)
if (endPointValue.id < startPointValue.id) {
System.err.println("Error: The start point was after the end point. They must be in regular order.")
System.exit(1)
}
// Flow Controller
println(">> Flow Controller Initializing")
//val flowController = FlowController()
FlowController.clear()
def needFlow(intermediatePoint: IntermediatePoint): Boolean = {
startPointValue.id <= intermediatePoint.id && intermediatePoint.id <= endPointValue.id
}
val needQuestionAnalyzer: Boolean = needFlow(IntermediatePoint.QuestionAnalyzer)
val needInformationRetriever: Boolean = needFlow(IntermediatePoint.InformationRetriever)
val needAnswerGenerator: Boolean = needFlow(IntermediatePoint.AnswerGenerator)
val needAnswerWriter: Boolean = needFlow(IntermediatePoint.AnswerWriter)
val needAnswerEvaluator: Boolean = needFlow(IntermediatePoint.AnswerEvaluator)
//val questionAnalyzerFlowController = QuestionAnalyzerFlowController()
QuestionAnalyzerFlowController.clear()
//val informationRetrieverFlowController = InformationRetrieverFlowController()
InformationRetrieverFlowController.clear()
//val answerGeneratorFlowController = AnswerGeneratorFlowController()
AnswerGeneratorFlowController.clear()
if (needQuestionAnalyzer) {
println(s">> ${IntermediatePoint.QuestionAnalyzer.name} Flow Controller Initializing")
FlowController.setAnalysisEngine(IntermediatePoint.QuestionAnalyzer.descriptor.get)
QuestionAnalyzerFlowController.setAnalysisEngine(IntermediatePoint.QuestionAnalyzer.primitiveDescriptor.get)
}
if (needInformationRetriever) {
println(s">> ${IntermediatePoint.InformationRetriever.name} Flow Controller Initializing")
FlowController.setAnalysisEngine(IntermediatePoint.InformationRetriever.descriptor.get)
InformationRetrieverFlowController.setAnalysisEngine(IntermediatePoint.InformationRetriever.primitiveDescriptor.get)
if (Config.wantToOutputForQALabExtractionSubtask) {
InformationRetrieverFlowController.setAnalysisEngine("qalabExtractionSubtaskCCDescriptor")
}
}
if (needAnswerGenerator || needAnswerWriter || needAnswerEvaluator) {
FlowController.setAnalysisEngine(IntermediatePoint.AnswerGenerator.descriptor.get)
if (needFlow(IntermediatePoint.AnswerGenerator)) {
AnswerGeneratorFlowController.setAnalysisEngine(IntermediatePoint.AnswerGenerator.primitiveDescriptor.get)
}
if (Config.wantToOutputForQALabSummarizationSubtask) {
AnswerGeneratorFlowController.setAnalysisEngine("qalabSummarizationSubtaskCCDescriptor")
}
if (Config.wantToOutputForQALabEvaluationMethodSubtask) {
AnswerGeneratorFlowController.setAnalysisEngine("qalabEvaluationMethodSubtaskCCDescriptor")
}
if (needFlow(IntermediatePoint.AnswerWriter)) {
AnswerGeneratorFlowController.setAnalysisEngine(IntermediatePoint.AnswerWriter.descriptor.get)
}
if (needFlow(IntermediatePoint.AnswerEvaluator)) {
AnswerGeneratorFlowController.setAnalysisEngine(IntermediatePoint.AnswerEvaluator.descriptor.get)
}
}
val gzipXmiCasConsumerDescriptor: String = "gzipXmiCasConsumerDescriptor"
if (option.unSave != "all") {
val unSavedStates: Array[String] = option.unSave.split(',').map(_.trim.toLowerCase)
if (
FlowController.hasAnalysisEngine(IntermediatePoint.QuestionAnalyzer.descriptor.get) &&
(!(
unSavedStates.contains(IntermediatePoint.QuestionAnalyzer.code) ||
unSavedStates.contains(IntermediatePoint.QuestionAnalyzer.name.toLowerCase)
))
) {
val index: Int = QuestionAnalyzerFlowController.indexOf(IntermediatePoint.QuestionAnalyzer.primitiveDescriptor.get) + 1
QuestionAnalyzerFlowController.insert(index, gzipXmiCasConsumerDescriptor)
}
if (
FlowController.hasAnalysisEngine(IntermediatePoint.InformationRetriever.descriptor.get) &&
(!(
unSavedStates.contains(IntermediatePoint.InformationRetriever.code) ||
unSavedStates.contains(IntermediatePoint.InformationRetriever.name.toLowerCase)
))
) {
val index: Int = InformationRetrieverFlowController.indexOf(IntermediatePoint.InformationRetriever.primitiveDescriptor.get) + 1
InformationRetrieverFlowController.insert(index, gzipXmiCasConsumerDescriptor)
}
if (
FlowController.hasAnalysisEngine(IntermediatePoint.AnswerGenerator.descriptor.get) &&
(!(
unSavedStates.contains(IntermediatePoint.AnswerGenerator.code) ||
unSavedStates.contains(IntermediatePoint.AnswerGenerator.name.toLowerCase)
))
) {
val index: Int = AnswerGeneratorFlowController.indexOf(IntermediatePoint.AnswerGenerator.primitiveDescriptor.get) + 1
AnswerGeneratorFlowController.insert(index, gzipXmiCasConsumerDescriptor)
}
}
val useIntermediatePoint: Boolean = {
startPointValue match {
case IntermediatePoint.InformationRetriever |
IntermediatePoint.AnswerGenerator |
IntermediatePoint.AnswerWriter |
IntermediatePoint.AnswerEvaluator =>
true
case _ =>
false
}
}
val cpeDescriptor: String = raw"cpe${if (useIntermediatePoint) "FromIntermediatePoint" else ""}Descriptor.xml"
print(
s"""Collection Processing Engine:
|* $cpeDescriptor
|""".stripMargin)
FlowController.printAnalysisEngines()
QuestionAnalyzerFlowController.printAnalysisEngines()
InformationRetrieverFlowController.printAnalysisEngines()
AnswerGeneratorFlowController.printAnalysisEngines()
val filePath: String = {
new File(
raw"src/main/resources/desc/cpe/$cpeDescriptor"
).toPath.toAbsolutePath.toString
}
val xmlInputSource = new XMLInputSource(filePath)
val cpeDesc: CpeDescription = UIMAFramework.getXMLParser.parseCpeDescription(xmlInputSource)
cpeOption = Option(UIMAFramework.produceCollectionProcessingEngine(cpeDesc))
cpeOption match {
case Some(cpe) =>
/*
val casProcessors: Array[CasProcessor] = cpe.getCasProcessors
val aae: AnalysisEngine = casProcessors(0).asInstanceOf[AnalysisEngine]
val essayEvaluator: CasConsumer = casProcessors(1).asInstanceOf[CasConsumer]
*/
if (useIntermediatePoint) {
val collectionReader: CollectionReader = cpe.getCollectionReader.asInstanceOf[CollectionReader]
val code: String = startPointValue match {
case IntermediatePoint.InformationRetriever =>
IntermediatePoint.QuestionAnalyzer.code
case IntermediatePoint.AnswerGenerator =>
IntermediatePoint.InformationRetriever.code
case IntermediatePoint.AnswerWriter | IntermediatePoint.AnswerEvaluator =>
IntermediatePoint.AnswerGenerator.code
case _ =>
IntermediatePoint.QuestionAnalyzer.code
}
collectionReader.setConfigParameterValue(
"InputDirectory",
s"out/xmi/$code"
)
println(">> Collection Reader Reconfiguration Started")
collectionReader.reconfigure()
}
val statusCallbackListener = new StatusCallbackListenerImpl()
cpe.addStatusCallbackListener(statusCallbackListener)
cpe.process()
val loop = new Breaks()
loop.breakable {
Iterator.continually(Option(
new BufferedReader(
new InputStreamReader(
System.in)).readLine)) foreach {
line: Option[String] =>
if (line.isDefined && (line.get == "abort") && cpe.isProcessing) {
println("Aborting...")
cpe.stop()
loop.break()
}
}
}
case None =>
//Do nothing
}
}
class StatusCallbackListenerImpl extends StatusCallbackListener {
private var entityCount: Int = 0
private var size: Int = 0
override def entityProcessComplete(aCAS: CAS, aStatus: EntityProcessStatus): Unit = {
if (aStatus.isException) {
val exceptions: java.util.List[Exception] = aStatus.getExceptions
for (i <- 0 until exceptions.size) {
exceptions.get(i).asInstanceOf[Throwable].printStackTrace()
}
return
}
entityCount += 1
val docText: Option[String] = Option(aCAS.getDocumentText)
docText match {
case Some(dt) =>
size += dt.codePointCount(0, dt.length)
case None =>
//Do nothing
}
}
override def resumed(): Unit = {
println("Resumed")
}
override def initializationComplete(): Unit = {
println("Collection Processing Management Initialization Complete")
initCompleteTimeOption = Option(System.nanoTime)
}
override def paused(): Unit = {
println("Paused")
}
private def printNumberOfDocumentsAndCharacters(): Unit = {
printf("Completed %d documents", entityCount)
if (0 < size) {
printf("; %d characters", size)
}
println()
}
override def collectionProcessComplete(): Unit = {
val time = Option[Long](System.nanoTime)
printNumberOfDocumentsAndCharacters()
val initTime: Long = initCompleteTimeOption.getOrElse(0L) - startTimeOption.getOrElse(0L)
val processingTime: Long = time.getOrElse(0L) - initCompleteTimeOption.getOrElse(0L)
val elapsedTime: Long = initTime + processingTime
print(
s"""Total Time Elapsed: $elapsedTime nano seconds
|Initialization Time: $initTime nano seconds
|Processing Time: $processingTime nano seconds
|""".stripMargin)
cpeOption match {
case Some(cpe) =>
print(
s"""
|
| ------------------ PERFORMANCE REPORT ------------------
|
|${cpe.getPerformanceReport.toString}
|""".stripMargin)
case None =>
//Do nothing
}
// exit safely
System.exit(0)
}
override def batchProcessComplete(): Unit = {
printNumberOfDocumentsAndCharacters()
printf("Time Elapsed: %d nano seconds%n", System.nanoTime - startTimeOption.getOrElse(0L))
// exit safely
System.exit(0)
}
override def aborted(): Unit = {
println("Aborted")
System.exit(1)
}
}
}
|
ktr-skmt/FelisCatusZero-multilingual
|
src/main/scala/uima/cpe/CPERunner.scala
|
Scala
|
apache-2.0
| 14,602
|
package cz.senkadam.gatlingsql.actions
import akka.actor.{ActorRef, Props}
import cz.senkadam.gatlingsql.requests.SqlRequestBuilder
import io.gatling.core.action.builder.ActionBuilder
import io.gatling.core.config.Protocols
/**
* Created by senk on 5.1.15.
*/
class SqlActionBuilder(requestName: String, requestBuilder: SqlRequestBuilder) extends ActionBuilder {
/**
 * Hooks this builder into the action chain: creates a new instance of the builder with a new
 * next action point.
*/
def withNext(next: ActorRef) = new SqlActionBuilder(requestName, requestBuilder)
/**
 * Constructs new SqlActions and wires them up with the actor system.
*/
def build(next: ActorRef, protocolConfigurationRegistry: Protocols) = {
system.actorOf(Props(new SqlAction(requestName, next, requestBuilder) with ConnectionReuse))
}
}
|
veraicon/gatlingsql
|
src/main/scala/cz/senkadam/gatlingsql/actions/SqlActionBuilder.scala
|
Scala
|
apache-2.0
| 840
|
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding.typed
import java.io.Serializable
import com.twitter.algebird.{ Semigroup, Monoid, Ring, Aggregator }
import com.twitter.scalding.TupleConverter.{ singleConverter, tuple2Converter, CTupleConverter, TupleEntryConverter }
import com.twitter.scalding.TupleSetter.{ singleSetter, tup2Setter }
import com.twitter.scalding._
import cascading.flow.FlowDef
import cascading.pipe.Pipe
import cascading.tap.Tap
import cascading.tuple.{ Fields, Tuple => CTuple, TupleEntry }
import util.Random
import scala.concurrent.Future
/**
* factory methods for TypedPipe, which is the typed representation of distributed lists in scalding.
* This object is here rather than in the typed package because a lot of code was written using
* the functions in the object, which we do not see how to hide with package object tricks.
*/
object TypedPipe extends Serializable {
import Dsl.flowDefToRichFlowDef
def from[T](pipe: Pipe, fields: Fields)(implicit flowDef: FlowDef, mode: Mode, conv: TupleConverter[T]): TypedPipe[T] = {
val localFlow = flowDef.onlyUpstreamFrom(pipe)
new TypedPipeInst[T](pipe, fields, localFlow, mode, Converter(conv))
}
def from[T](source: TypedSource[T]): TypedPipe[T] =
TypedPipeFactory({ (fd, mode) =>
val pipe = source.read(fd, mode)
from(pipe, source.sourceFields)(fd, mode, source.converter)
})
// It might pay to use a view here, but you should experiment
def from[T](iter: Iterable[T]): TypedPipe[T] =
IterablePipe[T](iter)
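  // Editor's note, not part of the original scalding source: a minimal, hedged sketch of the
  // in-memory factory above; the value names are hypothetical. The Pipe-based overload instead
  // needs an implicit FlowDef, Mode and TupleConverter in scope.
  //
  //   val words: TypedPipe[String] = TypedPipe.from(List("apple", "banana", "apple"))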
/** Input must be a Pipe with exactly one Field */
def fromSingleField[T](pipe: Pipe)(implicit fd: FlowDef, mode: Mode): TypedPipe[T] =
from(pipe, new Fields(0))(fd, mode, singleConverter[T])
def empty: TypedPipe[Nothing] = EmptyTypedPipe
/*
* This enables pipe.hashJoin(that) or pipe.join(that) syntax
* This is a safe enrichment because hashJoinable and CoGroupable are
* only used in the argument position or to give cogroup, join, leftJoin, rightJoin, outerJoin
* methods. Since those methods are unlikely to be used on TypedPipe in the future, this
* enrichment seems safe.
*
* This method is the Vitaly-was-right method.
*/
implicit def toHashJoinable[K, V](pipe: TypedPipe[(K, V)])(implicit ord: Ordering[K]): HashJoinable[K, V] =
new HashJoinable[K, V] {
def mapped = pipe
def keyOrdering = ord
def reducers = None
def joinFunction = CoGroupable.castingJoinFunction[V]
}
}
/**
* Think of a TypedPipe as a distributed unordered list that may or may not yet
 * have been materialized in memory or on disk.
*
* Represents a phase in a distributed computation on an input data source
* Wraps a cascading Pipe object, and holds the transformation done up until that point
*/
trait TypedPipe[+T] extends Serializable {
// Implements a cross product. The right side should be tiny
def cross[U](tiny: TypedPipe[U]): TypedPipe[(T, U)]
def flatMap[U](f: T => TraversableOnce[U]): TypedPipe[U]
/**
* Export back to a raw cascading Pipe. useful for interop with the scalding
* Fields API or with Cascading code.
*/
def toPipe[U >: T](fieldNames: Fields)(implicit flowDef: FlowDef, mode: Mode, setter: TupleSetter[U]): Pipe
/////////////////////////////////////////////
//
// The following have default implementations in terms of the above
//
/////////////////////////////////////////////
import Dsl._
/**
* Merge two TypedPipes (no order is guaranteed)
* This is only realized when a group (or join) is
* performed.
*/
def ++[U >: T](other: TypedPipe[U]): TypedPipe[U] = other match {
case EmptyTypedPipe => this
case IterablePipe(thatIter) if thatIter.isEmpty => this
case _ => MergedTypedPipe(this, other)
}
/**
* Same as groupAll.aggregate.values
*/
def aggregate[B, C](agg: Aggregator[T, B, C]): ValuePipe[C] =
ComputedValue(groupAll.aggregate(agg).values)
/**
* Put the items in this into the keys, and unit as the value in a Group
* in some sense, this is the dual of groupAll
*/
@annotation.implicitNotFound(msg = "For asKeys method to work, the type in TypedPipe must have an Ordering.")
def asKeys[U >: T](implicit ord: Ordering[U]): Grouped[U, Unit] =
map((_, ())).group
/**
* If T <:< U, then this is safe to treat as TypedPipe[U] due to covariance
*/
protected def raiseTo[U](implicit ev: T <:< U): TypedPipe[U] =
this.asInstanceOf[TypedPipe[U]]
/**
* Filter and map. See scala.collection.List.collect.
* {@code
* collect { case Some(x) => fn(x) }
* }
*/
def collect[U](fn: PartialFunction[T, U]): TypedPipe[U] =
filter(fn.isDefinedAt(_)).map(fn)
/**
* Attach a ValuePipe to each element this TypedPipe
*/
def cross[V](p: ValuePipe[V]): TypedPipe[(T, V)] =
p match {
case EmptyValue => EmptyTypedPipe
case LiteralValue(v) => map { (_, v) }
case ComputedValue(pipe) => cross(pipe)
}
/** prints the current pipe to stdout */
def debug: TypedPipe[T] = onRawSingle(_.debug)
/**
* Returns the set of distinct elements in the TypedPipe
*/
@annotation.implicitNotFound(msg = "For distinct method to work, the type in TypedPipe must have an Ordering.")
def distinct(implicit ord: Ordering[_ >: T]): TypedPipe[T] =
asKeys(ord.asInstanceOf[Ordering[T]]).sum.keys
/**
* Returns the set of distinct elements identified by a given lambda extractor in the TypedPipe
*/
@annotation.implicitNotFound(msg = "For distinctBy method to work, the type to distinct on in the TypedPipe must have an Ordering.")
def distinctBy[U](fn: T => U, numReducers: Option[Int] = None)(implicit ord: Ordering[_ >: U]): TypedPipe[T] = {
// cast because Ordering is not contravariant, but should be (and this cast is safe)
implicit val ordT: Ordering[U] = ord.asInstanceOf[Ordering[U]]
    // Semigroup to handle duplicates: values for a given key might differ, so keep the last one seen.
implicit val sg = new Semigroup[T] {
def plus(a: T, b: T) = b
}
val op = map{ tup => (fn(tup), tup) }.sumByKey
val reduced = numReducers match {
case Some(red) => op.withReducers(red)
case None => op
}
reduced.map(_._2)
}
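  // Editor's sketch, not in the original source: distinctBy keeps one element per extracted key,
  // with an arbitrary representative chosen among duplicates (the Semigroup above keeps the
  // right-hand value). The Event type below is hypothetical.
  //
  //   case class Event(user: String, url: String)
  //   val events: TypedPipe[Event] = TypedPipe.from(List(Event("alice", "/a"), Event("alice", "/b")))
  //   val onePerUser: TypedPipe[Event] = events.distinctBy(_.user)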
/** Merge two TypedPipes of different types by using Either */
def either[R](that: TypedPipe[R]): TypedPipe[Either[T, R]] =
map(Left(_)) ++ (that.map(Right(_)))
/**
* Sometimes useful for implementing custom joins with groupBy + mapValueStream when you know
* that the value/key can fit in memory. Beware.
*/
def eitherValues[K, V, R](that: TypedPipe[(K, R)])(implicit ev: T <:< (K, V)): TypedPipe[(K, Either[V, R])] =
mapValues { (v: V) => Left(v) } ++ (that.mapValues { (r: R) => Right(r) })
/**
* If you are going to create two branches or forks,
* it may be more efficient to call this method first
* which will create a node in the cascading graph.
* Without this, both full branches of the fork will be
* put into separate cascading pipes, which can, in some cases,
* be slower.
*
* Ideally the planner would see this
*/
def fork: TypedPipe[T] = onRawSingle(identity)
/**
   * Limit the output to at most count items.
   * Useful for debugging, but probably that's about it.
   * The number of items kept may be less than count, and they are not chosen by any particular sampling method.
*/
def limit(count: Int): TypedPipe[T] = onRawSingle(_.limit(count))
/** Transform each element via the function f */
def map[U](f: T => U): TypedPipe[U] = flatMap { t => Iterator(f(t)) }
/** Transform only the values (sometimes requires giving the types due to scala type inference) */
def mapValues[K, V, U](f: V => U)(implicit ev: T <:< (K, V)): TypedPipe[(K, U)] =
raiseTo[(K, V)].map { case (k, v) => (k, f(v)) }
/**
* Keep only items that satisfy this predicate
*/
def filter(f: T => Boolean): TypedPipe[T] =
flatMap { t => if (f(t)) Iterator(t) else Iterator.empty }
/**
* If T is a (K, V) for some V, then we can use this function to filter.
* This is here to match the function in KeyedListLike, where it is optimized
*/
def filterKeys[K](fn: K => Boolean)(implicit ev: T <:< (K, Any)): TypedPipe[T] =
filter { ka => fn(ka.asInstanceOf[(K, Any)]._1) }
/**
* Keep only items that don't satisfy the predicate.
* `filterNot` is the same as `filter` with a negated predicate.
*/
def filterNot(f: T => Boolean): TypedPipe[T] =
filter(!f(_))
/** flatten an Iterable */
def flatten[U](implicit ev: T <:< TraversableOnce[U]): TypedPipe[U] =
flatMap { _.asInstanceOf[TraversableOnce[U]] } // don't use ev which may not be serializable
/**
* flatten just the values
   * This is more useful on KeyedListLike, but added here to reduce asymmetry in the APIs
*/
def flattenValues[K, U](implicit ev: T <:< (K, TraversableOnce[U])): TypedPipe[(K, U)] =
raiseTo[(K, TraversableOnce[U])].flatMap { case (k, us) => us.map((k, _)) }
protected def onRawSingle(onPipe: Pipe => Pipe): TypedPipe[T] = {
val self = this
TypedPipeFactory({ (fd, m) =>
val pipe = self.toPipe[T](new Fields(java.lang.Integer.valueOf(0)))(fd, m, singleSetter)
TypedPipe.fromSingleField[T](onPipe(pipe))(fd, m)
})
}
/**
* Force a materialization of this pipe prior to the next operation.
* This is useful if you filter almost everything before a hashJoin, for instance.
*/
def forceToDisk: TypedPipe[T] = onRawSingle(_.forceToDisk)
/**
* This is the default means of grouping all pairs with the same key. Generally this triggers 1 Map/Reduce transition
*/
def group[K, V](implicit ev: <:<[T, (K, V)], ord: Ordering[K]): Grouped[K, V] =
//If the type of T is not (K,V), then at compile time, this will fail. It uses implicits to do
//a compile time check that one type is equivalent to another. If T is not (K,V), we can't
//automatically group. We cast because it is safe to do so, and we need to convert to K,V, but
//the ev is not needed for the cast. In fact, you can do the cast with ev(t) and it will return
//it as (K,V), but the problem is, ev is not serializable. So we do the cast, which due to ev
//being present, will always pass.
Grouped(raiseTo[(K, V)])
/** Send all items to a single reducer */
def groupAll: Grouped[Unit, T] = groupBy(x => ()).withReducers(1)
/** Given a key function, add the key, then call .group */
def groupBy[K](g: T => K)(implicit ord: Ordering[K]): Grouped[K, T] =
map { t => (g(t), t) }.group
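  // Editor's sketch, not in the original source: a hypothetical word count built from the
  // grouping primitives above; sum comes from the Grouped/KeyedListLike API.
  //
  //   val lines: TypedPipe[String] = TypedPipe.from(List("a b", "a"))
  //   val counts = lines.flatMap(_.split("\\s+")).map(w => (w, 1L)).group.sum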
/**
* Forces a shuffle by randomly assigning each item into one
* of the partitions.
*
   * This is for the case where your mappers take a long time, and
* it is faster to shuffle them to more reducers and then operate.
*
* You probably want shard if you are just forcing a shuffle.
*/
def groupRandomly(partitions: Int): Grouped[Int, T] = {
// Make it lazy so all mappers get their own:
lazy val rng = new java.util.Random(123) // seed this so it is repeatable
groupBy { _ => rng.nextInt(partitions) }
.withReducers(partitions)
}
private[this] def defaultSeed: Long = System.identityHashCode(this) * 2654435761L ^ System.currentTimeMillis
def sample(percent: Double): TypedPipe[T] = sample(percent, defaultSeed)
def sample(percent: Double, seed: Long): TypedPipe[T] = {
// Make sure to fix the seed, otherwise restarts cause subtle errors
val rand = new Random(seed)
filter(_ => rand.nextDouble < percent)
}
/**
* This does a sum of values WITHOUT triggering a shuffle.
* the contract is, if followed by a group.sum the result is the same
* with or without this present, and it never increases the number of
* items. BUT due to the cost of caching, it might not be faster if
* there is poor key locality.
*
* It is only useful for expert tuning,
* and best avoided unless you are struggling with performance problems.
* If you are not sure you need this, you probably don't.
*
* The main use case is to reduce the values down before a key expansion
* such as is often done in a data cube.
*/
def sumByLocalKeys[K, V](implicit ev: T <:< (K, V), sg: Semigroup[V]): TypedPipe[(K, V)] = {
val fields: Fields = ('key, 'value)
val selfKV = raiseTo[(K, V)]
TypedPipeFactory({ (fd, mode) =>
val pipe = selfKV.toPipe(fields)(fd, mode, tup2Setter)
val msr = new MapsideReduce(sg, 'key, 'value, None)(singleConverter[V], singleSetter[V])
TypedPipe.from[(K, V)](pipe.eachTo(fields -> fields) { _ => msr }, fields)(fd, mode, tuple2Converter)
})
}
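  // Editor's sketch, not in the original source: sumByLocalKeys pre-aggregates on the map side,
  // so following it with a keyed sum gives the same result as sumByKey alone while usually
  // shuffling fewer records. The names below are hypothetical.
  //
  //   val counts: TypedPipe[(String, Long)] = words.map((_, 1L))
  //   val totals = counts.sumByLocalKeys.sumByKey   // same values as counts.sumByKey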
/**
* Used to force a shuffle into a given size of nodes.
* Only use this if your mappers are taking far longer than
* the time to shuffle.
*/
def shard(partitions: Int): TypedPipe[T] =
groupRandomly(partitions).forceToReducers.values
/**
* Reasonably common shortcut for cases of associative/commutative reduction
* returns a typed pipe with only one element.
*/
def sum[U >: T](implicit plus: Semigroup[U]): ValuePipe[U] = ComputedValue(groupAll.sum[U].values)
/**
* Reasonably common shortcut for cases of associative/commutative reduction by Key
*/
def sumByKey[K, V](implicit ev: T <:< (K, V), ord: Ordering[K], plus: Semigroup[V]): UnsortedGrouped[K, V] =
group[K, V].sum[V]
/*
* This writes the current TypedPipe into a temporary file
* and then opens it after complete so that you can continue from that point
*/
def forceToDiskExecution: Execution[TypedPipe[T]] = Execution.fromFn { (conf, mode) =>
val flowDef = new FlowDef
mode match {
case _: CascadingLocal => // Local or Test mode
val dest = new MemorySink[T]
write(dest)(flowDef, mode)
// We can't read until the job finishes
(flowDef, { (js: JobStats) => Future.successful(TypedPipe.from(dest.readResults)) })
case _: HadoopMode =>
// come up with unique temporary filename, use the config here
// TODO: refactor into TemporarySequenceFile class
val tmpDir = conf.get("hadoop.tmp.dir")
.orElse(conf.get("cascading.tmp.dir"))
.getOrElse("/tmp")
val tmpSeq = tmpDir + "/scalding-repl/snapshot-" + java.util.UUID.randomUUID + ".seq"
val dest = source.TypedSequenceFile[T](tmpSeq)
write(dest)(flowDef, mode)
(flowDef, { (js: JobStats) => Future.successful(TypedPipe.from(dest)) })
}
}
def toIteratorExecution: Execution[Iterator[T]]
/** use a TupleUnpacker to flatten U out into a cascading Tuple */
def unpackToPipe[U >: T](fieldNames: Fields)(implicit fd: FlowDef, mode: Mode, up: TupleUnpacker[U]): Pipe = {
val setter = up.newSetter(fieldNames)
toPipe[U](fieldNames)(fd, mode, setter)
}
/**
* Safely write to a TypedSink[T]. If you want to write to a Source (not a Sink)
* you need to do something like: toPipe(fieldNames).write(dest)
* @return a pipe equivalent to the current pipe.
*/
def write(dest: TypedSink[T])(implicit flowDef: FlowDef, mode: Mode): TypedPipe[T] = {
// Make sure that we don't render the whole pipeline twice:
val res = fork
dest.writeFrom(res.toPipe[T](dest.sinkFields)(flowDef, mode, dest.setter))
res
}
/**
* This is the functionally pure approach to building jobs. Note,
* that you have to call run on the result for anything to happen here.
*/
def writeExecution(dest: TypedSink[T]): Execution[Unit] =
Execution.fromFn { (conf: Config, m: Mode) =>
val fd = new FlowDef
write(dest)(fd, m)
(fd, { (js: JobStats) => Future.successful(()) })
}
/**
* If you want to write to a specific location, and then read from
* that location going forward, use this.
*/
def writeThrough[U >: T](dest: TypedSink[T] with TypedSource[U]): Execution[TypedPipe[U]] =
writeExecution(dest)
.map(_ => TypedPipe.from(dest))
/** Just keep the keys, or ._1 (if this type is a Tuple2) */
def keys[K](implicit ev: <:<[T, (K, Any)]): TypedPipe[K] =
// avoid capturing ev in the closure:
raiseTo[(K, Any)].map(_._1)
/** swap the keys with the values */
def swap[K, V](implicit ev: <:<[T, (K, V)]): TypedPipe[(V, K)] =
raiseTo[(K, V)].map(_.swap)
/** Just keep the values, or ._2 (if this type is a Tuple2) */
def values[V](implicit ev: <:<[T, (Any, V)]): TypedPipe[V] =
raiseTo[(Any, V)].map(_._2)
/**
* ValuePipe may be empty, so, this attaches it as an Option
* cross is the same as leftCross(p).collect { case (t, Some(v)) => (t, v) }
*/
def leftCross[V](p: ValuePipe[V]): TypedPipe[(T, Option[V])] =
p match {
case EmptyValue => map { (_, None) }
case LiteralValue(v) => map { (_, Some(v)) }
case ComputedValue(pipe) => leftCross(pipe)
}
/** uses hashJoin but attaches None if thatPipe is empty */
def leftCross[V](thatPipe: TypedPipe[V]): TypedPipe[(T, Option[V])] =
map(((), _)).hashLeftJoin(thatPipe.groupAll).values
/** common pattern of attaching a value and then map */
def mapWithValue[U, V](value: ValuePipe[U])(f: (T, Option[U]) => V): TypedPipe[V] =
leftCross(value).map(t => f(t._1, t._2))
/** common pattern of attaching a value and then flatMap */
def flatMapWithValue[U, V](value: ValuePipe[U])(f: (T, Option[U]) => TraversableOnce[V]): TypedPipe[V] =
leftCross(value).flatMap(t => f(t._1, t._2))
/** common pattern of attaching a value and then filter */
def filterWithValue[U](value: ValuePipe[U])(f: (T, Option[U]) => Boolean): TypedPipe[T] =
leftCross(value).filter(t => f(t._1, t._2)).map(_._1)
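  // Editor's sketch, not in the original source: attaching a computed scalar with mapWithValue;
  // the ValuePipe may be empty, hence the Option. Names are hypothetical.
  //
  //   val counts: TypedPipe[(String, Long)] = ...
  //   val total: ValuePipe[Long] = counts.values.sum
  //   val fractions = counts.mapWithValue(total) {
  //     case ((word, n), totalOpt) => (word, n.toDouble / totalOpt.getOrElse(1L))
  //   }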
/**
* These operations look like joins, but they do not force any communication
* of the current TypedPipe. They are mapping operations where this pipe is streamed
* through one item at a time.
*
* WARNING These behave semantically very differently than cogroup.
* This is because we handle (K,V) tuples on the left as we see them.
* The iterable on the right is over all elements with a matching key K, and it may be empty
* if there are no values for this key K.
*/
def hashCogroup[K, V, W, R](smaller: HashJoinable[K, W])(joiner: (K, V, Iterable[W]) => Iterator[R])(implicit ev: TypedPipe[T] <:< TypedPipe[(K, V)]): TypedPipe[(K, R)] =
smaller.hashCogroupOn(ev(this))(joiner)
/** Do an inner-join without shuffling this TypedPipe, but replicating argument to all tasks */
def hashJoin[K, V, W](smaller: HashJoinable[K, W])(implicit ev: TypedPipe[T] <:< TypedPipe[(K, V)]): TypedPipe[(K, (V, W))] =
hashCogroup[K, V, W, (V, W)](smaller)(Joiner.hashInner2)
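  // Editor's sketch, not in the original source: a map-side join against a small lookup table,
  // using the implicit toHashJoinable enrichment defined in the companion object. The right-hand
  // side is replicated to every task, so it must fit in memory. Names are hypothetical.
  //
  //   val clicks: TypedPipe[(Int, String)] = ...   // (adId, url)
  //   val adNames: TypedPipe[(Int, String)] = ...  // small dimension table
  //   val joined: TypedPipe[(Int, (String, String))] = clicks.hashJoin(adNames)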
/** Do an leftjoin without shuffling this TypedPipe, but replicating argument to all tasks */
def hashLeftJoin[K, V, W](smaller: HashJoinable[K, W])(implicit ev: TypedPipe[T] <:< TypedPipe[(K, V)]): TypedPipe[(K, (V, Option[W]))] =
hashCogroup[K, V, W, (V, Option[W])](smaller)(Joiner.hashLeft2)
/**
* For each element, do a map-side (hash) left join to look up a value
*/
def hashLookup[K >: T, V](grouped: HashJoinable[K, V]): TypedPipe[(K, Option[V])] =
map((_, ()))
.hashLeftJoin(grouped)
.map { case (t, (_, optV)) => (t, optV) }
/** Build a sketch of this TypedPipe so that you can do a skew-join with another Grouped */
def sketch[K, V](reducers: Int,
eps: Double = 1.0E-5, //272k width = 1MB per row
delta: Double = 0.01, //5 rows (= 5 hashes)
seed: Int = 12345)(implicit ev: TypedPipe[T] <:< TypedPipe[(K, V)],
serialization: K => Array[Byte],
ordering: Ordering[K]): Sketched[K, V] =
Sketched(ev(this), reducers, delta, eps, seed)
// If any errors happen below this line, but before a groupBy, write to a TypedSink
def addTrap[U >: T](trapSink: Source with TypedSink[T])(implicit conv: TupleConverter[U]): TypedPipe[U] =
TypedPipeFactory({ (flowDef, mode) =>
val fields = trapSink.sinkFields
// TODO: with diamonds in the graph, this might not be correct
val pipe = RichPipe.assignName(fork.toPipe[T](fields)(flowDef, mode, trapSink.setter))
flowDef.addTrap(pipe, trapSink.createTap(Write)(mode))
TypedPipe.from[U](pipe, fields)(flowDef, mode, conv)
})
}
final case object EmptyTypedPipe extends TypedPipe[Nothing] {
import Dsl._
override def aggregate[B, C](agg: Aggregator[Nothing, B, C]): ValuePipe[C] = EmptyValue
// Cross product with empty is always empty.
override def cross[U](tiny: TypedPipe[U]): TypedPipe[(Nothing, U)] = this
override def distinct(implicit ord: Ordering[_ >: Nothing]) = this
override def flatMap[U](f: Nothing => TraversableOnce[U]) = this
override def fork: TypedPipe[Nothing] = this
override def forceToDisk = this
override def leftCross[V](p: ValuePipe[V]) = this
/**
   * Limit the output to at most count items.
   * Useful for debugging, but probably that's about it.
   * The number of items kept may be less than count, and they are not chosen by any particular sampling method.
*/
override def limit(count: Int) = this
// prints the current pipe to either stdout or stderr
override def debug: TypedPipe[Nothing] = this
override def ++[U >: Nothing](other: TypedPipe[U]): TypedPipe[U] = other
override def toPipe[U >: Nothing](fieldNames: Fields)(implicit fd: FlowDef, mode: Mode, setter: TupleSetter[U]): Pipe =
IterableSource(Iterable.empty, fieldNames)(setter, singleConverter[U]).read(fd, mode)
def toIteratorExecution: Execution[Iterator[Nothing]] = Execution.from(Iterator.empty)
override def forceToDiskExecution: Execution[TypedPipe[Nothing]] = Execution.from(this)
override def sum[U >: Nothing](implicit plus: Semigroup[U]): ValuePipe[U] = EmptyValue
override def sumByLocalKeys[K, V](implicit ev: Nothing <:< (K, V), sg: Semigroup[V]) = this
override def hashCogroup[K, V, W, R](smaller: HashJoinable[K, W])(joiner: (K, V, Iterable[W]) => Iterator[R])(implicit ev: TypedPipe[Nothing] <:< TypedPipe[(K, V)]): TypedPipe[(K, R)] =
this
}
/**
 * You should use a view here.
 * If you avoid toPipe, this class is more efficient than IterableSource.
*/
final case class IterablePipe[T](iterable: Iterable[T]) extends TypedPipe[T] {
override def aggregate[B, C](agg: Aggregator[T, B, C]): ValuePipe[C] =
Some(iterable)
.filterNot(_.isEmpty)
.map(it => LiteralValue(agg(it)))
.getOrElse(EmptyValue)
override def ++[U >: T](other: TypedPipe[U]): TypedPipe[U] = other match {
case IterablePipe(thatIter) => IterablePipe(iterable ++ thatIter)
case EmptyTypedPipe => this
case _ if iterable.isEmpty => other
case _ => MergedTypedPipe(this, other)
}
// Implements a cross product.
override def cross[U](tiny: TypedPipe[U]) =
tiny.flatMap { u => iterable.map { (_, u) } }
override def filter(f: T => Boolean): TypedPipe[T] =
IterablePipe(iterable.filter(f))
override def flatMap[U](f: T => TraversableOnce[U]) =
IterablePipe(iterable.flatMap(f))
override def fork: TypedPipe[T] = this
override def forceToDisk = this
override def limit(count: Int): TypedPipe[T] = IterablePipe(iterable.take(count))
override def map[U](f: T => U): TypedPipe[U] = IterablePipe(iterable.map(f))
override def forceToDiskExecution: Execution[TypedPipe[T]] = Execution.from(this)
override def sum[U >: T](implicit plus: Semigroup[U]): ValuePipe[U] =
Semigroup.sumOption[U](iterable).map(LiteralValue(_))
.getOrElse(EmptyValue)
override def sumByLocalKeys[K, V](implicit ev: T <:< (K, V), sg: Semigroup[V]) =
// TODO This is pretty inefficient
IterablePipe(iterable.map(ev(_)).groupBy(_._1).mapValues(_.map(_._2).reduce(sg.plus(_, _))))
override def toPipe[U >: T](fieldNames: Fields)(implicit flowDef: FlowDef, mode: Mode, setter: TupleSetter[U]): Pipe =
IterableSource[U](iterable, fieldNames)(setter, singleConverter[U]).read(flowDef, mode)
def toIteratorExecution: Execution[Iterator[T]] = Execution.from(iterable.iterator)
}
object TypedPipeFactory {
def apply[T](next: (FlowDef, Mode) => TypedPipe[T]): TypedPipeFactory[T] = {
val memo = new java.util.WeakHashMap[FlowDef, (Mode, TypedPipe[T])]()
val fn = { (fd: FlowDef, m: Mode) =>
memo.synchronized {
memo.get(fd) match {
case null =>
val res = next(fd, m)
memo.put(fd, (m, res))
res
case (memoMode, pipe) if memoMode == m => pipe
case (memoMode, pipe) =>
sys.error("FlowDef reused on different Mode. Original: %s, now: %s".format(memoMode, m))
}
}
}
new TypedPipeFactory(NoStackAndThen(fn.tupled))
}
def unapply[T](tp: TypedPipe[T]): Option[NoStackAndThen[(FlowDef, Mode), TypedPipe[T]]] =
tp match {
case tp: TypedPipeFactory[_] =>
Some(tp.asInstanceOf[TypedPipeFactory[T]].next)
case _ => None
}
}
/**
* This is a TypedPipe that delays having access
* to the FlowDef and Mode until toPipe is called
*/
class TypedPipeFactory[T] private (@transient val next: NoStackAndThen[(FlowDef, Mode), TypedPipe[T]]) extends TypedPipe[T] {
private[this] def andThen[U](fn: TypedPipe[T] => TypedPipe[U]): TypedPipe[U] =
new TypedPipeFactory(next.andThen(fn))
def cross[U](tiny: TypedPipe[U]) = andThen(_.cross(tiny))
override def filter(f: T => Boolean): TypedPipe[T] = andThen(_.filter(f))
def flatMap[U](f: T => TraversableOnce[U]): TypedPipe[U] = andThen(_.flatMap(f))
override def map[U](f: T => U): TypedPipe[U] = andThen(_.map(f))
override def limit(count: Int) = andThen(_.limit(count))
override def sumByLocalKeys[K, V](implicit ev: T <:< (K, V), sg: Semigroup[V]) =
andThen(_.sumByLocalKeys[K, V])
def toPipe[U >: T](fieldNames: Fields)(implicit flowDef: FlowDef, mode: Mode, setter: TupleSetter[U]) =
// unwrap in a loop, without recursing
unwrap(this).toPipe[U](fieldNames)(flowDef, mode, setter)
def toIteratorExecution: Execution[Iterator[T]] = Execution.factory { (conf, mode) =>
// This can only terminate in TypedPipeInst, which will
// keep the reference to this flowDef
val flowDef = new FlowDef
val nextPipe = unwrap(this)(flowDef, mode)
nextPipe.toIteratorExecution
}
@annotation.tailrec
private def unwrap(pipe: TypedPipe[T])(implicit flowDef: FlowDef, mode: Mode): TypedPipe[T] = pipe match {
case TypedPipeFactory(n) => unwrap(n(flowDef, mode))
case tp => tp
}
}
/**
* This is an instance of a TypedPipe that wraps a cascading Pipe
*/
class TypedPipeInst[T] private[scalding] (@transient inpipe: Pipe,
fields: Fields,
@transient localFlowDef: FlowDef,
@transient val mode: Mode,
flatMapFn: FlatMapFn[T]) extends TypedPipe[T] {
/**
* If this TypedPipeInst represents a Source that was opened with no
* filtering or mapping
*/
private[scalding] def openIfHead: Option[(Tap[_, _, _], Fields, FlatMapFn[T])] =
// Keep this local
if (inpipe.getPrevious.isEmpty) {
val srcs = localFlowDef.getSources
if (srcs.containsKey(inpipe.getName)) {
Some((srcs.get(inpipe.getName), fields, flatMapFn))
} else {
sys.error("Invalid head: pipe has no previous, but there is no registered source.")
}
} else None
def checkMode(m: Mode): Unit =
// This check is not likely to fail unless someone does something really strange.
// for historical reasons, it is not checked by the typed system
assert(m == mode,
"Cannot switch Mode between TypedSource.read and toPipe calls. Pipe: %s, call: %s".format(mode, m))
// Implements a cross product. The right side should be tiny (< 100MB)
override def cross[U](tiny: TypedPipe[U]): TypedPipe[(T, U)] = tiny match {
case EmptyTypedPipe => EmptyTypedPipe
case MergedTypedPipe(l, r) => MergedTypedPipe(cross(l), cross(r))
case IterablePipe(iter) => flatMap { t => iter.map { (t, _) } }
    // This should work for any TypedPipe; TODO: should we just use this case for all of them?
case _ => map(((), _)).hashJoin(tiny.groupAll).values
}
override def filter(f: T => Boolean): TypedPipe[T] =
new TypedPipeInst[T](inpipe, fields, localFlowDef, mode, flatMapFn.filter(f))
override def flatMap[U](f: T => TraversableOnce[U]): TypedPipe[U] =
new TypedPipeInst[U](inpipe, fields, localFlowDef, mode, flatMapFn.flatMap(f))
override def map[U](f: T => U): TypedPipe[U] =
new TypedPipeInst[U](inpipe, fields, localFlowDef, mode, flatMapFn.map(f))
/**
* This actually runs all the pure map functions in one Cascading Each
* This approach is more efficient than untyped scalding because we
* don't use TupleConverters/Setters after each map.
*/
override def toPipe[U >: T](fieldNames: Fields)(implicit flowDef: FlowDef, m: Mode, setter: TupleSetter[U]): Pipe = {
import Dsl.flowDefToRichFlowDef
checkMode(m)
flowDef.mergeFrom(localFlowDef)
RichPipe(inpipe).flatMapTo[TupleEntry, U](fields -> fieldNames)(flatMapFn)
}
def toIteratorExecution: Execution[Iterator[T]] = Execution.factory { (conf, m) =>
// To convert from java iterator to scala below
import scala.collection.JavaConverters._
checkMode(m)
openIfHead match {
// TODO: it might be good to apply flatMaps locally,
// since we obviously need to iterate all,
// but filters we might want the cluster to apply
// for us. So unwind until you hit the first filter, snapshot,
// then apply the unwound functions
case Some((tap, fields, Converter(conv))) =>
Execution.from(m.openForRead(tap).asScala.map(tup => conv(tup.selectEntry(fields))))
case _ => forceToDiskExecution.flatMap(_.toIteratorExecution)
}
}
}
final case class MergedTypedPipe[T](left: TypedPipe[T], right: TypedPipe[T]) extends TypedPipe[T] {
import Dsl._
  // Implements a cross product. The right side should be tiny
def cross[U](tiny: TypedPipe[U]): TypedPipe[(T, U)] = tiny match {
case EmptyTypedPipe => EmptyTypedPipe
case _ => MergedTypedPipe(left.cross(tiny), right.cross(tiny))
}
// prints the current pipe to either stdout or stderr
override def debug: TypedPipe[T] =
MergedTypedPipe(left.debug, right.debug)
override def filter(f: T => Boolean): TypedPipe[T] =
MergedTypedPipe(left.filter(f), right.filter(f))
def flatMap[U](f: T => TraversableOnce[U]): TypedPipe[U] =
MergedTypedPipe(left.flatMap(f), right.flatMap(f))
override def sample(percent: Double, seed: Long): TypedPipe[T] =
MergedTypedPipe(left.sample(percent, seed), right.sample(percent, seed))
override def sumByLocalKeys[K, V](implicit ev: T <:< (K, V), sg: Semigroup[V]): TypedPipe[(K, V)] =
MergedTypedPipe(left.sumByLocalKeys, right.sumByLocalKeys)
override def map[U](f: T => U): TypedPipe[U] =
MergedTypedPipe(left.map(f), right.map(f))
override def fork: TypedPipe[T] =
MergedTypedPipe(left.fork, right.fork)
/**
* This relies on the fact that two executions that are zipped will run in the
* same cascading flow, so we don't have to worry about it here.
*/
override def forceToDiskExecution =
left.forceToDiskExecution.zip(right.forceToDiskExecution)
.map { case (l, r) => l ++ r }
@annotation.tailrec
private def flattenMerge(toFlatten: List[TypedPipe[T]], acc: List[TypedPipe[T]])(implicit fd: FlowDef, m: Mode): List[TypedPipe[T]] =
toFlatten match {
case MergedTypedPipe(l, r) :: rest => flattenMerge(l :: r :: rest, acc)
case TypedPipeFactory(next) :: rest => flattenMerge(next(fd, m) :: rest, acc)
case nonmerge :: rest => flattenMerge(rest, nonmerge :: acc)
case Nil => acc
}
override def toPipe[U >: T](fieldNames: Fields)(implicit flowDef: FlowDef, mode: Mode, setter: TupleSetter[U]): Pipe = {
/*
* Cascading can't handle duplicate pipes in merges. What we do here is see if any pipe appears
* multiple times and if it does we can do self merges using flatMap.
* Finally, if there is actually more than one distinct TypedPipe, we use the cascading
* merge primitive. When using the merge primitive we rename all pipes going into it as
* Cascading cannot handle multiple pipes with the same name.
*/
val merged = flattenMerge(List(this), Nil)
// check for repeated pipes
.groupBy(identity)
.mapValues(_.size)
.map {
case (pipe, 1) => pipe
case (pipe, cnt) => pipe.flatMap(List.fill(cnt)(_).iterator)
}
.map(_.toPipe[U](fieldNames)(flowDef, mode, setter))
.toList
if (merged.size == 1) {
// there is no actual merging here, no need to rename:
merged.head
} else {
new cascading.pipe.Merge(merged.map(RichPipe.assignName): _*)
}
}
/**
* This relies on the fact that two executions that are zipped will run in the
* same cascading flow, so we don't have to worry about it here.
*/
def toIteratorExecution: Execution[Iterator[T]] =
left.toIteratorExecution.zip(right.toIteratorExecution)
.map { case (l, r) => l ++ r }
override def hashCogroup[K, V, W, R](smaller: HashJoinable[K, W])(joiner: (K, V, Iterable[W]) => Iterator[R])(implicit ev: TypedPipe[T] <:< TypedPipe[(K, V)]): TypedPipe[(K, R)] =
MergedTypedPipe(left.hashCogroup(smaller)(joiner), right.hashCogroup(smaller)(joiner))
}
class MappablePipeJoinEnrichment[T](pipe: TypedPipe[T]) {
  def joinBy[K, U](smaller: TypedPipe[U])(g: (T => K), h: (U => K), reducers: Int = -1)(implicit ord: Ordering[K]): CoGrouped[K, (T, U)] =
    pipe.groupBy(g).withReducers(reducers).join(smaller.groupBy(h))
  def leftJoinBy[K, U](smaller: TypedPipe[U])(g: (T => K), h: (U => K), reducers: Int = -1)(implicit ord: Ordering[K]): CoGrouped[K, (T, Option[U])] =
    pipe.groupBy(g).withReducers(reducers).leftJoin(smaller.groupBy(h))
  def rightJoinBy[K, U](smaller: TypedPipe[U])(g: (T => K), h: (U => K), reducers: Int = -1)(implicit ord: Ordering[K]): CoGrouped[K, (Option[T], U)] =
    pipe.groupBy(g).withReducers(reducers).rightJoin(smaller.groupBy(h))
  def outerJoinBy[K, U](smaller: TypedPipe[U])(g: (T => K), h: (U => K), reducers: Int = -1)(implicit ord: Ordering[K]): CoGrouped[K, (Option[T], Option[U])] =
    pipe.groupBy(g).withReducers(reducers).outerJoin(smaller.groupBy(h))
}
object Syntax {
implicit def joinOnMappablePipe[T](p: TypedPipe[T]): MappablePipeJoinEnrichment[T] = new MappablePipeJoinEnrichment(p)
}
|
lucamilanesio/scalding
|
scalding-core/src/main/scala/com/twitter/scalding/typed/TypedPipe.scala
|
Scala
|
apache-2.0
| 34,866
|
package sylvestris.core
import cats.data._
import cats.implicits._
import catsclaw.implicits._
import spray.json._
object InMemoryGraph {
def apply() = new InMemoryGraph {}
}
trait InMemoryGraph extends Graph {
case class GNode(id: Id, tag: Tag, content: String)
object GNode {
def apply[T : NodeManifest](node: Node[T]): GNode = {
GNode(node.id, NodeManifest[T].tag, node.content.toJson(NodeManifest[T].jsonFormat).compactPrint)
}
}
// TODO
@SuppressWarnings(Array("org.brianmckenna.wartremover.warts.Var"))
var gnodes: Map[Id, GNode] = Map()
// TODO
@SuppressWarnings(Array("org.brianmckenna.wartremover.warts.Var"))
var gedges: Set[Edge] = Set.empty
def parseNode[T : NodeManifest](v: GNode): Error Xor Node[T] =
Xor.fromTryCatch(v.content.parseJson.convertTo[T](NodeManifest[T].jsonFormat))
.bimap(t => Error(s"unable to parse $v to Node", Some(t)), Node[T](v.id, _))
def nodes[T : NodeManifest](): XorT[GraphM, List[Error], Set[Node[T]]] = XorTGraphM {
gnodes
.collect {
case (_, gnode) if gnode.tag === NodeManifest[T].tag => parseNode(gnode).bimap(List(_), Set(_))
}
.toList
.combineAll
}
def getNode[T : NodeManifest](id: Id): XorT[GraphM, Error, Node[T]] = XorTGraphM {
gnodes
.values
.find(n => n.id === id && n.tag === NodeManifest[T].tag)
.toRightXor(Error(s"$id not found"))
.flatMap(parseNode[T])
}
def addNode[T : NodeManifest](node: Node[T]): XorT[GraphM, Error, Node[T]] = XorTGraphM {
gnodes += node.id -> GNode(node)
node.right
}
def updateNode[T : NodeManifest](node: Node[T]): XorT[GraphM, Error, Node[T]] = XorTGraphM {
gnodes
.get(node.id)
.map { n => gnodes += node.id -> GNode(node); node }
.toRightXor(Error("node not found"))
}
def removeNode[T : NodeManifest](id: Id): XorT[GraphM, Error, Node[T]] = XorTGraphM {
val tag = NodeManifest[T].tag
val node = gnodes.get(id)
gnodes -= id
gedges = gedges.filterNot(e => (e.idA === id && e.tagA === tag) || (e.idB === id && e.tagB === tag))
node
.toRightXor(Error("node not found"))
.flatMap(parseNode[T])
}
def getEdges(id: Id, tag: Tag): XorT[GraphM, Error, Set[Edge]] = XorTGraphM {
gedges.filter(e => e.idA === id && e.tagA === tag).right
}
def getEdges(label: Option[Label], idA: Id, tagA: Tag, tagB: Tag): XorT[GraphM, Error, Set[Edge]] =
XorTGraphM {
gedges.filter(e => e.idA === idA && e.tagA === tagA && e.tagB === tagB && e.label === label).right
}
def addEdges(edges: Set[Edge]): XorT[GraphM, Error, Set[Edge]] = XorTGraphM {
gedges ++= edges
edges.right
}
def removeEdges(edges: Set[Edge]): XorT[GraphM, Error, Set[Edge]] = XorTGraphM {
gedges = gedges -- edges
edges.right
}
def removeEdges(idA: Id, tagA: Tag, tagB: Tag): XorT[GraphM, Error, Set[Edge]] = XorTGraphM {
val removedGedges = gedges.filter(e => e.idA === idA && e.tagA === tagA && e.tagB === tagB)
gedges --= removedGedges
removedGedges.right
}
}
|
janrain/sylvestris
|
core/src/main/scala/sylvestris/core/InMemoryGraph.scala
|
Scala
|
mit
| 3,067
|
package uk.co.morleydev.zander.client.test.unit.data.map
import java.io.File
import uk.co.morleydev.zander.client.data.map.GetCachedArtefactsLocation
import uk.co.morleydev.zander.client.test.gen.GenModel
import uk.co.morleydev.zander.client.test.unit.UnitTest
class GetCacheArtefactsLocationTests extends UnitTest {
describe("Given a cache root") {
val cachePathFile = new File("some/cache/path")
val getCacheLocationFromCache = new GetCachedArtefactsLocation(cachePathFile)
describe("When getting the cache location for artefacts") {
val project = GenModel.arg.genProject()
val compiler = GenModel.arg.genCompiler()
val mode = GenModel.arg.genBuildMode()
val branch = GenModel.arg.genBranch()
val actual = getCacheLocationFromCache(project, compiler, mode, branch)
it("Then the expected cache location is returned") {
assert(actual == new File(cachePathFile, "%s/bin/%s/%s.%s".format(project,branch,compiler,mode)))
}
}
}
}
|
MorleyDev/zander.client
|
src/test/scala/uk/co/morleydev/zander/client/test/unit/data/map/GetCacheArtefactsLocationTests.scala
|
Scala
|
mit
| 1,001
|
package scala.macros.internal
package prettyprinters
// NOTE: This class is an evolution of Show from 1.x
// and is heavily inspired by ShowBuilder from scala-native/scala-native.
final class Prettyprinter {
private val buf = new java.lang.StringBuilder
def raw(value: String): Prettyprinter = {
buf.append(value)
this
}
def stx[T: Syntax](value: T): Prettyprinter = {
implicitly[Syntax[T]].render(this, value)
this
}
def str[T: Structure](value: T): Prettyprinter = {
implicitly[Structure[T]].render(this, value)
this
}
def rep[T](xs: List[T], sep: String = "")(fn: T => Unit): Prettyprinter = {
if (xs.nonEmpty) {
xs.init.foreach { x =>
fn(x)
raw(sep)
}
fn(xs.last)
}
this
}
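  // Editor's sketch, not in the original source: rendering a comma-separated list by chaining
  // raw and rep; purely illustrative.
  //
  //   val p = new Prettyprinter
  //   p.raw("List(").rep(List(1, 2, 3), ", ")(x => p.raw(x.toString)).raw(")")
  //   p.toString   // "List(1, 2, 3)"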
override def toString: String = buf.toString
}
|
xeno-by/scalamacros
|
core/src/main/scala/scala/macros/internal/prettyprinters/Prettyprinter.scala
|
Scala
|
bsd-3-clause
| 821
|
// Copyright (c) 2014 David Miguel Antunes <davidmiguel {at} antunes.net>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package com.github.david04.liftutils.entity
import com.github.david04.liftutils.elem.DefaultHTMLEditor
import com.github.david04.liftutils.datatables.Col
trait StdEntityBase {
def id: Long
def entityName: String
def singular: String
def plural: String
def create: StdEntityBase
}
trait StdEntity[T <: StdEntity[T]] extends StdEntityBase {
self: T =>
def save(): T
def delete(): Unit
def elems(implicit editor: DefaultHTMLEditor): List[com.github.david04.liftutils.elem.HTMLEditableElem]
def columns: List[Col[T]]
def create: T
}
|
david04/liftutils
|
src/main/scala/com/github/david04/liftutils/entity/Entity.scala
|
Scala
|
mit
| 1,729
|
package sri.universal
import scala.scalajs.js
import scala.scalajs.js.annotation.JSGlobalScope
@js.native
@JSGlobalScope
object AssetLoader extends js.Object {
def require[T](name: String): T = js.native
}
//http://www.cakesolutions.net/teamblogs/default-type-parameters-with-implicits-in-scala
trait DefaultsTo[Type, Default]
object DefaultsTo {
implicit def defaultDefaultsTo[T]: DefaultsTo[T, T] = null
implicit def fallback[T, D]: DefaultsTo[T, D] = null
}
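// Editor's sketch, not in the original source: the usual way DefaultsTo is applied, letting a
// type parameter fall back to a default when the caller omits it. The loader below is hypothetical.
//
//   def load[T](name: String)(implicit ev: T DefaultsTo js.Dynamic): T =
//     AssetLoader.require[T](name)
//
//   load("logo.png")         // T is inferred as js.Dynamic via defaultDefaultsTo
//   load[String]("name.txt") // an explicit T wins via fallback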
|
scalajs-react-interface/universal
|
src/main/scala/sri/universal/AssetLoader.scala
|
Scala
|
apache-2.0
| 473
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.bwsw.sj.crud.rest.utils
import akka.http.scaladsl.model.MediaTypes._
import akka.http.scaladsl.model.headers.{ContentDispositionTypes, `Content-Disposition`}
import akka.http.scaladsl.model.{ContentTypes, HttpEntity, HttpResponse, MediaTypes}
import com.bwsw.common.JsonSerializer
import com.bwsw.sj.common.rest.RestResponse
import com.bwsw.sj.crud.rest.{CustomFile, CustomJar, ModuleJar}
/**
* Provides methods for completion of sj-api response
*/
trait CompletionUtils {
private val responseSerializer = new JsonSerializer()
def restResponseToHttpResponse(restResponse: RestResponse): HttpResponse = {
restResponse match {
case customJar: CustomJar =>
HttpResponse(
headers = List(`Content-Disposition`(ContentDispositionTypes.attachment, Map("filename" -> customJar.filename))),
entity = HttpEntity.Chunked.fromData(MediaTypes.`application/java-archive`, customJar.source)
)
case customFile: CustomFile =>
HttpResponse(
headers = List(`Content-Disposition`(ContentDispositionTypes.attachment, Map("filename" -> customFile.filename))),
entity = HttpEntity.Chunked.fromData(ContentTypes.`application/octet-stream`, customFile.source)
)
case moduleJar: ModuleJar =>
HttpResponse(
headers = List(`Content-Disposition`(ContentDispositionTypes.attachment, Map("filename" -> moduleJar.filename))),
entity = HttpEntity.Chunked.fromData(MediaTypes.`application/java-archive`, moduleJar.source)
)
case _ =>
HttpResponse(
status = restResponse.statusCode,
entity = HttpEntity(`application/json`, responseSerializer.serialize(restResponse))
)
}
}
}
|
bwsw/sj-platform
|
core/sj-crud-rest/src/main/scala/com/bwsw/sj/crud/rest/utils/CompletionUtils.scala
|
Scala
|
apache-2.0
| 2,552
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.collection
import scala.reflect.ClassTag
/**
* A fast hash map implementation for nullable keys. This hash map supports insertions and updates,
* but not deletions. This map is about 5X faster than java.util.HashMap, while using much less
* space overhead.
*
* Under the hood, it uses our OpenHashSet implementation.
*
 * NOTE: when using a numeric type as the value type, the user of this class should be careful to
 * distinguish between 0/0.0/0L and a non-existent value.
*/
private[spark]
class OpenHashMap[K : ClassTag, @specialized(Long, Int, Double) V: ClassTag](
initialCapacity: Int)
extends Iterable[(K, V)]
with Serializable {
def this() = this(64)
protected var _keySet = new OpenHashSet[K](initialCapacity)
// Init in constructor (instead of in declaration) to work around a Scala compiler specialization
// bug that would generate two arrays (one for Object and one for specialized T).
private var _values: Array[V] = _
_values = new Array[V](_keySet.capacity)
@transient private var _oldValues: Array[V] = null
// Treat the null key differently so we can use nulls in "data" to represent empty items.
private var haveNullValue = false
private var nullValue: V = null.asInstanceOf[V]
override def size: Int = if (haveNullValue) _keySet.size + 1 else _keySet.size
/** Tests whether this map contains a binding for a key. */
def contains(k: K): Boolean = {
if (k == null) {
haveNullValue
} else {
_keySet.getPos(k) != OpenHashSet.INVALID_POS
}
}
/** Get the value for a given key */
def apply(k: K): V = {
if (k == null) {
nullValue
} else {
val pos = _keySet.getPos(k)
if (pos < 0) {
null.asInstanceOf[V]
} else {
_values(pos)
}
}
}
/** Set the value for a key */
def update(k: K, v: V): Unit = {
if (k == null) {
haveNullValue = true
nullValue = v
} else {
val pos = _keySet.addWithoutResize(k) & OpenHashSet.POSITION_MASK
_values(pos) = v
_keySet.rehashIfNeeded(k, grow, move)
_oldValues = null
}
}
/**
* If the key doesn't exist yet in the hash map, set its value to defaultValue; otherwise,
* set its value to mergeValue(oldValue).
*
* @return the newly updated value.
*/
def changeValue(k: K, defaultValue: => V, mergeValue: (V) => V): V = {
if (k == null) {
if (haveNullValue) {
nullValue = mergeValue(nullValue)
} else {
haveNullValue = true
nullValue = defaultValue
}
nullValue
} else {
val pos = _keySet.addWithoutResize(k)
if ((pos & OpenHashSet.NONEXISTENCE_MASK) != 0) {
val newValue = defaultValue
_values(pos & OpenHashSet.POSITION_MASK) = newValue
_keySet.rehashIfNeeded(k, grow, move)
newValue
} else {
_values(pos) = mergeValue(_values(pos))
_values(pos)
}
}
}
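  // Editor's sketch, not part of the original Spark source (and note the class is private[spark]):
  // counting occurrences with changeValue; the data below is hypothetical.
  //
  //   val counts = new OpenHashMap[String, Long]()
  //   Seq("a", "b", "a").foreach { w => counts.changeValue(w, 1L, _ + 1L) }
  //   counts("a")   // 2L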
override def iterator: Iterator[(K, V)] = new Iterator[(K, V)] {
var pos = -1
var nextPair: (K, V) = computeNextPair()
/** Get the next value we should return from next(), or null if we're finished iterating */
def computeNextPair(): (K, V) = {
if (pos == -1) { // Treat position -1 as looking at the null value
if (haveNullValue) {
pos += 1
return (null.asInstanceOf[K], nullValue)
}
pos += 1
}
pos = _keySet.nextPos(pos)
if (pos >= 0) {
val ret = (_keySet.getValue(pos), _values(pos))
pos += 1
ret
} else {
null
}
}
def hasNext: Boolean = nextPair != null
def next(): (K, V) = {
val pair = nextPair
nextPair = computeNextPair()
pair
}
}
// The following member variables are declared as protected instead of private for the
// specialization to work (specialized class extends the non-specialized one and needs access
// to the "private" variables).
// They also should have been val's. We use var's because there is a Scala compiler bug that
// would throw illegal access error at runtime if they are declared as val's.
protected var grow = (newCapacity: Int) => {
_oldValues = _values
_values = new Array[V](newCapacity)
}
protected var move = (oldPos: Int, newPos: Int) => {
_values(newPos) = _oldValues(oldPos)
}
}
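// Illustrative usage sketch (not part of the original file): exercises update, changeValue,
// apply and iteration. It assumes the caller sits inside the org.apache.spark package tree,
// since the class is private[spark]; names below are hypothetical.
object OpenHashMapExample {
  def main(args: Array[String]): Unit = {
    val counts = new OpenHashMap[String, Int]()
    counts("spark") = 1                          // update: insert or overwrite
    counts.changeValue("spark", 1, _ + 1)        // existing key: mergeValue -> 2
    counts.changeValue("scala", 1, _ + 1)        // absent key: defaultValue -> 1
    println(counts("missing"))                   // absent key: zero value for Int
    counts.iterator.foreach { case (k, v) => println(s"$k -> $v") }
  }
}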
|
matthewfranglen/spark
|
core/src/main/scala/org/apache/spark/util/collection/OpenHashMap.scala
|
Scala
|
mit
| 5,195
|
package com.softwaremill.guardedblocks.testing
class AkkaHttpCasesSpec extends GuardedBlocksPluginTestSuite("/akkaHttp/routes")
|
mkubala/scalac-guardedblocks-plugin
|
tests/akkaHttp/src/test/scala/com/softwaremill/guardedblocks/testing/AkkaHttpCasesSpec.scala
|
Scala
|
gpl-3.0
| 129
|
package direct.traffic.archivespark.specific.warc.commoncrawl
import java.net.URI
import direct.traffic.archivespark.dataspecs.MultiTextDataLoader
import direct.traffic.archivespark.specific.warc.spec.JCdxSpec
import direct.traffic.archivespark.dataspecs.access.ListingFileMap
import org.apache.spark.deploy.SparkHadoopUtil
/**
* Created by boris on 29.09.17.
*/
class CommonCrawlSpec(hdfsHost: URI, cdxPathsFile: String, warcPathsFile: String)
  extends JCdxSpec[CommonCrawlJson](JCdxCommonCrawl.fromString(_), ListingFileMap(hdfsHost, warcPathsFile))
  with MultiTextDataLoader {
  override def dataPath = ListingFileMap(hdfsHost, cdxPathsFile).toSeq.map(x => "/" + x)
}
object CommonCrawlsSpec {
  def apply(cdxPathsFile: String, warcPathsFile: String, hdfsHost: URI = new URI(SparkHadoopUtil.get.conf.get("fs.defaultFS"))) =
    new CommonCrawlSpec(hdfsHost, cdxPathsFile, warcPathsFile)
}
|
trafficdirect/ArchiveSparkJCDX
|
src/main/scala/direct/traffic/archivespark/specific/warc/commoncrawl/CommonCrawlSpec.scala
|
Scala
|
mit
| 888
|
/**
* (c) Copyright 2013 WibiData, Inc.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kiji.express.flow
import org.kiji.annotations.ApiAudience
import org.kiji.annotations.ApiStability
import org.kiji.annotations.Inheritance
/**
* This exception is thrown when a KijiTap cannot validate that the Kiji tables or columns it
* requires exist.
*
* @param message The exception message.
* @param cause An optional parameter for the causing exception.
*/
@ApiAudience.Public
@ApiStability.Stable
final class InvalidKijiTapException(message: String = null, cause: Throwable = null)
extends RuntimeException(message, cause)
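// Illustrative sketch (not part of the original file): how validation code might surface this
// error. `tapDescription` and `resourcesExist` are hypothetical inputs, not a real KijiExpress API.
object InvalidKijiTapExceptionExample {
  def requireValidTap(tapDescription: String, resourcesExist: Boolean): Unit = {
    if (!resourcesExist) {
      throw new InvalidKijiTapException(s"Cannot validate Kiji tables/columns for: $tapDescription")
    }
  }
}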
|
kijiproject/kiji-express
|
kiji-express/src/main/scala/org/kiji/express/flow/InvalidKijiTapException.scala
|
Scala
|
apache-2.0
| 1,272
|
package org.scalaide.core.internal.launching
import org.eclipse.core.expressions.PropertyTester
import org.eclipse.core.runtime.CoreException
import org.eclipse.core.runtime.IAdaptable
import org.eclipse.jdt.core.IJavaElement
import org.eclipse.jdt.core.JavaModelException
import org.scalaide.util.eclipse.EclipseUtils.RichAdaptable
import org.scalaide.core.internal.jdt.model.ScalaSourceFile
class ScalaLaunchableTester extends PropertyTester {
/**
* name for the "has main" property
*/
private val PROPERTY_HAS_MAIN = "hasMain" //$NON-NLS-1$
/**
* name for the "can launch as junit" property
*/
  private val PROPERTY_CAN_LAUNCH_AS_JUNIT = "canLaunchAsJUnit" //$NON-NLS-1$
/**
* Determines if the Scala element contains main method(s).
*
* @param element the element to check for the method
* @return true if a method is found in the element, false otherwise
*/
private def hasMain(element: IJavaElement): Boolean = {
try {
ScalaLaunchShortcut.getMainMethods(element).nonEmpty
} catch {
case _: JavaModelException => false
case _: CoreException => false
}
}
/**
* Determines if the Scala element is in a source that contains one (or more) runnable JUnit test class.
*
* @param element the element to check for the method
* @return true if one or more JUnit test classes are found in the element, false otherwise
*/
private def canLaunchAsJUnit(element: IJavaElement): Boolean = {
try {
element match {
case _: ScalaSourceFile =>
ScalaLaunchShortcut.getJunitTestClasses(element).nonEmpty
case _ => true
}
} catch {
case _: JavaModelException => false
case _: CoreException => false
}
}
/**
   * Runs the tests defined via extension points for the Run As... and Debug As... menu items.
   * Currently this test treats everything that is not a source file optimistically; in this
   * context, an optimistic approach means the test always returns true.
   *
   * There are several reasons for the optimistic choice; some of them are outlined below.
   * <ul>
   * <li>Performance (in terms of the time needed to display the menu) cannot be preserved: to know
   * what to allow in any one of the menus, we would have to search all of the children of the
   * container to determine what it contains and what can be launched.</li>
   * <li>Even if the children of containers were inspected, a user might still want to choose a
   * launch type that our tests would filter out.</li>
   * </ul>
* @see org.eclipse.core.expressions.IPropertyTester#test(java.lang.Object, java.lang.String, java.lang.Object[], java.lang.Object)
* @since 3.2
* @return true if the specified tests pass, false otherwise
*/
def test(receiver: Object, property: String, args: Array[Object], expectedValue: Object): Boolean = {
var element: IJavaElement = null
if (receiver.isInstanceOf[IAdaptable]) {
element = receiver.asInstanceOf[IAdaptable].adaptTo[IJavaElement]
if (element == null || !element.exists()) {
return false
}
}
property match {
case PROPERTY_HAS_MAIN => hasMain(element)
case PROPERTY_CAN_LAUNCH_AS_JUNIT => canLaunchAsJUnit(element)
case _ => false
}
}
}
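// Illustrative sketch (not part of the original file): Eclipse normally calls `test` through the
// org.eclipse.core.expressions extension point; this shows the equivalent direct calls. `element`
// stands for whatever IJavaElement the menu is being computed for.
object ScalaLaunchableTesterExample {
  def launchableProperties(element: IJavaElement): (Boolean, Boolean) = {
    val tester = new ScalaLaunchableTester
    val hasMain = tester.test(element, "hasMain", Array.empty[AnyRef], null)
    val isJUnit = tester.test(element, "canLaunchAsJUnit", Array.empty[AnyRef], null)
    (hasMain, isJUnit)
  }
}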
|
scala-ide/scala-ide
|
org.scala-ide.sdt.core/src/org/scalaide/core/internal/launching/ScalaLaunchableTester.scala
|
Scala
|
bsd-3-clause
| 3,316
|
/*                     __                                               *\
**     ________ ___   / /  ___      __ ____  Scala.js Test Suite        **
**    / __/ __// _ | / /  / _ | __ / // __/  (c) 2013-2016, LAMP/EPFL   **
**  __\ \/ /__/ __ |/ /__/ __ |/_// /_\ \    http://scala-js.org/       **
** /____/\___/_/ |_/____/_/ | |__/ /____/                               **
**                          |/____/                                     **
\*                                                                      */
package org.scalajs.testsuite.javalib.util
import org.junit.Test
import org.junit.Assert._
import java.util.EventObject
class EventObjectTest {
@Test def getSource(): Unit = {
val src = new AnyRef
val e = new EventObject(src)
assertSame(src, e.getSource)
}
/* #2532 This does not link, because we cannot declare a Java field
@Test def sourceField(): Unit = {
class E(s: AnyRef) extends EventObject(s) {
def setSource(s: AnyRef): Unit = source = s
def otherGetSource: AnyRef = source
}
val src1 = new AnyRef
val e = new E(src1)
assertSame(src1, e.otherGetSource)
val src2 = new AnyRef
e.setSource(src2)
assertSame(src2, e.otherGetSource)
assertSame(src2, e.getSource)
}
*/
@Test def testToString(): Unit = {
/* There is not much we can test about toString, but it should not be the
* default inherited from Object.
*/
val e = new EventObject(new AnyRef)
assertNotNull(e.toString())
val default = classOf[EventObject].getName + "@" + Integer.toHexString(e.##)
assertNotEquals(default, e.toString())
}
}
|
lrytz/scala-js
|
test-suite/shared/src/test/scala/org/scalajs/testsuite/javalib/util/EventObjectTest.scala
|
Scala
|
bsd-3-clause
| 1,639
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.utils
import java.io.IOException
import org.apache.kafka.clients.{ClientRequest, ClientResponse, NetworkClient}
import org.apache.kafka.common.Node
import org.apache.kafka.common.requests.AbstractRequest
import org.apache.kafka.common.utils.Time
import scala.annotation.tailrec
import scala.collection.JavaConverters._
object NetworkClientBlockingOps {
implicit def networkClientBlockingOps(client: NetworkClient): NetworkClientBlockingOps =
new NetworkClientBlockingOps(client)
}
/**
* Provides extension methods for `NetworkClient` that are useful for implementing blocking behaviour. Use with care.
*
* Example usage:
*
* {{{
* val networkClient: NetworkClient = ...
* import NetworkClientBlockingOps._
* networkClient.blockingReady(...)
* }}}
*/
class NetworkClientBlockingOps(val client: NetworkClient) extends AnyVal {
/**
* Checks whether the node is currently connected, first calling `client.poll` to ensure that any pending
* disconnects have been processed.
*
* This method can be used to check the status of a connection prior to calling `blockingReady` to be able
* to tell whether the latter completed a new connection.
*/
def isReady(node: Node)(implicit time: Time): Boolean = {
val currentTime = time.milliseconds()
client.poll(0, currentTime)
client.isReady(node, currentTime)
}
/**
* Invokes `client.poll` to discard pending disconnects, followed by `client.ready` and 0 or more `client.poll`
* invocations until the connection to `node` is ready, the timeout expires or the connection fails.
*
* It returns `true` if the call completes normally or `false` if the timeout expires. If the connection fails,
* an `IOException` is thrown instead. Note that if the `NetworkClient` has been configured with a positive
* connection timeout, it is possible for this method to raise an `IOException` for a previous connection which
* has recently disconnected.
*
* This method is useful for implementing blocking behaviour on top of the non-blocking `NetworkClient`, use it with
* care.
*/
def blockingReady(node: Node, timeout: Long)(implicit time: Time): Boolean = {
    require(timeout >= 0, "timeout should be >= 0")
val startTime = time.milliseconds()
val expiryTime = startTime + timeout
@tailrec
def awaitReady(iterationStartTime: Long): Boolean = {
if (client.isReady(node, iterationStartTime))
true
else if (client.connectionFailed(node))
throw new IOException(s"Connection to $node failed")
else {
val pollTimeout = expiryTime - iterationStartTime
client.poll(pollTimeout, iterationStartTime)
val afterPollTime = time.milliseconds()
if (afterPollTime < expiryTime) awaitReady(afterPollTime)
else false
}
}
isReady(node) || client.ready(node, startTime) || awaitReady(startTime)
}
/**
* Invokes `client.send` followed by 1 or more `client.poll` invocations until a response is received or a
* disconnection happens (which can happen for a number of reasons including a request timeout).
*
* In case of a disconnection, an `IOException` is thrown.
*
* This method is useful for implementing blocking behaviour on top of the non-blocking `NetworkClient`, use it with
* care.
*/
def blockingSendAndReceive(request: ClientRequest, body: AbstractRequest)(implicit time: Time): ClientResponse = {
client.send(request, time.milliseconds())
pollContinuously { responses =>
val response = responses.find { response =>
response.requestHeader.correlationId == request.header.correlationId
}
response.foreach { r =>
if (r.wasDisconnected)
throw new IOException(s"Connection to ${request.destination} was disconnected before the response was read")
}
response
}
}
/**
* Invokes `client.poll` until `collect` returns `Some`. The value inside `Some` is returned.
*
* Exceptions thrown via `collect` are not handled and will bubble up.
*
* This method is useful for implementing blocking behaviour on top of the non-blocking `NetworkClient`, use it with
* care.
*/
private def pollContinuously[T](collect: Seq[ClientResponse] => Option[T])(implicit time: Time): T = {
@tailrec
def recursivePoll: T = {
// rely on request timeout to ensure we don't block forever
val responses = client.poll(Long.MaxValue, time.milliseconds()).asScala
collect(responses) match {
case Some(result) => result
case None => recursivePoll
}
}
recursivePoll
}
}
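// Illustrative sketch (not part of the original file): blocking until a broker connection is
// ready. It assumes the caller already has a configured NetworkClient and a target Node, and
// that this client version provides Time.SYSTEM (older versions would pass another Time impl).
object BlockingReadyExample {
  import NetworkClientBlockingOps._
  def ensureConnected(networkClient: NetworkClient, node: Node): Unit = {
    implicit val time: Time = Time.SYSTEM
    if (!networkClient.blockingReady(node, timeout = 5000L))
      throw new IOException(s"Failed to connect to $node within 5000 ms")
  }
}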
|
eribeiro/kafka
|
core/src/main/scala/kafka/utils/NetworkClientBlockingOps.scala
|
Scala
|
apache-2.0
| 5,463
|
package org.rebeam.boxes.persistence
/**
 * Result of assigning an id to an object
*/
sealed trait IdResult {
def existing: Boolean
def id: Long
}
/**
* Object already had an id
* @param id The pre-existing id for the object
*/
case class ExistingId(id: Long) extends IdResult {
def existing = true
}
/**
 * Object had no pre-existing id; it is now assigned the included id
* @param id The id newly assigned to the object
*/
case class NewId(id: Long) extends IdResult {
def existing = false
}
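// Illustrative sketch (not part of the original file): a caller would typically branch on the
// result to decide whether to write a back-reference or the full object contents; the string
// encodings here are hypothetical.
object IdResultExample {
  def describe(result: IdResult): String = result match {
    case ExistingId(id) => s"ref:$id"   // already seen: refer to the cached id
    case NewId(id)      => s"obj:$id"   // first encounter: write the object under its new id
  }
}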
|
trepidacious/boxes-core
|
src/main/scala/org/rebeam/boxes/persistence/IdResult.scala
|
Scala
|
gpl-2.0
| 510
|
package fafa.api
/**
* Created by mac on 03.01.16.
*/
case class Piece(color: Color, role: Role)
|
Macok/fafachess
|
src/main/scala/fafa/api/Piece.scala
|
Scala
|
apache-2.0
| 101
|