code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.sumologic.shellbase.notifications
import com.sumologic.shellbase.CommonWordSpec
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class NotificationCommandSetTest extends CommonWordSpec {

  "Notification Command Set" should {
    "list notifications" in {
      val (_, commandSet) = managerAndCommandSet("test")
      commandSet.executeLine(List("list"))
    }

    "list notifications (even if empty)" in {
      val manager = new InMemoryShellNotificationManager("", Seq.empty)
      val commandSet = new NotificationCommandSet(manager)
      commandSet.executeLine(List("list"))
    }

    "let you toggle on/off all notifications at once" in {
      val (manager, commandSet) = managerAndCommandSet("1", "2", "3")

      commandSet.executeLine(List("enable"))
      expectEnabledStates(manager, "1" -> true, "2" -> true, "3" -> true)

      commandSet.executeLine(List("disable"))
      expectEnabledStates(manager, "1" -> false, "2" -> false, "3" -> false)

      commandSet.executeLine(List("enable", "all"))
      expectEnabledStates(manager, "1" -> true, "2" -> true, "3" -> true)

      commandSet.executeLine(List("disable", "all"))
      expectEnabledStates(manager, "1" -> false, "2" -> false, "3" -> false)
    }

    "let you toggle on/off notifications individually/in a group" in {
      val (manager, commandSet) = managerAndCommandSet("1", "2", "3")

      commandSet.executeLine(List("enable", "1"))
      expectEnabledStates(manager, "1" -> true, "2" -> false, "3" -> false)

      commandSet.executeLine(List("disable", "1"))
      expectEnabledStates(manager, "1" -> false, "2" -> false, "3" -> false)

      commandSet.executeLine(List("enable", "2,3"))
      expectEnabledStates(manager, "1" -> false, "2" -> true, "3" -> true)

      commandSet.executeLine(List("disable", "1,3"))
      expectEnabledStates(manager, "1" -> false, "2" -> true, "3" -> false)
    }
  }

  // Builds a manager holding one stub notification per name, plus the command set under test.
  private def managerAndCommandSet(names: String*): (InMemoryShellNotificationManager, NotificationCommandSet) = {
    val manager = new InMemoryShellNotificationManager("", names.map(createNotification))
    (manager, new NotificationCommandSet(manager))
  }

  // Asserts the enabled flag of each named notification in turn.
  private def expectEnabledStates(manager: InMemoryShellNotificationManager, expected: (String, Boolean)*): Unit =
    expected.foreach { case (name, state) =>
      manager.notificationEnabled(name) should be(state)
    }

  // Stub notification: tests only need its name, so notify is intentionally unimplemented.
  private def createNotification(n: String) = new ShellNotification {
    override def notify(title: String, message: String): Unit = ???
    override def name: String = n
  }
}
| SumoLogic/shellbase | shellbase-core/src/test/scala/com/sumologic/shellbase/notifications/NotificationCommandSetTest.scala | Scala | apache-2.0 | 3,933 |
import javax.servlet.http.HttpServlet
import zio.ZIO
object Services {

  /** Renders every stored (name, message) entry as an HTML <ul> on the response.
   *  Requires the current request/response and a JDBC connection from the ZIO environment.
   */
  val getMessages: ZIO[
    WithRequest with WithResponse with JdbcIO,
    Throwable,
    Unit
  ] =
    for {
      _ <- Response.setContentType("text/html;charset=UTF-8")
      entries <- Database.getEntries
      writer <- Response.getWriter
      // NOTE(review): the `_ = ...` bindings run these writes as plain side effects
      // during for-desugaring, outside ZIO's error channel — an exception here is
      // thrown rather than surfaced as a Throwable failure of the effect.
      _ = writer.write("<ul>\\n")
      _ = entries map { case (name, message) =>
        writer.write(s" <li>${name}: ${message}</li>\\n")
      }
      _ = writer.write("</ul>\\n")
    } yield ()

  /** Inserts a new entry from the `name` and `message` request parameters.
   *  On success redirects to "/"; if either parameter is missing, responds 400.
   */
  val addMessage: ZIO[
    WithRequest with WithResponse with JdbcIO,
    Throwable,
    Unit
  ] =
    for {
      nameO <- Request.getParameter("name")
      messageO <- Request.getParameter("message")
      _ <- (nameO, messageO) match {
        case (Some(name), Some(message)) =>
          for {
            _ <- Database.addEntry(name, message)
            _ <- Response.sendRedirect("/")
          } yield ()
        case _ =>
          for {
            _ <- Response.setStatus(400)
          } yield ()
      }
    } yield ()
}
class ZioServlet extends HttpServlet {
  import javax.servlet.http.HttpServletRequest
  import javax.servlet.http.HttpServletResponse
  import zio.Runtime

  /** Builds a ZIO environment around the given servlet request/response plus a
   *  pooled JDBC connection, then runs `k` inside a transaction.
   *
   *  @return Right(result) on success; Left(t) for any failure raised by `k`
   *          (all failures are converted to values via catchAll).
   */
  def unsafeRun[A](
    req: HttpServletRequest,
    res: HttpServletResponse
  )(
    k: ZIO[WithRequest with WithResponse with JdbcIO, Throwable, A]
  ): Either[Throwable, A] = {
    val env =
      new WithRequest with WithResponse with JdbcIO {
        // Loading the driver class registers it with java.sql.DriverManager.
        Class.forName("org.h2.Driver")
        val request = req
        val response = res
        val connection = Database.connectionPool.getConnection
      }
    Runtime.default.unsafeRun {
      JdbcIO
        .transact(k)
        .map(a => Right(a))
        .catchAll { t =>
          ZIO.succeed(Left[Throwable, A](t))
        }
        .provide(env)
    }
  }

  /** Servlet lifecycle hook: sets up the database schema.
   *  NOTE(review): passes null request/response — only safe as long as
   *  Database.init never touches them; confirm before changing init.
   */
  override def init: Unit = {
    unsafeRun(null, null)(Database.init)
  }

  /** Dispatch by HTTP method and path: GET / lists messages, POST / adds one,
   *  anything else gets a 404. Unexpected errors become a 500 response.
   */
  override def service(
    req: HttpServletRequest,
    res: HttpServletResponse
  ): Unit = {
    try {
      unsafeRun(req, res) {
        Request.route {
          case "GET" :: Nil => Services.getMessages
          case "POST" :: Nil => Services.addMessage
          case _ => Response.setStatus(404)
        }
      }
    } catch {
      case t: Throwable =>
        t.printStackTrace
        res.setStatus(500)
    }
  }
}
| earldouglas/xsbt-web-plugin | src/sbt-test/examples/zio/src/main/scala/ZioServlet.scala | Scala | bsd-3-clause | 2,361 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.util.concurrent.{Delayed, TimeUnit}
import kafka.network
import kafka.network.RequestChannel
import kafka.network.RequestChannel.Response
import kafka.utils.Logging
import org.apache.kafka.common.utils.Time
/**
 * Tracks a client channel whose response delivery is being delayed (throttled).
 *
 * @param request The request whose response has been delayed
 * @param time Time instance used to compute the remaining delay
 * @param throttleTimeMs How long (in milliseconds) this channel is throttled
 * @param channelThrottlingCallback Invoked to tell the socket server when throttling starts and ends
 */
class ThrottledChannel(val request: RequestChannel.Request, val time: Time, val throttleTimeMs: Int, channelThrottlingCallback: Response => Unit)
  extends Delayed with Logging {

  // Absolute wall-clock deadline (ms) at which the throttle expires.
  var endTime = time.milliseconds + throttleTimeMs

  // Notify the socket server that throttling has started for this channel.
  channelThrottlingCallback(new RequestChannel.StartThrottlingResponse(request))

  /** Notify the socket server that throttling has finished for this channel. */
  def notifyThrottlingDone(): Unit = {
    trace(s"Channel throttled for: $throttleTimeMs ms")
    channelThrottlingCallback(new network.RequestChannel.EndThrottlingResponse(request))
  }

  /** Remaining delay until the deadline, converted to the requested unit. */
  override def getDelay(unit: TimeUnit): Long =
    unit.convert(endTime - time.milliseconds, TimeUnit.MILLISECONDS)

  /** Orders throttled channels by expiry time, earliest first. */
  override def compareTo(d: Delayed): Int =
    java.lang.Long.compare(endTime, d.asInstanceOf[ThrottledChannel].endTime)
}
package scala.c.engine
import org.eclipse.cdt.core.dom.ast._
import scala.collection.mutable.ListBuffer
import scala.util.control.Exception.allCatch
import java.math.BigInteger
import org.eclipse.cdt.internal.core.dom.parser.c.CBasicType
import scala.util.Try
object Literal {

  /** Converts the source text of a C literal into a typed runtime value.
   *  Recognizes decimal and hex integers, "L" (long) and "u" (unsigned — the
   *  suffix is simply dropped) suffixes, char, string, float ("f"/"F") and
   *  double literals.
   */
  def cast(litStr: String): ValueType = {
    // True when the whole string parses as the given numeric type.
    def isIntNumber(s: String): Boolean = (allCatch opt s.toInt).isDefined
    def isLongNumber(s: String): Boolean = (allCatch opt s.toLong).isDefined

    val isLong = litStr.endsWith("L")

    // Strip a single trailing "L" or "u" suffix before further processing.
    val pre: String = if (litStr.endsWith("L")) {
      litStr.take(litStr.size - 1).mkString
    } else if (litStr.endsWith("u")) {
      litStr.take(litStr.size - 1).mkString
    } else {
      litStr
    }

    // Collapses the two-character escape sequences for backslash, newline,
    // carriage return and NUL into single characters; everything else is
    // copied through unchanged.
    // NOTE(review): assumes a non-empty input — `theStr.last` throws on "".
    def process(str: String): String = {
      val theStr = str.toCharArray.toList
      val result = new ListBuffer[Char]()
      var index = 0
      while (index < theStr.size - 1) {
        (theStr(index), Try(theStr(index + 1)).getOrElse(null)) match {
          case ('\\\\', '\\\\') => result += '\\\\'; index += 2
          case ('\\\\', 'n') => result += '\\n'; index += 2
          case ('\\\\', 'r') => result += '\\r'; index += 2
          case ('\\\\', '0') => result += '\\0'; index += 2
          case x => result += x._1; index += 1
        }
      }
      result += theStr.last
      result.toList.mkString
    }

    val post = process(pre)

    // Hex literals are normalized to their decimal string form.
    // NOTE(review): parses `pre` (before escape processing) although the
    // startsWith check is on `post`; equivalent for valid hex digits, but
    // inconsistent — confirm intent.
    val lit = if (post.startsWith("0x")) {
      val bigInt = new BigInteger(pre.drop(2), 16);
      bigInt.toString
    } else {
      post
    }

    // Decide the C type from the literal's shape, most specific first.
    val result = if (lit.head == '\\"' && lit.last == '\\"') {
      StringLiteral(lit)
    } else if (lit.head == '\\'' && lit.last == '\\'') {
      // Char literal: take the single character between the quotes.
      RValue(lit.toCharArray.apply(1).toByte, new CBasicType(IBasicType.Kind.eChar, 0))
    } else if (isLong) {
      RValue(lit.toLong, new CBasicType(IBasicType.Kind.eInt, IBasicType.IS_LONG))
    } else if (isIntNumber(lit)) {
      RValue(lit.toInt, TypeHelper.intType)
    } else if (isLongNumber(lit)) {
      // Fits in a Long but not an Int: promote to long.
      RValue(lit.toLong, new CBasicType(IBasicType.Kind.eInt, IBasicType.IS_LONG))
    } else if (lit.contains('F') || lit.contains('f')) {
      val num = lit.toCharArray.filter(x => x != 'f' && x != 'F').mkString
      RValue(num.toFloat, TypeHelper.floatType)
    } else {
      RValue(lit.toDouble, TypeHelper.doubleType)
    }

    result
  }
}
package com.outr.arango.query
import com.outr.arango.Query
class Filter(left: Query, condition: String, right: Query) {

  /** Combines this filter with another using the given boolean operator. */
  private def combined(operator: String, other: Filter): Filter =
    new Filter(build(), operator, other.build())

  /** Logical AND of this filter and `filter`. */
  def &&(filter: Filter): Filter = combined("&&", filter)

  /** Logical OR of this filter and `filter`. */
  def ||(filter: Filter): Filter = combined("||", filter)

  /** Renders this filter as a single Query, concatenating the args of both sides. */
  def build(): Query = Query(s"${left.value} $condition ${right.value}", left.args ++ right.args)
}
| outr/arangodb-scala | driver/src/main/scala/com/outr/arango/query/Filter.scala | Scala | mit | 397 |
package postgresweb.components
import ch.wsl.model.shared.JSONSchemaUI
import japgolly.scalajs.react.vdom.prefix_<^._
import japgolly.scalajs.react.{ReactComponentB, _}
import postgresweb.components.base.{SchemaForm, SchemaFormState}
import postgresweb.controllers.CRUDController
import postgresweb.css.CommonStyles
import scala.concurrent.ExecutionContext.Implicits.global
import scala.scalajs.js
import scala.scalajs.js.JSON
/** CRUD "update" view: loads the JSON schema, UI schema and current record from
 *  the controller, then renders an editable SchemaForm that submits updates.
 */
case class Updates(controller: CRUDController) {

  // schema: raw JSON schema string; ui: form layout; value: current record once loaded.
  case class State(schema: String, ui: JSONSchemaUI, value: Option[js.Any] = None)

  class Backend(scope: BackendScope[Unit, State]) {

    // Fetch schema, UI description and current value; when all three futures
    // complete, push them into component state (runNow forces the callback).
    for {
      schema <- controller.schemaAsString
      form <- controller.uiSchema
      value <- controller.get
    } yield {
      scope.modState(_.copy(schema = schema, ui = form, value = Some(value))).runNow()
    }

    // Forwards the submitted form data to the controller's update handler.
    def onSubmit(s: SchemaFormState): Unit = {
      controller.onUpdate(s.formData)
    }

    def render(s: State) = {
      <.div(CommonStyles.row,
        <.div(CommonStyles.fullWidth, SchemaForm(SchemaForm.Props(s.schema, s.ui, onSubmit, s.value)))
      )
    }
  }

  val component = ReactComponentB[Unit]("ItemsInfo")
    .initialState(State("{}", JSONSchemaUI.empty))
    .renderBackend[Backend]
    .buildU

  def apply() = component()
}
| minettiandrea/postgres-restify | client/src/main/scala/postgresweb/components/Updates.scala | Scala | apache-2.0 | 1,282 |
package oneoff_header
import com.softwaremill.session.{SessionSerializer, SingleValueSessionSerializer}
import io.circe.generic.semiauto._
import io.circe.{Decoder, Encoder}
import scala.util.Try
case class Session(id: String)

object Session {
  // Round-trips a Session through its raw id string for transport in the header.
  implicit def serializer: SessionSerializer[Session, String] =
    new SingleValueSessionSerializer(_.id, (raw: String) => Try(Session(raw)))

  // circe JSON codecs derived from the case class shape.
  implicit val encoder: Encoder[Session] = deriveEncoder[Session]
  implicit val decoder: Decoder[Session] = deriveDecoder[Session]
}
| t-mochizuki/scala-study | akka-http-session-example/oneoff-header/src/main/scala/oneoff_header/Session.scala | Scala | mit | 509 |
/* sbt -- Simple Build Tool
* Copyright 2009, 2010 Mark Harrah
*/
package xsbt.boot
import Pre._
import scala.collection.{ Iterable, Iterator }
import scala.collection.immutable.List
/** An immutable key/value map that preserves insertion order on iteration.
 *
 *  `backing` stores the most recently added binding first, so every operation
 *  that exposes insertion order (`keys`, `iterator`, `toList`) reverses it.
 *  Lookups are linear; intended for the small maps used by the launcher.
 */
sealed class ListMap[K, V] private (backing: List[(K, V)]) extends Iterable[(K, V)] // use Iterable because Traversable.toStream loops
{
  import ListMap.remove
  /** Returns a new map with `k` bound to `v`, replacing any previous binding. */
  def update(k: K, v: V) = this.+((k, v))
  /** Adds `pair`, first dropping any existing binding for the same key. */
  def +(pair: (K, V)) = copy(pair :: remove(backing, pair._1))
  /** Removes the binding for `k`, if any. */
  def -(k: K) = copy(remove(backing, k))
  /** Optionally the value bound to `k`. */
  def get(k: K): Option[V] = backing.find(_._1 == k).map(_._2)
  /** Keys in insertion order. */
  def keys: List[K] = backing.reverse.map(_._1)
  /** Value bound to `k`; reports an error (via Pre.getOrError) when absent. */
  def apply(k: K): V = getOrError(get(k), "Key " + k + " not found")
  /** Whether `k` has a binding. */
  def contains(k: K): Boolean = get(k).isDefined
  def iterator = backing.reverse.iterator
  override def isEmpty: Boolean = backing.isEmpty
  override def toList = backing.reverse
  override def toSeq = toList
  /** Hook so subclasses (see `default`) can preserve their behavior across updates. */
  protected def copy(newBacking: List[(K, V)]): ListMap[K, V] = new ListMap(newBacking)
  /** A view of this map whose `apply` falls back to `defaultF` for missing keys.
   *  `copy` is overridden so the default survives subsequent `+`/`-` operations. */
  def default(defaultF: K => V): ListMap[K, V] =
    new ListMap[K, V](backing) {
      override def apply(k: K) = super.get(k).getOrElse(defaultF(k))
      override def copy(newBacking: List[(K, V)]) = super.copy(newBacking).default(defaultF)
    }
  override def toString = backing.mkString("ListMap(", ",", ")")
}
object ListMap {
  /** Builds a ListMap from the given pairs, dropping duplicate pairs while keeping first-seen order. */
  def apply[K, V](pairs: (K, V)*): ListMap[K, V] = new ListMap[K, V](pairs.toList.distinct)

  /** The empty ListMap. */
  def empty[K, V]: ListMap[K, V] = new ListMap[K, V](Nil)

  /** Removes every binding whose key equals `k` from `backing`. */
  private def remove[K, V](backing: List[(K, V)], k: K): List[(K, V)] =
    backing.filterNot(_._1 == k)
}
| jaceklaskowski/sbt | launch/src/main/scala/xsbt/boot/ListMap.scala | Scala | bsd-3-clause | 1,543 |
package io.bartholomews.spotify4s
import cats.data.NonEmptySet
import io.bartholomews.fsclient.core.FsClient
import io.bartholomews.fsclient.core.config.UserAgent
import io.bartholomews.fsclient.core.oauth.v2.OAuthV2.{AccessToken, RedirectUri, RefreshToken}
import io.bartholomews.fsclient.core.oauth.v2.{ClientId, ClientPassword, ClientSecret}
import io.bartholomews.fsclient.core.oauth.{AccessTokenSigner, ClientPasswordAuthentication, Scope}
import io.bartholomews.iso_country.CountryCodeAlpha2
import io.bartholomews.spotify4s.Test._
import io.bartholomews.spotify4s.circe._
import io.bartholomews.spotify4s.core.SpotifyClient
import io.bartholomews.spotify4s.core.api.AuthApi.SpotifyUserAuthorizationRequest
import io.bartholomews.spotify4s.core.entities.{SpotifyId, SpotifyScope}
import sttp.client.{HttpURLConnectionBackend, Identity, Response, ResponseError, UriContext}
/** Shared manual-test fixtures: user agent, client credentials read from the
 *  environment, a canned authorization request, and a synchronous client.
 */
object Test {
  // $COVERAGE-OFF$
  private val userAgent =
    UserAgent("spotify4s", Some("0.0.1"), Some("https://github.com/bartholomews/spotify4s"))

  // Client-password signer; credentials come from environment variables.
  private val signer = ClientPasswordAuthentication(
    ClientPassword(
      clientId = ClientId(System.getenv("MUSICGENE_SPOTIFY_CLIENT_ID")),
      clientSecret = ClientSecret(System.getenv("MUSICGENE_SPOTIFY_CLIENT_SECRET"))
    )
  )

  // Authorization request used by the authorization-code flow demos below.
  val spotifyUserAuthorizationRequest: SpotifyUserAuthorizationRequest =
    SpotifyUserAuthorizationRequest(
      state = Some("wat"),
      redirectUri = RedirectUri(uri"https://bartholomews.io/callback"),
      scopes = List(
        SpotifyScope.PLAYLIST_READ_PRIVATE,
        SpotifyScope.APP_REMOTE_CONTROL,
        SpotifyScope.PLAYLIST_MODIFY_PUBLIC
      )
    )

  // Prints either the error or the successful body of a response.
  def printBody[E, A](re: Response[Either[ResponseError[E], A]]): Unit =
    re.body.fold(println, println)

  // Blocking (Identity-effect) Spotify client over HttpURLConnection.
  val sttpClient: SpotifyClient[Identity] = {
    new SpotifyClient[Identity](
      FsClient(userAgent, signer, HttpURLConnectionBackend())
    )
  }
  // $COVERAGE-ON$
}
/*
#authorization-code-flow"
#authorization-code-flow-with-proof-key-for-code-exchange-pkce
#implicit-grant-flow
#client-credentials-flow
*/
// https://developer.spotify.com/documentation/general/guides/authorization-guide/#client-credentials-flow
/** Demo: acquire an app-only token via the client-credentials flow, then use
 *  it to fetch a few new releases for Italy. Prints errors instead of failing.
 */
object ClientCredentialsFlow extends App {
  // $COVERAGE-OFF$
  import eu.timepit.refined.auto.autoRefineV
  // 1. Request access token
  sttpClient.auth
    .clientCredentials[io.circe.Error]
    .body
    .fold(
      println,
      implicit nonRefreshableToken => {
        // 2. Use access token (picked up implicitly by the client call).
        sttpClient.browse
          .getNewReleases(
            country = Some(CountryCodeAlpha2.ITALY),
            limit = 3,
            offset = 2
          )
          .body
          .fold(println, println)
      }
    )
  // $COVERAGE-ON$
}
// https://developer.spotify.com/documentation/general/guides/authorization-guide/#authorization-code-flow

/** Step 1 of the authorization-code flow: print the URL the user must visit. */
object AuthorizationCodeFlow_1_GetAuthorizeUrl extends App {
  // $COVERAGE-OFF$
  println {
    sttpClient.auth.authorizeUrl(Test.spotifyUserAuthorizationRequest)
  }
  // $COVERAGE-ON$
}

/** Step 2: exchange the redirect URI received after user consent for tokens.
 *  Paste the full redirect URI into `redirectionUriResponse` before running.
 */
object AuthorizationCodeFlow_2_UseAuthorizeUrl extends App {
  // $COVERAGE-OFF$
  println {
    val redirectionUriResponse =
      uri"???"
    println(redirectionUriResponse.toString())
    sttpClient.auth.AuthorizationCode.acquire[io.circe.Error](
      Test.spotifyUserAuthorizationRequest,
      redirectionUriResponse
    )
  }
  // $COVERAGE-ON$
}

/** Step 3: use a previously obtained access token to call an endpoint.
 *  Paste the tokens from step 2 into the two defs before running.
 */
object AuthorizationCodeFlow_3_UseAccessToken extends App {
  // $COVERAGE-OFF$
  def accessToken =
    "???"
  def refreshToken =
    "???"

  // Hand-built signer wrapping the pasted tokens.
  implicit val authorizationCodeResponse: AccessTokenSigner = AccessTokenSigner(
    generatedAt = 10000000001L,
    AccessToken(accessToken),
    "Bearer",
    3600,
    Some(RefreshToken(refreshToken)),
    Scope(List(""))
  )

  printBody {
    // FIXME: Deserialization error
    sttpClient.tracks.getTracks(
      ids = NonEmptySet.of(
        SpotifyId("458LTQbp2xTIIBtguCOFbU"),
        SpotifyId("2Eg21mDTQ3tk1OiPSnONwq")
        // SpotifyId("") // FIXME: I think SpotifyId needs to be nonEmpty otherwise troubles (400)
      ),
      market = None
    )
  }
  // $COVERAGE-ON$
}

/** Step 4: refresh an expired access token using the stored refresh token. */
object AuthorizationCodeFlow_4_RefreshAccessToken extends App {
  val refreshToken: RefreshToken = RefreshToken(AuthorizationCodeFlow_3_UseAccessToken.refreshToken)
  printBody {
    sttpClient.auth.AuthorizationCode.refresh(refreshToken)
  }
}
| bartholomews/spotify-scala-client | modules/circe/src/test/scala/io/bartholomews/spotify4s/Test.scala | Scala | mit | 4,364 |
package dotty.tools
package dotc
package core
package unpickleScala2
import java.io.IOException
import java.lang.Float.intBitsToFloat
import java.lang.Double.longBitsToDouble
import Contexts._, Symbols._, Types._, Scopes._, SymDenotations._, Names._, NameOps._
import StdNames._, Denotations._, NameOps._, Flags._, Constants._, Annotations._
import dotty.tools.dotc.typer.ProtoTypes.{FunProtoTyped, FunProto}
import util.Positions._
import dotty.tools.dotc.ast.{tpd, Trees, untpd}, ast.tpd._
import printing.Texts._
import printing.Printer
import io.AbstractFile
import util.common._
import typer.Checking.checkNonCyclic
import typer.Mode
import PickleBuffer._
import scala.reflect.internal.pickling.PickleFormat._
import Decorators._
import classfile.ClassfileParser
import scala.collection.{ mutable, immutable }
import scala.collection.mutable.ListBuffer
import scala.annotation.switch
object Scala2Unpickler {
  /** Exception thrown if classfile is corrupted */
  class BadSignature(msg: String) extends RuntimeException(msg)

  /** Carrier for a pickled polymorphic type before `depoly` converts it into
   *  the corresponding native Dotty type. */
  case class TempPolyType(tparams: List[Symbol], tpe: Type) extends UncachedGroundType {
    override def fallbackToText(printer: Printer): Text =
      "[" ~ printer.dclsText(tparams, ", ") ~ "]" ~ printer.toText(tpe)
  }

  /** Temporary type for classinfos, will be decomposed on completion of the class */
  case class TempClassInfoType(parentTypes: List[Type], decls: Scope, clazz: Symbol) extends UncachedGroundType

  /** Convert temp poly type to some native Dotty idiom.
   *  @param denot The denotation that gets the converted type as info.
   *  If `denot` is not an abstract type, this simply returns an equivalent `PolyType`.
   *  If `denot` is an abstract type, it converts a
   *
   *      TempPolyType(List(v_1 T_1, ..., v_n T_n), lo .. hi)
   *
   *  to a type lambda using `parameterizeWith/LambdaAbstract`.
   */
  def depoly(tp: Type, denot: SymDenotation)(implicit ctx: Context): Type = tp match {
    case TempPolyType(tparams, restpe) =>
      if (denot.isAbstractType)
        restpe.LambdaAbstract(tparams) // bounds needed?
      else if (denot.isAliasType) {
        // NOTE(review): `err` is never assigned, so the warning loop below is
        // currently dead code — presumably a leftover hook for parameterizeWith.
        var err: Option[(String, Position)] = None
        val result = restpe.parameterizeWith(tparams)
        for ((msg, pos) <- err)
          ctx.warning(
            sm"""$msg
                |originally parsed type : ${tp.show}
                |will be approximated by: ${result.show}.
                |Proceed at own risk.""")
        result
      }
      else
        PolyType.fromSymbols(tparams, restpe)
    case tp => tp
  }

  /** Give a constructor denotation the type parameters of its owning class. */
  def addConstructorTypeParams(denot: SymDenotation)(implicit ctx: Context) = {
    assert(denot.isConstructor)
    denot.info = PolyType.fromSymbols(denot.owner.typeParams, denot.info)
  }

  /** Convert array parameters denoting a repeated parameter of a Java method
   *  to `RepeatedParamClass` types.
   */
  def arrayToRepeated(tp: Type)(implicit ctx: Context): Type = tp match {
    case tp @ MethodType(paramNames, paramTypes) =>
      // The last parameter must be the Array[_] standing for Java varargs.
      val lastArg = paramTypes.last
      assert(lastArg isRef defn.ArrayClass)
      val elemtp0 :: Nil = lastArg.baseArgInfos(defn.ArrayClass)
      val elemtp = elemtp0 match {
        case AndType(t1, t2) if t1.typeSymbol.isAbstractType && (t2 isRef defn.ObjectClass) =>
          t1 // drop intersection with Object for abstract types in varargs. UnCurry can handle them.
        case _ =>
          elemtp0
      }
      tp.derivedMethodType(
        paramNames,
        paramTypes.init :+ defn.RepeatedParamType.appliedTo(elemtp),
        tp.resultType)
    case tp @ PolyType(paramNames) =>
      // Recurse beneath the type parameters; only MethodType/PolyType are expected.
      tp.derivedPolyType(paramNames, tp.paramBounds, arrayToRepeated(tp.resultType))
  }

  /** Add a synthetic default constructor to `scope` if `cls` does not have one. */
  def ensureConstructor(cls: ClassSymbol, scope: Scope)(implicit ctx: Context) =
    if (scope.lookup(nme.CONSTRUCTOR) == NoSymbol) {
      val constr = ctx.newDefaultConstructor(cls)
      addConstructorTypeParams(constr)
      cls.enter(constr, scope)
    }

  /** Install the unpickled class info on `denot`: enters type parameters into
   *  the class scope, normalizes parents (defaulting to Object), ensures a
   *  constructor, registers companion links, and sets the final ClassInfo.
   */
  def setClassInfo(denot: ClassDenotation, info: Type, selfInfo: Type = NoType)(implicit ctx: Context): Unit = {
    val cls = denot.classSymbol
    // Unpickled class infos arrive wrapped in the temporary carriers above.
    val (tparams, TempClassInfoType(parents, decls, clazz)) = info match {
      case TempPolyType(tps, cinfo) => (tps, cinfo)
      case cinfo => (Nil, cinfo)
    }
    var parentRefs = ctx.normalizeToClassRefs(parents, cls, decls)
    if (parentRefs.isEmpty) parentRefs = defn.ObjectClass.typeRef :: Nil
    // Type parameters pickled as members get the TypeParam flag; the rest are entered fresh.
    for (tparam <- tparams) {
      val tsym = decls.lookup(tparam.name)
      if (tsym.exists) tsym.setFlag(TypeParam)
      else denot.enter(tparam, decls)
    }
    // Self type: for module classes, the singleton type of the source module.
    val ost =
      if ((selfInfo eq NoType) && (denot is ModuleClass))
        denot.owner.thisType select denot.sourceModule
      else selfInfo
    if (!(denot.flagsUNSAFE is JavaModule)) ensureConstructor(denot.symbol.asClass, decls)

    val scalacCompanion = denot.classSymbol.scalacLinkedClass

    // Synthesizes the two hidden companion-lookup methods linking class and module.
    def registerCompanionPair(module: Symbol, claz: Symbol) = {
      val companionClassMethod = ctx.synthesizeCompanionMethod(nme.COMPANION_CLASS_METHOD, claz, module)
      if (companionClassMethod.exists)
        companionClassMethod.entered
      val companionModuleMethod = ctx.synthesizeCompanionMethod(nme.COMPANION_MODULE_METHOD, module, claz)
      if (companionModuleMethod.exists)
        companionModuleMethod.entered
    }

    if (denot.flagsUNSAFE is Module) {
      registerCompanionPair(denot.classSymbol, scalacCompanion)
    } else {
      registerCompanionPair(scalacCompanion, denot.classSymbol)
    }

    denot.info = ClassInfo(denot.owner.thisType, denot.classSymbol, parentRefs, decls, ost)
  }
}
/** Unpickle symbol table information descending from a class and/or module root
* from an array of bytes.
* @param bytes bytearray from which we unpickle
* @param classroot the top-level class which is unpickled, or NoSymbol if inapplicable
* @param moduleroot the top-level module class which is unpickled, or NoSymbol if inapplicable
* @param filename filename associated with bytearray, only used for error messages
*/
class Scala2Unpickler(bytes: Array[Byte], classRoot: ClassDenotation, moduleClassRoot: ClassDenotation)(ictx: Context)
extends PickleBuffer(bytes, 0, -1) with ClassfileParser.Embedded {
  /** Debug helper: dump this pickle's contents plus the roots being populated. */
  def showPickled() = {
    atReadPos(0, () => {
      println(s"classRoot = ${classRoot.debugString}, moduleClassRoot = ${moduleClassRoot.debugString}")
      util.ShowPickled.printFile(this)
    })
  }
  // print("unpickling "); showPickled() // !!! DEBUG

  import Scala2Unpickler._

  // The module (term) root corresponding to the module class root.
  val moduleRoot = moduleClassRoot.sourceModule(ictx).denot(ictx)
  assert(moduleRoot.isTerm)

  // Fail fast on pickle format versions we cannot read.
  checkVersion(ictx)

  private val loadingMirror = defn(ictx) // was: mirrorThatLoaded(classRoot)

  /** A map from entry numbers to array offsets */
  private val index = createIndex

  /** A map from entry numbers to symbols, types, or annotations */
  private val entries = new Array[AnyRef](index.length)

  /** A map from symbols to their associated `decls` scopes */
  private val symScopes = mutable.AnyRefMap[Symbol, Scope]()
  /** Abort unpickling with a BadSignature describing the source file, read
   *  position and cause. The underlying exception's stack trace is printed
   *  for debugging before the BadSignature is thrown. */
  protected def errorBadSignature(msg: String, original: Option[RuntimeException] = None)(implicit ctx: Context) = {
    val ex = new BadSignature(
      sm"""error reading Scala signature of $classRoot from $source:
          |error occurred at position $readIndex: $msg""")
    /*if (debug)*/ original.getOrElse(ex).printStackTrace() // !!! DEBUG
    throw ex
  }

  /** Rethrows BadSignature unchanged; wraps any other runtime exception in one. */
  protected def handleRuntimeException(ex: RuntimeException)(implicit ctx: Context) = ex match {
    case ex: BadSignature => throw ex
    case _ => errorBadSignature(s"a runtime exception occurred: $ex", Some(ex))
  }
  // Deferred action registered while reading a symbol entry; executed right
  // after that entry has been read, then cleared.
  private var postReadOp: Context => Unit = null

  /** Unpickle all entries in two passes: first every symbol (so references
   *  between them resolve), then symbol annotations and children entries.
   *  Children are read last — fix for #3951. Runtime exceptions are funneled
   *  through handleRuntimeException into BadSignature.
   */
  def run()(implicit ctx: Context) =
    try {
      var i = 0
      while (i < index.length) {
        if (entries(i) == null && isSymbolEntry(i)) {
          // Jump to the entry's offset, materialize it, then restore position.
          val savedIndex = readIndex
          readIndex = index(i)
          entries(i) = readSymbol()
          if (postReadOp != null) {
            postReadOp(ctx)
            postReadOp = null
          }
          readIndex = savedIndex
        }
        i += 1
      }
      // read children last, fix for #3951
      i = 0
      while (i < index.length) {
        if (entries(i) == null) {
          if (isSymbolAnnotationEntry(i)) {
            val savedIndex = readIndex
            readIndex = index(i)
            readSymbolAnnotation()
            readIndex = savedIndex
          } else if (isChildrenEntry(i)) {
            val savedIndex = readIndex
            readIndex = index(i)
            readChildren()
            readIndex = savedIndex
          }
        }
        i += 1
      }
    } catch {
      case ex: RuntimeException => handleRuntimeException(ex)
    }
  /** The classfile this pickle came from (falling back to the module class
   *  root's file); used in error messages. */
  def source(implicit ctx: Context): AbstractFile = {
    val f = classRoot.symbol.associatedFile
    if (f != null) f else moduleClassRoot.symbol.associatedFile
  }

  /** Reads and validates the pickle-format version header; rejects signatures
   *  with a different major version or a newer minor version. */
  private def checkVersion(implicit ctx: Context): Unit = {
    val major = readNat()
    val minor = readNat()
    if (major != MajorVersion || minor > MinorVersion)
      throw new IOException("Scala signature " + classRoot.fullName.decode +
        " has wrong version\\n expected: " +
        MajorVersion + "." + MinorVersion +
        "\\n found: " + major + "." + minor +
        " in " + source)
  }
  /** The `decls` scope associated with given symbol; created lazily. */
  protected def symScope(sym: Symbol) = symScopes.getOrElseUpdate(sym, newScope)

  /** Does entry represent an (internal) symbol?
   *  Each predicate below peeks at the entry's leading tag byte via `index`. */
  protected def isSymbolEntry(i: Int)(implicit ctx: Context): Boolean = {
    val tag = bytes(index(i)).toInt
    (firstSymTag <= tag && tag <= lastSymTag &&
      // Refinement classes are handled separately, not as regular symbols.
      (tag != CLASSsym || !isRefinementSymbolEntry(i)))
  }

  /** Does entry represent an (internal or external) symbol? */
  protected def isSymbolRef(i: Int): Boolean = {
    val tag = bytes(index(i))
    (firstSymTag <= tag && tag <= lastExtSymTag)
  }

  /** Does entry represent a name? */
  protected def isNameEntry(i: Int): Boolean = {
    val tag = bytes(index(i)).toInt
    tag == TERMname || tag == TYPEname
  }

  /** Does entry represent a symbol annotation? */
  protected def isSymbolAnnotationEntry(i: Int): Boolean = {
    val tag = bytes(index(i)).toInt
    tag == SYMANNOT
  }

  /** Does the entry represent children of a (sealed) symbol? */
  protected def isChildrenEntry(i: Int): Boolean = {
    val tag = bytes(index(i)).toInt
    tag == CHILDREN
  }
  /** Does entry represent a refinement symbol?
   *  pre: Entry is a class symbol
   */
  protected def isRefinementSymbolEntry(i: Int)(implicit ctx: Context): Boolean = {
    val savedIndex = readIndex
    readIndex = index(i)
    val tag = readByte().toInt
    assert(tag == CLASSsym)
    readNat(); // read length
    // A refinement class is identified purely by its special name.
    val result = readNameRef() == tpnme.REFINE_CLASS
    readIndex = savedIndex
    result
  }

  /** Is `sym` a refinement class (recognized by its name)? */
  protected def isRefinementClass(sym: Symbol)(implicit ctx: Context): Boolean =
    sym.name == tpnme.REFINE_CLASS

  /** Is `sym` defined within the toplevel class currently being unpickled? */
  protected def isLocal(sym: Symbol)(implicit ctx: Context) = isUnpickleRoot(sym.topLevelClass)

  /** Is `sym` one of the three roots (module, module class, class) of this run? */
  protected def isUnpickleRoot(sym: Symbol)(implicit ctx: Context) = {
    val d = sym.denot
    d == moduleRoot || d == moduleClassRoot || d == classRoot
  }
  /** If entry at <code>i</code> is undefined, define it by performing
   *  operation <code>op</code> with <code>readIndex</code> at start of i'th
   *  entry. Restore <code>readIndex</code> afterwards.
   */
  protected def at[T <: AnyRef](i: Int, op: () => T): T = {
    var r = entries(i)
    if (r eq null) {
      // Memoize: each entry is materialized at most once.
      r = atReadPos(index(i), op)
      assert(entries(i) eq null, entries(i))
      entries(i) = r
    }
    r.asInstanceOf[T]
  }

  /** Runs `op` with `readIndex` temporarily moved to `start`, restoring it after. */
  protected def atReadPos[T](start: Int, op: () => T): T = {
    val savedIndex = readIndex
    readIndex = start
    try op()
    finally readIndex = savedIndex
  }
  /** Read a name entry (tag byte, length, then the raw characters). */
  protected def readName()(implicit ctx: Context): Name = {
    val tag = readByte()
    val len = readNat()
    tag match {
      case TERMname => termName(bytes, readIndex, len)
      case TYPEname => typeName(bytes, readIndex, len)
      case _ => errorBadSignature("bad name tag: " + tag)
    }
  }
  protected def readTermName()(implicit ctx: Context): TermName = readName().toTermName
  protected def readTypeName()(implicit ctx: Context): TypeName = readName().toTypeName

  /** Read a symbol with no disambiguation predicate (accept any alternative). */
  protected def readSymbol()(implicit ctx: Context): Symbol = readDisambiguatedSymbol(alwaysTrue)()
/** Read a symbol, with possible disambiguation */
protected def readDisambiguatedSymbol(p: Symbol => Boolean)()(implicit ctx: Context): Symbol = {
val start = indexCoord(readIndex)
val tag = readByte()
val end = readNat() + readIndex
def atEnd = readIndex == end
def readExtSymbol(): Symbol = {
val name = readNameRef()
val owner = if (atEnd) loadingMirror.RootClass else readSymbolRef()
def adjust(denot: Denotation) = {
val denot1 = denot.disambiguate(d => p(d.symbol))
val sym = denot1.symbol
if (denot.exists && !denot1.exists) { // !!!DEBUG
val alts = denot.alternatives map (d => d + ":" + d.info + "/" + d.signature)
System.err.println(s"!!! disambiguation failure: $alts")
val members = denot.alternatives.head.symbol.owner.info.decls.toList map (d => d + ":" + d.info + "/" + d.signature)
System.err.println(s"!!! all members: $members")
}
if (tag == EXTref) sym else sym.moduleClass
}
def fromName(name: Name): Symbol = name.toTermName match {
case nme.ROOT => loadingMirror.RootClass
case nme.ROOTPKG => loadingMirror.RootPackage
case _ =>
def declIn(owner: Symbol) = adjust(owner.info.decl(name))
val sym = declIn(owner)
if (sym.exists || owner.ne(defn.ObjectClass)) sym else declIn(defn.AnyClass)
}
def nestedObjectSymbol: Symbol = {
// If the owner is overloaded (i.e. a method), it's not possible to select the
// right member, so return NoSymbol. This can only happen when unpickling a tree.
// the "case Apply" in readTree() takes care of selecting the correct alternative
// after parsing the arguments.
//if (owner.isOverloaded)
// return NoSymbol
if (tag == EXTMODCLASSref) {
val module = owner.info.decl(name.toTermName).suchThat(_ is Module)
module.info // force it, as completer does not yet point to module class.
module.symbol.moduleClass
/* was:
val moduleVar = owner.info.decl(name.toTermName.moduleVarName).symbol
if (moduleVar.isLazyAccessor)
return moduleVar.lazyAccessor.lazyAccessor
*/
} else NoSymbol
}
// println(s"read ext symbol $name from ${owner.denot.debugString} in ${classRoot.debugString}") // !!! DEBUG
// (1) Try name.
fromName(name) orElse {
// (2) Try with expanded name. Can happen if references to private
// symbols are read from outside: for instance when checking the children
// of a class. See #1722.
fromName(name.toTermName.expandedName(owner)) orElse {
// (3) Try as a nested object symbol.
nestedObjectSymbol orElse {
// // (4) Call the mirror's "missing" hook.
adjust(ctx.base.missingHook(owner, name)) orElse {
// println(owner.info.decls.toList.map(_.debugString).mkString("\\n ")) // !!! DEBUG
// }
// (5) Create a stub symbol to defer hard failure a little longer.
ctx.newStubSymbol(owner, name, source)
}
}
}
}
}
tag match {
case NONEsym => return NoSymbol
case EXTref | EXTMODCLASSref => return readExtSymbol()
case _ =>
}
// symbols that were pickled with Pickler.writeSymInfo
val nameref = readNat()
val name0 = at(nameref, readName)
val owner = readSymbolRef()
var flags = unpickleScalaFlags(readLongNat(), name0.isTypeName)
if (flags is DefaultParameter) {
// DefaultParameterized flag now on method, not parameter
//assert(flags is Param, s"$name0 in $owner")
flags = flags &~ DefaultParameterized
owner.setFlag(DefaultParameterized)
}
val name1 = name0.adjustIfModuleClass(flags)
val name = if (name1 == nme.TRAIT_CONSTRUCTOR) nme.CONSTRUCTOR else name1
def isClassRoot = (name == classRoot.name) && (owner == classRoot.owner) && !(flags is ModuleClass)
def isModuleClassRoot = (name == moduleClassRoot.name) && (owner == moduleClassRoot.owner) && (flags is Module)
def isModuleRoot = (name == moduleClassRoot.name.sourceModuleName) && (owner == moduleClassRoot.owner) && (flags is Module)
//if (isClassRoot) println(s"classRoot of $classRoot found at $readIndex, flags = $flags") // !!! DEBUG
//if (isModuleRoot) println(s"moduleRoot of $moduleRoot found at $readIndex, flags = $flags") // !!! DEBUG
//if (isModuleClassRoot) println(s"moduleClassRoot of $moduleClassRoot found at $readIndex, flags = $flags") // !!! DEBUG
def completeRoot(denot: ClassDenotation, completer: LazyType): Symbol = {
denot.setFlag(flags)
denot.resetFlag(Touched) // allow one more completion
denot.info = completer
denot.symbol
}
def finishSym(sym: Symbol): Symbol = {
val owner = sym.owner
if (owner.isClass &&
!( isUnpickleRoot(sym)
|| (sym is Scala2Existential)
|| isRefinementClass(sym)
)
)
owner.asClass.enter(sym, symScope(owner))
else if (isRefinementClass(owner))
symScope(owner).openForMutations.enter(sym)
sym
}
finishSym(tag match {
case TYPEsym | ALIASsym =>
var name1 = name.asTypeName
var flags1 = flags
if (flags is TypeParam) {
name1 = name1.expandedName(owner)
flags1 |= owner.typeParamCreationFlags | ExpandedName
}
ctx.newSymbol(owner, name1, flags1, localMemberUnpickler, coord = start)
case CLASSsym =>
val infoRef = readNat()
postReadOp = implicit ctx => atReadPos(index(infoRef), readTypeParams) // force reading type params early, so they get entered in the right order.
if (isClassRoot)
completeRoot(
classRoot, rootClassUnpickler(start, classRoot.symbol, NoSymbol))
else if (isModuleClassRoot)
completeRoot(
moduleClassRoot, rootClassUnpickler(start, moduleClassRoot.symbol, moduleClassRoot.sourceModule))
else if (name == tpnme.REFINE_CLASS)
// create a type alias instead
ctx.newSymbol(owner, name, flags, localMemberUnpickler, coord = start)
else {
def completer(cls: Symbol) = {
val unpickler = new LocalUnpickler() withDecls symScope(cls)
if (flags is ModuleClass)
unpickler withSourceModule (implicit ctx =>
cls.owner.info.decls.lookup(cls.name.sourceModuleName)
.suchThat(_ is Module).symbol)
else unpickler
}
ctx.newClassSymbol(owner, name.asTypeName, flags, completer, coord = start)
}
case VALsym =>
ctx.newSymbol(owner, name.asTermName, flags, localMemberUnpickler, coord = start)
case MODULEsym =>
if (isModuleRoot) {
moduleRoot setFlag flags
moduleRoot.symbol
} else ctx.newSymbol(owner, name.asTermName, flags,
new LocalUnpickler() withModuleClass(implicit ctx =>
owner.info.decls.lookup(name.moduleClassName)
.suchThat(_ is Module).symbol)
, coord = start)
case _ =>
errorBadSignature("bad symbol tag: " + tag)
})
}
/** Lazy completer for a symbol unpickled from Scala 2 pickle bytes.
 *  When forced, it repositions the reader at the symbol's own pickle entry
 *  (given by `startCoord`) and parses the symbol's flags/privateWithin/info.
 */
class LocalUnpickler extends LazyType {
  def startCoord(denot: SymDenotation): Coord = denot.symbol.coord
  def complete(denot: SymDenotation)(implicit ctx: Context): Unit = try {
    def parseToCompletion(denot: SymDenotation)(implicit ctx: Context) = {
      val tag = readByte()
      val end = readNat() + readIndex
      def atEnd = readIndex == end
      // Name, owner and flags were already consumed when the symbol itself was
      // created; they are re-read here only to advance the read cursor.
      val unusedNameref = readNat()
      val unusedOwnerref = readNat()
      val unusedFlags = readLongNat()
      var inforef = readNat()
      denot.privateWithin =
        if (!isSymbolRef(inforef)) NoSymbol
        else {
          // The ref after the flags was a privateWithin symbol; the real
          // info reference follows it.
          val pw = at(inforef, readSymbol)
          inforef = readNat()
          pw
        }
      // println("reading type for " + denot) // !!! DEBUG
      val tp = at(inforef, readType)
      denot match {
        case denot: ClassDenotation =>
          val selfInfo = if (atEnd) NoType else readTypeRef()
          setClassInfo(denot, tp, selfInfo)
          denot setFlag Scala2x
        case denot =>
          val tp1 = depoly(tp, denot)
          denot.info =
            if (tag == ALIASsym) TypeAlias(tp1)
            else if (denot.isType) checkNonCyclic(denot.symbol, tp1, reportErrors = false)
              // we need the checkNonCyclic call to insert LazyRefs for F-bounded cycles
            else if (!denot.is(Param)) tp1.underlyingIfRepeated(isJava = false)
            else tp1
          if (denot.isConstructor) addConstructorTypeParams(denot)
          if (atEnd) {
            assert(!(denot is SuperAccessor), denot)
          } else {
            // A trailing symbol reference is pickled for accessors; record the
            // aliased symbol as an Alias annotation on the denotation.
            assert(denot is (SuperAccessor | ParamAccessor), denot)
            def disambiguate(alt: Symbol) = { // !!! DEBUG
              ctx.debugTraceIndented(s"disambiguating ${denot.info} =:= ${denot.owner.thisType.memberInfo(alt)} ${denot.owner}") {
                denot.info matches denot.owner.thisType.memberInfo(alt)
              }
            }
            val alias = readDisambiguatedSymbolRef(disambiguate).asTerm
            denot.addAnnotation(Annotation.makeAlias(alias))
          }
      }
      // println(s"unpickled ${denot.debugString}, info = ${denot.info}") !!! DEBUG
    }
    atReadPos(startCoord(denot).toIndex,
      () => parseToCompletion(denot)(ctx.addMode(Mode.Scala2Unpickling)))
  } catch {
    case ex: RuntimeException => handleRuntimeException(ex)
  }
}
/** Shared completer instance for ordinary members (uses the symbol's own coord). */
object localMemberUnpickler extends LocalUnpickler
/** Completer for the class root / module-class root being unpickled:
 *  starts reading at the given `start` coordinate and wires up the
 *  declaration scope and source module of `cls`.
 */
def rootClassUnpickler(start: Coord, cls: Symbol, module: Symbol) =
  (new LocalUnpickler with SymbolLoaders.SecondCompleter {
    override def startCoord(denot: SymDenotation): Coord = start
  }) withDecls symScope(cls) withSourceModule (_ => module)
/** Convert
 * tp { type name = sym } forSome { sym >: L <: H }
 * to
 * tp { name >: L <: H }
 * and
 * tp { name: sym } forSome { sym <: T with Singleton }
 * to
 * tp { name: T }
 */
def elimExistentials(boundSyms: List[Symbol], tp: Type)(implicit ctx: Context): Type = {
  def removeSingleton(tp: Type): Type =
    if (tp isRef defn.SingletonClass) defn.AnyType else tp
  // Rewrite refinements that refer to one of the existentially bound symbols.
  def elim(tp: Type): Type = tp match {
    case tp @ RefinedType(parent, name) =>
      val parent1 = elim(tp.parent)
      tp.refinedInfo match {
        case TypeAlias(info: TypeRef) if boundSyms contains info.symbol =>
          // `type name = sym` with bound sym: replace by sym's bounds.
          RefinedType(parent1, name, info.symbol.info)
        case info: TypeRef if boundSyms contains info.symbol =>
          // `name: sym` with bound sym <: T with Singleton: strip the Singleton.
          val info1 = info.symbol.info
          assert(info1.derivesFrom(defn.SingletonClass))
          RefinedType(parent1, name, info1.mapReduceAnd(removeSingleton)(_ & _))
        case info =>
          tp.derivedRefinedType(parent1, name, info)
      }
    case tp @ TypeRef(pre, tpnme.Apply) if pre.isLambda =>
      elim(pre)
    case _ =>
      tp
  }
  val tp1 = elim(tp)
  val isBound = (tp: Type) => boundSyms contains tp.typeSymbol
  if (tp1 existsPart isBound) {
    // Some bound symbols survived elimination: approximate them by their
    // upper bounds (then by Any), and warn that the result is a best effort.
    val anyTypes = boundSyms map (_ => defn.AnyType)
    val boundBounds = boundSyms map (_.info.bounds.hi)
    val tp2 = tp1.subst(boundSyms, boundBounds).subst(boundSyms, anyTypes)
    ctx.warning(s"""failure to eliminate existential
      |original type : $tp forSome {${ctx.dclsText(boundSyms, "; ").show}
      |reduces to : $tp1
      |type used instead: $tp2
      |proceed at own risk.""".stripMargin)
    tp2
  } else tp1
}
/** Read a type
 *
 * @param forceProperType is used to ease the transition to NullaryMethodTypes (commentmarker: NMT_TRANSITION)
 * the flag say that a type of kind * is expected, so that PolyType(tps, restpe) can be disambiguated to PolyType(tps, NullaryMethodType(restpe))
 * (if restpe is not a ClassInfoType, a MethodType or a NullaryMethodType, which leaves TypeRef/SingletonType -- the latter would make the polytype a type constructor)
 */
protected def readType()(implicit ctx: Context): Type = {
  val tag = readByte()
  val end = readNat() + readIndex
  (tag: @switch) match {
    case NOtpe =>
      NoType
    case NOPREFIXtpe =>
      NoPrefix
    case THIStpe =>
      readSymbolRef().thisType
    case SINGLEtpe =>
      val pre = readTypeRef()
      // The singleton's symbol may be overloaded in the pickle; pick the
      // parameterless alternative.
      val sym = readDisambiguatedSymbolRef(_.info.isParameterless)
      if (isLocal(sym) || (pre == NoPrefix)) pre select sym
      else TermRef.withSig(pre, sym.name.asTermName, Signature.NotAMethod) // !!! should become redundant
    case SUPERtpe =>
      val thistpe = readTypeRef()
      val supertpe = readTypeRef()
      SuperType(thistpe, supertpe)
    case CONSTANTtpe =>
      ConstantType(readConstantRef())
    case TYPEREFtpe =>
      var pre = readTypeRef()
      val sym = readSymbolRef()
      pre match {
        case thispre: ThisType =>
          // The problem is that class references super.C get pickled as
          // this.C. Dereferencing the member might then get an overriding class
          // instance. The problem arises for instance for LinkedHashMap#MapValues
          // and also for the inner Transform class in all views. We fix it by
          // replacing the this with the appropriate super.
          if (sym.owner != thispre.cls) {
            val overriding = thispre.cls.info.decls.lookup(sym.name)
            if (overriding.exists && overriding != sym) {
              val base = pre.baseTypeWithArgs(sym.owner)
              assert(base.exists)
              pre = SuperType(thispre, base)
            }
          }
        case _ =>
      }
      val tycon =
        if (isLocal(sym) || pre == NoPrefix) {
          val pre1 = if ((pre eq NoPrefix) && (sym is TypeParam)) sym.owner.thisType else pre
          pre1 select sym
        }
        else TypeRef(pre, sym.name.asTypeName)
      val args = until(end, readTypeRef)
      // Scala 2 encodes by-name parameter types as ByNameParamClass[T].
      if (sym == defn.ByNameParamClass2x) ExprType(args.head)
      else tycon.appliedTo(args)
    case TYPEBOUNDStpe =>
      TypeBounds(readTypeRef(), readTypeRef())
    case REFINEDtpe =>
      val clazz = readSymbolRef()
      val decls = symScope(clazz)
      symScopes(clazz) = EmptyScope // prevent further additions
      val parents = until(end, readTypeRef)
      val parent = parents.reduceLeft(AndType(_, _))
      if (decls.isEmpty) parent
      else {
        def addRefinement(tp: Type, sym: Symbol) = {
          def subst(info: Type, rt: RefinedType) =
            if (clazz.isClass) info.substThis(clazz.asClass, RefinedThis(rt))
            else info // turns out some symbols read into `clazz` are not classes, not sure why this is the case.
          RefinedType(tp, sym.name, subst(sym.info, _))
        }
        (parent /: decls.toList)(addRefinement).asInstanceOf[RefinedType]
      }
    case CLASSINFOtpe =>
      val clazz = readSymbolRef()
      TempClassInfoType(until(end, readTypeRef), symScope(clazz), clazz)
    case METHODtpe | IMPLICITMETHODtpe =>
      val restpe = readTypeRef()
      val params = until(end, readSymbolRef)
      def isImplicit =
        tag == IMPLICITMETHODtpe ||
        params.nonEmpty && (params.head is Implicit)
      val maker = if (isImplicit) ImplicitMethodType else MethodType
      maker.fromSymbols(params, restpe)
    case POLYtpe =>
      val restpe = readTypeRef()
      val typeParams = until(end, readSymbolRef)
      // A POLYtpe with no type params is Scala 2's encoding of a nullary
      // method type; map it to an ExprType.
      if (typeParams.nonEmpty) TempPolyType(typeParams, restpe.widenExpr)
      else ExprType(restpe)
    case EXISTENTIALtpe =>
      val restpe = readTypeRef()
      val boundSyms = until(end, readSymbolRef)
      elimExistentials(boundSyms, restpe)
    case ANNOTATEDtpe =>
      val tp = readTypeRef()
      // no annotation self type is supported, so no test whether this is a symbol ref
      val annots = until(end, readAnnotationRef)
      AnnotatedType.make(annots, tp)
    case _ =>
      noSuchTypeTag(tag, end)
  }
}
/** Read the type parameter symbols of the entry at the current read position.
 *  Returns Nil when the entry is not a POLYtpe.
 */
def readTypeParams()(implicit ctx: Context): List[Symbol] = {
  val entryTag = readByte()
  val entryEnd = readNat() + readIndex
  if (entryTag != POLYtpe) Nil
  else {
    readNat() // skip the result-type reference; only the type params are wanted
    until(entryEnd, readSymbolRef)
  }
}
/** Report an unrecognized type tag as a bad-signature error. */
def noSuchTypeTag(tag: Int, end: Int)(implicit ctx: Context): Type =
  errorBadSignature("bad type tag: " + tag)
/** Read a constant: a literal of length `len` whose interpretation
 *  depends on the LITERALxxx tag.
 */
protected def readConstant()(implicit ctx: Context): Constant = {
  val tag = readByte().toInt
  val len = readNat()
  (tag: @switch) match {
    case LITERALunit => Constant(())
    case LITERALboolean => Constant(readLong(len) != 0L)
    case LITERALbyte => Constant(readLong(len).toByte)
    case LITERALshort => Constant(readLong(len).toShort)
    case LITERALchar => Constant(readLong(len).toChar)
    case LITERALint => Constant(readLong(len).toInt)
    case LITERALlong => Constant(readLong(len))
    // floats/doubles are pickled as their raw bit patterns
    case LITERALfloat => Constant(intBitsToFloat(readLong(len).toInt))
    case LITERALdouble => Constant(longBitsToDouble(readLong(len)))
    case LITERALstring => Constant(readNameRef().toString)
    case LITERALnull => Constant(null)
    case LITERALclass => Constant(readTypeRef())
    case LITERALenum => Constant(readSymbolRef())
    case _ => noSuchConstantTag(tag, len)
  }
}
/** Report an unrecognized constant tag as a bad-signature error. */
def noSuchConstantTag(tag: Int, len: Int)(implicit ctx: Context): Constant =
  errorBadSignature("bad constant tag: " + tag)
/** Read children and store them into the corresponding symbol.
 *  Each child is recorded as a Child annotation on the target symbol.
 */
protected def readChildren()(implicit ctx: Context): Unit = {
  val tag = readByte()
  assert(tag == CHILDREN)
  val end = readNat() + readIndex
  val target = readSymbolRef()
  while (readIndex != end)
    target.addAnnotation(Annotation.makeChild(readSymbolRef()))
}
/* Read a reference to a pickled item */
protected def readSymbolRef()(implicit ctx: Context): Symbol = { //OPT inlined from: at(readNat(), readSymbol) to save on closure creation
  val i = readNat()
  var r = entries(i)
  if (r eq null) {
    // Entry not yet unpickled: jump to its position, read it, memoize it,
    // then restore the current read position.
    val savedIndex = readIndex
    readIndex = index(i)
    r = readSymbol()
    // readSymbol() must not have filled the slot itself
    assert(entries(i) eq null, entries(i))
    entries(i) = r
    readIndex = savedIndex
  }
  r.asInstanceOf[Symbol]
}
// Convenience readers: each reads a nat-encoded entry index at the current
// position and dereferences it with the appropriate entry reader.
protected def readDisambiguatedSymbolRef(p: Symbol => Boolean)(implicit ctx: Context): Symbol =
  at(readNat(), readDisambiguatedSymbol(p))
protected def readNameRef()(implicit ctx: Context): Name = at(readNat(), readName)
protected def readTypeRef()(implicit ctx: Context): Type = at(readNat(), () => readType()) // after the NMT_TRANSITION period, we can leave off the () => ... ()
protected def readConstantRef()(implicit ctx: Context): Constant = at(readNat(), readConstant)
protected def readTypeNameRef()(implicit ctx: Context): TypeName = readNameRef().toTypeName
protected def readTermNameRef()(implicit ctx: Context): TermName = readNameRef().toTermName
protected def readAnnotationRef()(implicit ctx: Context): Annotation = at(readNat(), readAnnotation)
protected def readModifiersRef(isType: Boolean)(implicit ctx: Context): Modifiers = at(readNat(), () => readModifiers(isType))
protected def readTreeRef()(implicit ctx: Context): Tree = at(readNat(), readTree)
/** Read an annotation argument, which is pickled either
 * as a Constant or a Tree. The entry's first byte decides which.
 */
protected def readAnnotArg(i: Int)(implicit ctx: Context): Tree = bytes(index(i)) match {
  case TREE => at(i, readTree)
  case _ => Literal(at(i, readConstant))
}
/** Read a ClassfileAnnotArg (argument to a classfile annotation)
 *  that is an array of annotation arguments; represented as a SeqLiteral.
 */
private def readArrayAnnotArg()(implicit ctx: Context): Tree = {
  readByte() // skip the `annotargarray` tag
  val end = readNat() + readIndex
  // array elements are trees representing instances of scala.annotation.Annotation
  SeqLiteral(
    defn.SeqType.appliedTo(defn.AnnotationClass.typeRef :: Nil),
    until(end, () => readClassfileAnnotArg(readNat())))
}
/** Read a nested annotation used as an annotation argument. */
private def readAnnotInfoArg()(implicit ctx: Context): Tree = {
  readByte() // skip the `annotinfo` tag
  val end = readNat() + readIndex
  readAnnotationContents(end)
}
/** Dispatch on the entry tag of a classfile annotation argument:
 *  nested annotation, array, or plain constant/tree argument.
 */
protected def readClassfileAnnotArg(i: Int)(implicit ctx: Context): Tree = bytes(index(i)) match {
  case ANNOTINFO => at(i, readAnnotInfoArg)
  case ANNOTARGARRAY => at(i, readArrayAnnotArg)
  case _ => readAnnotArg(i)
}
/** Read an annotation's contents. Not to be called directly, use
 * readAnnotation, readSymbolAnnotation, or readAnnotInfoArg.
 * Produces a `new Atp(args)` constructor-call tree.
 */
protected def readAnnotationContents(end: Int)(implicit ctx: Context): Tree = {
  val atp = readTypeRef()
  val args = {
    val t = new ListBuffer[Tree]
    while (readIndex != end) {
      val argref = readNat()
      t += {
        // A name entry signals a named argument `name = arg`.
        if (isNameEntry(argref)) {
          val name = at(argref, readName)
          val arg = readClassfileAnnotArg(readNat())
          NamedArg(name.asTermName, arg)
        } else readAnnotArg(argref)
      }
    }
    t.toList
  }
  // println(atp)
  val targs = atp.argTypes
  tpd.applyOverloaded(tpd.New(atp withoutArgs targs), nme.CONSTRUCTOR, args, targs, atp)
}
/** Read an annotation and as a side effect store it into
 * the symbol it requests. Called at top-level, for all
 * (symbol, annotInfo) entries.
 */
protected def readSymbolAnnotation()(implicit ctx: Context): Unit = {
  val tag = readByte()
  if (tag != SYMANNOT)
    errorBadSignature("symbol annotation expected (" + tag + ")")
  val end = readNat() + readIndex
  val target = readSymbolRef()
  // annotation body is parsed lazily via a deferred annotation
  target.addAnnotation(deferredAnnot(end))
}
/** Read an annotation entry and return it as a (lazily parsed) Annotation.
 *  Used when unpickling an ANNOTATED(WSELF)tpe or a NestedAnnotArg.
 */
protected def readAnnotation()(implicit ctx: Context): Annotation = {
  val entryTag = readByte()
  if (entryTag != ANNOTINFO)
    errorBadSignature("annotation expected (" + entryTag + ")")
  deferredAnnot(readNat() + readIndex)
}
/** A deferred annotation that can be completed by reading
 * the bytes between `readIndex` and `end`. Only the annotation's
 * type symbol is resolved eagerly; the argument trees are parsed on demand.
 */
protected def deferredAnnot(end: Int)(implicit ctx: Context): Annotation = {
  val start = readIndex
  val atp = readTypeRef()
  Annotation.deferred(
    atp.typeSymbol, implicit ctx => atReadPos(start, () => readAnnotationContents(end)))
}
/* Read an abstract syntax tree. The outer TREE entry wraps an inner
 * tag-dispatched tree encoding; most alternatives read their components
 * as further tree/name/symbol references. */
protected def readTree()(implicit ctx: Context): Tree = {
  val outerTag = readByte()
  if (outerTag != TREE)
    errorBadSignature("tree expected (" + outerTag + ")")
  val end = readNat() + readIndex
  val tag = readByte()
  val tpe = if (tag == EMPTYtree) NoType else readTypeRef()
  // Set by the three functions to follow. If symbol is non-null
  // after the new tree 't' has been created, t has its Symbol
  // set to symbol; and it always has its Type set to tpe.
  var symbol: Symbol = null
  var mods: Modifiers = null
  var name: Name = null
  /** Read a Symbol, Modifiers, and a Name */
  def setSymModsName(): Unit = {
    symbol = readSymbolRef()
    mods = readModifiersRef(symbol.isType)
    name = readNameRef()
  }
  /** Read a Symbol and a Name */
  def setSymName(): Unit = {
    symbol = readSymbolRef()
    name = readNameRef()
  }
  /** Read a Symbol */
  def setSym(): Unit = {
    symbol = readSymbolRef()
  }
  implicit val pos: Position = NoPosition
  tag match {
    case EMPTYtree =>
      EmptyTree
    case PACKAGEtree =>
      setSym()
      val pid = readTreeRef().asInstanceOf[RefTree]
      val stats = until(end, readTreeRef)
      PackageDef(pid, stats)
    case CLASStree =>
      setSymModsName()
      val impl = readTemplateRef()
      val tparams = until(end, readTypeDefRef)
      val cls = symbol.asClass
      // split the template body into the primary constructor and other stats
      val ((constr: DefDef) :: Nil, stats) =
        impl.body.partition(_.symbol == cls.primaryConstructor)
      ClassDef(cls, constr, tparams ++ stats)
    case MODULEtree =>
      setSymModsName()
      ModuleDef(symbol.asTerm, readTemplateRef().body)
    case VALDEFtree =>
      setSymModsName()
      val tpt = readTreeRef()
      val rhs = readTreeRef()
      ValDef(symbol.asTerm, rhs)
    case DEFDEFtree =>
      setSymModsName()
      // tparams/vparamss/tpt are consumed from the pickle but the DefDef is
      // rebuilt from the symbol, so only rhs is used here.
      val tparams = times(readNat(), readTypeDefRef)
      val vparamss = times(readNat(), () => times(readNat(), readValDefRef))
      val tpt = readTreeRef()
      val rhs = readTreeRef()
      DefDef(symbol.asTerm, rhs)
    case TYPEDEFtree =>
      setSymModsName()
      val rhs = readTreeRef()
      val tparams = until(end, readTypeDefRef)
      TypeDef(symbol.asType)
    case LABELtree =>
      setSymName()
      val rhs = readTreeRef()
      val params = until(end, readIdentRef)
      val ldef = DefDef(symbol.asTerm, rhs)
      def isCaseLabel(sym: Symbol) = sym.name.startsWith(nme.CASEkw)
      if (isCaseLabel(symbol)) ldef
      else Block(ldef :: Nil, Apply(Ident(symbol.termRef), Nil))
    case IMPORTtree =>
      setSym()
      val expr = readTreeRef()
      val selectors = until(end, () => {
        val fromName = readNameRef()
        val toName = readNameRef()
        val from = untpd.Ident(fromName)
        // NOTE(review): `to` is computed but a fresh Ident(toName) is built
        // below instead of reusing it — harmless duplication, flagged here.
        val to = untpd.Ident(toName)
        if (toName.isEmpty) from else untpd.Pair(from, untpd.Ident(toName))
      })
      Import(expr, selectors)
    case TEMPLATEtree =>
      setSym()
      val parents = times(readNat(), readTreeRef)
      val self = readValDefRef()
      val body = until(end, readTreeRef)
      untpd.Template(???, parents, self, body) // !!! TODO: pull out primary constructor
        .withType(symbol.namedType)
    case BLOCKtree =>
      val expr = readTreeRef()
      val stats = until(end, readTreeRef)
      Block(stats, expr)
    case CASEtree =>
      val pat = readTreeRef()
      val guard = readTreeRef()
      val body = readTreeRef()
      CaseDef(pat, guard, body)
    case ALTERNATIVEtree =>
      Alternative(until(end, readTreeRef))
    case STARtree =>
      readTreeRef()
      unimplementedTree("STAR")
    case BINDtree =>
      setSymName()
      Bind(symbol.asTerm, readTreeRef())
    case UNAPPLYtree =>
      val fun = readTreeRef()
      val args = until(end, readTreeRef)
      UnApply(fun, Nil, args, defn.AnyType) // !!! this is wrong in general
    case ARRAYVALUEtree =>
      val elemtpt = readTreeRef()
      val trees = until(end, readTreeRef)
      SeqLiteral(defn.SeqType.appliedTo(elemtpt.tpe :: Nil), trees)
      // note can't deal with trees passed to Java methods as arrays here
    case FUNCTIONtree =>
      setSym()
      val body = readTreeRef()
      val vparams = until(end, readValDefRef)
      // re-encode the Scala 2 anonymous function as a closure over a fresh
      // apply method owned by the function symbol's owner
      val applyType = MethodType(vparams map (_.name), vparams map (_.tpt.tpe), body.tpe)
      val applyMeth = ctx.newSymbol(symbol.owner, nme.apply, Method, applyType)
      Closure(applyMeth, Function.const(body.changeOwner(symbol, applyMeth)) _)
    case ASSIGNtree =>
      val lhs = readTreeRef()
      val rhs = readTreeRef()
      Assign(lhs, rhs)
    case IFtree =>
      val cond = readTreeRef()
      val thenp = readTreeRef()
      val elsep = readTreeRef()
      If(cond, thenp, elsep)
    case MATCHtree =>
      val selector = readTreeRef()
      val cases = until(end, readCaseDefRef)
      Match(selector, cases)
    case RETURNtree =>
      setSym()
      Return(readTreeRef(), Ident(symbol.termRef))
    case TREtree =>
      val block = readTreeRef()
      val finalizer = readTreeRef()
      val catches = until(end, readCaseDefRef)
      Try(block, catches, finalizer)
    case THROWtree =>
      Throw(readTreeRef())
    case NEWtree =>
      New(readTreeRef().tpe)
    case TYPEDtree =>
      val expr = readTreeRef()
      val tpt = readTreeRef()
      Typed(expr, tpt)
    case TYPEAPPLYtree =>
      val fun = readTreeRef()
      val args = until(end, readTreeRef)
      TypeApply(fun, args)
    case APPLYtree =>
      val fun = readTreeRef()
      val args = until(end, readTreeRef)
      /*
      if (fun.symbol.isOverloaded) {
        fun.setType(fun.symbol.info)
        inferMethodAlternative(fun, args map (_.tpe), tpe)
      }
      */
      Apply(fun, args) // note: can't deal with overloaded syms yet
    case APPLYDYNAMICtree =>
      setSym()
      val qual = readTreeRef()
      val args = until(end, readTreeRef)
      unimplementedTree("APPLYDYNAMIC")
    case SUPERtree =>
      setSym()
      val qual = readTreeRef()
      val mix = readTypeNameRef()
      Super(qual, mix, inConstrCall = false) // todo: revise
    case THIStree =>
      setSym()
      val name = readTypeNameRef()
      This(symbol.asClass)
    case SELECTtree =>
      setSym()
      val qualifier = readTreeRef()
      val selector = readNameRef()
      qualifier.select(symbol.namedType)
    case IDENTtree =>
      setSymName()
      Ident(symbol.namedType)
    case LITERALtree =>
      Literal(readConstantRef())
    case TYPEtree =>
      TypeTree(tpe)
    case ANNOTATEDtree =>
      val annot = readTreeRef()
      val arg = readTreeRef()
      Annotated(annot, arg)
    case SINGLETONTYPEtree =>
      SingletonTypeTree(readTreeRef())
    case SELECTFROMTYPEtree =>
      val qualifier = readTreeRef()
      val selector = readTypeNameRef()
      SelectFromTypeTree(qualifier, symbol.namedType)
    case COMPOUNDTYPEtree =>
      readTemplateRef()
      TypeTree(tpe)
    case APPLIEDTYPEtree =>
      val tpt = readTreeRef()
      val args = until(end, readTreeRef)
      AppliedTypeTree(tpt, args)
    case TYPEBOUNDStree =>
      val lo = readTreeRef()
      val hi = readTreeRef()
      TypeBoundsTree(lo, hi)
    case EXISTENTIALTYPEtree =>
      val tpt = readTreeRef()
      val whereClauses = until(end, readTreeRef)
      TypeTree(tpe)
    case _ =>
      noSuchTreeTag(tag, end)
  }
}
/** Report an unrecognized tree tag as a bad-signature error. */
def noSuchTreeTag(tag: Int, end: Int)(implicit ctx: Context) =
  errorBadSignature("unknown tree type (" + tag + ")")
/** Report a tree kind that cannot be reconstructed from Scala 2 pickles. */
def unimplementedTree(what: String)(implicit ctx: Context) =
  errorBadSignature(s"cannot read $what trees from Scala 2.x signatures")
/** Read a MODIFIERS entry: two 32-bit pickled-flag halves followed by a
 *  privateWithin name reference. Annotations are not pickled here (Nil).
 */
def readModifiers(isType: Boolean)(implicit ctx: Context): Modifiers = {
  val tag = readNat()
  if (tag != MODIFIERS)
    errorBadSignature("expected a modifiers tag (" + tag + ")")
  readNat() // consume (unused) end offset
  val hiBits = readNat().toLong
  val loBits = readNat().toLong
  val pickledFlags = (hiBits << 32) + loBits
  val flags = unpickleScalaFlags(pickledFlags, isType)
  val privateWithin = readNameRef().asTypeName
  Trees.Modifiers[Type](flags, privateWithin, Nil)
}
/** Read a tree reference, requiring it to be a Template. */
protected def readTemplateRef()(implicit ctx: Context): Template =
  readTreeRef() match {
    case templ: Template => templ
    case other =>
      errorBadSignature("expected a template (" + other + ")")
  }
/** Read a tree reference, requiring it to be a CaseDef. */
protected def readCaseDefRef()(implicit ctx: Context): CaseDef =
  readTreeRef() match {
    case caseDef: CaseDef => caseDef
    case tree => errorBadSignature("expected a case def (" + tree + ")")
  }
/** Read a tree reference, requiring it to be a ValDef. */
protected def readValDefRef()(implicit ctx: Context): ValDef =
  readTreeRef() match {
    case tree: ValDef => tree
    case other =>
      errorBadSignature("expected a ValDef (" + other + ")")
  }
/** Read a tree reference, requiring it to be an Ident. */
protected def readIdentRef()(implicit ctx: Context): Ident =
  readTreeRef() match {
    case tree: Ident => tree
    case other =>
      errorBadSignature("expected an Ident (" + other + ")")
  }
/** Read a tree reference, requiring it to be a TypeDef.
 *  Fix: error message said "an TypeDef"; corrected to "a TypeDef"
 *  to match the grammar of the sibling readXRef helpers.
 */
protected def readTypeDefRef()(implicit ctx: Context): TypeDef =
  readTreeRef() match {
    case tree: TypeDef => tree
    case other =>
      errorBadSignature("expected a TypeDef (" + other + ")")
  }
}
| yusuke2255/dotty | src/dotty/tools/dotc/core/unpickleScala2/Scala2Unpickler.scala | Scala | bsd-3-clause | 45,001 |
// Compiler regression test fixture (t2940): references `Cycle`, which is
// presumably declared in a sibling source of the same test — TODO confirm.
abstract class Error {
  val c: Cycle[_]
}
// Exercises instantiating an anonymous subclass of a self-referential
// generic trait; the exact shape is deliberate for the regression test.
object Test {
  trait Quux[T] extends Cycle[Quux[T]]
  val x = new Quux[Int] { def doStuff(): Unit = { } }
  def main(args: Array[String]): Unit = {
  }
}
| AlexSikia/dotty | tests/pos/java-interop/t2940/Error.scala | Scala | bsd-3-clause | 200 |
package org.jetbrains.plugins.scala
package javaHighlighting
import com.intellij.ide.highlighter.JavaFileType
import com.intellij.lang.annotation.HighlightSeverity
import com.intellij.pom.java.LanguageLevel
import com.intellij.psi.{PsiDocumentManager, PsiFile}
import org.jetbrains.plugins.scala.annotator.{AnnotatorHolderMock, ScalaAnnotator, _}
import org.jetbrains.plugins.scala.base.{ScalaFixtureTestCase, ScalaLibraryLoader}
import org.jetbrains.plugins.scala.extensions.PsiElementExt
import org.jetbrains.plugins.scala.util.TestUtils
import org.junit.Assert
/**
* Author: Svyatoslav Ilinskiy
* Date: 7/8/15
*/
class JavaHighlightingTest extends ScalaFixtureTestCase {
def testProtected() = {
val scala =
"""
|class MeaningOfLifeSpec {
| val c = new UltimateQuestion {}
| def meaningOfLifeScala() {
| c.meaningOfLife()
| }
|}
""".stripMargin
val java =
"""
|public class UltimateQuestion {
| protected int meaningOfLife() {
| return 42; //Answer to the Ultimate Question of Life, the Universe, and Everything
| }
|}
""".stripMargin
assertNoErrors(messagesFromScalaCode(scala, java))
}
  /** AnyVal value classes used from Java: only the raw-literal call should error. */
  def testValueTypes(): Unit = {
    val scala =
      """
        |class Order(val limitPrice: Price, val qty: Quantity)
        |class Prices(val prices: java.util.List[Price])
        |
        |class Price(val doubleVal: Double) extends AnyVal
        |class Quantity(val doubleVal: Double) extends AnyVal
        |class Bar
        |class BarWrapper(val s: Bar) extends AnyVal
        |class BarWrappers(val bars: java.util.List[BarWrapper])
        |
      """.stripMargin
    val java =
      """
        |import java.util.ArrayList;
        |
        |public class JavaHighlightingValueTypes {
        |
        |  public static void main(String[] args) {
        |    Order o = new Order(19.0, 10);
        |    System.out.println("Hello World! " + o.limitPrice());
        |    Price p = new Price(10);
        |
        |    Prices pr = new Prices(new ArrayList<Price>());
        |    BarWrappers barWrappers = new BarWrappers(new ArrayList<Bar>());
        |
        |    doublePrice(new Price(10.0));
        |    doublePrice(42.0);
        |  }
        |
        |  public static void doublePrice(Price p) {
        |    System.out.println(p.doubleVal() * 2);
        |  }
        |
        |}
      """.stripMargin
    // passing a raw double where a value class is expected must be the only error
    assertMatches(messagesFromJavaCode(scala, java, javaClassName = "JavaHighlightingValueTypes")) {
      case Error("(42.0)", CannotBeApplied()) :: Nil =>
    }
  }
  /** Calling scala.Option.apply from Java: expect no highlighting errors. */
  def testOptionApply(): Unit = {
    val java =
      """
        |import scala.Option;
        |
        |public abstract class OptionApply {
        |
        |  public OptionApply() {
        |    setAction(Option.apply("importVCardFile"));
        |  }
        |
        |  public abstract void setAction(Option<String> bar);
        |}
      """.stripMargin
    assertNoErrors(messagesFromJavaCode(scalaFileText = "", java, javaClassName = "OptionApply"))
  }
  /** Backticked member (`type`) accessed from Java: only the mangled-name
   *  variant should fail to resolve. */
  def testAccessBacktick(): Unit = {
    val scala =
      """
        |import scala.beans.BeanProperty
        |
        |case class TestAccessBacktick(@BeanProperty `type`:String)
      """.stripMargin
    val java =
      """
        |public class TestJavaAAA {
        |  public static void main(String[] args) {
        |    TestAccessBacktick t = new TestAccessBacktick("42");
        |    t.type();
        |    t.getType();
        |    t.get$u0060type$u0060();
        |  }
        |}
      """.stripMargin
    assertMatches(messagesFromJavaCode(scala, java, javaClassName = "TestJavaAAA")) {
      case Error("get$u0060type$u0060", CannotResolveMethod()) :: Nil =>
    }
  }
  /** Catching multiple checked exceptions from a Scala method in Java:
   *  expect no highlighting errors. */
  def testMultipleThrowStatements(): Unit = {
    val scala = ""
    val java =
      """
        |import scala.concurrent.Await;
        |import scala.concurrent.Future;
        |import scala.concurrent.duration.Duration;
        |
        |import java.util.concurrent.TimeoutException;
        |
        |public class ThrowsJava {
        |  public void bar(Future<Integer> scalaFuture) {
        |    try {
        |      Await.ready(scalaFuture, Duration.Inf());
        |    } catch (InterruptedException e) {
        |      e.printStackTrace();
        |    } catch (TimeoutException e) {
        |      e.printStackTrace();
        |    }
        |  }
        |}
      """.stripMargin
    assertNoErrors(messagesFromJavaCode(scala, java, javaClassName = "ThrowsJava"))
  }
  /** Java class overriding a method inherited from a Scala trait:
   *  expect no highlighting errors. */
  def testOverrideFinal(): Unit = {
    val scala = ""
    val java =
      """
        |import scala.Function1;
        |import scala.concurrent.ExecutionContext;
        |
        |public abstract class Future<T> implements scala.concurrent.Future<T> {
        |
        |  @Override
        |  public scala.concurrent.Future<T> withFilter(Function1<T, Object> pred, ExecutionContext executor) {
        |    return null;
        |  }
        |}
      """.stripMargin
    assertNoErrors(messagesFromJavaCode(scala, java, "Future"))
  }
  /** SCL-5617: scala.Option/Function1 generics used from Java:
   *  expect no highlighting errors. */
  def testSCL5617Option(): Unit = {
    val scala = ""
    val java =
      """
        |import scala.Function1;
        |import scala.Option;
        |import scala.runtime.BoxedUnit;
        |import java.util.concurrent.atomic.AtomicReference;
        |import scala.runtime.AbstractFunction1;
        |
        |public class SCL5617 {
        |  public static void main(String[] args) {
        |    AtomicReference<Function1<Object, BoxedUnit>> f = new AtomicReference<Function1<Object, BoxedUnit>>(new AbstractFunction1<Object, BoxedUnit>() {
        |      public BoxedUnit apply(Object o) {
        |        Option<String> option = Option.empty();
        |        return BoxedUnit.UNIT;
        |      }
        |    });
        |
        |    Option<Function1<Object, BoxedUnit>> o = Option.apply(f.get());
        |  }
        |}
        |
      """.stripMargin
    assertNoErrors(messagesFromJavaCode(scala, java, "SCL5617"))
  }
def testCaseClassImplement() = {
val scala = "case class CaseClass()"
val java =
"""
|public class CaseClassExtended extends CaseClass {
|
|}
""".stripMargin
assertNoErrors(messagesFromJavaCode(scala, java, javaClassName = "CaseClassExtended"))
}
  /** Java class extending a Scala class with `var` constructor parameters,
   *  checked from both directions: expect no highlighting errors. */
  def testClassParameter(): Unit = {
    val scala =
      """
        |class ScalaClass (var name: String, var surname: String)
        |
        |object Start {
        |  def main(args: Array[String]) {
        |    val scalaClassObj = new ScalaClass("Dom", "Sien")
        |    println(scalaClassObj.name)
        |    println(scalaClassObj.surname)
        |
        |    val javaClassObj = new JavaClass("Dom2", "Sien2", 31)
        |    println(javaClassObj.name)
        |    println(javaClassObj.surname)
        |    println(javaClassObj.getAge)
        |  }
        |}
      """.stripMargin
    val java =
      """
        |public class JavaClass extends ScalaClass {
        |  private int age;
        |
        |  public JavaClass(String name, String surname, int age) {
        |    super(name, surname);
        |    this.age = age;
        |  }
        |
        |  public int getAge() {
        |    return age;
        |  }
        |
        |  public void setAge(int age) {
        |    this.age = age;
        |  }
        |}
      """.stripMargin
    // check highlighting of both the Java and the Scala side
    assertNoErrors(messagesFromJavaCode(scala, java, "JavaClass"))
    assertNoErrors(messagesFromScalaCode(scala, java))
  }
  /** SCL-3390: accessing an inherited val param accessor through a subclass,
   *  from both Java and Scala clients: expect no highlighting errors. */
  def testSCL3390ParamAccessor(): Unit = {
    val scalaCode =
      """
        |object ScalaClient {
        |  def main(args: Array[String]) {
        |    new Sub(1).x
        |  }
        |}
        |
        |class Super(val x: Int)
        |
        |class Sub(x: Int) extends Super(x)
      """.stripMargin
    val javaCode =
      """
        |public class JavaClientSCL3390 {
        |  public static void main(String[] args) {
        |    new Sub(1).x();
        |  }
        |}
      """.stripMargin
    assertNoErrors(messagesFromJavaCode(scalaCode, javaCode, "JavaClientSCL3390"))
    assertNoErrors(messagesFromScalaCode(scalaCode, javaCode))
  }
  /** SCL-3498: Java wildcard array type (`Class<? extends Throwable>[]`)
   *  satisfied by a Scala Array(classOf[...]): expect no highlighting errors. */
  def testSCL3498ExistentialTypesFromJava(): Unit = {
    val javaCode =
      """
        |public @interface Transactional {
        |  Class<? extends Throwable>[] noRollbackFor() default {};
        |}
      """.stripMargin
    val scalaCode =
      """
        |@Transactional(noRollbackFor = Array(classOf[RuntimeException])) // expected Array[Class[_ <: Throwable] found Array[Class[RuntimeException]]
        |class A
      """.stripMargin
    assertNoErrors(messagesFromScalaCode(scalaCode, javaCode))
  }
// SCL-3679: a public Java field and a method sharing the same name
// (`hasIsCompressed`) must both stay resolvable from Scala code.
def testResolvePublicJavaFieldSameNameAsMethod(): Unit = {
val scalaCode =
"""
|package SCL3679
|
|object ResolvePublicJavaFieldSameNameAsMethod {
| def main(args: Array[String]) {
| println("foo")
| new ResolvePublicJavaFieldSameNameAsMethodJavaClass().hasIsCompressed
| }
|}
""".stripMargin
val javaCode =
"""
|package SCL3679;
|
|public class ResolvePublicJavaFieldSameNameAsMethodJavaClass {
| public boolean hasIsCompressed;
| public boolean hasIsCompressed() {
| System.out.println("In the method!");
| return hasIsCompressed;
| }
|
|}
""".stripMargin
assertNoErrors(messagesFromScalaCode(scalaCode, javaCode))
}
// SCL-8866 (A): a Java class extending a generic Scala base whose trait
// declares a plain (non-parameterized) inner class must highlight cleanly.
def testGenericsPlainInnerClass(): Unit = {
val scalaCode =
"""
|trait FSM[S, D] {
| final class TransformHelper {}
| final def transform(): TransformHelper = ???
|}
|
|
|abstract class Base[S, D] extends FSM[S, D]
""".stripMargin
val javaCode =
"""
|public class SCL8866A extends Base<String, String> {}
""".stripMargin
assertNoErrors(messagesFromJavaCode(scalaCode, javaCode, javaClassName = "SCL8866A"))
}
// SCL-8866 (B): same scenario but with a parameterized inner class and an
// override whose return type mentions that inner class.
def testGenericsParameterizedInnerClass(): Unit = {
val scalaCode =
"""
|abstract class FSM[S, D] {
| class TransformHelper[T]
| def transform(): TransformHelper[Int] = ???
|}
|
|abstract class Base extends FSM[Int, String] {
| override def transform(): TransformHelper[Int] = ???
|}
""".stripMargin
val javaCode =
"""
|public class SCL8866B extends Base {
|
|}
""".stripMargin
assertNoErrors(messagesFromJavaCode(scalaCode, javaCode, "SCL8866B"))
}
// Java must be able to read the constructor fields of a @specialized Scala
// class (SpecClass.t) when extending its Integer specialization.
def testSpecializedFields(): Unit = {
val scalaCode = "class SpecClass[@specialized(Int) T](val t: T, val s: String)"
val javaCode =
"""
|public class Pair extends SpecClass<Integer> {
| public Pair(SpecClass<Integer> i) {
| super(i.t, "");
| }
|}
""".stripMargin
assertNoErrors(messagesFromJavaCode(scalaCode, javaCode, "Pair"))
}
// SCL-9412: a Java method reference to a Scala auxiliary constructor
// (Scala::new inside Stream.map) must not be flagged — auxiliary
// constructors have no declared return type.
def testConstructorReturnTypeNull(): Unit = {
val scalaCode =
"""
|class Scala(val s: String) {
| def this(i: Integer) = this(i.toString)
|}
""".stripMargin
val javaCode =
"""
|import java.util.stream.Stream;
|
|public class SCL9412 {
| Stream<Scala> testScala() {
| return Stream.of(1).map(Scala::new);
| }
|}
""".stripMargin
assertNoErrors(messagesFromJavaCode(scalaCode, javaCode, "SCL9412"))
}
// Adds the Scala file plus a Java file named `<javaClassName>.java` to the
// test project, runs highlighting on the Java file, and returns every
// ERROR-severity highlight as an Error message.
def messagesFromJavaCode(scalaFileText: String, javaFileText: String, javaClassName: String): List[Message] = {
myFixture.addFileToProject("dummy.scala", scalaFileText)
val myFile: PsiFile = myFixture.addFileToProject(javaClassName + JavaFileType.DOT_DEFAULT_EXTENSION, javaFileText)
// Highlighting runs against the file currently open in the editor.
myFixture.openFileInEditor(myFile.getVirtualFile)
val allInfo = myFixture.doHighlighting()
import scala.collection.JavaConverters._
// Keep only ERROR-severity infos; warnings/infos are ignored on purpose.
allInfo.asScala.toList.collect {
case highlightInfo if highlightInfo.`type`.getSeverity(null) == HighlightSeverity.ERROR =>
new Error(highlightInfo.getText, highlightInfo.getDescription)
}
}
// Adds the Java file, opens the Scala file, runs ScalaAnnotator over every
// PSI element, and returns the genuine Error annotations. Errors carrying a
// null text or null description are deliberately dropped (they correspond
// to incomplete/irrelevant annotations in this fixture).
def messagesFromScalaCode(scalaFileText: String, javaFileText: String): List[Message] = {
myFixture.addFileToProject("dummy.java", javaFileText)
myFixture.configureByText("dummy.scala", scalaFileText)
// Ensure PSI and document are in sync before annotating.
PsiDocumentManager.getInstance(getProject).commitAllDocuments()
val mock = new AnnotatorHolderMock
val annotator = new ScalaAnnotator
getFile.depthFirst.foreach(annotator.annotate(_, mock))
mock.annotations.filter {
case Error(_, null) | Error(null, _) => false
case Error(_, _) => true
case _ => false
}
}
/** Asserts that `actual` is matched by `pattern`; on failure the message
  * includes the actual value for easier diagnosis.
  * (Explicit `: Unit =` replaces the deprecated procedure syntax.) */
def assertMatches[T](actual: T)(pattern: PartialFunction[T, Unit]): Unit = {
Assert.assertTrue("actual: " + actual.toString, pattern.isDefinedAt(actual))
}
// Convenience assertion: the collected highlighting messages must be empty.
def assertNoErrors(messages: List[Message]): Unit = {
assertMatches(messages) {
case Nil =>
}
}
// Extractors used by tests to match highlighting error messages by substring,
// e.g. `case CannotResolveMethod() => ...`.
val CannotResolveMethod = ContainsPattern("Cannot resolve method")
val CannotBeApplied = ContainsPattern("cannot be applied")

/** Boolean extractor: matches any string containing `fragment`. */
case class ContainsPattern(fragment: String) {
  // Explicit result type on the public extractor method.
  def unapply(s: String): Boolean = s.contains(fragment)
}
// Attaches a Scala SDK to the test project; assigned in setUp, cleaned in tearDown.
private var scalaLibraryLoader: ScalaLibraryLoader = null
override def setUp() = {
super.setUp()
// Tests rely on Java 8 features (streams, method references).
TestUtils.setLanguageLevel(getProject, LanguageLevel.JDK_1_8)
scalaLibraryLoader = new ScalaLibraryLoader(getProject, myFixture.getModule, null)
scalaLibraryLoader.loadScala(TestUtils.DEFAULT_SCALA_SDK_VERSION)
}
// Releases the Scala SDK attached in setUp, then delegates to the framework.
override def tearDown(): Unit = {
  // Guard against an NPE when setUp failed before the loader was assigned;
  // a teardown NPE would otherwise mask the original test failure.
  if (scalaLibraryLoader != null) {
    scalaLibraryLoader.clean()
  }
  super.tearDown()
}
}
| ghik/intellij-scala | test/org/jetbrains/plugins/scala/javaHighlighting/JavaHighlightingTest.scala | Scala | apache-2.0 | 13,947 |
package relationshipextractor
import org.easyrules.core.RulesEngineBuilder
import scala.collection.mutable
/**
* Created by erik on 05/05/16.
*/
/** Runs the relationship-inference rules over every known person. */
object RelationInferrer {

  /**
   * Feeds each person in `persons` to every registered [[RelationshipRule]]
   * and fires the rules engine once per person. Results are produced through
   * the rules' side effects on the persons/relations.
   *
   * @param persons the mutable set of persons to run inference over
   */
  def infer(persons: mutable.Set[Person]): Unit = {
    val rulesEngine = RulesEngineBuilder.aNewRulesEngine().withSilentMode(true).build()
    val rules = Seq[RelationshipRule](new InferFather())
    rules.foreach(r => rulesEngine.registerRule(r))
    // Fixed-point iteration is intentionally disabled for now; a single pass
    // over the persons is performed.
    //do {
    rules.foreach(r => r.resetHasFired())
    for (person <- persons) {
      rules.foreach(r => r.input(person))
      // Parentheses added: fireRules() is a side-effecting call.
      rulesEngine.fireRules()
    }
    // } while(rules.exists(r => r.hasFired()))
  }
}
| ErikGartner/relationship-extractor | src/main/scala/relationshipextractor/RelationInferrer.scala | Scala | apache-2.0 | 649 |
package lila.video
import org.joda.time.DateTime
import reactivemongo.bson._
import reactivemongo.core.commands._
import scala.concurrent.duration._
import lila.common.paginator._
import lila.db.paginator.BSONAdapter
import lila.db.Types.Coll
import lila.memo.AsyncCache
import lila.user.{ User, UserRepo }
/**
 * Data-access layer for the video library: the `video` collection itself,
 * per-user `view` records, and tag aggregation, backed by MongoDB
 * (reactivemongo). All results are asynchronous (`Fu`).
 *
 * NOTE(review): `filterColl` is not referenced anywhere in this class —
 * confirm whether it is still needed.
 */
private[video] final class VideoApi(
videoColl: Coll,
viewColl: Coll,
filterColl: Coll) {
import lila.db.BSON.BSONJodaDateTimeHandler
import reactivemongo.bson.Macros
// Macro-derived BSON (de)serializers for the persisted case classes.
private implicit val YoutubeBSONHandler = {
import Youtube.Metadata
Macros.handler[Metadata]
}
private implicit val VideoBSONHandler = Macros.handler[Video]
private implicit val TagNbBSONHandler = Macros.handler[TagNb]
import View.viewBSONHandler
// Decorates each video with a per-user "already seen" flag; anonymous
// users always get seen = false.
private def videoViews(userOption: Option[User])(videos: Seq[Video]): Fu[Seq[VideoView]] = userOption match {
case None => fuccess {
videos map { VideoView(_, false) }
}
case Some(user) => view.seenVideoIds(user, videos) map { ids =>
videos.map { v =>
VideoView(v, ids contains v.id)
}
}
}
// CRUD, search and listing over the video collection.
object video {
private val maxPerPage = 18
def find(id: Video.ID): Fu[Option[Video]] =
videoColl.find(BSONDocument("_id" -> id)).one[Video]
// Full-text search: each query word is quoted (exact-word match) and the
// results are ranked by Mongo's textScore.
def search(user: Option[User], query: String, page: Int): Fu[Paginator[VideoView]] = {
val q = query.split(' ').map { word => s""""$word"""" } mkString " "
val textScore = BSONDocument("score" -> BSONDocument("$meta" -> "textScore"))
Paginator(
adapter = new BSONAdapter[Video](
collection = videoColl,
selector = BSONDocument(
"$text" -> BSONDocument("$search" -> q)
),
projection = textScore,
sort = textScore
) mapFutureList videoViews(user),
currentPage = page,
maxPerPage = maxPerPage)
}
// Upsert by video id.
def save(video: Video): Funit =
videoColl.update(
BSONDocument("_id" -> video.id),
BSONDocument("$set" -> video),
upsert = true).void
// Deletes every video whose id is NOT in `ids` (sync with upstream feed).
def removeNotIn(ids: List[Video.ID]) =
videoColl.remove(
BSONDocument("_id" -> BSONDocument("$nin" -> ids))
).void
// Updates only the youtube metadata of an existing video (no upsert).
def setMetadata(id: Video.ID, metadata: Youtube.Metadata) =
videoColl.update(
BSONDocument("_id" -> id),
BSONDocument("$set" -> BSONDocument("metadata" -> metadata)),
upsert = false
).void
// All video ids, fetched with a projection on _id only.
def allIds: Fu[List[Video.ID]] =
videoColl.find(
BSONDocument(),
BSONDocument("_id" -> true)
).cursor[BSONDocument].collect[List]() map { doc =>
doc flatMap (_.getAs[String]("_id"))
}
// Most-liked videos first.
def popular(user: Option[User], page: Int): Fu[Paginator[VideoView]] = Paginator(
adapter = new BSONAdapter[Video](
collection = videoColl,
selector = BSONDocument(),
projection = BSONDocument(),
sort = BSONDocument("metadata.likes" -> -1)
) mapFutureList videoViews(user),
currentPage = page,
maxPerPage = maxPerPage)
// Videos carrying ALL the given tags; falls back to `popular` when no
// tags are selected.
def byTags(user: Option[User], tags: List[Tag], page: Int): Fu[Paginator[VideoView]] =
if (tags.isEmpty) popular(user, page)
else Paginator(
adapter = new BSONAdapter[Video](
collection = videoColl,
selector = BSONDocument(
"tags" -> BSONDocument("$all" -> tags)
),
projection = BSONDocument(),
sort = BSONDocument("metadata.likes" -> -1)
) mapFutureList videoViews(user),
currentPage = page,
maxPerPage = maxPerPage)
def byAuthor(user: Option[User], author: String, page: Int): Fu[Paginator[VideoView]] =
Paginator(
adapter = new BSONAdapter[Video](
collection = videoColl,
selector = BSONDocument(
"author" -> author
),
projection = BSONDocument(),
sort = BSONDocument("metadata.likes" -> -1)
) mapFutureList videoViews(user),
currentPage = page,
maxPerPage = maxPerPage)
// Candidates must share at least one tag with `video` (excluding itself);
// they are then re-ranked in memory by tag similarity and trimmed to `max`.
def similar(user: Option[User], video: Video, max: Int): Fu[Seq[VideoView]] =
videoColl.find(BSONDocument(
"tags" -> BSONDocument("$in" -> video.tags),
"_id" -> BSONDocument("$ne" -> video.id)
)).sort(BSONDocument("metadata.likes" -> -1))
.cursor[Video]
.collect[List]().map { videos =>
videos.sortBy { v => -v.similarity(video) } take max
} flatMap videoViews(user)
// Total video count, cached for a day.
object count {
private val cache = AsyncCache.single(
f = videoColl.db command Count(videoColl.name, none),
timeToLive = 1.day)
def clearCache = cache.clear
def apply: Fu[Int] = cache apply true
}
}
// Per-user "has watched" records, keyed by (videoId, userId).
object view {
def find(videoId: Video.ID, userId: String): Fu[Option[View]] =
viewColl.find(BSONDocument(
View.BSONFields.id -> View.makeId(videoId, userId)
)).one[View]
// Inserting the same view twice is a no-op (duplicate key error swallowed).
def add(a: View) = (viewColl insert a).void recover {
case e: reactivemongo.core.commands.LastError if e.getMessage.contains("duplicate key error") => ()
}
def hasSeen(user: User, video: Video): Fu[Boolean] =
viewColl.db command Count(viewColl.name, BSONDocument(
View.BSONFields.id -> View.makeId(video.id, user.id)
).some) map (0!=)
// Of the given videos, returns the ids the user has already watched.
def seenVideoIds(user: User, videos: Seq[Video]): Fu[Set[Video.ID]] =
viewColl.find(
BSONDocument(
"_id" -> BSONDocument("$in" -> videos.map { v =>
View.makeId(v.id, user.id)
})
),
BSONDocument(View.BSONFields.videoId -> true, "_id" -> false)
).cursor[BSONDocument].collect[List]() map { docs =>
docs.flatMap(_.getAs[String](View.BSONFields.videoId)).toSet
}
}
// Tag statistics, computed by Mongo aggregation and cached for a day.
object tag {
def paths(filterTags: List[Tag]): Fu[List[TagNb]] = pathsCache(filterTags.sorted)
def allPopular: Fu[List[TagNb]] = popularCache(true)
def clearCache = pathsCache.clear >> popularCache.clear
// At most this many tags are shown in the tag-filter UI.
private val max = 25
// Tag counts restricted to videos matching all of `filterTags`; selected
// tags are forced to the front of the resulting list.
private val pathsCache = AsyncCache[List[Tag], List[TagNb]](
f = filterTags => {
val allPaths =
if (filterTags.isEmpty) allPopular map { tags =>
tags.filterNot(_.isNumeric)
}
else {
val command = Aggregate(videoColl.name, Seq(
Match(BSONDocument("tags" -> BSONDocument("$all" -> filterTags))),
Project("tags" -> BSONBoolean(true)),
Unwind("tags"),
GroupField("tags")("nb" -> SumValue(1))
))
videoColl.db.command(command) map {
_.toList.flatMap(_.asOpt[TagNb])
}
}
allPopular zip allPaths map {
case (all, paths) =>
// Overlay filtered counts onto the popular tag list (0 when absent).
val tags = all map { t =>
paths find (_._id == t._id) getOrElse TagNb(t._id, 0)
} filterNot (_.empty) take max
// Make sure every currently-selected tag is present in the result.
val missing = filterTags filterNot { t =>
tags exists (_.tag == t)
}
val list = tags.take(max - missing.size) ::: missing.flatMap { t =>
all find (_.tag == t)
}
// Selected tags first, then by descending count.
list.sortBy { t =>
if (filterTags contains t.tag) Int.MinValue
else -t.nb
}
}
},
timeToLive = 1.day)
// Global tag counts across all videos, sorted by descending count.
private val popularCache = AsyncCache.single[List[TagNb]](
f = {
val command = Aggregate(videoColl.name, Seq(
Project("tags" -> BSONBoolean(true)),
Unwind("tags"),
GroupField("tags")("nb" -> SumValue(1)),
Sort(Seq(Descending("nb")))
))
videoColl.db.command(command) map {
_.toList.flatMap(_.asOpt[TagNb])
}
},
timeToLive = 1.day)
}
}
| Happy0/lila | modules/video/src/main/VideoApi.scala | Scala | mit | 7,691 |
object Test extends App {
Macros.abort
} | felixmulder/scala | test/files/neg/macro-abort/Test_2.scala | Scala | bsd-3-clause | 42 |
/*
* Copyright 2015 Andreas Mosburger
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming._
import org.apache.spark.streaming.dstream.DStream
/**
 * Streaming query: emit (subject, delay) pairs for all triples whose
 * predicate contains "hasDelay", ordered by the delay value (descending)
 * within each micro-batch.
 */
class Query10 extends Query {
  def process(triple_objects: DStream[Seq[String]], static_data: RDD[(String, String)]) : DStream[Seq[String]] = {
    // 1-second tumbling window over the incoming triples.
    val windowed = triple_objects.window(Seconds(1), Seconds(1))
    // Keep only triples whose predicate (index 1) mentions hasDelay.
    val delayed = windowed.filter(_(1).contains("hasDelay"))
    // Sort by delay value (object, index 2), descending.
    // NOTE: sortByKey only orders within a single micro-batch, not globally.
    val sorted = delayed.map(x => (x(2), x(0))).transform(rdd => rdd.sortByKey(false))
    // Re-shape to (subject, delay) rows; the last expression is the result —
    // explicit `return` removed as non-idiomatic Scala.
    sorted.map(x => Seq(x._2, x._1))
  }
}
| mosimos/sr_data_generator | spark/src/main/scala/queries/Query10.scala | Scala | apache-2.0 | 1,217 |
package fr.laas.fape.anml.model.ir
import fr.laas.fape.anml.model.abs.AbstractChronicle
import fr.laas.fape.anml.model.abs.statements.AbstractStatement
// Groups several abstract statements so they can be handled as a unit when
// assembling an AbstractChronicle.
abstract class AbstractChronicleGroup {
// NOTE(review): the exact ordering semantics of firsts/lasts (temporal vs
// structural) are not visible here — confirm against concrete subclasses.
def firsts : List[AbstractStatement]
def lasts : List[AbstractStatement]
// All statements contained in this group.
def statements : List[AbstractStatement]
// The chronicle built from this group's statements.
def chronicle : AbstractChronicle
}
| athy/fape | anml-parser/src/main/scala/fr/laas/fape/anml/model/ir/AbstractChronicleGroup.scala | Scala | bsd-2-clause | 354 |
package dx.compiler
import java.nio.file.{Path, Paths}
import dx.api._
import dx.compiler.Main.SuccessIR
import dx.compiler.ParameterMeta.{translateMetaKVs, translateMetaValue => translate}
import dx.core.util.MainUtils.{Failure, UnsuccessfulTermination}
import org.scalatest.Inside._
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import wdlTools.eval.WdlValues
import wdlTools.generators.code.WdlV1Generator
import wdlTools.types.{WdlTypes, TypedAbstractSyntax => TAT}
import wdlTools.util.Logger
// These tests involve compilation -without- access to the platform.
//
class GenerateIRTest extends AnyFlatSpec with Matchers {
private val dxApi = DxApi(Logger.Quiet)
// Resolves a test resource ("/<dir>/<basename>") on the classpath to a
// filesystem path.
private def pathFromBasename(dir: String, basename: String): Path = {
  val resourcePath = getClass.getResource(s"/${dir}/${basename}").getPath
  Paths.get(resourcePath)
}
// Project id is required by the compiler CLI even though these tests run
// without platform access.
private val dxProject = dxApi.currentProject
// task compilation
// Standard flags: IR-only compilation, locked workflows, warnings fatal.
private val cFlags = List("--compileMode",
"ir",
"-quiet",
"-fatalValidationWarnings",
"--locked",
"--project",
dxProject.getId)
// Same as cFlags but without --locked, for unlocked-workflow tests.
private val cFlagsUnlocked =
List("--compileMode", "ir", "-quiet", "-fatalValidationWarnings", "--project", dxProject.getId)
// Verbose variant used when debugging GenerateIR output.
val dbgFlags = List("--compileMode",
"ir",
"--verbose",
"--verboseKey",
"GenerateIR",
"--locked",
"--project",
dxProject.id)
// Fetches the parameter_meta entry for the given input definition, if the
// task declares one; None when the section or the key is absent.
private def getParamMeta(task: TAT.Task, iDef: TAT.InputDefinition): Option[TAT.MetaValue] =
  task.parameterMeta.flatMap {
    case TAT.MetaSection(kvs, _) => kvs.get(iDef.name)
  }
it should "IR compile a single WDL task" in {
val path = pathFromBasename("compiler", "add.wdl")
Main.compile(path.toString :: cFlags) shouldBe a[SuccessIR]
}
it should "IR compile a task with docker" in {
val path = pathFromBasename("compiler", "BroadGenomicsDocker.wdl")
Main.compile(path.toString :: cFlags) shouldBe a[SuccessIR]
}
// workflow compilation
it should "IR compile a linear WDL workflow without expressions" in {
val path = pathFromBasename("compiler", "wf_linear_no_expr.wdl")
Main.compile(path.toString :: cFlags) shouldBe a[SuccessIR]
}
it should "IR compile a linear WDL workflow" in {
val path = pathFromBasename("compiler", "wf_linear.wdl")
Main.compile(
path.toString :: cFlags
) shouldBe a[SuccessIR]
}
it should "IR compile unlocked workflow" in {
val path = pathFromBasename("compiler", "wf_linear.wdl")
Main.compile(
path.toString :: cFlagsUnlocked
) shouldBe a[SuccessIR]
}
it should "IR compile a non trivial linear workflow with variable coercions" in {
val path = pathFromBasename("compiler", "cast.wdl")
Main.compile(path.toString :: cFlags) shouldBe a[SuccessIR]
}
it should "IR compile a workflow with two consecutive calls" in {
val path = pathFromBasename("compiler", "strings.wdl")
Main.compile(path.toString :: cFlags) shouldBe a[SuccessIR]
}
it should "IR compile a workflow with a scatter without a call" in {
val path = pathFromBasename("compiler", "scatter_no_call.wdl")
Main.compile(
path.toString :: cFlags
) shouldBe a[SuccessIR]
}
it should "IR compile optionals" in {
val path = pathFromBasename("compiler", "optionals.wdl")
Main.compile(
path.toString
// :: "--verbose"
// :: "--verboseKey" :: "GenerateIR"
:: cFlags
) shouldBe a[SuccessIR]
}
it should "support imports" in {
val path = pathFromBasename("compiler", "check_imports.wdl")
Main.compile(
path.toString :: cFlags
) shouldBe a[SuccessIR]
}
it should "IR compile a draft2 workflow" in {
val path = pathFromBasename("draft2", "shapes.wdl")
val retval = Main.compile(
path.toString :: cFlags
)
retval shouldBe a[SuccessIR]
}
it should "expressions in an output block" in {
val path = pathFromBasename("compiler", "expr_output_block.wdl")
Main.compile(
path.toString :: cFlags
) shouldBe a[SuccessIR]
}
/* ignore should "scatters over maps" in {
val path = pathFromBasename("compiler", "dict2.wdl")
Main.compile(
path.toString :: cFlags
) shouldBe a[SuccessIR]
}*/
it should "skip missing optional arguments" in {
val path = pathFromBasename("util", "missing_inputs_to_direct_call.wdl")
Main.compile(
path.toString :: cFlags
) shouldBe a[SuccessIR]
}
it should "handle calling subworkflows" in {
val path = pathFromBasename("subworkflows", "trains.wdl")
val retval = Main.compile(
path.toString :: cFlags
)
retval shouldBe a[SuccessIR]
val irwf = retval match {
case SuccessIR(irwf, _) => irwf
case _ => throw new Exception("unexpected")
}
val primaryWf: IR.Workflow = irwf.primaryCallable match {
case Some(wf: IR.Workflow) => wf
case _ => throw new Exception("unexpected")
}
primaryWf.stages.size shouldBe 2
}
it should "compile a sub-block with several calls" in {
val path = pathFromBasename("compiler", "subblock_several_calls.wdl")
Main.compile(
path.toString :: cFlags
) shouldBe a[SuccessIR]
}
it should "missing workflow inputs" in {
val path = pathFromBasename("input_file", "missing_args.wdl")
Main.compile(
path.toString :: List("--compileMode", "ir", "--quiet", "--project", dxProject.id)
) shouldBe a[SuccessIR]
}
// Nested blocks
it should "compile two level nested workflow" in {
val path = pathFromBasename("nested", "two_levels.wdl")
Main.compile(
path.toString :: cFlags
) shouldBe a[SuccessIR]
}
it should "handle passing closure arguments to nested blocks" in {
val path = pathFromBasename("nested", "param_passing.wdl")
Main.compile(
path.toString :: cFlags
) shouldBe a[SuccessIR]
}
it should "compile a workflow calling a subworkflow as a direct call" in {
val path = pathFromBasename("draft2", "movies.wdl")
val bundle: IR.Bundle = Main.compile(path.toString :: cFlags) match {
case SuccessIR(bundle, _) => bundle
case other =>
Logger.error(other.toString)
throw new Exception(s"Failed to compile ${path}")
}
val wf: IR.Workflow = bundle.primaryCallable match {
case Some(wf: IR.Workflow) =>
wf
case _ => throw new Exception("bad value in bundle")
}
val stage = wf.stages.head
stage.description shouldBe "review"
}
it should "compile a workflow calling a subworkflow as a direct call with 2.0 version" in {
val path = pathFromBasename("v2", "movies.wdl")
val bundle: IR.Bundle = Main.compile(path.toString :: cFlags) match {
case SuccessIR(bundle, _) => bundle
case other =>
Logger.error(other.toString)
throw new Exception(s"Failed to compile ${path}")
}
val wf: IR.Workflow = bundle.primaryCallable match {
case Some(wf: IR.Workflow) =>
wf
case _ => throw new Exception("bad value in bundle")
}
val stage = wf.stages.head
stage.description shouldBe "review"
}
it should "compile a workflow calling a subworkflow with native DNANexus applet as a direct call with 2.0 version" in {
val path = pathFromBasename("v2", "call_dnanexus_applet.wdl")
val bundle: IR.Bundle = Main.compile(path.toString :: cFlags) match {
case SuccessIR(bundle, _) => bundle
case other =>
Logger.error(other.toString)
throw new Exception(s"Failed to compile ${path}")
}
val wf: IR.Workflow = bundle.primaryCallable match {
case Some(wf: IR.Workflow) =>
wf
case _ => throw new Exception("bad value in bundle")
}
wf.stages.size shouldBe 2
wf.stages(0).description shouldBe "native_sum_012"
wf.stages(1).description shouldBe "native_sum_wf"
}
it should "three nesting levels" in {
val path = pathFromBasename("nested", "three_levels.wdl")
val retval = Main.compile(
path.toString :: cFlags
)
retval shouldBe a[SuccessIR]
val bundle = retval match {
case SuccessIR(ir, _) => ir
case _ => throw new Exception("unexpected")
}
val primary: IR.Callable = bundle.primaryCallable.get
val wf = primary match {
case wf: IR.Workflow => wf
case _ => throw new Exception("unexpected")
}
wf.stages.size shouldBe 1
val level2 = bundle.allCallables(wf.name)
level2 shouldBe a[IR.Workflow]
val wfLevel2 = level2.asInstanceOf[IR.Workflow]
wfLevel2.stages.size shouldBe 1
}
it should "four nesting levels" in {
val path = pathFromBasename("nested", "four_levels.wdl")
val retval = Main.compile(
path.toString :: cFlags
)
/* inside(retval) {
case Main.UnsuccessfulTermination(errMsg) =>
errMsg should include ("nested scatter")
}*/
retval shouldBe a[SuccessIR]
}
// Looks up `name` among the bundle's callables and narrows it to an applet;
// throws when the callable is not an applet.
private def getAppletByName(name: String, bundle: IR.Bundle): IR.Applet =
bundle.allCallables(name) match {
case a: IR.Applet => a
case _ => throw new Exception(s"${name} is not an applet")
}
// Resolves the applet `name` and extracts the WDL task it wraps; throws if
// the applet is not of task kind.
private def getTaskByName(
name: String,
bundle: IR.Bundle
): TAT.Task = {
val applet = getAppletByName(name, bundle)
val task: TAT.Task = applet.kind match {
case IR.AppletKindTask(x) => x
case _ => throw new Exception(s"${name} is not a task")
}
task
}
// Check parameter_meta `pattern` keyword
it should "recognize pattern in parameters_meta via CVar for input CVars" in {
val path = pathFromBasename("compiler", "pattern_params.wdl")
val retval = Main.compile(
path.toString :: cFlags
)
retval shouldBe a[SuccessIR]
val bundle = retval match {
case SuccessIR(ir, _) => ir
case _ => throw new Exception("unexpected")
}
val cgrepApplet = getAppletByName("pattern_params_cgrep", bundle)
cgrepApplet.inputs.iterator sameElements Vector(
IR.CVar(
"in_file",
WdlTypes.T_File,
None,
Some(
Vector(
IR.IOAttrHelp("The input file to be searched"),
IR.IOAttrPatterns(IR.PatternsReprArray(Vector("*.txt", "*.tsv"))),
IR.IOAttrGroup("Common"),
IR.IOAttrLabel("Input file")
)
)
),
IR.CVar(
"pattern",
WdlTypes.T_String,
None,
Some(
Vector(
IR.IOAttrHelp("The pattern to use to search in_file"),
IR.IOAttrGroup("Common"),
IR.IOAttrLabel("Search pattern")
)
)
)
)
cgrepApplet.outputs shouldBe Vector(
IR.CVar("count", WdlTypes.T_Int, None, None),
IR.CVar(
"out_file",
WdlTypes.T_File,
None,
None
)
)
}
// Check parameter_meta `pattern` keyword
it should "recognize pattern object in parameters_obj_meta via CVar for input CVars" in {
val path = pathFromBasename("compiler", "pattern_obj_params.wdl")
val retval = Main.compile(
path.toString :: cFlags
)
retval shouldBe a[SuccessIR]
val bundle = retval match {
case SuccessIR(ir, _) => ir
case _ => throw new Exception("unexpected")
}
val cgrepApplet = getAppletByName("pattern_params_obj_cgrep", bundle)
cgrepApplet.inputs.iterator sameElements Vector(
IR.CVar(
"in_file",
WdlTypes.T_File,
None,
Some(
Vector(
IR.IOAttrHelp("The input file to be searched"),
IR.IOAttrPatterns(
IR.PatternsReprObj(
Some(Vector("*.txt", "*.tsv")),
Some("file"),
Some(Vector("foo", "bar"))
)
),
IR.IOAttrGroup("Common"),
IR.IOAttrLabel("Input file")
)
)
),
IR.CVar(
"pattern",
WdlTypes.T_String,
None,
Some(
Vector(
IR.IOAttrHelp("The pattern to use to search in_file"),
IR.IOAttrGroup("Common"),
IR.IOAttrLabel("Search pattern")
)
)
)
)
cgrepApplet.outputs shouldBe Vector(
IR.CVar("count", WdlTypes.T_Int, None, None),
IR.CVar(
"out_file",
WdlTypes.T_File,
None,
None
)
)
}
// Check parameter_meta pattern: ["array"]
it should "recognize pattern in parameters_meta via WDL" in {
val path = pathFromBasename("compiler", "pattern_params.wdl")
val retval = Main.compile(
path.toString :: cFlags
)
retval shouldBe a[SuccessIR]
val bundle = retval match {
case SuccessIR(ir, _) => ir
case _ => throw new Exception("unexpected")
}
val cgrepTask = getTaskByName("pattern_params_cgrep", bundle)
val sansText = Map(
"in_file" -> IR.MetaValueObject(
Map(
"help" -> IR.MetaValueString("The input file to be searched"),
"patterns" -> IR.MetaValueArray(
Vector(
IR.MetaValueString("*.txt"),
IR.MetaValueString("*.tsv")
)
),
"group" ->
IR.MetaValueString("Common"),
"label" ->
IR.MetaValueString("Input file")
)
),
"pattern" -> IR.MetaValueObject(
Map(
"help" ->
IR.MetaValueString("The pattern to use to search in_file"),
"group" ->
IR.MetaValueString("Common"),
"label" ->
IR.MetaValueString("Search pattern")
)
),
"out_file" -> IR.MetaValueObject(
Map(
"patterns" -> IR.MetaValueArray(
Vector(
IR.MetaValueString("*.txt"),
IR.MetaValueString("*.tsv")
)
),
"group" -> IR.MetaValueString("Common"),
"label" -> IR.MetaValueString("Output file")
)
)
)
inside(cgrepTask.parameterMeta) {
case Some(TAT.MetaSection(kvs, _)) =>
translateMetaKVs(kvs) shouldBe sansText
}
val iDef = cgrepTask.inputs.find(_.name == "in_file").get
val sansText2 =
IR.MetaValueObject(
Map(
"group" ->
IR.MetaValueString("Common"),
"help" ->
IR.MetaValueString("The input file to be searched"),
"patterns" -> IR.MetaValueArray(
Vector(
IR.MetaValueString("*.txt"),
IR.MetaValueString("*.tsv")
)
),
"label" ->
IR.MetaValueString("Input file")
)
)
inside(getParamMeta(cgrepTask, iDef)) {
case Some(metaValue) =>
translate(metaValue) shouldBe sansText2
}
}
// Check parameter_meta pattern: {"object"}
it should "recognize pattern object in parameters_meta via WDL" in {
val path = pathFromBasename("compiler", "pattern_obj_params.wdl")
val retval = Main.compile(
path.toString :: cFlags
)
retval shouldBe a[SuccessIR]
val bundle = retval match {
case SuccessIR(ir, _) => ir
case _ => throw new Exception("unexpected")
}
val cgrepTask = getTaskByName("pattern_params_obj_cgrep", bundle)
val sansText =
Map(
"in_file" -> IR.MetaValueObject(
Map(
"help" ->
IR.MetaValueString("The input file to be searched"),
"patterns" -> IR.MetaValueObject(
Map(
"class" -> IR.MetaValueString("file"),
"tag" -> IR.MetaValueArray(
Vector(IR.MetaValueString("foo"), IR.MetaValueString("bar"))
),
"name" -> IR.MetaValueArray(
Vector(IR.MetaValueString("*.txt"), IR.MetaValueString("*.tsv"))
)
)
),
"group" ->
IR.MetaValueString("Common"),
"label" ->
IR.MetaValueString("Input file")
)
),
"pattern" -> IR.MetaValueObject(
Map(
"help" ->
IR.MetaValueString("The pattern to use to search in_file"),
"group" ->
IR.MetaValueString("Common"),
"label" ->
IR.MetaValueString("Search pattern")
)
),
"out_file" -> IR.MetaValueObject(
Map(
"patterns" -> IR.MetaValueArray(
Vector(
IR.MetaValueString("*.txt"),
IR.MetaValueString("*.tsv")
)
),
"group" -> IR.MetaValueString("Common"),
"label" -> IR.MetaValueString("Output file")
)
)
)
inside(cgrepTask.parameterMeta) {
case Some(TAT.MetaSection(kvs, _)) =>
translateMetaKVs(kvs) shouldBe sansText
}
val iDef = cgrepTask.inputs.find(_.name == "in_file").get
val sansText2 =
IR.MetaValueObject(
Map(
"group" ->
IR.MetaValueString("Common"),
"help" ->
IR.MetaValueString("The input file to be searched"),
"patterns" -> IR.MetaValueObject(
Map(
"class" -> IR.MetaValueString("file"),
"tag" -> IR.MetaValueArray(
Vector(IR.MetaValueString("foo"), IR.MetaValueString("bar"))
),
"name" -> IR.MetaValueArray(
Vector(IR.MetaValueString("*.txt"), IR.MetaValueString("*.tsv"))
)
)
),
"label" ->
IR.MetaValueString("Input file")
)
)
inside(getParamMeta(cgrepTask, iDef)) {
case Some(metaValue) =>
translate(metaValue) shouldBe sansText2
}
}
// Check parameter_meta `choices` keyword
it should "recognize choices in parameters_meta via CVar for input CVars" in {
val path = pathFromBasename("compiler", "choice_values.wdl")
val retval = Main.compile(
path.toString :: cFlags
)
retval shouldBe a[SuccessIR]
val bundle = retval match {
case SuccessIR(ir, _) => ir
case _ => throw new Exception("unexpected")
}
val cgrepApplet = getAppletByName("choice_values_cgrep", bundle)
cgrepApplet.inputs.iterator sameElements Vector(
IR.CVar(
"in_file",
WdlTypes.T_File,
None,
Some(
Vector(
IR.IOAttrChoices(
Vector(
IR.ChoiceReprFile(
name = None,
value = "dx://file-Fg5PgBQ0ffP7B8bg3xqB115G"
),
IR.ChoiceReprFile(
name = None,
value = "dx://file-Fg5PgBj0ffPP0Jjv3zfv0yxq"
)
)
)
)
)
),
IR.CVar(
"pattern",
WdlTypes.T_String,
None,
Some(
Vector(
IR.IOAttrChoices(
Vector(
IR.ChoiceReprString(value = "A"),
IR.ChoiceReprString(value = "B")
)
)
)
)
)
)
}
// Check parameter_meta `choices` keyword with annotated values
it should "recognize annotated choices in parameters_meta via CVar for input CVars" in {
val path = pathFromBasename("compiler", "choice_obj_values.wdl")
val retval = Main.compile(
path.toString :: cFlags
)
retval shouldBe a[SuccessIR]
val bundle = retval match {
case SuccessIR(ir, _) => ir
case _ => throw new Exception("unexpected")
}
val cgrepApplet = getAppletByName("choice_values_cgrep", bundle)
cgrepApplet.inputs.iterator sameElements Vector(
IR.CVar(
"in_file",
WdlTypes.T_File,
None,
Some(
Vector(
IR.IOAttrChoices(
Vector(
IR.ChoiceReprFile(
name = Some("file1"),
value = "dx://file-Fg5PgBQ0ffP7B8bg3xqB115G"
),
IR.ChoiceReprFile(
name = Some("file2"),
value = "dx://file-Fg5PgBj0ffPP0Jjv3zfv0yxq"
)
)
)
)
)
),
IR.CVar(
"pattern",
WdlTypes.T_String,
None,
Some(
Vector(
IR.IOAttrChoices(
Vector(
IR.ChoiceReprString(value = "A"),
IR.ChoiceReprString(value = "B")
)
)
)
)
)
)
}
// Check parameter_meta `suggestion` keyword fails when there is a type mismatch
it should "throw exception when choice types don't match parameter types" in {
val path = pathFromBasename("compiler", "choices_type_mismatch.wdl")
val retval = Main.compile(
path.toString :: cFlags
)
retval shouldBe a[UnsuccessfulTermination]
// TODO: make assertion about exception message
}
// Check parameter_meta `suggestions` keyword
it should "recognize suggestions in parameters_meta via CVar for input CVars" in {
val path = pathFromBasename("compiler", "suggestion_values.wdl")
val retval = Main.compile(
path.toString :: cFlags
)
retval shouldBe a[SuccessIR]
val bundle = retval match {
case SuccessIR(ir, _) => ir
case _ => throw new Exception("unexpected")
}
val cgrepApplet = getAppletByName("suggestion_values_cgrep", bundle)
cgrepApplet.inputs.iterator sameElements Vector(
IR.CVar(
"in_file",
WdlTypes.T_File,
None,
Some(
Vector(
IR.IOAttrSuggestions(
Vector(
IR.SuggestionReprFile(
name = None,
value = Some("dx://file-Fg5PgBQ0ffP7B8bg3xqB115G"),
project = None,
path = None
),
IR.SuggestionReprFile(
name = None,
value = Some("dx://file-Fg5PgBj0ffPP0Jjv3zfv0yxq"),
project = None,
path = None
)
)
)
)
)
),
IR.CVar(
"pattern",
WdlTypes.T_String,
None,
Some(
Vector(
IR.IOAttrSuggestions(
Vector(
IR.SuggestionReprString(value = "A"),
IR.SuggestionReprString(value = "B")
)
)
)
)
)
)
}
// Check parameter_meta `suggestions` keyword with annotated values
it should "recognize annotated suggestions in parameters_meta via CVar for input CVars" in {
val path = pathFromBasename("compiler", "suggestion_obj_values.wdl")
val retval = Main.compile(
path.toString :: cFlags
)
retval shouldBe a[SuccessIR]
val bundle = retval match {
case SuccessIR(ir, _) => ir
case _ => throw new Exception("unexpected")
}
val cgrepApplet = getAppletByName("suggestion_values_cgrep", bundle)
cgrepApplet.inputs.iterator sameElements Vector(
IR.CVar(
"in_file",
WdlTypes.T_File,
None,
Some(
Vector(
IR.IOAttrSuggestions(
Vector(
IR.SuggestionReprFile(
name = Some("file1"),
value = Some("dx://file-Fg5PgBQ0ffP7B8bg3xqB115G"),
project = None,
path = None
),
IR.SuggestionReprFile(
name = Some("file2"),
value = None,
project = Some("project-FGpfqjQ0ffPF1Q106JYP2j3v"),
path = Some("/test_data/f2.txt.gz")
)
)
)
)
)
),
IR.CVar(
"pattern",
WdlTypes.T_String,
None,
Some(
Vector(
IR.IOAttrSuggestions(
Vector(
IR.SuggestionReprString(value = "A"),
IR.SuggestionReprString(value = "B")
)
)
)
)
)
)
}
// `suggestions` values whose types disagree with the annotated parameter
// must be rejected by the compiler.
it should "throw exception when suggestion types don't match parameter types" in {
  // TODO: make assertion about exception message
  Main.compile(
    pathFromBasename("compiler", "suggestions_type_mismatch.wdl").toString :: cFlags
  ) shouldBe a[UnsuccessfulTermination]
}
// A file suggestion object that omits a required keyword must be rejected.
it should "throw exception when file suggestion is missing a keyword" in {
  // TODO: make assertion about exception message
  Main.compile(
    pathFromBasename("compiler", "suggestions_missing_arg.wdl").toString :: cFlags
  ) shouldBe a[UnsuccessfulTermination]
}
// Check parameter_meta `dx_type` keyword
// A simple string constraint and a nested AND/OR constraint tree must both
// translate into IOAttrType attributes on the file inputs.
it should "recognize dx_type in parameters_meta via CVar for input CVars" in {
  val path = pathFromBasename("compiler", "add_dx_type.wdl")
  val retval = Main.compile(
    path.toString :: cFlags
  )
  retval shouldBe a[SuccessIR]
  val bundle = retval match {
    case SuccessIR(ir, _) => ir
    case _ => throw new Exception("unexpected")
  }
  val cgrepApplet = getAppletByName("add_dx_type", bundle)
  cgrepApplet.inputs shouldBe Vector(
    // "a": single string constraint.
    IR.CVar(
      "a",
      WdlTypes.T_File,
      None,
      Some(
        Vector(
          IR.IOAttrType(IR.ConstraintReprString("fastq"))
        )
      )
    ),
    // "b": compound constraint: "fastq" AND ("Read1" OR "Read2").
    IR.CVar(
      "b",
      WdlTypes.T_File,
      None,
      Some(
        Vector(
          IR.IOAttrType(
            IR.ConstraintReprOper(
              ConstraintOper.AND,
              Vector(
                IR.ConstraintReprString("fastq"),
                IR.ConstraintReprOper(
                  ConstraintOper.OR,
                  Vector(
                    IR.ConstraintReprString("Read1"),
                    IR.ConstraintReprString("Read2")
                  )
                )
              )
            )
          )
        )
      )
    )
  )
}
// Check parameter_meta `dx_type` keyword fails when specified for a non-file parameter
it should "throw exception when dx_type is used on non-file parameter" in {
  val path = pathFromBasename("compiler", "dx_type_nonfile.wdl")
  val retval = Main.compile(
    path.toString :: cFlags
  )
  retval shouldBe a[UnsuccessfulTermination]
  // TODO: make assertion about exception message
}
// Check parameter_meta `default` keyword
it should "recognize default in parameters_meta via CVar for input CVars" in {
  val path = pathFromBasename("compiler", "add_default.wdl")
  val retval = Main.compile(
    path.toString :: cFlags
  )
  retval shouldBe a[SuccessIR]
  val bundle = retval match {
    case SuccessIR(ir, _) => ir
    case _ => throw new Exception("unexpected")
  }
  val cgrepApplet = getAppletByName("add_default", bundle)
  // "a" carries its default in the CVar default slot, while "b"'s default
  // comes through as an IOAttrDefault attribute on the optional input.
  cgrepApplet.inputs shouldBe Vector(
    IR.CVar(
      "a",
      WdlTypes.T_Int,
      Some(WdlValues.V_Int(1)),
      None
    ),
    IR.CVar(
      "b",
      WdlTypes.T_Optional(WdlTypes.T_Int),
      None,
      Some(Vector(IR.IOAttrDefault(IR.DefaultReprInteger(2))))
    )
  )
}
// Check parameter_meta `default` keyword fails when there is a type mismatch
it should "throw exception when default types don't match parameter types" in {
  val path = pathFromBasename("compiler", "default_type_mismatch.wdl")
  val retval = Main.compile(
    path.toString :: cFlags
  )
  retval shouldBe a[UnsuccessfulTermination]
  // TODO: make assertion about exception message
}
it should "recognize help, group, and label in parameters_meta via WDL" in {
  val path = pathFromBasename("compiler", "help_input_params.wdl")
  val retval = Main.compile(
    path.toString :: cFlags
  )
  retval shouldBe a[SuccessIR]
  val bundle = retval match {
    case SuccessIR(ir, _) => ir
    case _ => throw new Exception("unexpected")
  }
  val cgrepTask = getTaskByName("help_input_params_cgrep", bundle)
  // Expected translation of the whole parameter_meta section:
  // objects for annotated params, a bare string for "s".
  val sansText =
    Map(
      "in_file" -> IR.MetaValueObject(
        Map(
          "help" ->
            IR.MetaValueString("The input file to be searched"),
          "group" -> IR.MetaValueString("Common"),
          "label" -> IR.MetaValueString("Input file")
        )
      ),
      "pattern" -> IR.MetaValueObject(
        Map(
          // "description" is used here instead of "help" on purpose.
          "description" ->
            IR.MetaValueString("The pattern to use to search in_file"),
          "group" -> IR.MetaValueString("Common"),
          "label" -> IR.MetaValueString("Search pattern")
        )
      ),
      "s" -> IR.MetaValueString("This is help for s")
    )
  inside(cgrepTask.parameterMeta) {
    case Some(TAT.MetaSection(kvs, _)) =>
      translateMetaKVs(kvs) shouldBe sansText
  }
  // The per-input lookup for "in_file" must return the same object.
  val sansText2 =
    IR.MetaValueObject(
      Map(
        "help" ->
          IR.MetaValueString("The input file to be searched"),
        "group" -> IR.MetaValueString("Common"),
        "label" -> IR.MetaValueString("Input file")
      )
    )
  val iDef = cgrepTask.inputs.find(_.name == "in_file").get
  inside(getParamMeta(cgrepTask, iDef)) {
    case Some(metaValue) =>
      translate(metaValue) shouldBe sansText2
  }
  // A second task in the same document must be translated independently.
  val diffTask = getTaskByName("help_input_params_diff", bundle)
  inside(diffTask.parameterMeta) {
    case Some(TAT.MetaSection(kvs, _)) =>
      translateMetaKVs(kvs) shouldBe Map(
        "a" -> IR.MetaValueObject(
          Map(
            "help" -> IR.MetaValueString("lefthand file"),
            "group" -> IR.MetaValueString("Files"),
            "label" -> IR.MetaValueString("File A")
          )
        ),
        "b" -> IR.MetaValueObject(
          Map(
            "help" -> IR.MetaValueString("righthand file"),
            "group" -> IR.MetaValueString("Files"),
            "label" -> IR.MetaValueString("File B")
          )
        )
      )
  }
}
// Check that help/group/label annotations land on the input CVars.
it should "recognize help in parameters_meta via CVar for input CVars" in {
  val path = pathFromBasename("compiler", "help_input_params.wdl")
  val retval = Main.compile(path.toString :: cFlags)
  retval shouldBe a[SuccessIR]
  val bundle = retval match {
    case SuccessIR(ir, _) => ir
    case _                => throw new Exception("unexpected")
  }
  val cgrepApplet = getAppletByName("help_input_params_cgrep", bundle)
  // BUG FIX: `sameElements` returns a Boolean that was previously discarded,
  // so this test compiled but asserted nothing. Assert on the comparison.
  assert(
    cgrepApplet.inputs.iterator sameElements Vector(
      IR.CVar(
        "s",
        WdlTypes.T_String,
        None,
        Some(
          Vector(
            IR.IOAttrHelp("This is help for s")
          )
        )
      ),
      IR.CVar(
        "in_file",
        WdlTypes.T_File,
        None,
        Some(
          Vector(
            IR.IOAttrHelp("The input file to be searched"),
            IR.IOAttrGroup("Common"),
            IR.IOAttrLabel("Input file")
          )
        )
      ),
      IR.CVar(
        "pattern",
        WdlTypes.T_String,
        None,
        Some(
          Vector(
            IR.IOAttrHelp("The pattern to use to search in_file"),
            IR.IOAttrGroup("Common"),
            IR.IOAttrLabel("Search pattern")
          )
        )
      )
    )
  )
}
// Symbols that are not input variables must be ignored when translating
// parameter_meta. WDL has no paramMeta member on the output-variable class
// anyway, so help annotations can essentially never reach an output CVar;
// this test confirms the output comes through bare.
it should "ignore help in parameters_meta via CVar for output CVars" in {
  val retval = Main.compile(
    pathFromBasename("compiler", "help_output_params.wdl").toString :: cFlags
  )
  retval shouldBe a[SuccessIR]
  val bundle = retval match {
    case SuccessIR(ir, _) => ir
    case _                => throw new Exception("unexpected")
  }
  // The single output must carry no attributes at all.
  getAppletByName("help_output_params_cgrep", bundle).outputs shouldBe Vector(
    IR.CVar("count", WdlTypes.T_Int, None, None)
  )
}
it should "recognize app metadata" in {
  val path = pathFromBasename("compiler", "add_app_meta.wdl")
  val retval = Main.compile(
    path.toString :: cFlags
  )
  retval shouldBe a[SuccessIR]
  val bundle = retval match {
    case SuccessIR(ir, _) => ir
    case _ => throw new Exception("unexpected")
  }
  val cgrepApplet = getAppletByName("add", bundle)
  // NOTE(review): the Boolean result of `sameElements` below is discarded, so
  // this test never actually asserts anything about the metadata. Moreover,
  // `meta` appears to be an Option (cf. `meta match { case Some(...) }` in the
  // adjunct-files test later in this file), so `meta.iterator` would yield the
  // whole Vector as a single element and the element-wise comparison would be
  // false. The likely intended fix is `cgrepApplet.meta shouldBe Some(Vector(...))`
  // once the expected attribute ordering is confirmed.
  cgrepApplet.meta.iterator sameElements
    Vector(
      IR.TaskAttrDeveloperNotes("Check out my sick bash expression! Three dolla signs!!!"),
      IR.TaskAttrDescription(
        "Adds two int together. This app adds together two integers and returns the sum"
      ),
      IR.TaskAttrTags(Vector("add", "ints")),
      IR.TaskAttrOpenSource(true),
      IR.TaskAttrVersion("1.0"),
      IR.TaskAttrProperties(Map("foo" -> "bar")),
      IR.TaskAttrCategories(Vector("Assembly")),
      // Free-form "details" become a nested MetaValue tree.
      IR.TaskAttrDetails(
        Map(
          "contactEmail" -> IR.MetaValueString("joe@dev.com"),
          "upstreamVersion" -> IR.MetaValueString("1.0"),
          "upstreamAuthor" -> IR.MetaValueString("Joe Developer"),
          "upstreamUrl" -> IR.MetaValueString("https://dev.com/joe"),
          "upstreamLicenses" -> IR.MetaValueArray(
            Vector(
              IR.MetaValueString("MIT")
            )
          ),
          "whatsNew" -> IR.MetaValueArray(
            Vector(
              IR.MetaValueObject(
                Map(
                  "version" -> IR.MetaValueString("1.1"),
                  "changes" -> IR.MetaValueArray(
                    Vector(
                      IR.MetaValueString("Added parameter --foo"),
                      IR.MetaValueString("Added cowsay easter-egg")
                    )
                  )
                )
              ),
              IR.MetaValueObject(
                Map(
                  "version" -> IR.MetaValueString("1.0"),
                  "changes" -> IR.MetaValueArray(
                    Vector(
                      IR.MetaValueString("Initial version")
                    )
                  )
                )
              )
            )
          )
        )
      ),
      IR.TaskAttrTitle("Add Ints"),
      IR.TaskAttrTypes(Vector("Adder"))
    )
}
it should "recognize runtime hints" in {
  val path = pathFromBasename("compiler", "add_runtime_hints.wdl")
  val retval = Main.compile(
    path.toString :: cFlags
  )
  retval shouldBe a[SuccessIR]
  val bundle = retval match {
    case SuccessIR(ir, _) => ir
    case _ => throw new Exception("unexpected")
  }
  val cgrepApplet = getAppletByName("add_runtime_hints", bundle)
  // The runtime section must translate into the full set of hints:
  // reuse policy, restart policy (with per-error retry counts), timeout,
  // and network/developer access.
  cgrepApplet.runtimeHints shouldBe Some(
    Vector(
      IR.RuntimeHintIgnoreReuse(true),
      IR.RuntimeHintRestart(
        max = Some(5),
        default = Some(1),
        errors = Some(Map("UnresponsiveWorker" -> 2, "ExecutionError" -> 2))
      ),
      IR.RuntimeHintTimeout(hours = Some(12), minutes = Some(30)),
      IR.RuntimeHintAccess(network = Some(Vector("*")), developer = Some(true))
    )
  )
}
// Compilation must succeed even though the runtime section contains a
// dx_instance_type entry; only the overall success is checked here.
it should "ignore dx_instance_type when evaluating runtime hints" in {
  val retval = Main.compile(
    pathFromBasename("compiler", "instance_type_test.wdl").toString :: cFlags
  )
  retval shouldBe a[SuccessIR]
  // Pattern-match to confirm the IR is extractable; the bundle is unused.
  retval match {
    case SuccessIR(ir, _) => ir
    case _                => throw new Exception("unexpected")
  }
}
it should "handle streaming files" in {
  val path = pathFromBasename("compiler", "streaming_files.wdl")
  val retval = Main.compile(
    path.toString :: cFlags
  )
  retval shouldBe a[SuccessIR]
  val bundle = retval match {
    case SuccessIR(ir, _) => ir
    case _ => throw new Exception("unexpected")
  }
  // The "stream" annotation is a bare string value in parameter_meta.
  val cgrepTask = getTaskByName("cgrep", bundle)
  inside(cgrepTask.parameterMeta) {
    case Some(TAT.MetaSection(kvs, _)) =>
      translateMetaKVs(kvs) shouldBe Map("in_file" -> IR.MetaValueString("stream"))
  }
  // Per-input lookup must return the same annotation.
  val iDef = cgrepTask.inputs.find(_.name == "in_file").get
  inside(getParamMeta(cgrepTask, iDef)) {
    case Some(metaValue) =>
      translate(metaValue) shouldBe IR.MetaValueString("stream")
  }
  // A task with two streamed inputs.
  val diffTask = getTaskByName("diff", bundle)
  inside(diffTask.parameterMeta) {
    case Some(TAT.MetaSection(kvs, _)) =>
      translateMetaKVs(kvs) shouldBe Map(
        "a" -> IR.MetaValueString("stream"),
        "b" -> IR.MetaValueString("stream")
      )
  }
}
// Same as above, but with the object form: { stream: true }.
it should "recognize the streaming object annotation" in {
  val path = pathFromBasename("compiler", "streaming_files_obj.wdl")
  val retval = Main.compile(
    path.toString :: cFlags
  )
  retval shouldBe a[SuccessIR]
  val bundle = retval match {
    case SuccessIR(ir, _) => ir
    case _ => throw new Exception("unexpected")
  }
  val cgrepTask = getTaskByName("cgrep", bundle)
  inside(cgrepTask.parameterMeta) {
    case Some(TAT.MetaSection(kvs, _)) =>
      translateMetaKVs(kvs) shouldBe Map(
        "in_file" -> IR.MetaValueObject(
          Map("stream" -> IR.MetaValueBoolean(true))
        )
      )
  }
  val iDef = cgrepTask.inputs.find(_.name == "in_file").get
  inside(getParamMeta(cgrepTask, iDef)) {
    case Some(metaValue) =>
      translate(metaValue) shouldBe IR.MetaValueObject(
        Map("stream" -> IR.MetaValueBoolean(true))
      )
  }
  val diffTask = getTaskByName("diff", bundle)
  inside(diffTask.parameterMeta) {
    case Some(TAT.MetaSection(kvs, _)) =>
      translateMetaKVs(kvs) shouldBe Map(
        "a" -> IR.MetaValueObject(
          Map("stream" -> IR.MetaValueBoolean(true))
        ),
        "b" -> IR.MetaValueObject(
          Map("stream" -> IR.MetaValueBoolean(true))
        )
      )
  }
}
// The draft-2 dialect expresses streaming the same way; confirm translation.
it should "recognize the streaming annotation for wdl draft2" in {
  val path = pathFromBasename("draft2", "streaming.wdl")
  val retval = Main.compile(
    path.toString :: cFlags
  )
  retval shouldBe a[SuccessIR]
  val bundle = retval match {
    case SuccessIR(ir, _) => ir
    case _ => throw new Exception("unexpected")
  }
  val diffTask = getTaskByName("diff", bundle)
  inside(diffTask.parameterMeta) {
    case Some(TAT.MetaSection(kvs, _)) =>
      translateMetaKVs(kvs) shouldBe Map("a" -> IR.MetaValueString("stream"),
                                         "b" -> IR.MetaValueString("stream"))
  }
}
// --- "does it compile to IR" smoke tests -----------------------------------
it should "handle an empty workflow" in {
  Main.compile(
    pathFromBasename("util", "empty_workflow.wdl").toString :: cFlags
  ) shouldBe a[SuccessIR]
}
it should "handle structs" in {
  Main.compile(
    pathFromBasename("struct", "Person.wdl").toString :: cFlags
  ) shouldBe a[SuccessIR]
}
it should "recognize that an argument with a default can be omitted at the call site" in {
  Main.compile(
    pathFromBasename("compiler", "call_level2.wdl").toString :: cFlags
  ) shouldBe a[SuccessIR]
}
it should "check for reserved symbols" in {
  // Identifiers containing the reserved "___" substring must be rejected.
  val result = Main.compile(
    pathFromBasename("compiler", "reserved.wdl").toString :: cFlags
  )
  inside(result) {
    case Failure(_, Some(e)) =>
      e.getMessage should include("reserved substring ___")
  }
}
it should "do nested scatters" in {
  Main.compile(
    pathFromBasename("compiler", "nested_scatter.wdl").toString :: cFlags
  ) shouldBe a[SuccessIR]
}
it should "handle struct imported several times" in {
  Main.compile(
    pathFromBasename("struct/struct_imported_twice", "file3.wdl").toString :: cFlags
  ) shouldBe a[SuccessIR]
}
it should "handle file constants in a workflow" in {
  Main.compile(
    pathFromBasename("compiler", "wf_constants.wdl").toString :: cFlags
  ) shouldBe a[SuccessIR]
}
it should "respect import flag" in {
  // "--imports" points A.wdl at the sibling lib/ directory.
  val wdl = pathFromBasename("compiler/imports", "A.wdl")
  val lib = wdl.getParent.resolve("lib")
  Main.compile(wdl.toString :: "--imports" :: lib.toString :: cFlags) shouldBe a[SuccessIR]
}
it should "respect import -p flag" in {
  // Same as above, but using the "--p" spelling of the flag.
  val wdl = pathFromBasename("compiler/imports", "A.wdl")
  val lib = wdl.getParent.resolve("lib")
  Main.compile(wdl.toString :: "--p" :: lib.toString :: cFlags) shouldBe a[SuccessIR]
}
it should "pass environment between deep stages" in {
  Main.compile(
    pathFromBasename("compiler", "environment_passing_deep_nesting.wdl").toString :: cFlags
  ) shouldBe a[SuccessIR]
}
it should "handle multiple struct definitions" in {
  Main.compile(
    pathFromBasename("struct/DEVEX-1196-struct-resolution-wrong-order", "file3.wdl").toString :: cFlags
  ) shouldBe a[SuccessIR]
}
// Regression test: the command section must round-trip through IR and the
// WDL generator without losing characters (notably backslash continuations).
it should "retain all characters in a WDL task" in {
  val path = pathFromBasename("bugs", "missing_chars_in_task.wdl")
  val retval = Main.compile(
    path.toString
    // :: "--verbose"
    // :: "--verboseKey" :: "GenerateIR"
      :: cFlags
  )
  retval shouldBe a[SuccessIR]
  // Exact expected text, including the escaped newlines and pipes.
  val commandSection =
    """|  command <<<
       |  echo 1 hello world | sed 's/world/wdl/'
       |  echo 2 hello \
       |    world \
       |    | sed 's/world/wdl/'
       |  echo 3 hello \
       |    world | \
       |    sed 's/world/wdl/'
       |  >>>
       |""".stripMargin
  inside(retval) {
    case SuccessIR(bundle, _) =>
      bundle.allCallables.size shouldBe 1
      val (_, callable) = bundle.allCallables.head
      callable shouldBe a[IR.Applet]
      val task = callable.asInstanceOf[IR.Applet]
      // Regenerate WDL source from the IR and check the command text survived.
      val generator = WdlV1Generator()
      val taskSource = generator.generateDocument(task.document).mkString("\n")
      taskSource should include(commandSection)
  }
}
it should "correctly flatten a workflow with imports" in {
  val path = pathFromBasename("compiler", "wf_to_flatten.wdl")
  val retval = Main.compile(path.toString :: cFlags)
  retval shouldBe a[SuccessIR]
}
// A runtime `gpu: true` request must map to a GPU instance type constant.
it should "detect a request for GPU" in {
  val path = pathFromBasename("compiler", "GPU.wdl")
  val retval = Main.compile(
    path.toString
    // :: "--verbose"
    // :: "--verboseKey" :: "GenerateIR"
      :: cFlags
  )
  retval shouldBe a[SuccessIR]
  inside(retval) {
    case SuccessIR(bundle, _) =>
      bundle.allCallables.size shouldBe 1
      val (_, callable) = bundle.allCallables.head
      callable shouldBe a[IR.Applet]
      val task = callable.asInstanceOf[IR.Applet]
      // The instance type is resolved to a concrete GPU machine name.
      task.instanceType shouldBe IR.InstanceTypeConst(Some("mem3_ssd1_gpu_x8"),
                                                      None,
                                                      None,
                                                      None,
                                                      None)
  }
}
it should "compile a scatter with a sub-workflow that has an optional argument" in {
  val path = pathFromBasename("compiler", "scatter_subworkflow_with_optional.wdl")
  val retval = Main.compile(
    path.toString
    // :: "--verbose"
    // :: "--verboseKey" :: "GenerateIR"
      :: cFlags
  )
  retval shouldBe a[SuccessIR]
  val bundle = retval match {
    case SuccessIR(bundle, _) => bundle
    case _ => throw new Exception("unexpected")
  }
  // Collect the locked sub-level workflows generated for the scatter body.
  val wfs: Vector[IR.Workflow] = bundle.allCallables.flatMap {
    case (_, wf: IR.Workflow) if wf.locked && wf.level == IR.Level.Sub => Some(wf)
    case (_, _) => None
  }.toVector
  wfs.length shouldBe 1
  val wf = wfs.head
  val samtools = wf.inputs.find { case (cVar, _) => cVar.name == "samtools_memory" }
  // NOTE(review): the Some(...) branch is commented out, so this `inside`
  // only succeeds when "samtools_memory" is absent from the sub-workflow
  // inputs — confirm that is the intended behavior.
  inside(samtools) {
    /*case Some((cVar, _)) =>
     cVar.wdlType shouldBe (WdlTypes.T_Optional(WdlTypes.T_String))*/
    case None => ()
  }
}
it should "compile a workflow taking arguments from a Pair" in {
  val path = pathFromBasename("draft2", "pair.wdl")
  val retval = Main.compile(
    path.toString
    // :: "--verbose"
    // :: "--verboseKey" :: "GenerateIR"
      :: cFlags
  )
  retval shouldBe a[SuccessIR]
}
it should "pass as subworkflows do not have expression statement in output block" in {
  val path = pathFromBasename("subworkflows", basename = "trains.wdl")
  val retval = Main.compile(
    path.toString :: cFlags
  )
  retval shouldBe a[SuccessIR]
}
// this is currently failing.
it should "pass with subworkflows having expression" in {
  val path = pathFromBasename("subworkflows", basename = "ensure_trains.wdl")
  /* ensure_trains workflow
   *    trains workflow
   *        check_route workflow
   *            concat task
   */
  val retval = Main.compile(
    path.toString
    // :: "--verbose"
    // :: "--verboseKey" :: "GenerateIR"
      :: cFlags
  )
  retval shouldBe a[SuccessIR]
}
// Disabled: workflow-level `meta` translation (see `ignore`).
ignore should "recognize workflow metadata" in {
  val path = pathFromBasename("compiler", "wf_meta.wdl")
  val retval = Main.compile(
    path.toString :: cFlags
  )
  retval shouldBe a[SuccessIR]
  val bundle = retval match {
    case SuccessIR(ir, _) => ir
    case _ => throw new Exception("unexpected")
  }
  val workflow = bundle.primaryCallable match {
    case Some(wf: IR.Workflow) => wf
    case _ => throw new Exception("primaryCallable is not a workflow")
  }
  // Every recognized meta key becomes a typed WorkflowAttr.
  workflow.meta shouldBe Some(
    Vector(
      IR.WorkflowAttrDescription("This is a workflow that defines some metadata"),
      IR.WorkflowAttrTags(Vector("foo", "bar")),
      IR.WorkflowAttrVersion("1.0"),
      IR.WorkflowAttrProperties(Map("foo" -> "bar")),
      IR.WorkflowAttrDetails(
        Map("whatsNew" -> IR.MetaValueString("v1.0: First release"))
      ),
      IR.WorkflowAttrTitle("Workflow with metadata"),
      IR.WorkflowAttrTypes(Vector("calculator")),
      IR.WorkflowAttrSummary("A workflow that defines some metadata")
    )
  )
}
// Disabled: workflow-level parameter_meta translation (see `ignore`).
ignore should "recognize workflow parameter metadata" in {
  val path = pathFromBasename("compiler", "wf_param_meta.wdl")
  val retval = Main.compile(path.toString :: cFlags)
  retval shouldBe a[SuccessIR]
  val bundle = retval match {
    case SuccessIR(ir, _) => ir
    case _                => throw new Exception("unexpected")
  }
  val workflow = bundle.primaryCallable match {
    case Some(wf: IR.Workflow) => wf
    case _                     => throw new Exception("primaryCallable is not a workflow")
  }
  val input_cvars: Vector[IR.CVar] = workflow.inputs.map {
    case (c: IR.CVar, _) => c
    // BUG FIX: the original branch was `case _ => ... "Invalid workflow input ${other}"`.
    // The pattern never bound `other` and the string was not an s-interpolator,
    // so the message would have contained the literal text "${other}" (and the
    // offending value was lost). Bind the value and interpolate it.
    case other => throw new Exception(s"Invalid workflow input ${other}")
  }
  // Sort by name so the comparison is independent of input order.
  input_cvars.sortWith(_.name < _.name) shouldBe Vector(
    IR.CVar(
      "x",
      WdlTypes.T_Int,
      Some(WdlValues.V_Int(3)),
      Some(
        Vector(
          IR.IOAttrLabel("Left-hand side"),
          IR.IOAttrDefault(IR.DefaultReprInteger(3))
        )
      )
    ),
    IR.CVar(
      "y",
      WdlTypes.T_Int,
      Some(WdlValues.V_Int(5)),
      Some(
        Vector(
          IR.IOAttrLabel("Right-hand side"),
          IR.IOAttrDefault(IR.DefaultReprInteger(5))
        )
      )
    )
  )
}
// Disabled: adjunct README files becoming description/developer-notes attrs.
ignore should "handle adjunct files in workflows and tasks" in {
  val path = pathFromBasename("compiler", "wf_readme.wdl")
  val retval = Main.compile(path.toString :: cFlags)
  retval shouldBe a[SuccessIR]
  val bundle = retval match {
    case SuccessIR(ir, _) => ir
    case _                => throw new Exception("unexpected")
  }
  val workflow = bundle.primaryCallable match {
    case Some(wf: IR.Workflow) => wf
    case _                     => throw new Exception("primaryCallable is not a workflow")
  }
  // The workflow README becomes a single description attribute.
  workflow.meta match {
    case Some(array) =>
      array.size shouldBe 1
      array.foreach({
        case IR.WorkflowAttrDescription(desc) =>
          desc shouldBe "This is the readme for the wf_linear workflow."
        case other => throw new Exception(s"Unexpected workflow meta ${other}")
      })
    case _ => throw new Exception("Expected workflow meta")
  }
  // BUG FIX: the original used `case Some(v: Vector[IR.TaskAttr])` here. The
  // element type in that pattern is erased at runtime, so it was an unchecked
  // type test that accepted any Vector. `meta` is already statically typed,
  // so a plain `Some(v)` binding is equivalent and warning-free.
  val addApp = getAppletByName("add", bundle)
  addApp.meta match {
    case Some(v) =>
      v.size shouldBe 2
      v.foreach {
        case IR.TaskAttrDescription(text) =>
          text shouldBe "This is the readme for the wf_linear add task."
        case IR.TaskAttrDeveloperNotes(text) =>
          text shouldBe "Developer notes defined in WDL"
        case other => throw new Exception(s"Invalid TaskAttr for add task ${other}")
      }
    case _ => throw new Exception("meta is None or is not a Vector of TaskAttr for add task")
  }
  // WDL-defined description wins for the mul task.
  val mulApp = getAppletByName("mul", bundle)
  mulApp.meta match {
    case Some(v) =>
      v.size shouldBe 1
      v.foreach {
        case IR.TaskAttrDescription(text) =>
          text shouldBe "Description defined in WDL"
        case other => throw new Exception(s"Invalid TaskAttr for mul task ${other}")
      }
    case _ => throw new Exception("meta is None or is not a Vector of TaskAttr for mul task")
  }
  // A task without adjunct files has empty or absent metadata.
  val incApp = getAppletByName("inc", bundle)
  incApp.meta match {
    case Some(v) => v.size shouldBe 0
    case None    => ()
  }
}
it should "work correctly with pairs in a scatter" taggedAs EdgeTest in {
  val wdl = pathFromBasename("subworkflows", basename = "scatter_subworkflow_with_optional.wdl")
  // Run without "-quiet" so compiler output is visible for this edge case.
  val flags = cFlags.filter(_ != "-quiet")
  Main.compile(wdl.toString :: flags) shouldBe a[SuccessIR]
}
}
| dnanexus-rnd/dxWDL | src/test/scala/dx/compiler/GenerateIRTest.scala | Scala | apache-2.0 | 55,545 |
package ucesoft.cbm.peripheral.cia
import ucesoft.cbm.peripheral.keyboard.Keyboard
import ucesoft.cbm.peripheral.controlport.ControlPort
import ucesoft.cbm.peripheral.Connector
/**
 * Connectors wiring the CIA #1 I/O ports to the keyboard matrix and the
 * control (joystick) ports. `latch` and `ddr` come from the Connector base:
 * `latch` holds the last value written, `ddr` marks output bits (1 = output).
 */
object CIA1Connectors {
  class PortAConnector(kb:Keyboard,ctrlPort:ControlPort) extends Connector {
    val componentID = "CIA1 Port A Connector"
    // Reads combine the keyboard row state, the latched output bits
    // (input bits read back high via ~ddr), and the control-port lines,
    // all AND-ed so any grounded line pulls the corresponding bit low.
    final def read = {
      val port = ctrlPort.readPort
      kb.readRow & (latch | ~ddr) & port
    }
    // Writes select the keyboard row; bits configured as inputs (~ddr)
    // float high, and control-port lines are AND-ed in as well.
    final protected def performWrite(data:Int) = {
      val port = ctrlPort.readPort
      kb.selectRow((data | ~ddr) & port & 0xFF)
    }
  }
  class PortBConnector(kb:Keyboard,ctrlPort:ControlPort,lightPenTriggerHandler : () => Unit) extends Connector {
    val componentID = "CIA1 Port B Connector"
    // Tracks the previous state of the light-pen line (port B bit 4) so a
    // falling edge can be detected in performWrite.
    private[this] var lastLPOn = false
    final def read = {
      val port = ctrlPort.readPort
      val reg = kb.readCol & (latch | ~ddr) & port
      reg
    }
    final protected def performWrite(data:Int) : Unit = {
      val port = ctrlPort.readPort
      // Bit 4 ($10), when configured as output, drives the light-pen line;
      // the trigger fires on the high-to-low transition.
      val lpPressed = (data & ddr & 0x10) > 0
      if (lastLPOn && !lpPressed) lightPenTriggerHandler() // FF -> 00
      lastLPOn = lpPressed
      kb.selectCol((data | ~ddr) & port & 0xFF)
    }
  }
}
} | abbruzze/kernal64 | Kernal64/src/ucesoft/cbm/peripheral/cia/CIA1Connectors.scala | Scala | mit | 1,181 |
package com.metebalci
import org.scalatest._
import org.scalatest.Assertions._
// Verifies the constant lens: `get` always yields the embedded value and
// `put` leaves the target untouched.
class ConstantLensSpec extends FunSuite {
  test("constant lens") {
    val target = (1, 2)
    // `get` ignores the target and returns the constant.
    assert(Lenses.constant(4).get(target) == 4)
    // `put` discards the new value and returns the target unchanged.
    assert(Lenses.constant(4).put(3, target) == (1, 2))
  }
}
| metebalci/experiment-lenses-scala | src/test/scala/05-ConstantLensSpec.scala | Scala | gpl-2.0 | 288 |
import scala.collection.JavaConverters._
object Test extends dotty.runtime.LegacyApp {
  // Times 10 executions of `body`, in nanoseconds. The label feeds the
  // (commented-out) diagnostic println only.
  def bench(label: String)(body: => Unit): Long = {
    val start = System.nanoTime
    0.until(10).foreach(_ => body)
    val end = System.nanoTime
    //println("%s: %s ms".format(label, (end - start) / 1000.0 / 1000.0))
    end - start
  }
  // Baseline: bulk-add into a java.util.HashSet.
  def benchJava(values: java.util.Collection[Int]) = {
    bench("Java Set") {
      val set = new java.util.HashSet[Int]
      set.addAll(values)
    }
  }
  // Scala mutable HashSet bulk-add.
  def benchScala(values: Iterable[Int]) = {
    bench("Scala Set") {
      val set = new scala.collection.mutable.HashSet[Int]
      set ++= values
    }
  }
  // NOTE(review): never invoked below — dead code unless kept deliberately
  // for manual experiments.
  def benchScalaSorted(values: Iterable[Int]) = {
    bench("Scala Set sorted") {
      val set = new scala.collection.mutable.HashSet[Int]
      set ++= values.toArray.sorted
    }
  }
  // Parallel HashSet bulk-add.
  // NOTE(review): the identity `map` over a freshly created (empty) set looks
  // redundant — presumably it exercises the ParHashSet builder/combiner path;
  // confirm before simplifying.
  def benchScalaPar(values: Iterable[Int]) = {
    bench("Scala ParSet") {
      val set = new scala.collection.parallel.mutable.ParHashSet[Int] map { x => x }
      set ++= values
    }
  }
  val values = 0 until 50000
  val set = scala.collection.mutable.HashSet.empty[Int]
  set ++= values
  // warmup
  for (x <- 0 until 5) {
    benchJava(set.asJava)
    benchScala(set)
    benchScalaPar(set)
    benchJava(set.asJava)
    benchScala(set)
    benchScalaPar(set)
  }
  // Measured runs: the Scala variants must stay within 8x of the Java baseline.
  val javaset = benchJava(set.asJava)
  val scalaset = benchScala(set)
  val scalaparset = benchScalaPar(set)
  assert(scalaset < (javaset * 8), "scalaset: " + scalaset + " vs. javaset: " + javaset)
  assert(scalaparset < (javaset * 8), "scalaparset: " + scalaparset + " vs. javaset: " + javaset)
}
| folone/dotty | tests/run/t5293.scala | Scala | bsd-3-clause | 1,634 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.raft
import kafka.log.{Defaults, UnifiedLog, SegmentDeletion}
import kafka.server.KafkaConfig.{MetadataLogSegmentBytesProp, MetadataLogSegmentMillisProp, MetadataLogSegmentMinBytesProp, NodeIdProp, ProcessRolesProp, QuorumVotersProp}
import kafka.server.{KafkaConfig, KafkaRaftServer}
import kafka.utils.{MockTime, TestUtils}
import org.apache.kafka.common.errors.{InvalidConfigurationException, RecordTooLargeException}
import org.apache.kafka.common.protocol
import org.apache.kafka.common.protocol.{ObjectSerializationCache, Writable}
import org.apache.kafka.common.record.{CompressionType, MemoryRecords, SimpleRecord}
import org.apache.kafka.common.utils.Utils
import org.apache.kafka.raft.internals.BatchBuilder
import org.apache.kafka.raft._
import org.apache.kafka.server.common.serialization.RecordSerde
import org.apache.kafka.snapshot.{RawSnapshotReader, RawSnapshotWriter, SnapshotPath, Snapshots}
import org.apache.kafka.test.TestUtils.assertOptional
import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.{AfterEach, BeforeEach, Test}
import java.io.File
import java.nio.ByteBuffer
import java.nio.file.{Files, Path}
import java.util
import java.util.{Collections, Optional, Properties}
final class KafkaMetadataLogTest {
import KafkaMetadataLogTest._
// Scratch directory holding the metadata log; recreated for every test.
var tempDir: File = _
val mockTime = new MockTime()

@BeforeEach
def setUp(): Unit = {
  tempDir = TestUtils.tempDir()
}

@AfterEach
def tearDown(): Unit = {
  // Recursively delete the per-test directory so runs do not leak files.
  Utils.delete(tempDir)
}
@Test
def testConfig(): Unit = {
  val props = new Properties()
  props.put(ProcessRolesProp, util.Arrays.asList("broker"))
  props.put(QuorumVotersProp, "1@localhost:9092")
  props.put(NodeIdProp, Int.box(2))
  props.put(MetadataLogSegmentBytesProp, Int.box(10240))
  props.put(MetadataLogSegmentMillisProp, Int.box(10 * 1024))
  // A 10 KiB segment size is below the enforced minimum, so building the
  // metadata log must fail...
  assertThrows(classOf[InvalidConfigurationException], () => {
    val kafkaConfig = KafkaConfig.fromProps(props)
    val metadataConfig = MetadataLogConfig.apply(kafkaConfig, KafkaRaftClient.MAX_BATCH_SIZE_BYTES, KafkaRaftClient.MAX_FETCH_SIZE_BYTES)
    buildMetadataLog(tempDir, mockTime, metadataConfig)
  })

  // ...unless the (internal) min-bytes override is lowered to match.
  props.put(MetadataLogSegmentMinBytesProp, Int.box(10240))
  val kafkaConfig = KafkaConfig.fromProps(props)
  val metadataConfig = MetadataLogConfig.apply(kafkaConfig, KafkaRaftClient.MAX_BATCH_SIZE_BYTES, KafkaRaftClient.MAX_FETCH_SIZE_BYTES)
  buildMetadataLog(tempDir, mockTime, metadataConfig)
}
@Test
def testUnexpectedAppendOffset(): Unit = {
  val log = buildMetadataLog(tempDir, mockTime)

  val recordFoo = new SimpleRecord("foo".getBytes())
  val currentEpoch = 3
  val initialOffset = log.endOffset().offset

  log.appendAsLeader(
    MemoryRecords.withRecords(initialOffset, CompressionType.NONE, currentEpoch, recordFoo),
    currentEpoch
  )

  // Throw exception for out of order records
  // Re-appending at the now-stale offset must be rejected on both the
  // leader append path and the follower append path.
  assertThrows(
    classOf[RuntimeException],
    () => {
      log.appendAsLeader(
        MemoryRecords.withRecords(initialOffset, CompressionType.NONE, currentEpoch, recordFoo),
        currentEpoch
      )
    }
  )

  assertThrows(
    classOf[RuntimeException],
    () => {
      log.appendAsFollower(
        MemoryRecords.withRecords(initialOffset, CompressionType.NONE, currentEpoch, recordFoo)
      )
    }
  )
}
// A snapshot at the high watermark must be creatable and start out empty.
@Test
def testCreateSnapshot(): Unit = {
  val numberOfRecords = 10
  val epoch = 1
  val snapshotId = new OffsetAndEpoch(numberOfRecords, epoch)
  val log = buildMetadataLog(tempDir, mockTime)

  append(log, numberOfRecords, epoch)
  log.updateHighWatermark(new LogOffsetMetadata(numberOfRecords))
  TestUtils.resource(log.createNewSnapshot(snapshotId).get()) { snapshot =>
    snapshot.freeze()
  }

  assertEquals(0, log.readSnapshot(snapshotId).get().sizeInBytes())
}

// Snapshot ids anywhere inside a known (offset, epoch) range must be accepted.
@Test
def testCreateSnapshotFromEndOffset(): Unit = {
  val numberOfRecords = 10
  val firstEpoch = 1
  val secondEpoch = 3
  val log = buildMetadataLog(tempDir, mockTime)

  append(log, numberOfRecords, firstEpoch)
  append(log, numberOfRecords, secondEpoch)
  log.updateHighWatermark(new LogOffsetMetadata(2 * numberOfRecords))

  // Test finding the first epoch
  log.createNewSnapshot(new OffsetAndEpoch(numberOfRecords, firstEpoch)).get().close()
  log.createNewSnapshot(new OffsetAndEpoch(numberOfRecords - 1, firstEpoch)).get().close()
  log.createNewSnapshot(new OffsetAndEpoch(1, firstEpoch)).get().close()

  // Test finding the second epoch
  log.createNewSnapshot(new OffsetAndEpoch(2 * numberOfRecords, secondEpoch)).get().close()
  log.createNewSnapshot(new OffsetAndEpoch(2 * numberOfRecords - 1, secondEpoch)).get().close()
  log.createNewSnapshot(new OffsetAndEpoch(numberOfRecords + 1, secondEpoch)).get().close()
}

// Snapshots past the high watermark are invalid.
@Test
def testCreateSnapshotLaterThanHighWatermark(): Unit = {
  val numberOfRecords = 10
  val epoch = 1
  val log = buildMetadataLog(tempDir, mockTime)

  append(log, numberOfRecords, epoch)
  log.updateHighWatermark(new LogOffsetMetadata(numberOfRecords))

  assertThrows(
    classOf[IllegalArgumentException],
    () => log.createNewSnapshot(new OffsetAndEpoch(numberOfRecords + 1, epoch))
  )
}

// Snapshots claiming an epoch newer than the log has seen are invalid.
@Test
def testCreateSnapshotMuchLaterEpoch(): Unit = {
  val numberOfRecords = 10
  val epoch = 1
  val log = buildMetadataLog(tempDir, mockTime)

  append(log, numberOfRecords, epoch)
  log.updateHighWatermark(new LogOffsetMetadata(numberOfRecords))

  assertThrows(
    classOf[IllegalArgumentException],
    () => log.createNewSnapshot(new OffsetAndEpoch(numberOfRecords, epoch + 1))
  )
}

// Requests for snapshots behind the log start offset return empty.
@Test
def testCreateSnapshotBeforeLogStartOffset(): Unit = {
  val numberOfRecords = 10
  val epoch = 1
  val snapshotId = new OffsetAndEpoch(numberOfRecords-4, epoch)
  val log = buildMetadataLog(tempDir, mockTime)

  append(log, numberOfRecords, epoch)
  log.updateHighWatermark(new LogOffsetMetadata(numberOfRecords))
  TestUtils.resource(log.createNewSnapshot(snapshotId).get()) { snapshot =>
    snapshot.freeze()
  }

  // Simulate log cleanup that advances the LSO
  log.log.maybeIncrementLogStartOffset(snapshotId.offset - 1, SegmentDeletion)

  assertEquals(Optional.empty(), log.createNewSnapshot(new OffsetAndEpoch(snapshotId.offset - 2, snapshotId.epoch)))
}
@Test
def testCreateSnapshotDivergingEpoch(): Unit = {
val numberOfRecords = 10
val epoch = 2
val snapshotId = new OffsetAndEpoch(numberOfRecords, epoch)
val log = buildMetadataLog(tempDir, mockTime)
append(log, numberOfRecords, epoch)
log.updateHighWatermark(new LogOffsetMetadata(numberOfRecords))
assertThrows(
classOf[IllegalArgumentException],
() => log.createNewSnapshot(new OffsetAndEpoch(snapshotId.offset, snapshotId.epoch - 1))
)
}
@Test
def testCreateSnapshotOlderEpoch(): Unit = {
val numberOfRecords = 10
val epoch = 2
val snapshotId = new OffsetAndEpoch(numberOfRecords, epoch)
val log = buildMetadataLog(tempDir, mockTime)
append(log, numberOfRecords, epoch)
log.updateHighWatermark(new LogOffsetMetadata(numberOfRecords))
TestUtils.resource(log.createNewSnapshot(snapshotId).get()) { snapshot =>
snapshot.freeze()
}
assertThrows(
classOf[IllegalArgumentException],
() => log.createNewSnapshot(new OffsetAndEpoch(snapshotId.offset, snapshotId.epoch - 1))
)
}
@Test
def testCreateSnapshotWithMissingEpoch(): Unit = {
  // The log contains epochs 1 and 3 but not 2; snapshot ids referencing the missing
  // epoch must be rejected regardless of the offset used.
  val firstBatchRecords = 5
  val firstEpoch = 1
  val missingEpoch = firstEpoch + 1
  val secondBatchRecords = 5
  val secondEpoch = missingEpoch + 1
  val numberOfRecords = firstBatchRecords + secondBatchRecords
  val log = buildMetadataLog(tempDir, mockTime)
  append(log, firstBatchRecords, firstEpoch)
  append(log, secondBatchRecords, secondEpoch)
  log.updateHighWatermark(new LogOffsetMetadata(numberOfRecords))
  assertThrows(
    classOf[IllegalArgumentException],
    () => log.createNewSnapshot(new OffsetAndEpoch(1, missingEpoch))
  )
  assertThrows(
    classOf[IllegalArgumentException],
    () => log.createNewSnapshot(new OffsetAndEpoch(firstBatchRecords, missingEpoch))
  )
  assertThrows(
    classOf[IllegalArgumentException],
    () => log.createNewSnapshot(new OffsetAndEpoch(secondBatchRecords, missingEpoch))
  )
}
@Test
def testCreateExistingSnapshot(): Unit = {
  // Re-creating a snapshot that already exists is an idempotent no-op.
  val numberOfRecords = 10
  val epoch = 1
  val snapshotId = new OffsetAndEpoch(numberOfRecords - 1, epoch)
  val log = buildMetadataLog(tempDir, mockTime)
  append(log, numberOfRecords, epoch)
  log.updateHighWatermark(new LogOffsetMetadata(numberOfRecords))
  TestUtils.resource(log.createNewSnapshot(snapshotId).get()) { snapshot =>
    snapshot.freeze()
  }
  assertEquals(Optional.empty(), log.createNewSnapshot(snapshotId),
    "Creating an existing snapshot should not do anything")
}
@Test
def testTopicId(): Unit = {
  // The metadata log always reports the well-known metadata topic id.
  val log = buildMetadataLog(tempDir, mockTime)
  assertEquals(KafkaRaftServer.MetadataTopicId, log.topicId())
}
@Test
def testReadMissingSnapshot(): Unit = {
  // Reading a snapshot that was never created yields Optional.empty, not an error.
  val log = buildMetadataLog(tempDir, mockTime)
  assertEquals(Optional.empty(), log.readSnapshot(new OffsetAndEpoch(10, 0)))
}
@Test
def testDeleteNonExistentSnapshot(): Unit = {
  // Deleting up to a snapshot id that does not exist returns false and must leave
  // the log (start offset, epoch, end offset, high watermark) untouched.
  val log = buildMetadataLog(tempDir, mockTime)
  val offset = 10
  val epoch = 0
  append(log, offset, epoch)
  log.updateHighWatermark(new LogOffsetMetadata(offset))
  assertFalse(log.deleteBeforeSnapshot(new OffsetAndEpoch(2L, epoch)))
  assertEquals(0, log.startOffset)
  assertEquals(epoch, log.lastFetchedEpoch)
  assertEquals(offset, log.endOffset().offset)
  assertEquals(offset, log.highWatermark.offset)
}
@Test
def testTruncateFullyToLatestSnapshot(): Unit = {
  // When the latest snapshot is ahead of the log end offset, a full truncation
  // resets start/end offset, epoch and high watermark to the snapshot id.
  val log = buildMetadataLog(tempDir, mockTime)
  val numberOfRecords = 10
  val epoch = 0
  // Snapshot at the same epoch but a greater offset than the log end.
  val sameEpochSnapshotId = new OffsetAndEpoch(2 * numberOfRecords, epoch)
  append(log, numberOfRecords, epoch)
  TestUtils.resource(log.storeSnapshot(sameEpochSnapshotId).get()) { snapshot =>
    snapshot.freeze()
  }
  assertTrue(log.truncateToLatestSnapshot())
  assertEquals(sameEpochSnapshotId.offset, log.startOffset)
  assertEquals(sameEpochSnapshotId.epoch, log.lastFetchedEpoch)
  assertEquals(sameEpochSnapshotId.offset, log.endOffset().offset)
  assertEquals(sameEpochSnapshotId.offset, log.highWatermark.offset)
  // Snapshot at a greater epoch and a greater offset than the log end.
  val greaterEpochSnapshotId = new OffsetAndEpoch(3 * numberOfRecords, epoch + 1)
  append(log, numberOfRecords, epoch)
  TestUtils.resource(log.storeSnapshot(greaterEpochSnapshotId).get()) { snapshot =>
    snapshot.freeze()
  }
  assertTrue(log.truncateToLatestSnapshot())
  assertEquals(greaterEpochSnapshotId.offset, log.startOffset)
  assertEquals(greaterEpochSnapshotId.epoch, log.lastFetchedEpoch)
  assertEquals(greaterEpochSnapshotId.offset, log.endOffset().offset)
  assertEquals(greaterEpochSnapshotId.offset, log.highWatermark.offset)
}
@Test
def testTruncateWillRemoveOlderSnapshot(): Unit = {
  // After truncating to the latest snapshot, all older snapshot files must be
  // scheduled for deletion and gone from the log directory once the delete delay elapses.
  val (logDir, log, config) = buildMetadataLogAndDir(tempDir, mockTime)
  val numberOfRecords = 10
  val epoch = 1
  // Three progressively newer snapshots within the log range...
  append(log, 1, epoch - 1)
  val oldSnapshotId1 = new OffsetAndEpoch(1, epoch - 1)
  TestUtils.resource(log.storeSnapshot(oldSnapshotId1).get()) { snapshot =>
    snapshot.freeze()
  }
  append(log, 1, epoch)
  val oldSnapshotId2 = new OffsetAndEpoch(2, epoch)
  TestUtils.resource(log.storeSnapshot(oldSnapshotId2).get()) { snapshot =>
    snapshot.freeze()
  }
  append(log, numberOfRecords - 2, epoch)
  val oldSnapshotId3 = new OffsetAndEpoch(numberOfRecords, epoch)
  TestUtils.resource(log.storeSnapshot(oldSnapshotId3).get()) { snapshot =>
    snapshot.freeze()
  }
  // ...and one past the log end offset, which becomes the truncation target.
  val greaterSnapshotId = new OffsetAndEpoch(3 * numberOfRecords, epoch)
  append(log, numberOfRecords, epoch)
  TestUtils.resource(log.storeSnapshot(greaterSnapshotId).get()) { snapshot =>
    snapshot.freeze()
  }
  assertNotEquals(log.earliestSnapshotId(), log.latestSnapshotId())
  assertTrue(log.truncateToLatestSnapshot())
  assertEquals(log.earliestSnapshotId(), log.latestSnapshotId())
  log.close()
  // Snapshot files are deleted asynchronously after fileDeleteDelayMs.
  mockTime.sleep(config.fileDeleteDelayMs)
  // Assert that the log dir doesn't contain any older snapshots
  // NOTE(review): `log.startOffset` is read after `log.close()` -- presumably it
  // returns the cached in-memory value; confirm this is intended.
  Files
    .walk(logDir, 1)
    .map[Optional[SnapshotPath]](Snapshots.parse)
    .filter(_.isPresent)
    .forEach { path =>
      assertFalse(path.get.snapshotId.offset < log.startOffset)
    }
}
@Test
def testDoesntTruncateFully(): Unit = {
  // Snapshots that are not strictly newer than the log (older epoch, or an offset
  // not beyond the log end) must not trigger a full truncation.
  val log = buildMetadataLog(tempDir, mockTime)
  val numberOfRecords = 10
  val epoch = 1
  append(log, numberOfRecords, epoch)
  val olderEpochSnapshotId = new OffsetAndEpoch(numberOfRecords, epoch - 1)
  TestUtils.resource(log.storeSnapshot(olderEpochSnapshotId).get()) { snapshot =>
    snapshot.freeze()
  }
  assertFalse(log.truncateToLatestSnapshot())
  append(log, numberOfRecords, epoch)
  val olderOffsetSnapshotId = new OffsetAndEpoch(numberOfRecords, epoch)
  TestUtils.resource(log.storeSnapshot(olderOffsetSnapshotId).get()) { snapshot =>
    snapshot.freeze()
  }
  assertFalse(log.truncateToLatestSnapshot())
}
@Test
def testCleanupPartialSnapshots(): Unit = {
  // On (re)open, the log must delete leftover partial (temp) snapshot files while
  // keeping frozen snapshots intact.
  // The log config element of the tuple is unused here, so bind it to `_`.
  val (logDir, log, _) = buildMetadataLogAndDir(tempDir, mockTime)
  val numberOfRecords = 10
  val epoch = 1
  val snapshotId = new OffsetAndEpoch(1, epoch)
  append(log, numberOfRecords, epoch)
  TestUtils.resource(log.storeSnapshot(snapshotId).get()) { snapshot =>
    snapshot.freeze()
  }
  log.close()
  // Create a few partial snapshots
  Snapshots.createTempFile(logDir, new OffsetAndEpoch(0, epoch - 1))
  Snapshots.createTempFile(logDir, new OffsetAndEpoch(1, epoch))
  Snapshots.createTempFile(logDir, new OffsetAndEpoch(2, epoch + 1))
  val secondLog = buildMetadataLog(tempDir, mockTime)
  assertEquals(snapshotId, secondLog.latestSnapshotId().get)
  // NOTE(review): the next three assertions read from the *closed* first log
  // rather than from `secondLog` -- presumably they rely on cached in-memory
  // state; confirm whether `secondLog` was intended.
  assertEquals(0, log.startOffset)
  assertEquals(epoch, log.lastFetchedEpoch)
  assertEquals(numberOfRecords, log.endOffset().offset)
  assertEquals(0, secondLog.highWatermark.offset)
  // Assert that the log dir doesn't contain any partial snapshots
  Files
    .walk(logDir, 1)
    .map[Optional[SnapshotPath]](Snapshots.parse)
    .filter(_.isPresent)
    .forEach { path =>
      assertFalse(path.get.partial)
    }
}
@Test
def testCleanupOlderSnapshots(): Unit = {
  // On (re)open, the log must retain only the latest snapshot, advance the start
  // offset to it, and remove all older snapshot files after the delete delay.
  val (logDir, log, config) = buildMetadataLogAndDir(tempDir, mockTime)
  val numberOfRecords = 10
  val epoch = 1
  // Three progressively newer snapshots within the log range...
  append(log, 1, epoch - 1)
  val oldSnapshotId1 = new OffsetAndEpoch(1, epoch - 1)
  TestUtils.resource(log.storeSnapshot(oldSnapshotId1).get()) { snapshot =>
    snapshot.freeze()
  }
  append(log, 1, epoch)
  val oldSnapshotId2 = new OffsetAndEpoch(2, epoch)
  TestUtils.resource(log.storeSnapshot(oldSnapshotId2).get()) { snapshot =>
    snapshot.freeze()
  }
  append(log, numberOfRecords - 2, epoch)
  val oldSnapshotId3 = new OffsetAndEpoch(numberOfRecords, epoch)
  TestUtils.resource(log.storeSnapshot(oldSnapshotId3).get()) { snapshot =>
    snapshot.freeze()
  }
  // ...and one past the log end offset, which wins on reopen.
  val greaterSnapshotId = new OffsetAndEpoch(3 * numberOfRecords, epoch)
  append(log, numberOfRecords, epoch)
  TestUtils.resource(log.storeSnapshot(greaterSnapshotId).get()) { snapshot =>
    snapshot.freeze()
  }
  log.close()
  val secondLog = buildMetadataLog(tempDir, mockTime)
  assertEquals(greaterSnapshotId, secondLog.latestSnapshotId().get)
  assertEquals(3 * numberOfRecords, secondLog.startOffset)
  assertEquals(epoch, secondLog.lastFetchedEpoch)
  // Snapshot files are deleted asynchronously after fileDeleteDelayMs.
  mockTime.sleep(config.fileDeleteDelayMs)
  // Assert that the log dir doesn't contain any older snapshots
  // NOTE(review): `log.startOffset` is read after `log.close()` -- presumably the
  // cached in-memory value; confirm `secondLog.startOffset` was not intended.
  Files
    .walk(logDir, 1)
    .map[Optional[SnapshotPath]](Snapshots.parse)
    .filter(_.isPresent)
    .forEach { path =>
      assertFalse(path.get.snapshotId.offset < log.startOffset)
    }
}
@Test
def testCreateReplicatedLogTruncatesFully(): Unit = {
  // Reopening a log whose latest snapshot is past the log end must fully truncate
  // the new instance to the snapshot id.
  val log = buildMetadataLog(tempDir, mockTime)
  val numberOfRecords = 10
  val epoch = 1
  val snapshotId = new OffsetAndEpoch(numberOfRecords + 1, epoch + 1)
  append(log, numberOfRecords, epoch)
  TestUtils.resource(log.storeSnapshot(snapshotId).get()) { snapshot =>
    snapshot.freeze()
  }
  log.close()
  val secondLog = buildMetadataLog(tempDir, mockTime)
  assertEquals(snapshotId, secondLog.latestSnapshotId().get)
  assertEquals(snapshotId.offset, secondLog.startOffset)
  assertEquals(snapshotId.epoch, secondLog.lastFetchedEpoch)
  assertEquals(snapshotId.offset, secondLog.endOffset().offset)
  assertEquals(snapshotId.offset, secondLog.highWatermark.offset)
}
@Test
def testMaxBatchSize(): Unit = {
  // Batches above maxBatchSizeInBytes are rejected with RecordTooLargeException;
  // batches at or below the limit append starting at offset 0.
  val leaderEpoch = 5
  val maxBatchSizeInBytes = 16384
  val recordSize = 64
  val log = buildMetadataLog(tempDir, mockTime, DefaultMetadataLogConfig.copy(maxBatchSizeInBytes = maxBatchSizeInBytes))
  val oversizeBatch = buildFullBatch(leaderEpoch, recordSize, maxBatchSizeInBytes + recordSize)
  assertThrows(classOf[RecordTooLargeException], () => {
    log.appendAsLeader(oversizeBatch, leaderEpoch)
  })
  val undersizeBatch = buildFullBatch(leaderEpoch, recordSize, maxBatchSizeInBytes)
  val appendInfo = log.appendAsLeader(undersizeBatch, leaderEpoch)
  assertEquals(0L, appendInfo.firstOffset)
}
@Test
def testTruncateBelowHighWatermark(): Unit = {
  // Truncating below the committed high watermark is illegal and must leave the
  // high watermark unchanged.
  val log = buildMetadataLog(tempDir, mockTime)
  val numRecords = 10
  val epoch = 5
  append(log, numRecords, epoch)
  assertEquals(numRecords.toLong, log.endOffset.offset)
  log.updateHighWatermark(new LogOffsetMetadata(numRecords))
  assertEquals(numRecords.toLong, log.highWatermark.offset)
  assertThrows(classOf[IllegalArgumentException], () => log.truncateTo(5L))
  assertEquals(numRecords.toLong, log.highWatermark.offset)
}
/**
 * Builds a MemoryRecords batch filled with fixed-size records until no further
 * record fits within `maxBatchSizeInBytes`.
 *
 * @param leaderEpoch         epoch stamped on the batch
 * @param recordSize          payload size, in bytes, of each record
 * @param maxBatchSizeInBytes capacity used both for the buffer and the builder limit
 */
private def buildFullBatch(
  leaderEpoch: Int,
  recordSize: Int,
  maxBatchSizeInBytes: Int
): MemoryRecords = {
  val buffer = ByteBuffer.allocate(maxBatchSizeInBytes)
  val batchBuilder = new BatchBuilder[Array[Byte]](
    buffer,
    new ByteArraySerde,
    CompressionType.NONE,
    0L,
    mockTime.milliseconds(),
    false,
    leaderEpoch,
    maxBatchSizeInBytes
  )
  val serializationCache = new ObjectSerializationCache
  val records = Collections.singletonList(new Array[Byte](recordSize))
  // bytesNeeded returns a present value once appending `records` would exceed the
  // batch limit, so keep appending while it is empty.
  while (!batchBuilder.bytesNeeded(records, serializationCache).isPresent) {
    batchBuilder.appendRecord(records.get(0), serializationCache)
  }
  batchBuilder.build()
}
@Test
def testValidateEpochGreaterThanLastKnownEpoch(): Unit = {
  // An epoch newer than anything in the log is DIVERGING; the returned position is
  // the log end offset at the last known epoch.
  val log = buildMetadataLog(tempDir, mockTime)
  val numberOfRecords = 1
  val epoch = 1
  append(log, numberOfRecords, epoch)
  val resultOffsetAndEpoch = log.validateOffsetAndEpoch(numberOfRecords, epoch + 1)
  assertEquals(ValidOffsetAndEpoch.Kind.DIVERGING, resultOffsetAndEpoch.kind)
  assertEquals(new OffsetAndEpoch(log.endOffset.offset, epoch), resultOffsetAndEpoch.offsetAndEpoch())
}
@Test
def testValidateEpochLessThanOldestSnapshotEpoch(): Unit = {
  // An epoch older than the oldest snapshot's epoch resolves to SNAPSHOT,
  // pointing the follower at the snapshot id.
  val log = buildMetadataLog(tempDir, mockTime)
  val numberOfRecords = 10
  val epoch = 1
  append(log, numberOfRecords, epoch)
  log.updateHighWatermark(new LogOffsetMetadata(numberOfRecords))
  val snapshotId = new OffsetAndEpoch(numberOfRecords, epoch)
  TestUtils.resource(log.createNewSnapshot(snapshotId).get()) { snapshot =>
    snapshot.freeze()
  }
  val resultOffsetAndEpoch = log.validateOffsetAndEpoch(numberOfRecords, epoch - 1)
  assertEquals(ValidOffsetAndEpoch.Kind.SNAPSHOT, resultOffsetAndEpoch.kind)
  assertEquals(snapshotId, resultOffsetAndEpoch.offsetAndEpoch())
}
@Test
def testValidateOffsetLessThanOldestSnapshotOffset(): Unit = {
  // An offset below the oldest snapshot's offset resolves to SNAPSHOT, pointing
  // the follower at the snapshot id.
  val log = buildMetadataLog(tempDir, mockTime)
  val offset = 2
  val epoch = 1
  append(log, offset, epoch)
  log.updateHighWatermark(new LogOffsetMetadata(offset))
  val snapshotId = new OffsetAndEpoch(offset, epoch)
  TestUtils.resource(log.createNewSnapshot(snapshotId).get()) { snapshot =>
    snapshot.freeze()
  }
  // Simulate log cleaning advancing the LSO
  log.log.maybeIncrementLogStartOffset(offset, SegmentDeletion)
  val resultOffsetAndEpoch = log.validateOffsetAndEpoch(offset - 1, epoch)
  assertEquals(ValidOffsetAndEpoch.Kind.SNAPSHOT, resultOffsetAndEpoch.kind)
  assertEquals(snapshotId, resultOffsetAndEpoch.offsetAndEpoch())
}
@Test
def testValidateOffsetEqualToOldestSnapshotOffset(): Unit = {
  // An offset exactly at the oldest snapshot's offset (with a matching epoch) is VALID.
  val log = buildMetadataLog(tempDir, mockTime)
  val offset = 2
  val epoch = 1
  append(log, offset, epoch)
  log.updateHighWatermark(new LogOffsetMetadata(offset))
  val snapshotId = new OffsetAndEpoch(offset, epoch)
  TestUtils.resource(log.createNewSnapshot(snapshotId).get()) { snapshot =>
    snapshot.freeze()
  }
  val resultOffsetAndEpoch = log.validateOffsetAndEpoch(offset, epoch)
  assertEquals(ValidOffsetAndEpoch.Kind.VALID, resultOffsetAndEpoch.kind)
  assertEquals(snapshotId, resultOffsetAndEpoch.offsetAndEpoch())
}
@Test
def testValidateUnknownEpochLessThanLastKnownGreaterThanOldestSnapshot(): Unit = {
  // Epoch 3 is absent from the log (epochs are 1, 2, 4). Validation diverges and
  // returns the end offset of the largest epoch below the requested one (epoch 2,
  // which ends at offset 10 + 5 + 5 = 20).
  val offset = 10
  val numOfRecords = 5
  val log = buildMetadataLog(tempDir, mockTime)
  log.updateHighWatermark(new LogOffsetMetadata(offset))
  val snapshotId = new OffsetAndEpoch(offset, 1)
  TestUtils.resource(log.storeSnapshot(snapshotId).get()) { snapshot =>
    snapshot.freeze()
  }
  log.truncateToLatestSnapshot()
  append(log, numOfRecords, epoch = 1)
  append(log, numOfRecords, epoch = 2)
  append(log, numOfRecords, epoch = 4)
  // offset is not equal to oldest snapshot's offset
  val resultOffsetAndEpoch = log.validateOffsetAndEpoch(100, 3)
  assertEquals(ValidOffsetAndEpoch.Kind.DIVERGING, resultOffsetAndEpoch.kind)
  assertEquals(new OffsetAndEpoch(20, 2), resultOffsetAndEpoch.offsetAndEpoch())
}
@Test
def testValidateEpochLessThanFirstEpochInLog(): Unit = {
  // Requested epoch 2 is below the first epoch in the log (3); validation diverges
  // back to the oldest snapshot id.
  val offset = 10
  val numOfRecords = 5
  val log = buildMetadataLog(tempDir, mockTime)
  log.updateHighWatermark(new LogOffsetMetadata(offset))
  val snapshotId = new OffsetAndEpoch(offset, 1)
  TestUtils.resource(log.storeSnapshot(snapshotId).get()) { snapshot =>
    snapshot.freeze()
  }
  log.truncateToLatestSnapshot()
  append(log, numOfRecords, epoch = 3)
  // offset is not equal to oldest snapshot's offset
  val resultOffsetAndEpoch = log.validateOffsetAndEpoch(100, 2)
  assertEquals(ValidOffsetAndEpoch.Kind.DIVERGING, resultOffsetAndEpoch.kind)
  assertEquals(snapshotId, resultOffsetAndEpoch.offsetAndEpoch())
}
@Test
def testValidateOffsetGreatThanEndOffset(): Unit = {
  // An offset past the log end offset diverges; the returned position is the
  // current end offset at the known epoch.
  val log = buildMetadataLog(tempDir, mockTime)
  val numberOfRecords = 1
  val epoch = 1
  append(log, numberOfRecords, epoch)
  val resultOffsetAndEpoch = log.validateOffsetAndEpoch(numberOfRecords + 1, epoch)
  assertEquals(ValidOffsetAndEpoch.Kind.DIVERGING, resultOffsetAndEpoch.kind)
  assertEquals(new OffsetAndEpoch(log.endOffset.offset, epoch), resultOffsetAndEpoch.offsetAndEpoch())
}
@Test
def testValidateOffsetLessThanLEO(): Unit = {
  // Offset 11 with epoch 1 lies inside the epoch-2 range (records 10..19), so
  // validation diverges back to the end of epoch 1 (offset 10).
  val log = buildMetadataLog(tempDir, mockTime)
  val numberOfRecords = 10
  val epoch = 1
  append(log, numberOfRecords, epoch)
  append(log, numberOfRecords, epoch + 1)
  val resultOffsetAndEpoch = log.validateOffsetAndEpoch(11, epoch)
  assertEquals(ValidOffsetAndEpoch.Kind.DIVERGING, resultOffsetAndEpoch.kind)
  assertEquals(new OffsetAndEpoch(10, epoch), resultOffsetAndEpoch.offsetAndEpoch())
}
@Test
def testValidateValidEpochAndOffset(): Unit = {
  // An (offset, epoch) pair that matches the log is echoed back as VALID.
  val log = buildMetadataLog(tempDir, mockTime)
  val numberOfRecords = 5
  val epoch = 1
  append(log, numberOfRecords, epoch)
  val resultOffsetAndEpoch = log.validateOffsetAndEpoch(numberOfRecords - 1, epoch)
  assertEquals(ValidOffsetAndEpoch.Kind.VALID, resultOffsetAndEpoch.kind)
  assertEquals(new OffsetAndEpoch(numberOfRecords - 1, epoch), resultOffsetAndEpoch.offsetAndEpoch())
}
@Test
def testAdvanceLogStartOffsetAfterCleaning(): Unit = {
  // Cleaning requires a committed high watermark and at least one snapshot; once
  // both exist, cleaning must advance the log start offset to the newest snapshot.
  val config = MetadataLogConfig(
    logSegmentBytes = 512,
    logSegmentMinBytes = 512,
    logSegmentMillis = 10 * 1000,
    retentionMaxBytes = 256,
    retentionMillis = 60 * 1000,
    maxBatchSizeInBytes = 512,
    maxFetchSizeInBytes = DefaultMetadataLogConfig.maxFetchSizeInBytes,
    fileDeleteDelayMs = Defaults.FileDeleteDelayMs,
    nodeId = 1
  )
  // (The previous `config.copy()` here was a dead expression and has been removed.)
  val log = buildMetadataLog(tempDir, mockTime, config)

  // Generate some segments
  for(_ <- 0 to 100) {
    append(log, 47, 1) // An odd number of records to avoid offset alignment
  }
  assertFalse(log.maybeClean(), "Should not clean since HW was still 0")

  log.updateHighWatermark(new LogOffsetMetadata(4000))
  assertFalse(log.maybeClean(), "Should not clean since no snapshots exist")

  val snapshotId1 = new OffsetAndEpoch(1000, 1)
  TestUtils.resource(log.storeSnapshot(snapshotId1).get()) { snapshot =>
    append(snapshot, 100)
    snapshot.freeze()
  }
  val snapshotId2 = new OffsetAndEpoch(2000, 1)
  TestUtils.resource(log.storeSnapshot(snapshotId2).get()) { snapshot =>
    append(snapshot, 100)
    snapshot.freeze()
  }

  val lsoBefore = log.startOffset()
  assertTrue(log.maybeClean(), "Expected to clean since there was at least one snapshot")
  val lsoAfter = log.startOffset()
  assertTrue(lsoAfter > lsoBefore, "Log Start Offset should have increased after cleaning")
  // The message previously claimed "less than or equal" while the check was strict
  // equality; use assertEquals with a message that matches the assertion.
  assertEquals(snapshotId2.offset, lsoAfter, "Expected the Log Start Offset to equal the latest snapshot offset")
}
@Test
def testDeleteSnapshots(): Unit = {
  // Generate some logs and a few snapshots, set retention low and verify that cleaning occurs
  val config = DefaultMetadataLogConfig.copy(
    logSegmentBytes = 1024,
    logSegmentMinBytes = 1024,
    logSegmentMillis = 10 * 1000,
    retentionMaxBytes = 1024,
    retentionMillis = 60 * 1000,
    maxBatchSizeInBytes = 100
  )
  val log = buildMetadataLog(tempDir, mockTime, config)
  for(_ <- 0 to 1000) {
    append(log, 1, 1)
  }
  log.updateHighWatermark(new LogOffsetMetadata(1001))

  // Six snapshots at increasing offsets; retention should keep only the newest.
  for(offset <- Seq(100, 200, 300, 400, 500, 600)) {
    val snapshotId = new OffsetAndEpoch(offset, 1)
    TestUtils.resource(log.storeSnapshot(snapshotId).get()) { snapshot =>
      append(snapshot, 10)
      snapshot.freeze()
    }
  }

  assertEquals(6, log.snapshotCount())
  assertTrue(log.maybeClean())
  assertEquals(1, log.snapshotCount(), "Expected only one snapshot after cleaning")
  assertOptional(log.latestSnapshotId(), (snapshotId: OffsetAndEpoch) => {
    assertEquals(600, snapshotId.offset)
  })
  // Fixed argument order: JUnit's assertEquals takes (expected, actual); the
  // previous call had them swapped, which produces misleading failure messages.
  assertEquals(600, log.startOffset)
}
@Test
def testSoftRetentionLimit(): Unit = {
  // Set retention equal to the segment size and generate slightly more than one segment of logs
  val config = DefaultMetadataLogConfig.copy(
    logSegmentBytes = 10240,
    logSegmentMinBytes = 10240,
    logSegmentMillis = 10 * 1000,
    retentionMaxBytes = 10240,
    retentionMillis = 60 * 1000,
    maxBatchSizeInBytes = 100
  )
  val log = buildMetadataLog(tempDir, mockTime, config)
  for(_ <- 0 to 2000) {
    append(log, 1, 1)
  }
  log.updateHighWatermark(new LogOffsetMetadata(2000))
  // Then generate two snapshots
  val snapshotId1 = new OffsetAndEpoch(1000, 1)
  TestUtils.resource(log.storeSnapshot(snapshotId1).get()) { snapshot =>
    append(snapshot, 500)
    snapshot.freeze()
  }
  // Then generate a snapshot
  val snapshotId2 = new OffsetAndEpoch(2000, 1)
  TestUtils.resource(log.storeSnapshot(snapshotId2).get()) { snapshot =>
    append(snapshot, 500)
    snapshot.freeze()
  }
  // Cleaning should occur, but resulting size will not be under retention limit since we have to keep one snapshot
  assertTrue(log.maybeClean())
  assertEquals(1, log.snapshotCount(), "Expected one snapshot after cleaning")
  assertOptional(log.latestSnapshotId(), (snapshotId: OffsetAndEpoch) => {
    assertEquals(2000, snapshotId.offset, "Unexpected offset for latest snapshot")
    assertOptional(log.readSnapshot(snapshotId), (reader: RawSnapshotReader) => {
      // Retention is "soft": one snapshot is always retained even if that keeps
      // the total size above retentionMaxBytes.
      assertTrue(reader.sizeInBytes() + log.log.size > config.retentionMaxBytes)
    })
  })
}
}
object KafkaMetadataLogTest {

  /** Serde that stores each record payload as a raw byte array (identity encoding). */
  class ByteArraySerde extends RecordSerde[Array[Byte]] {
    override def recordSize(data: Array[Byte], serializationCache: ObjectSerializationCache): Int = {
      data.length
    }
    override def write(data: Array[Byte], serializationCache: ObjectSerializationCache, out: Writable): Unit = {
      out.writeByteArray(data)
    }
    override def read(input: protocol.Readable, size: Int): Array[Byte] = {
      val array = new Array[Byte](size)
      input.readArray(array)
      array
    }
  }

  // Baseline log configuration shared by the tests; individual tests override
  // fields via `copy`.
  val DefaultMetadataLogConfig = MetadataLogConfig(
    logSegmentBytes = 100 * 1024,
    logSegmentMinBytes = 100 * 1024,
    logSegmentMillis = 10 * 1000,
    retentionMaxBytes = 100 * 1024,
    retentionMillis = 60 * 1000,
    maxBatchSizeInBytes = KafkaRaftClient.MAX_BATCH_SIZE_BYTES,
    maxFetchSizeInBytes = KafkaRaftClient.MAX_FETCH_SIZE_BYTES,
    fileDeleteDelayMs = Defaults.FileDeleteDelayMs,
    nodeId = 1
  )

  /**
   * Creates a metadata log under `tempDir` and returns (log directory, log, config)
   * so callers can inspect on-disk state.
   */
  def buildMetadataLogAndDir(
    tempDir: File,
    time: MockTime,
    metadataLogConfig: MetadataLogConfig = DefaultMetadataLogConfig
  ): (Path, KafkaMetadataLog, MetadataLogConfig) = {
    val logDir = createLogDirectory(
      tempDir,
      UnifiedLog.logDirName(KafkaRaftServer.MetadataPartition)
    )
    val metadataLog = KafkaMetadataLog(
      KafkaRaftServer.MetadataPartition,
      KafkaRaftServer.MetadataTopicId,
      logDir,
      time,
      time.scheduler,
      metadataLogConfig
    )
    (logDir.toPath, metadataLog, metadataLogConfig)
  }

  /** Convenience wrapper around [[buildMetadataLogAndDir]] for tests that only need the log. */
  def buildMetadataLog(
    tempDir: File,
    time: MockTime,
    metadataLogConfig: MetadataLogConfig = DefaultMetadataLogConfig,
  ): KafkaMetadataLog = {
    val (_, log, _) = buildMetadataLogAndDir(tempDir, time, metadataLogConfig)
    log
  }

  /** Appends `numberOfRecords` simple records at `epoch`, starting at the current end offset. */
  def append(log: ReplicatedLog, numberOfRecords: Int, epoch: Int): LogAppendInfo = {
    log.appendAsLeader(
      MemoryRecords.withRecords(
        log.endOffset().offset,
        CompressionType.NONE,
        epoch,
        (0 until numberOfRecords).map(number => new SimpleRecord(number.toString.getBytes)): _*
      ),
      epoch
    )
  }

  /** Appends `numberOfRecords` simple records (base offset 0, epoch 0) to a snapshot writer. */
  def append(snapshotWriter: RawSnapshotWriter, numberOfRecords: Int): Unit = {
    snapshotWriter.append(MemoryRecords.withRecords(
      0,
      CompressionType.NONE,
      0,
      (0 until numberOfRecords).map(number => new SimpleRecord(number.toString.getBytes)): _*
    ))
  }

  /** Creates (if necessary) and returns the directory `logDir/logDirName`. */
  private def createLogDirectory(logDir: File, logDirName: String): File = {
    val logDirPath = logDir.getAbsolutePath
    val dir = new File(logDirPath, logDirName)
    if (!Files.exists(dir.toPath)) {
      Files.createDirectories(dir.toPath)
    }
    dir
  }
}
| guozhangwang/kafka | core/src/test/scala/kafka/raft/KafkaMetadataLogTest.scala | Scala | apache-2.0 | 32,336 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.bforms.core
import cats.instances.either._
import cats.instances.list._
import cats.syntax.either._
import cats.syntax.traverse._
import play.api.libs.json._
import uk.gov.hmrc.bforms.exceptions.InvalidState
import uk.gov.hmrc.bforms.models._
object FormValidator {

  /**
   * Reads the list of `FormField`s from the "fields" property of the submitted JSON.
   *
   * Returns `Left(InvalidState)` with a diagnostic message (including the offending
   * input) when the JSON cannot be parsed as `List[FormField]`.
   */
  def conform(json: JsValue /* , schema: JsonSchema */ ): Opt[List[FormField]] = {
    /* for {
     * res <- TemplateValidator.conform(schema, json).toEither
     * } yield res */
    // NOTE(review): `\\` is play-json's *recursive* lookup, collecting every
    // "fields" node in the document rather than only the top-level one -- confirm
    // that `\` was not intended here.
    val res = (json \\ "fields").validate[List[FormField]]
    res match {
      case JsSuccess(success, _) => Right(success)
      case JsError(error) => Left(InvalidState(s"""|Error when reading 'FormField' class:
                                                   |Error: $error
                                                   |Input json: """.stripMargin + Json.prettyPrint(json)))
    }
  }

  /**
   * Validates the submitted form fields against a template section:
   *  - every mandatory (Text/Choice) field must be present with a non-empty value;
   *  - every submitted field must exist in the section's template.
   */
  def validate(formFields: List[FormField], section: Section): Opt[Unit] = {
    // Ids of fields that were actually filled in (empty values don't count).
    val ffSet = formFields.filterNot(_.value.isEmpty()).map(_.id).toSet

    // Fold the section's template fields into:
    //  - a map from every expected field id to its template definition
    //    (Address/Date expand into several component ids), and
    //  - the set of mandatory field ids (only Text/Choice can be mandatory here).
    // NOTE(review): the match covers Address, Date, Text and Choice only -- any
    // other component type would throw a MatchError; confirm these are exhaustive.
    val (templateFieldsMap, requiredFields) =
      section.fields.foldLeft((Map.empty[FieldId, FieldValue], Set.empty[FieldId])) {
        case ((acc, reqAcc), fieldValue) =>
          fieldValue.`type` match {
            case Address =>
              val res: Map[FieldId, FieldValue] = Address.fields(fieldValue.id).map(_ -> fieldValue).toMap
              val accRes = acc ++ res
              (accRes, reqAcc)
            case Date(_, _, _) =>
              val res: Map[FieldId, FieldValue] = Date.fields(fieldValue.id).map(_ -> fieldValue).toMap
              val accRes = acc ++ res
              (accRes, reqAcc)
            case Text(_) | Choice(_, _, _, _) =>
              val id = fieldValue.id
              val accRes = acc + (id -> fieldValue)
              val reqAccRes =
                fieldValue.mandatory match {
                  case true => reqAcc + id
                  case false => reqAcc
                }
              (accRes, reqAccRes)
          }
      }

    val missingRequiredFields = (requiredFields diff ffSet)
    val requirementCheck: Opt[Unit] = if (missingRequiredFields.isEmpty) {
      Right(())
    } else {
      Left(InvalidState(s"Required fields ${missingRequiredFields.mkString(",")} are missing in form submission."))
    }

    // Pair each submitted field with its template definition; unknown ids fail.
    val formFieldWithFieldValues: List[Opt[(FormField, FieldValue)]] =
      formFields.map { formField =>
        templateFieldsMap.get(formField.id) match {
          case Some(templateField) => Right((formField, templateField))
          case None => Left(InvalidState(s"Field ${formField.id} is not part of the template"))
        }
      }
    // cats sequenceU: List[Either[E, A]] => Either[E, List[A]] (fails on first Left).
    val formFieldWithFieldValuesU: Opt[List[(FormField, FieldValue)]] = formFieldWithFieldValues.sequenceU
    // TODO - All necessary validation of form fields based on their format
    for {
      _ <- requirementCheck
      _ <- formFieldWithFieldValuesU
    } yield ()
  }
}
| VlachJosef/bforms | app/uk/gov/hmrc/bforms/core/FormValidator.scala | Scala | apache-2.0 | 3,539 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.tools.status
import com.beust.jcommander.{Parameter, ParameterException, Parameters}
import org.geotools.data.DataStore
import org.locationtech.geomesa.tools.utils.KeywordParamSplitter
import org.locationtech.geomesa.tools.{Command, DataStoreCommand, RequiredTypeNameParam}
/**
 * CLI command that adds, removes or lists keywords on an existing GeoTools schema.
 * `--removeAll` prompts for interactive confirmation before wiping all keywords.
 */
trait KeywordsCommand[DS <: DataStore] extends DataStoreCommand[DS] {

  override val name: String = "keywords"
  override def params: KeywordsParams

  override def execute(): Unit = withDataStore(modifyKeywords)

  /**
   * Applies the requested keyword changes to the schema named by `params.featureName`.
   *
   * @throws ParameterException if the feature type does not exist
   */
  protected def modifyKeywords(ds: DS): Unit = {
    import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType._

    import scala.collection.JavaConversions._

    val sft = ds.getSchema(params.featureName)
    if (sft == null) {
      throw new ParameterException(s"Feature '${params.featureName}' not found")
    }

    if (params.removeAll) {
      // System.console() returns null when stdin/stdout are not an interactive
      // terminal, and readLine() returns null on EOF; either case previously threw
      // a NullPointerException. Treat both as "not confirmed" and abort.
      val confirm = Option(System.console())
        .flatMap(c => Option(c.readLine("Remove all keywords? (y/n): ")))
        .map(_.toLowerCase())
      if (confirm.contains("y") || confirm.contains("yes")) {
        sft.removeAllKeywords()
      } else {
        Command.user.info("Aborting operation")
        return
      }
    } else if (params.keywordsToRemove != null) {
      sft.removeKeywords(params.keywordsToRemove.toSet)
    }

    if (params.keywordsToAdd != null) {
      sft.addKeywords(params.keywordsToAdd.toSet)
    }
    // Persist the modified feature type back to the data store.
    ds.updateSchema(params.featureName, sft)

    if (params.list) {
      Command.output.info("Keywords: " + ds.getSchema(sft.getTypeName).getKeywords.mkString(", "))
    }
  }
}
@Parameters(commandDescription = "Add/Remove/List keywords on an existing schema")
trait KeywordsParams extends RequiredTypeNameParam {
  // Keywords to add; `-a` may be repeated (null when the flag is absent).
  @Parameter(names = Array("-a", "--add"), description = "A keyword to add. Can be specified multiple times", splitter = classOf[KeywordParamSplitter])
  var keywordsToAdd: java.util.List[String] = _
  // Keywords to remove; `-r` may be repeated (null when the flag is absent).
  @Parameter(names = Array("-r", "--remove"), description = "A keyword to remove. Can be specified multiple times", splitter = classOf[KeywordParamSplitter])
  var keywordsToRemove: java.util.List[String] = _
  // When set, print the schema's keywords after any modifications.
  @Parameter(names = Array("-l", "--list"), description = "List all keywords on the schema")
  var list: Boolean = false
  // When set, interactively confirm and then clear every keyword on the schema.
  @Parameter(names = Array("--removeAll"), description = "Remove all keywords on the schema")
  var removeAll: Boolean = false
}
| jahhulbert-ccri/geomesa | geomesa-tools/src/main/scala/org/locationtech/geomesa/tools/status/KeywordsCommand.scala | Scala | apache-2.0 | 2,805 |
/*
* NegatumDelaunay.scala
* (Anemone-Actiniaria)
*
* Copyright (c) 2014-2022 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* contact@sciss.de
*/
package de.sciss.synth
package ugen
import de.sciss.negatum.Delaunay.TriangleIndex
import de.sciss.negatum.Speakers
import de.sciss.negatum.Speakers.{altitudeProjectionsN, selectN, tri}
import de.sciss.synth.UGenSource.Vec
/** A graph element that produces an amplitude signal from a spatial
* position (x, y).
*
* @param x horizontal position (in the `Speakers.select` space),
* normalized between 0 and 1
* @param y vertical position (in the `Speakers.select` space),
* normalized between 0 and 1
*/
final case class NegatumDelaunay(x: GE, y: GE) extends GE.Lazy {
  def rate: MaybeRate = MaybeRate.max_?(x.rate, y.rate)

  /**
   * For each Delaunay triangle, builds a boolean GE that is 1 when the point
   * (px, py) lies inside that triangle, using barycentric coordinates.
   */
  private def insideGE(px: GE, py: GE): Vec[GE] = {
    import de.sciss.synth.Import._
    val sq = tri.map { case TriangleIndex(i1, i2, i3) =>
      val v1 = selectN(i1)
      val v2 = selectN(i2)
      val v3 = selectN(i3)
      // cf. https://en.wikipedia.org/wiki/Barycentric_coordinate_system
      val dx3 = px - v3.x
      val dy3 = py - v3.y
      // det of 2x2 matrix: r1c1 * r2c2 - r1c2 * r2c1
      // where r1c1 = x1 - x3, r2c2 = y2 - y3,
      // r1c2 = x2 - x3, r2c1 = y1 - y3
      val detT = (v2.y - v3.y) * (v1.x - v3.x) + (v3.x - v2.x) * (v1.y - v3.y)
      val alpha = ((v2.y - v3.y) * dx3 + (v3.x - v2.x) * dy3) / detT
      val beta = ((v3.y - v1.y) * dx3 + (v1.x - v3.x) * dy3) / detT
      val gamma = 1.0f - alpha - beta
      // Point is inside the triangle iff all three barycentric weights are >= 0.
      alpha >= 0 & beta >= 0 & gamma >= 0
    }
    sq
  }

  /**
   * Computes one amplitude GE per speaker: for the triangle containing (px, py),
   * each corner speaker receives an amplitude derived from the point's normalized
   * position along that corner's altitude; all other speakers contribute 0.
   */
  private def ampGE(px: GE, py: GE): GE = {
    // One accumulator per speaker, starting at silence.
    val amps = Array.fill[GE](selectN.size)(Constant.C0)
    val ins = insideGE(px, py)
    import de.sciss.synth.Import._
    tri.zipWithIndex.foreach { case (TriangleIndex(i1, i2, i3), triIdx) =>
      val v1 = selectN(i1)
      val v2 = selectN(i2)
      val v3 = selectN(i3)
      // Pre-computed altitude foot points for this triangle.
      val (alt1, alt2, alt3) = altitudeProjectionsN(triIdx)
      val a1x = alt1.x
      val a1y = alt1.y
      val a2x = alt2.x
      val a2y = alt2.y
      val a3x = alt3.x
      val a3y = alt3.y
      // Normalized location of the point projected onto each altitude segment
      // (0 at the altitude foot, 1 at the corner vertex).
      val loc1 = Speakers.projectPointLineLoc(a1x, a1y, v1.x, v1.y, px, py)
      val loc2 = Speakers.projectPointLineLoc(a2x, a2y, v2.x, v2.y, px, py)
      val loc3 = Speakers.projectPointLineLoc(a3x, a3y, v3.x, v3.y, px, py)
      // sqrt gives an equal-power style amplitude curve along each altitude.
      val amp1 = loc1.sqrt
      val amp2 = loc2.sqrt
      val amp3 = loc3.sqrt
      // Gate contributions by the inside-triangle predicate (0 or 1).
      val in = ins(triIdx)
      amps(i1) += amp1 * in
      amps(i2) += amp2 * in
      amps(i3) += amp3 * in
    }
    amps.toIndexedSeq
  }

  protected def makeUGens: UGenInLike = ampGE(px = x, py = y)
}
| Sciss/AnemoneActiniaria | src/main/scala/de/sciss/synth/ugen/NegatumDelaunay.scala | Scala | gpl-3.0 | 2,853 |
/**
* Copyright (C) 2015 DANS - Data Archiving and Networked Services (info@dans.knaw.nl)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.knaw.dans.easy.solr
import java.net.URL
import com.yourmediashelf.fedora.client.FedoraCredentials
// Runtime configuration for the Solr index updater.
//   batchSize - number of documents committed to Solr per batch
//   timeout   - wait between batches, in milliseconds
//   testMode  - when true, no changes are sent to Solr
//   output    - when true, print progress/results to stdout
//   datasets  - dataset ids (or input files) to (re)index
//   solr      - client for the Solr instance
//   fedora    - client for the Fedora repository
case class Settings(batchSize: Int = 100,
                    timeout: Int = 1000,
                    testMode: Boolean = true,
                    output: Boolean = false,
                    datasets: List[String] = List(),
                    solr: SolrProvider,
                    fedora: FedoraProvider)
object Settings {

  /**
   * Factory kept for backward compatibility with EasyIngestFlow: builds a
   * non-test configuration for a single dataset from raw connection details.
   */
  def apply(fedoraCredentials: FedoraCredentials,
            dataset: String,
            solr: URL,
            userAgent: String,
           ): Settings = {
    val solrProvider = SolrProviderImpl(solr, userAgent)
    val fedoraProvider = FedoraProviderImpl(fedoraCredentials)
    new Settings(
      testMode = false,
      datasets = dataset :: Nil,
      solr = solrProvider,
      fedora = fedoraProvider)
  }
}
| DANS-KNAW/easy-update-solr-index | lib/src/main/scala/nl.knaw.dans.easy.solr/Settings.scala | Scala | apache-2.0 | 1,463 |
package cn.edu.sjtu.omnilab.livewee.kc
import com.redis.RedisClient
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import scala.collection.mutable.HashMap
/**
* Summarize user distribution in WIFI networks.
*
* @param redis the redis database connection
*/
/**
 * Summarize user distribution in WIFI networks.
 *
 * Maintains, per location, the set of user records seen within a sliding window,
 * and periodically pushes a JSON snapshot of per-location user counts to Redis.
 *
 * @param redis the redis database connection
 */
class Heatmap(redis: RedisClient) {

  // Seconds between snapshots pushed to Redis.
  final val UPDATE_FRQ = 2
  // Sliding-window length in seconds: records older than this are pruned.
  final val WINDOW_LEN = 60
  final val MAX_TIME_SHIFT = 10 * 60 // ten minutes

  // Wall-clock second of the last Redis sync.
  var lastUpdate = System.currentTimeMillis() / 1000

  case class LocationInfo(location: String, lat: String, lon: String)
  case class UserRecord(systime: Long, mac: String)

  // Per-location sets of recently seen user records.
  var hmap: HashMap[LocationInfo, Set[UserRecord]] = new HashMap[LocationInfo, Set[UserRecord]]

  /**
   * Add a new movement data point to heatmap statistics and, at most every
   * UPDATE_FRQ seconds, prune stale records and sync a snapshot to Redis.
   */
  def update(r: ManPoint): Unit = {
    val systime = System.currentTimeMillis() / 1000
    // Ignore null points and points whose timestamp lags too far behind now.
    if (r != null && systime - r.time <= MAX_TIME_SHIFT) {
      // add user record
      val key = LocationInfo(r.location, r.lat, r.lon)
      if ( !hmap.contains(key) )
        hmap.put(key, Set[UserRecord]())
      hmap(key) += UserRecord(systime, r.mac)
    }
    // update to redis
    if (systime - lastUpdate >= UPDATE_FRQ) {
      // BUG FIX: the previous code called `hmap.map { ... }` and discarded the
      // result (and dropped the keys), so stale records were never pruned.
      // Assign the pruned map back to `hmap`, preserving the keys.
      hmap = hmap.map { case (loc, users) =>
        loc -> users.filter(u => systime - u.systime < WINDOW_LEN)
      }
      syncToRedis(systime)
      lastUpdate = systime
    }
  }

  /**
   * Write a heatmap snapshot (per-location user counts plus coordinates) into
   * the Redis history list.
   */
  def syncToRedis(time: Long): Unit = {
    // TODO: replace location with lat,lat
    val stat = hmap.mapValues(_.size)
    val statJSON = stat.map{ case (loc, count) => {
      ("location" -> loc.location) ~
      ("count" -> count) ~
      ("lat" -> loc.lat) ~
      ("lon" -> loc.lon)
    }}
    val snapshot = ("time" -> time) ~ ("heatmap" -> statJSON)
    val jsonstr = compact(render(snapshot))
    redis.lpush(RedisUtils.heatmapStatusHistory, jsonstr)
  }
}
| OMNILab/LiveWee | kcwee/src/main/scala/cn/edu/sjtu/omnilab/livewee/kc/Heatmap.scala | Scala | mit | 1,896 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600e.v2
import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.ct600e.v2.retriever.CT600EBoxRetriever
/** CT600E (v2) box E21: "UK Investments (excluding controlled companies)".
 *  Optional integer input; validation requires the value, when present, to be
 *  zero or a positive integer. */
case class E21(value: Option[Int]) extends CtBoxIdentifier("UK Investments (excluding controlled companies)") with CtOptionalInteger with Input with ValidatableBox[CT600EBoxRetriever]{
  // Delegates to the shared zero-or-positive-integer validation rule.
  override def validate(boxRetriever: CT600EBoxRetriever): Set[CtValidation] = validateZeroOrPositiveInteger(this)
}
| liquidarmour/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600e/v2/E21.scala | Scala | apache-2.0 | 1,031 |
package org.bfn.ninetynineprobs
object P12 {

  /**
   * Given a run-length code list generated as specified in problem P10,
   * construct its uncompressed version.
   *
   * Each (count, element) pair expands to `count` copies of the element;
   * the order of the encoded groups is preserved, and non-positive counts
   * contribute nothing.
   **/
  def decode[T](ls : P10.t[T]) : List[T] =
    ls.flatMap { case (count, elem) => List.fill(count)(elem) }
}
| bfontaine/99Scala | src/main/scala/P12.scala | Scala | mit | 658 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.hadoop
import com.twitter.scalding._
import com.twitter.zipkin.gen.{SpanServiceName, Annotation}
import com.twitter.zipkin.hadoop.sources.{TimeGranularity, PreprocessedSpanSource}
import com.twitter.scalding.Tsv
import sources.PreprocessedSpanSource
import scala.Some
/**
* Finds traces with duplicate trace IDs
*/
class FindDuplicateTraces(args: Args) extends Job(args) with UtcDateRangeJob {

  // NOTE(review): despite the class name and scaladoc, this job selects traces
  // whose total duration reaches `maximum_duration` seconds, not duplicate ids
  // — confirm intent against callers.
  // Threshold (seconds) above which a trace is reported.
  val maxDuration = augmentString(args.required("maximum_duration")).toInt

  val result = PreprocessedSpanSource(TimeGranularity.Hour)
    .read
    // One row per span: (trace id, its annotation list).
    .mapTo(0 ->('trace_id, 'annotations)) { s: SpanServiceName =>
      (s.trace_id, s.annotations.toList)
    }.flatMap('annotations -> 'first_and_last_timestamps ) {al : List[Annotation] =>
      // Compute the min and max annotation timestamps for this span; spans
      // with no annotations are dropped (first/last keep their sentinels).
      var first : Long = if (al.length > 0) al(0).timestamp else Int.MaxValue
      var last : Long = if (al.length > 0) al(0).timestamp else -1
      al.foreach { a : Annotation =>
        val timestamp = a.timestamp
        if (timestamp < first) first = timestamp
        else if (timestamp > last) last = timestamp
      }
      if (first < Int.MaxValue && last > -1) Some(List(first, last)) else None
    }.groupBy('trace_id){ _.reduce('first_and_last_timestamps -> 'first_and_last_timestamps) { (left : List[Long], right : List[Long]) =>
      // Merge per-span [min, max] pairs into a per-trace [min, max].
      val first = if (left(0) > right(0)) right(0) else left(0)
      val last = if (left(1) > right(1)) left(1) else right(1)
      List(first, last)
    }
  }
    // Keep only traces whose span exceeds the threshold; timestamps are in
    // microseconds, hence the division to seconds.
    .filter('first_and_last_timestamps) { timestamps : List[Long] =>
      val durationInSeconds = (timestamps(1) - timestamps(0)) / 1000000
      durationInSeconds >= maxDuration
    }.project('trace_id)
    .write(Tsv(args("output")))
}
| dduvnjak/zipkin | zipkin-hadoop/src/main/scala/com/twitter/zipkin/hadoop/FindDuplicateTraces.scala | Scala | apache-2.0 | 2,316 |
package scalaxy.streams
import scala.collection.generic.CanBuildFrom
private[streams] trait ArrayOpsSinks extends ArrayBuilderSinks {
  val global: scala.reflect.api.Universe
  import global._

  /**
   * Stream sink that first materializes the stream into an array (via the
   * inherited ArrayBuilderSink) and then wraps that array in the appropriate
   * scala.collection.mutable.ArrayOps wrapper, preserving the static type of
   * the original expression.
   */
  case object ArrayOpsSink extends StreamSink
  {
    override def isFinalOnly = true
    override def isJustAWrapper = true
    override def describe = Some("ArrayOps")
    override def lambdaCount = 0
    override def subTrees = Nil

    private[this] val arrayOpsClass = "scala.collection.mutable.ArrayOps"
    // Maps each primitive element type to its specialized ArrayOps subclass,
    // avoiding boxing through the generic wrapper.
    private[this] lazy val anyValOpsClassNameByType: Map[Type, String] = Map(
      typeOf[Boolean] -> (arrayOpsClass + ".ofBoolean"),
      typeOf[Byte] -> (arrayOpsClass + ".ofByte"),
      typeOf[Char] -> (arrayOpsClass + ".ofChar"),
      typeOf[Double] -> (arrayOpsClass + ".ofDouble"),
      typeOf[Float] -> (arrayOpsClass + ".ofFloat"),
      typeOf[Int] -> (arrayOpsClass + ".ofInt"),
      typeOf[Long] -> (arrayOpsClass + ".ofLong"),
      typeOf[Short] -> (arrayOpsClass + ".ofShort"),
      typeOf[Unit] -> (arrayOpsClass + ".ofUnit")
    )

    // Returns `list` with its last element replaced by f(last).
    // NOTE(review): fails with a MatchError on an empty list — callers are
    // expected to pass the non-empty `ending` statement list.
    private def replaceLast[A](list: List[A], f: A => A): List[A] = {
      val last :: reversedRest = list.reverse
      (f(last) :: reversedRest).reverse
    }

    // Emits the array-building output, then rewrites its final statement so
    // the produced array is wrapped in the matching ArrayOps flavor.
    override def emit(input: StreamInput, outputNeeds: OutputNeeds, nextOps: OpsAndOutputNeeds): StreamOutput =
    {
      import input._

      val arrayOutput = ArrayBuilderSink.emit(input, outputNeeds, nextOps)

      val componentTpe = input.vars.tpe.dealias
      // Chooses the specialized wrapper for primitives, ofRef for reference
      // types, and falls back to Predef.genericArrayOps otherwise.
      def getResult(array: Tree) = typed(
        anyValOpsClassNameByType.get(componentTpe) match {
          case Some(primitiveOpsClass) =>
            q"new ${rootMirror.staticClass(primitiveOpsClass)}($array)"

          case _ if componentTpe <:< typeOf[AnyRef] =>
            q"new scala.collection.mutable.ArrayOps.ofRef[$componentTpe]($array)"

          case _ =>
            q"genericArrayOps[$componentTpe]($array)"
        }
      )
      arrayOutput.copy(ending = replaceLast[Tree](arrayOutput.ending, getResult(_)))
    }
  }
}
| nativelibs4java/scalaxy-streams | src/main/scala/streams/sinks/ArrayOpsSinks.scala | Scala | bsd-3-clause | 2,030 |
package org.deepdive.extraction.datastore
import scalikejdbc._
import org.deepdive.datastore.HSQLDataStore
import org.deepdive.Logging
import play.api.libs.json._
/** Cake-pattern component wiring the HSQL-backed extraction data store. */
trait HSQLExtractionDataStoreComponent extends ExtractionDataStoreComponent {
  val dataStore = new HSQLExtractionDataStore
}
/** Extraction data store backed by the HSQL JDBC data store. */
class HSQLExtractionDataStore extends JdbcExtractionDataStore with Logging {

  // Underlying JDBC data store singleton used for all operations.
  def ds = HSQLDataStore

  def init() = {
    log.debug("Initializing HSQL data store...")
  }

  /** Bulk-inserts a batch of extractor output rows into `outputRelation`
   *  within a single local transaction. */
  def addBatch(result: Iterator[JsObject], outputRelation: String) : Unit = {
    ds.DB.localTx { implicit session =>
      HSQLDataStore.bulkInsertJSON(outputRelation, result)
    }
  }
}
} | dennybritz/deepdive | src/main/scala/org/deepdive/extraction/datastore/HSQLExtractionDataStore.scala | Scala | apache-2.0 | 658 |
package com.atomist.source.git
import com.atomist.source.{ArtifactSource, ArtifactSourceException}
/** Reads [[ArtifactSource]] contents from GitHub repositories,
  * either by branch locator or by a specific sha. */
trait GitHubSourceReader {

  /**
    * Return ArtifactSource for the given repository and branch. The returned contents will
    * indicate the sha, which can be saved, but calling with the same [[com.atomist.source.ArtifactSourceLocator]]
    * will not necessarily produce the same results (the branch may have moved).
    *
    * @param id the GitHubArtifactSourceLocator
    * @return an ArtifactSource
    * @throws ArtifactSourceException if the ArtifactSource cannot be returned
    */
  @throws[ArtifactSourceException]
  def sourceFor(id: GitHubArtifactSourceLocator): ArtifactSource

  /**
    * Return the tree for the given sha. Always produces the same results.
    * Because it also implements [[GitHubShaIdentifier]], a [[GitHubArtifactSourceIdentifier]]
    * can be passed to this method, always returning the same result.
    *
    * @param id the GitHubArtifactSourceLocator
    * @return an ArtifactSource
    * @throws ArtifactSourceException if the tree for given sha cannot be returned
    */
  @throws[ArtifactSourceException]
  def treeFor(id: GitHubShaIdentifier): ArtifactSource
}
| atomist/artifact-source | src/main/scala/com/atomist/source/git/GitHubSourceReader.scala | Scala | gpl-3.0 | 1,166 |
package com.delprks.alphasorter
import com.delprks.alphasorter.AlphaSorter._
import org.specs2.mutable.Specification
/** Behavioural specs for AlphaSorter.atoz: alphabetical ordering that is
  * case-insensitive, ignores surrounding quotation marks and a leading
  * "the", sorts numeric prefixes naturally, and interleaves mapped
  * accented characters with their English equivalents. */
class AlphaSorter$Test extends Specification {

  "AlphaSorter atoz sort" should {
    "sort based on alphabetical order" in {
      val listOfStrings: List[String] = List("B string", "A string")

      listOfStrings sortBy atoz shouldEqual List("A string", "B string")
    }

    "ignore character casing" in {
      val listOfStrings: List[String] = List("B string", "A string", "a string")

      listOfStrings sortBy atoz shouldEqual List("A string", "a string", "B string")
    }

    // Covers single quotes, double quotes, backticks and curly quotes.
    "ignore quotations when sorting" in {
      val listOfStrings: List[String] = List("'B string'", "A string", "\\"D string\\"", "`E string`", "“F string”", "'C string'")

      listOfStrings sortBy atoz shouldEqual List("A string", "'B string'", "'C string'", "\\"D string\\"", "`E string`", "“F string”")
    }

    "ignore 'the' from beginning of string" in {
      val listOfStrings: List[String] = List("the B string", "The A string", "C string")

      listOfStrings sortBy atoz shouldEqual List("The A string", "the B string", "C string")
    }

    "ignore 'the' from beginning of string when it is in quotation marks" in {
      val listOfStrings: List[String] = List("1 string", "'the B string'", "\\"The A string\\"", "C string")

      listOfStrings sortBy atoz shouldEqual List("1 string", "\\"The A string\\"", "'the B string'", "C string")
    }

    // Natural (numeric) ordering: "10" sorts after "2".
    "sort the strings that start with numbers naturally" in {
      val listOfStrings: List[String] = List("1 string", "10 string", "2 string", "11 string")

      listOfStrings sortBy atoz shouldEqual List("1 string", "2 string", "10 string", "11 string")
    }

    "sort the strings that start with numbers naturally, even if they start with 'the' or are wrapped in quotations" in {
      val listOfStrings: List[String] = List("1 string", "the 10 string", "'2 string'", "the 9 string", "'the 0 string'")

      listOfStrings sortBy atoz shouldEqual List("'the 0 string'", "1 string", "'2 string'", "the 9 string", "the 10 string")
    }

    "sort the accented characters in-between English words - if an equivalent English character has been specified" in {
      val listOfStrings: List[String] = List("D string", "F string", "È string")

      listOfStrings sortBy atoz shouldEqual List("D string", "È string", "F string")
    }
  }
}
| delprks/alphasorter | src/test/scala/com/delprks/alphasorter/AlphaSorter$Test.scala | Scala | mit | 2,414 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package wvlet.airspec.runner
import sbt.testing._
import wvlet.airframe.{Design, Session}
import wvlet.airspec.runner.AirSpecSbtRunner.AirSpecConfig
import wvlet.airspec.spi.{AirSpecContext, AirSpecException}
import wvlet.log.LogSupport
import scala.util.{Failure, Success, Try}
/**
* AirSpecTaskRunner will execute a task.
*
* For each test spec (AirSpec instance), it will create a global airframe session, which can be configured with
* configure(Design).
*
* For each test method in the AirSpec instance, it will create a child session so that users can manage test-method
* local instances, which will be discarded after the completion of the test method.
*/
private[airspec] class AirSpecTaskRunner(
    taskDef: TaskDef,
    config: AirSpecConfig,
    taskLogger: AirSpecLogger,
    eventHandler: EventHandler,
    classLoader: ClassLoader
) extends LogSupport {

  import wvlet.airspec._

  /**
   * Entry point invoked by the sbt test interface: instantiates the spec
   * class or companion object named by `taskDef` and runs its test
   * definitions. Any failure (including instantiation failure) is reported
   * to sbt as a single "<spec>" event rather than being rethrown.
   */
  def runTask: Unit = {
    val testClassName = taskDef.fullyQualifiedName()
    val leafName = AirSpecSpi.leafClassName(AirSpecSpi.decodeClassName(testClassName))
    val startTimeNanos = System.nanoTime()
    try {
      // Start a background log level scanner thread. If a thread is already running, reuse it.
      compat.withLogScanner {
        trace(s"Processing task: ${taskDef}")
        // Getting an instance of AirSpec
        val testObj = taskDef.fingerprint() match {
          // In Scala.js we cannot use pattern match for objects like AirSpecObjectFingerPrint, so using isModule here.
          case c: SubclassFingerprint if c.isModule() =>
            compat.findCompanionObjectOf(testClassName, classLoader)
          case _ =>
            compat.newInstanceOf(testClassName, classLoader)
        }
        testObj match {
          case Some(spec: AirSpecSpi) =>
            run(parentContext = None, spec, spec.testDefinitions)
          case _ =>
            taskLogger.logSpecName(leafName, indentLevel = 0)
            throw new IllegalStateException(
              s"${testClassName} needs to be a class or object extending AirSpec: ${testObj.getClass}"
            )
        }
      }
    } catch {
      // Broad catch is intentional: every failure must surface as an sbt
      // test event, otherwise sbt would hang waiting for a result.
      case e: Throwable =>
        taskLogger.logSpecName(leafName, indentLevel = 0)
        val cause = compat.findCause(e)
        val status = AirSpecException.classifyException(cause)
        // Unknown error
        val event =
          AirSpecEvent(taskDef, "<spec>", status, new OptionalThrowable(cause), System.nanoTime() - startTimeNanos)
        taskLogger.logEvent(event)
        eventHandler.handle(event)
    }
  }

  /**
   * Filters `testDefs` by the optional test-name pattern in `config` and
   * runs the spec if any definitions remain. Warns when the spec declares
   * no tests at all.
   */
  private[airspec] def run(parentContext: Option[AirSpecContext], spec: AirSpecSpi, testDefs: Seq[AirSpecDef]): Unit = {
    if (testDefs.isEmpty) {
      val name = specName(parentContext, spec)
      warn(s"No test definition is found in ${name}. Add at least one test(...) method call.")
    }
    val selectedMethods =
      config.pattern match {
        case Some(regex) =>
          // Find matching methods
          testDefs.filter { m =>
            // Concatenate (parent class name)? + class name + method name for handy search
            val fullName = s"${specName(parentContext, spec)}.${m.name}"
            regex.findFirstIn(fullName).isDefined
          }
        case None =>
          testDefs
      }

    if (selectedMethods.nonEmpty) {
      runSpec(parentContext, spec, selectedMethods)
    }
  }

  // Builds the dotted spec name, prefixed with the parent spec name when this
  // spec is nested inside another spec's test.
  private def specName(parentContext: Option[AirSpecContext], spec: AirSpecSpi): String = {
    val parentName = parentContext.map(x => s"${x.specName}.").getOrElse("")
    s"${parentName}${spec.leafSpecName}"
  }

  /**
   * Runs the lifecycle of one spec: beforeAll, build the spec-global design
   * and Airframe session, run every selected test definition, then afterAll
   * (always, via finally).
   */
  private def runSpec(
      parentContext: Option[AirSpecContext],
      spec: AirSpecSpi,
      targetTestDefs: Seq[AirSpecDef]
  ): Unit = {
    val indentLevel = parentContext.map(_.indentLevel + 1).getOrElse(0)
    taskLogger.logSpecName(spec.leafSpecName, indentLevel = indentLevel)

    try {
      // Start the spec
      spec.callBeforeAll

      // Configure the global spec design
      var d = Design.newDesign.noLifeCycleLogging
      d = d + spec.callDesign

      // Create a global Airframe session
      val globalSession =
        parentContext
          .map(_.currentSession.newChildSession(d))
          .getOrElse { d.newSessionBuilder.noShutdownHook.build } // Do not register JVM shutdown hooks

      val localDesign = spec.callLocalDesign

      globalSession.start {
        for (m <- targetTestDefs) {
          runSingle(parentContext, globalSession, spec, m, isLocal = false, design = localDesign)
        }
      }
    } finally {
      spec.callAfterAll
    }
  }

  // Names of nested contexts whose test name has already been printed.
  // Accessed under `synchronized` in runSingle.
  private var displayedContext = Set.empty[String]

  /**
   * Runs a single test definition inside a child session: before/after
   * callbacks, a test-local design, and result reporting to sbt. Failures
   * are captured via Try and converted into test events instead of being
   * propagated.
   */
  private[airspec] def runSingle(
      parentContext: Option[AirSpecContext],
      globalSession: Session,
      spec: AirSpecSpi,
      m: AirSpecDef,
      isLocal: Boolean,
      design: Design
  ): Unit = {
    val ctxName = parentContext.map(ctx => s"${ctx.fullSpecName}.${ctx.testName}").getOrElse("N/A")
    val indentLevel = parentContext.map(_.indentLevel + 1).getOrElse(0)
    // Show the inner test name
    if (isLocal) {
      parentContext.map { ctx =>
        // Log each enclosing test name only once, even when several local
        // tests run under the same context.
        synchronized {
          if (!displayedContext.contains(ctxName)) {
            taskLogger.logTestName(ctx.testName, indentLevel = (indentLevel - 1).max(0))
            displayedContext += ctxName
          }
        }
      }
    }

    spec.callBefore
    // Configure the test-local design
    val childDesign = design + m.design

    val startTimeNanos = System.nanoTime()
    var hadChildTask = false
    // Create a test-method local child session
    val result = globalSession.withChildSession(childDesign) { childSession =>
      val context =
        new AirSpecContextImpl(
          this,
          parentContext = parentContext,
          currentSpec = spec,
          testName = m.name,
          currentSession = childSession
        )
      spec.pushContext(context)
      // Wrap the execution with Try[_] to report the test result to the event handler
      Try {
        try {
          m.run(context, childSession)
        } finally {
          // after/popContext must run even when the test body throws.
          spec.callAfter
          spec.popContext
          // If the test method had any child task, update the flag
          hadChildTask |= context.hasChildTask
        }
      }
    }

    // Report the test result
    val durationNanos = System.nanoTime() - startTimeNanos

    val (status, throwableOpt) = result match {
      case Success(x) =>
        (Status.Success, new OptionalThrowable())
      case Failure(ex) =>
        val status = AirSpecException.classifyException(ex)
        (status, new OptionalThrowable(compat.findCause(ex)))
    }

    val e = AirSpecEvent(taskDef, m.name, status, throwableOpt, durationNanos)
    taskLogger.logEvent(e, indentLevel = indentLevel, showTestName = !hadChildTask)
    eventHandler.handle(e)
  }
}
| wvlet/airframe | airspec/src/main/scala/wvlet/airspec/runner/AirSpecTaskRunner.scala | Scala | apache-2.0 | 7,421 |
package components
import helpers.TestSpec
import components.Dsl._
import io.handlers.IOHandler
import org.mockito.Mockito._
import org.scalatest.mock.MockitoSugar.mock
import org.scalatest.MustMatchers
import scala.util.Random
/** Specs for Model construction and manipulation via the Dsl:
  * validation of constraints/objective, adding constraints, listing
  * variables, and replacing the objective. */
class ModelSpec extends TestSpec {
  describe("Model") {
    it("should contain at least one constraint") {
      val objective = max(2("a"))

      // Constructing a Model with no constraints must be rejected.
      val expectedError = the [IllegalArgumentException] thrownBy Model(Set.empty, Some(objective))
      expectedError.getMessage should endWith("At least one constraint is required.")
    }

    it("should not contain terms in the objective that do not exist in the constraints") {
      // "c" appears in the objective but in no constraint.
      val expectedError = the [IllegalArgumentException] thrownBy {
        max(1 ("a") + 2 ("b") - 3 ("c")) subjectTo(
          2 ("a") + 1 ("b") <= 3,
          1 ("a") - 2 ("b") == 4
        )
      }
      expectedError.getMessage should endWith("Unused variable in objective function.")
    }

    it("should allow new constraints to be added") {
      val model = max(1("a") + 2("b")) subjectTo (
        1("a") + 3("b") <= 5,
        3("a") - 1("b") == 0
      )

      // `+` returns a new model with the extra constraint appended.
      val newModel = model + (1("a") - 5("b") >= 0)

      val expectedModel = max(1("a") + 2("b")) subjectTo (
        1("a") + 3("b") <= 5,
        3("a") - 1("b") == 0,
        1("a") - 5("b") >= 0
      )
      newModel shouldEqual expectedModel
    }

    it("should return all the variables used") {
      val model = max(1("a") + 2("b")) subjectTo (
        1("a") + 3("b") + 1("c") <= 5,
        3("a") - 1("b") == 0,
        1("c") <= 10
      )

      val expectedVariables: Set[Variable] = Set("a", "b", "c")
      model.variables shouldEqual expectedVariables
    }

    it("should allow for the objective function to be modified") {
      val model = max(1("a") + 2("b")) subjectTo (
        1("a") + 3("b") <= 5,
        3("a") - 1("b") == 0
      )
      val newObjective = min(1("a") - 2("b"))

      // withObjective swaps the objective while keeping the constraints.
      val newModel = model withObjective newObjective

      val expectedModel = min(1("a") - 2("b")) subjectTo (
        1("a") + 3("b") <= 5,
        3("a") - 1("b") == 0
      )
      newModel shouldEqual expectedModel
    }
  }

  describe("can be built from it's components") {
    it("case 1") {
      val constraints = Seq(
        2("a") - 3("b") == 3,
        1("a") + 2("b") <= 3
      )
      val objective = max(1("a") + 1("b"))

      // subjectTo(varargs) must be equivalent to the direct constructor.
      val model = objective subjectTo(constraints: _*)

      model shouldEqual Model(constraints.toSet, Some(objective))
    }
  }
}
| akwanashie/constraints-dsl | src/test/scala/components/ModelSpec.scala | Scala | mit | 2,540 |
package org.scalacoin.protocol.transaction
/**
* Created by chris on 12/26/15.
*/
/** A reference to a specific output of a previous transaction:
  * the transaction id plus the index of the output within it. */
trait TransactionOutPoint {
  // Id of the transaction containing the referenced output.
  def txId : String
  // Index of the referenced output within that transaction.
  def vout : Int
}
case class TransactionOutPointImpl(txId : String, vout : Int) extends TransactionOutPoint | scalacoin/scalacoin | src/main/scala/org/scalacoin/protocol/transaction/TransactionOutPoint.scala | Scala | mit | 242 |
// IDE type-inference test fixture: the /*start*/ ... /*end*/ markers delimit
// the expression under test, and the trailing comment line holds the expected
// type. `Bar` is presumably resolved by the test harness — do not "fix" it.
object SCL9445 {
  /*start*/Bar.apply(123)/*end*/
}
//SCL9445.Bar
/**
* Copyright © 2015, BoldRadius Solutions
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.boldradius.akka_exchange.journal.util
import akka.actor.{Props, ActorSystem}
import akka.cluster.Cluster
import com.boldradius.akka_exchange.journal.{SharedJournalFinder, SharedJournal}
import com.boldradius.akka_exchange.util.ExchangeNodeBootable
import scala.collection.breakOut
/** Base bootable for exchange nodes that use a shared persistence journal:
  * on construction it spawns a SharedJournalFinder actor. */
abstract class JournaledExchangeNodeBootable extends ExchangeNodeBootable {

  // Flag read by the base class — presumably tells it this node must locate
  // the shared journal; TODO confirm against ExchangeNodeBootable.
  val findJournal: Boolean = true

  // Actor that performs the shared-journal discovery, registered under the
  // well-known name SharedJournalFinder.name.
  val persistentJournal = {
    println("Booting up Journal Finder...")
    system.actorOf(
      Props(
        classOf[SharedJournalFinder]
      ),
      SharedJournalFinder.name
    )
  }
}
| rahulkavale/akka-exchange | journal/src/main/scala/com/boldradius/akka_exchange/journal/util/JournaledExchangeNodeBootable.scala | Scala | apache-2.0 | 1,208 |
package com.pygmalios.reactiveinflux
import java.util.Date
import org.joda.time.{DateTime, DateTimeZone, Instant}
/**
 * Epoch time with nanosecond precision, split into whole seconds since the
 * epoch plus a non-negative nanosecond adjustment.
 */
trait PointTime extends Serializable {
  /**
   * The number of seconds from the epoch of 1970-01-01T00:00:00Z.
   */
  def seconds: Long

  /**
   * The number of nanoseconds, later along the time-line, from the seconds field.
   * This is always positive, and never exceeds 999,999,999.
   */
  def nanos: Int
}
object PointTime {

  /** Creates a PointTime from milliseconds since the epoch.
   *
   *  BUG FIX: truncating division (`/`, `%`) produced a negative `nanos` for
   *  pre-epoch instants (e.g. -1 ms gave seconds = 0, nanos = -1000000),
   *  violating the PointTime contract that nanos lies in [0, 999999999].
   *  Floor division/modulo keep the adjustment non-negative for all inputs. */
  def ofEpochMilli(epochMilli: Long): PointTime =
    ofEpochSecond(Math.floorDiv(epochMilli, 1000L), Math.floorMod(epochMilli, 1000L).toInt * 1000000)

  /** Creates a PointTime on an exact second boundary. */
  def ofEpochSecond(epochSecond: Long): PointTime = ofEpochSecond(epochSecond, 0)

  /** Creates a PointTime from seconds plus a nanosecond adjustment.
   *  NOTE(review): the adjustment is not normalized or validated here;
   *  callers are trusted to pass a value in [0, 999999999]. */
  def ofEpochSecond(epochSecond: Long, nanoAdjustment: Int): PointTime =
    SimplePointTime(epochSecond, nanoAdjustment)

  // Implicit conversions from common date/time types; all interpret the
  // source instant in UTC.
  implicit def apply(dateTime: DateTime): PointTime = ofEpochMilli(dateTime.withZone(DateTimeZone.UTC).getMillis)
  implicit def apply(instant: Instant): PointTime = apply(instant.toDateTime)
  implicit def apply(date: Date): PointTime = apply(new DateTime(date))

  // Millisecond precision only: sub-millisecond nanos are truncated.
  implicit def pointTimeToDateTime(pointTime: PointTime): DateTime =
    new DateTime(pointTime.seconds*1000 + (pointTime.nanos/1000000), DateTimeZone.UTC)
}
private case class SimplePointTime(seconds: Long, nanos: Int) extends PointTime | pygmalios/reactiveinflux | src/main/scala/com/pygmalios/reactiveinflux/PointTime.scala | Scala | apache-2.0 | 1,337 |
/*
* Copyright 2014 Treode, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.treode.notify
import Notification.{Errors, Result}
/** Accumulate errors to report them. An alternative to throwing an exception on the first mistake,
* and forcing the caller to resolve errors one-by-one.
*
* Inspired by [[http://martinfowler.com/articles/replaceThrowWithNotification.html Martin Fowler]].
*/
sealed abstract class Notification [+A] {

  /** The computed result.
    * @return The result.
    * @throws NoSuchElementException If this notification carries errors.
    */
  def get: A

  /** Accumulated error messages; empty when a result is present.
    * @return The error messages.
    */
  def errors: Seq [Message]

  /** Are there any errors?
    * @return True if there are errors.
    */
  def hasErrors: Boolean =
    errors.nonEmpty

  /** Transform the result; errors pass through untouched. */
  def map [B] (f: A => B): Notification [B] =
    if (hasErrors) Errors (errors) else Result (f (get))

  /** Chain a dependent computation; errors pass through untouched. */
  def flatMap [B] (f: A => Notification [B]): Notification [B] =
    if (hasErrors) Errors (errors) else f (get)

  /** No-op; the predicate is deliberately ignored. Present so `if` guards
    * in for-comprehensions compile. */
  def filter (p: A => Boolean): Notification [A] =
    this

  /** No-op; see `filter`. */
  def withFilter (p: A => Boolean): Notification [A] =
    this
}
object Notification {

  /** Errors were found; we have messages. */
  case class Errors (errors: Seq [Message]) extends Notification [Nothing] {

    require (!errors.isEmpty, "Must have error messages.")

    def get = throw new NoSuchElementException ("Errors.get")
  }

  /** No errors were found; we have a result. */
  case class Result [A] (get: A) extends Notification [A] {

    def errors = Seq.empty
  }

  /** Collects errors and yields `Errors` or `Result` accordingly. */
  class Builder {

    private var list = List.empty [Message]

    /** Add a message.
      * @param message The message to add.
      */
    def add (message: Message): Unit =
      list ::= message

    /** Add messages, preserving their given order.
      * @param messages The messages to add.
      */
    def add (messages: Seq [Message]): Unit =
      // BUG FIX: this used to prepend the whole batch with `list :::= ...`;
      // since `result` reverses the accumulator, a batch came out in reverse
      // order relative to messages added one at a time. Prepending each
      // message individually keeps the batch order intact.
      messages foreach (message => list ::= message)

    /** Are there any errors?
      * @return True if there are errors.
      */
    def hasErrors: Boolean =
      !list.isEmpty

    /** Get the errors.
      * @return `Errors`
      * @throws IllegalArgumentException If there are no errors.
      */
    def result [A]: Notification [A] =
      Errors (list.reverse)

    /** Get the appropriate notification.
      * @param v The result value, if no errors were found.
      * @return `Errors` if this builder accumulated any, otherwise `Result`.
      */
    def result [A] (v: A): Notification [A] =
      if (!hasErrors)
        Result (v)
      else
        Errors (list.reverse)
  }

  def newBuilder: Builder = new Builder

  /** Failure.
    * @param messages The error messages.
    * @return `Errors`
    */
  def errors [A] (messages: Message*): Notification [A] =
    Errors (messages)

  /** Success!
    * @param v The result value, if no errors were found.
    * @return `Result`
    */
  def result [A] (v: A): Notification [A] =
    Result (v)

  /** Success, with no interesting value.
    * BUG FIX: was `Result ((), Seq.empty)` — a two-argument call to the
    * single-argument `Result`, which cannot type-check as
    * `Notification [Unit]`; its scaladoc also described an unrelated
    * `close` parameter.
    * @return `Result`
    */
  def unit: Notification [Unit] =
    Result (())

  /** Yield all values if all were successful, otherwise combine error messages of all failures. */
  def latch [A, B] (a: Notification [A], b: Notification [B]): Notification [(A, B)] = {
    val errors = a.errors ++ b.errors // was an unnecessary `var`
    if (errors.isEmpty)
      Result ((a.get, b.get))
    else
      Errors (errors)
  }

  /** Yield all values if all were successful, otherwise combine error messages of all failures. */
  def latch [A, B, C] (a: Notification [A], b: Notification [B], c: Notification [C]): Notification [(A, B, C)] = {
    val errors = a.errors ++ b.errors ++ c.errors
    if (errors.isEmpty)
      Result ((a.get, b.get, c.get))
    else
      Errors (errors)
  }
}
| Treode/store | core/src/com/treode/notify/Notification.scala | Scala | apache-2.0 | 4,503 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.command
import java.io.File
import java.net.URI
import java.nio.file.FileSystems
import scala.collection.mutable.ArrayBuffer
import scala.util.Try
import scala.util.control.NonFatal
import org.apache.hadoop.fs.Path
import org.apache.spark.sql.{AnalysisException, Row, SparkSession}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.{NoSuchPartitionException, UnresolvedAttribute}
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.catalog.CatalogTableType._
import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference}
import org.apache.spark.sql.catalyst.plans.logical.Histogram
import org.apache.spark.sql.catalyst.util.{escapeSingleQuotedString, quoteIdentifier}
import org.apache.spark.sql.execution.datasources.{DataSource, PartitioningUtils}
import org.apache.spark.sql.execution.datasources.csv.CSVFileFormat
import org.apache.spark.sql.execution.datasources.json.JsonFileFormat
import org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.sql.util.SchemaUtils
import org.apache.spark.util.Utils
/**
* A command to create a table with the same definition of the given existing table.
* In the target table definition, the table comment is always empty but the column comments
* are identical to the ones defined in the source table.
*
* The CatalogTable attributes copied from the source table are storage(inputFormat, outputFormat,
* serde, compressed, properties), schema, provider, partitionColumnNames, bucketSpec.
*
* The syntax of using this command in SQL is:
* {{{
* CREATE TABLE [IF NOT EXISTS] [db_name.]table_name
* LIKE [other_db_name.]existing_table_name [locationSpec]
* }}}
*/
case class CreateTableLikeCommand(
    targetTable: TableIdentifier,
    sourceTable: TableIdentifier,
    location: Option[String],
    ifNotExists: Boolean) extends RunnableCommand {

  override def run(sparkSession: SparkSession): Seq[Row] = {
    val catalog = sparkSession.sessionState.catalog
    // Works for both temp views and permanent tables.
    val sourceTableDesc = catalog.getTempViewOrPermanentTableMetadata(sourceTable)

    // Views have no storage provider of their own, so materialize the copy
    // with the session's default data source.
    val newProvider = if (sourceTableDesc.tableType == CatalogTableType.VIEW) {
      Some(sparkSession.sessionState.conf.defaultDataSourceName)
    } else {
      sourceTableDesc.provider
    }

    // If the location is specified, we create an external table internally.
    // Otherwise create a managed table.
    val tblType = if (location.isEmpty) CatalogTableType.MANAGED else CatalogTableType.EXTERNAL

    // Copy schema, partitioning and bucketing from the source; the table
    // comment is intentionally not carried over (see class scaladoc).
    val newTableDesc =
      CatalogTable(
        identifier = targetTable,
        tableType = tblType,
        storage = sourceTableDesc.storage.copy(
          locationUri = location.map(CatalogUtils.stringToURI(_))),
        schema = sourceTableDesc.schema,
        provider = newProvider,
        partitionColumnNames = sourceTableDesc.partitionColumnNames,
        bucketSpec = sourceTableDesc.bucketSpec)

    catalog.createTable(newTableDesc, ifNotExists)
    Seq.empty[Row]
  }
}
// TODO: move the rest of the table commands from ddl.scala to this file
/**
 * A command to create a table from a fully-specified [[CatalogTable]] definition.
 *
 * Note: This is currently used only for creating Hive tables.
 * This is not intended for temporary tables.
 *
 * The syntax of using this command in SQL is:
 * {{{
 *   CREATE [EXTERNAL] TABLE [IF NOT EXISTS] [db_name.]table_name
 *   [(col1 data_type [COMMENT col_comment], ...)]
 *   [COMMENT table_comment]
 *   [PARTITIONED BY (col3 data_type [COMMENT col_comment], ...)]
 *   [CLUSTERED BY (col1, ...) [SORTED BY (col1 [ASC|DESC], ...)] INTO num_buckets BUCKETS]
 *   [SKEWED BY (col1, col2, ...) ON ((col_value, col_value, ...), ...)
 *   [STORED AS DIRECTORIES]
 *   [ROW FORMAT row_format]
 *   [STORED AS file_format | STORED BY storage_handler_class [WITH SERDEPROPERTIES (...)]]
 *   [LOCATION path]
 *   [TBLPROPERTIES (property_name=property_value, ...)]
 *   [AS select_statement];
 * }}}
 */
case class CreateTableCommand(
    table: CatalogTable,
    ignoreIfExists: Boolean) extends RunnableCommand {

  override def run(sparkSession: SparkSession): Seq[Row] = {
    // Delegate straight to the session catalog; the command itself yields no rows.
    val sessionCatalog = sparkSession.sessionState.catalog
    sessionCatalog.createTable(table, ignoreIfExists)
    Seq.empty[Row]
  }
}
/**
* A command that renames a table/view.
*
* The syntax of this command is:
* {{{
* ALTER TABLE table1 RENAME TO table2;
* ALTER VIEW view1 RENAME TO view2;
* }}}
*/
case class AlterTableRenameCommand(
oldName: TableIdentifier,
newName: TableIdentifier,
isView: Boolean)
extends RunnableCommand {
override def run(sparkSession: SparkSession): Seq[Row] = {
val catalog = sparkSession.sessionState.catalog
// If this is a temp view, just rename the view.
// Otherwise, if this is a real table, we also need to uncache and invalidate the table.
if (catalog.isTemporaryTable(oldName)) {
catalog.renameTable(oldName, newName)
} else {
val table = catalog.getTableMetadata(oldName)
// Rejects the command when `isView` does not match the actual object kind
// (e.g. ALTER VIEW on a table).
DDLUtils.verifyAlterTableType(catalog, table, isView)
// If an exception is thrown here we can just assume the table is uncached;
// this can happen with Hive tables when the underlying catalog is in-memory.
val wasCached = Try(sparkSession.catalog.isCached(oldName.unquotedString)).getOrElse(false)
if (wasCached) {
try {
sparkSession.catalog.uncacheTable(oldName.unquotedString)
} catch {
// Best-effort: a failure to uncache must not abort the rename itself.
case NonFatal(e) => log.warn(e.toString, e)
}
}
// Invalidate the table last, otherwise uncaching the table would load the logical plan
// back into the hive metastore cache
catalog.refreshTable(oldName)
catalog.renameTable(oldName, newName)
// Restore the caching state under the new name so callers observe no change.
if (wasCached) {
sparkSession.catalog.cacheTable(newName.unquotedString)
}
}
Seq.empty[Row]
}
}
/**
* A command that add columns to a table
* The syntax of using this command in SQL is:
* {{{
* ALTER TABLE table_identifier
* ADD COLUMNS (col_name data_type [COMMENT col_comment], ...);
* }}}
*/
case class AlterTableAddColumnsCommand(
table: TableIdentifier,
colsToAdd: Seq[StructField]) extends RunnableCommand {
override def run(sparkSession: SparkSession): Seq[Row] = {
val catalog = sparkSession.sessionState.catalog
// Reject views, temporary tables, and unsupported datasource formats up front.
val catalogTable = verifyAlterTableAddColumn(sparkSession.sessionState.conf, catalog, table)
// Best-effort uncache: the cached plan carries the old schema, so it must be dropped,
// but a failure to uncache should not abort the ALTER itself.
try {
sparkSession.catalog.uncacheTable(table.quotedString)
} catch {
case NonFatal(e) =>
log.warn(s"Exception when attempting to uncache table ${table.quotedString}", e)
}
catalog.refreshTable(table)
// The new column names must not collide with any existing column
// (case sensitivity per the session config).
SchemaUtils.checkColumnNameDuplication(
(colsToAdd ++ catalogTable.schema).map(_.name),
"in the table definition of " + table.identifier,
conf.caseSensitiveAnalysis)
DDLUtils.checkDataColNames(catalogTable, colsToAdd.map(_.name))
// New columns are appended after the existing data columns.
catalog.alterTableDataSchema(table, StructType(catalogTable.dataSchema ++ colsToAdd))
Seq.empty[Row]
}
/**
* ALTER TABLE ADD COLUMNS command does not support temporary view/table,
* view, or datasource table with text, orc formats or external provider.
* For datasource table, it currently only supports parquet, json, csv.
*
* @return the resolved catalog metadata for `table` when the command is allowed.
*/
private def verifyAlterTableAddColumn(
conf: SQLConf,
catalog: SessionCatalog,
table: TableIdentifier): CatalogTable = {
val catalogTable = catalog.getTempViewOrPermanentTableMetadata(table)
if (catalogTable.tableType == CatalogTableType.VIEW) {
throw new AnalysisException(
s"""
|ALTER ADD COLUMNS does not support views.
|You must drop and re-create the views for adding the new columns. Views: $table
""".stripMargin)
}
if (DDLUtils.isDatasourceTable(catalogTable)) {
DataSource.lookupDataSource(catalogTable.provider.get, conf).newInstance() match {
// For datasource table, this command can only support the following File format.
// TextFileFormat only default to one column "value"
// Hive type is already considered as hive serde table, so the logic will not
// come in here.
case _: JsonFileFormat | _: CSVFileFormat | _: ParquetFileFormat =>
// ORC is matched by class name because two ORC implementations may be in use.
case s if s.getClass.getCanonicalName.endsWith("OrcFileFormat") =>
case s =>
throw new AnalysisException(
s"""
|ALTER ADD COLUMNS does not support datasource table with type $s.
|You must drop and re-create the table for adding the new columns. Tables: $table
""".stripMargin)
}
}
catalogTable
}
}
/**
* A command that loads data into a Hive table.
*
* The syntax of this command is:
* {{{
* LOAD DATA [LOCAL] INPATH 'filepath' [OVERWRITE] INTO TABLE tablename
* [PARTITION (partcol1=val1, partcol2=val2 ...)]
* }}}
*/
case class LoadDataCommand(
table: TableIdentifier,
path: String,
isLocal: Boolean,
isOverwrite: Boolean,
partition: Option[TablePartitionSpec]) extends RunnableCommand {
override def run(sparkSession: SparkSession): Seq[Row] = {
val catalog = sparkSession.sessionState.catalog
val targetTable = catalog.getTableMetadata(table)
val tableIdentwithDB = targetTable.identifier.quotedString
// LOAD DATA only applies to Hive serde tables: views and datasource tables are rejected.
if (targetTable.tableType == CatalogTableType.VIEW) {
throw new AnalysisException(s"Target table in LOAD DATA cannot be a view: $tableIdentwithDB")
}
if (DDLUtils.isDatasourceTable(targetTable)) {
throw new AnalysisException(
s"LOAD DATA is not supported for datasource tables: $tableIdentwithDB")
}
// The partition spec must exactly match the table's partitioning: present iff the table
// is partitioned, same column count, and only declared partition columns referenced.
if (targetTable.partitionColumnNames.nonEmpty) {
if (partition.isEmpty) {
throw new AnalysisException(s"LOAD DATA target table $tableIdentwithDB is partitioned, " +
s"but no partition spec is provided")
}
if (targetTable.partitionColumnNames.size != partition.get.size) {
throw new AnalysisException(s"LOAD DATA target table $tableIdentwithDB is partitioned, " +
s"but number of columns in provided partition spec (${partition.get.size}) " +
s"do not match number of partitioned columns in table " +
s"(${targetTable.partitionColumnNames.size})")
}
partition.get.keys.foreach { colName =>
if (!targetTable.partitionColumnNames.contains(colName)) {
throw new AnalysisException(s"LOAD DATA target table $tableIdentwithDB is partitioned, " +
s"but the specified partition spec refers to a column that is not partitioned: " +
s"'$colName'")
}
}
} else {
if (partition.nonEmpty) {
throw new AnalysisException(s"LOAD DATA target table $tableIdentwithDB is not " +
s"partitioned, but a partition spec was provided.")
}
}
// Resolve the input path to a URI, verifying it exists. Local paths may use a filename
// wildcard (glob on the last segment only); non-local paths are completed with the
// cluster default FS scheme/authority and a /user/<name> prefix, following Hive.
val loadPath =
if (isLocal) {
val uri = Utils.resolveURI(path)
val file = new File(uri.getPath)
val exists = if (file.getAbsolutePath.contains("*")) {
val fileSystem = FileSystems.getDefault
val dir = file.getParentFile.getAbsolutePath
// Wildcards are only allowed in the filename, not in directory components.
if (dir.contains("*")) {
throw new AnalysisException(
s"LOAD DATA input path allows only filename wildcard: $path")
}
// Note that special characters such as "*" on Windows are not allowed as a path.
// Calling `WindowsFileSystem.getPath` throws an exception if there are in the path.
val dirPath = fileSystem.getPath(dir)
val pathPattern = new File(dirPath.toAbsolutePath.toString, file.getName).toURI.getPath
val safePathPattern = if (Utils.isWindows) {
// On Windows, the pattern should not start with slashes for absolute file paths.
pathPattern.stripPrefix("/")
} else {
pathPattern
}
val files = new File(dir).listFiles()
if (files == null) {
// Parent directory is missing or unreadable: treat as "no match".
false
} else {
// The glob matches when at least one file in the directory satisfies it.
val matcher = fileSystem.getPathMatcher("glob:" + safePathPattern)
files.exists(f => matcher.matches(fileSystem.getPath(f.getAbsolutePath)))
}
} else {
new File(file.getAbsolutePath).exists()
}
if (!exists) {
throw new AnalysisException(s"LOAD DATA input path does not exist: $path")
}
uri
} else {
val uri = new URI(path)
val hdfsUri = if (uri.getScheme() != null && uri.getAuthority() != null) {
uri
} else {
// Follow Hive's behavior:
// If no schema or authority is provided with non-local inpath,
// we will use hadoop configuration "fs.defaultFS".
val defaultFSConf = sparkSession.sessionState.newHadoopConf().get("fs.defaultFS")
val defaultFS = if (defaultFSConf == null) {
new URI("")
} else {
new URI(defaultFSConf)
}
val scheme = if (uri.getScheme() != null) {
uri.getScheme()
} else {
defaultFS.getScheme()
}
val authority = if (uri.getAuthority() != null) {
uri.getAuthority()
} else {
defaultFS.getAuthority()
}
if (scheme == null) {
throw new AnalysisException(
s"LOAD DATA: URI scheme is required for non-local input paths: '$path'")
}
// Follow Hive's behavior:
// If LOCAL is not specified, and the path is relative,
// then the path is interpreted relative to "/user/<username>"
val uriPath = uri.getPath()
val absolutePath = if (uriPath != null && uriPath.startsWith("/")) {
uriPath
} else {
s"/user/${System.getProperty("user.name")}/$uriPath"
}
new URI(scheme, authority, absolutePath, uri.getQuery(), uri.getFragment())
}
val hadoopConf = sparkSession.sessionState.newHadoopConf()
val srcPath = new Path(hdfsUri)
val fs = srcPath.getFileSystem(hadoopConf)
if (!fs.exists(srcPath)) {
throw new AnalysisException(s"LOAD DATA input path does not exist: $path")
}
hdfsUri
}
// Hand off to the catalog: partition-level load when a spec was given, table-level otherwise.
if (partition.nonEmpty) {
catalog.loadPartition(
targetTable.identifier,
loadPath.toString,
partition.get,
isOverwrite,
inheritTableSpecs = true,
isSrcLocal = isLocal)
} else {
catalog.loadTable(
targetTable.identifier,
loadPath.toString,
isOverwrite,
isSrcLocal = isLocal)
}
// Refresh the metadata cache to ensure the data visible to the users
catalog.refreshTable(targetTable.identifier)
// Keep table statistics in sync with the newly loaded data.
CommandUtils.updateTableStats(sparkSession, targetTable)
Seq.empty[Row]
}
}
/**
* A command to truncate table.
*
* The syntax of this command is:
* {{{
* TRUNCATE TABLE tablename [PARTITION (partcol1=val1, partcol2=val2 ...)]
* }}}
*/
case class TruncateTableCommand(
tableName: TableIdentifier,
partitionSpec: Option[TablePartitionSpec]) extends RunnableCommand {
override def run(spark: SparkSession): Seq[Row] = {
val catalog = spark.sessionState.catalog
val table = catalog.getTableMetadata(tableName)
val tableIdentWithDB = table.identifier.quotedString
// TRUNCATE is only legal on managed tables; external tables and views are rejected,
// as is a PARTITION clause on a non-partitioned table.
if (table.tableType == CatalogTableType.EXTERNAL) {
throw new AnalysisException(
s"Operation not allowed: TRUNCATE TABLE on external tables: $tableIdentWithDB")
}
if (table.tableType == CatalogTableType.VIEW) {
throw new AnalysisException(
s"Operation not allowed: TRUNCATE TABLE on views: $tableIdentWithDB")
}
if (table.partitionColumnNames.isEmpty && partitionSpec.isDefined) {
throw new AnalysisException(
s"Operation not allowed: TRUNCATE TABLE ... PARTITION is not supported " +
s"for tables that are not partitioned: $tableIdentWithDB")
}
if (partitionSpec.isDefined) {
DDLUtils.verifyPartitionProviderIsHive(spark, table, "TRUNCATE TABLE ... PARTITION")
}
// Collect the filesystem locations to wipe: the table root for unpartitioned tables,
// or the locations of the partitions matching the (possibly partial) spec.
val partCols = table.partitionColumnNames
val locations =
if (partCols.isEmpty) {
Seq(table.storage.locationUri)
} else {
val normalizedSpec = partitionSpec.map { spec =>
PartitioningUtils.normalizePartitionSpec(
spec,
partCols,
table.identifier.quotedString,
spark.sessionState.conf.resolver)
}
val partLocations =
catalog.listPartitions(table.identifier, normalizedSpec).map(_.storage.locationUri)
// Fail if the partition spec is fully specified (not partial) and the partition does not
// exist.
for (spec <- partitionSpec if partLocations.isEmpty && spec.size == partCols.length) {
throw new NoSuchPartitionException(table.database, table.identifier.table, spec)
}
partLocations
}
val hadoopConf = spark.sessionState.newHadoopConf()
// Delete each location's contents, then recreate the (now empty) directory.
locations.foreach { location =>
if (location.isDefined) {
val path = new Path(location.get)
try {
val fs = path.getFileSystem(hadoopConf)
fs.delete(path, true)
fs.mkdirs(path)
} catch {
case NonFatal(e) =>
throw new AnalysisException(
s"Failed to truncate table $tableIdentWithDB when removing data of the path: $path " +
s"because of ${e.toString}")
}
}
}
// After deleting the data, invalidate the table to make sure we don't keep around a stale
// file relation in the metastore cache.
spark.sessionState.refreshTable(tableName.unquotedString)
// Also try to drop the contents of the table from the columnar cache
try {
spark.sharedState.cacheManager.uncacheQuery(spark.table(table.identifier), cascade = true)
} catch {
case NonFatal(e) =>
log.warn(s"Exception when attempting to uncache table $tableIdentWithDB", e)
}
// Truncation empties the table, so reset any recorded statistics to zero.
if (table.stats.nonEmpty) {
// empty table after truncation
val newStats = CatalogStatistics(sizeInBytes = 0, rowCount = Some(0))
catalog.alterTableStats(tableName, Some(newStats))
}
Seq.empty[Row]
}
}
/**
* Command that looks like
* {{{
* DESCRIBE [EXTENDED|FORMATTED] table_name partitionSpec?;
* }}}
*/
case class DescribeTableCommand(
table: TableIdentifier,
partitionSpec: TablePartitionSpec,
isExtended: Boolean)
extends RunnableCommand {
// Fixed three-column output: col_name, data_type, comment.
override val output: Seq[Attribute] = Seq(
// Column names are based on Hive.
AttributeReference("col_name", StringType, nullable = false,
new MetadataBuilder().putString("comment", "name of the column").build())(),
AttributeReference("data_type", StringType, nullable = false,
new MetadataBuilder().putString("comment", "data type of the column").build())(),
AttributeReference("comment", StringType, nullable = true,
new MetadataBuilder().putString("comment", "comment of the column").build())()
)
override def run(sparkSession: SparkSession): Seq[Row] = {
val result = new ArrayBuffer[Row]
val catalog = sparkSession.sessionState.catalog
if (catalog.isTemporaryTable(table)) {
if (partitionSpec.nonEmpty) {
throw new AnalysisException(
s"DESC PARTITION is not allowed on a temporary view: ${table.identifier}")
}
// Temporary views only report their schema; no partition or storage info exists.
describeSchema(catalog.lookupRelation(table).schema, result, header = false)
} else {
val metadata = catalog.getTableMetadata(table)
if (metadata.schema.isEmpty) {
// In older version(prior to 2.1) of Spark, the table schema can be empty and should be
// inferred at runtime. We should still support it.
describeSchema(sparkSession.table(metadata.identifier).schema, result, header = false)
} else {
describeSchema(metadata.schema, result, header = false)
}
describePartitionInfo(metadata, result)
if (partitionSpec.nonEmpty) {
// Outputs the partition-specific info for the DDL command:
// "DESCRIBE [EXTENDED|FORMATTED] table_name PARTITION (partitionVal*)"
describeDetailedPartitionInfo(sparkSession, catalog, metadata, result)
} else if (isExtended) {
describeFormattedTableInfo(metadata, result)
}
}
result
}
// Appends a "# Partition Information" section (with column header) when partitioned.
private def describePartitionInfo(table: CatalogTable, buffer: ArrayBuffer[Row]): Unit = {
if (table.partitionColumnNames.nonEmpty) {
append(buffer, "# Partition Information", "", "")
describeSchema(table.partitionSchema, buffer, header = true)
}
}
// Appends the "# Detailed Table Information" section for DESCRIBE EXTENDED.
private def describeFormattedTableInfo(table: CatalogTable, buffer: ArrayBuffer[Row]): Unit = {
// The following information has been already shown in the previous outputs
val excludedTableInfo = Seq(
"Partition Columns",
"Schema"
)
append(buffer, "", "", "")
append(buffer, "# Detailed Table Information", "", "")
table.toLinkedHashMap.filterKeys(!excludedTableInfo.contains(_)).foreach {
s => append(buffer, s._1, s._2, "")
}
}
// Validates and appends partition-specific details for DESC ... PARTITION.
// NOTE(review): when `isExtended` is false this validates and resolves the partition
// but appends nothing — confirm this matches the intended non-extended output.
private def describeDetailedPartitionInfo(
spark: SparkSession,
catalog: SessionCatalog,
metadata: CatalogTable,
result: ArrayBuffer[Row]): Unit = {
if (metadata.tableType == CatalogTableType.VIEW) {
throw new AnalysisException(
s"DESC PARTITION is not allowed on a view: ${table.identifier}")
}
DDLUtils.verifyPartitionProviderIsHive(spark, metadata, "DESC PARTITION")
val partition = catalog.getPartition(table, partitionSpec)
if (isExtended) describeFormattedDetailedPartitionInfo(table, metadata, partition, result)
}
// Appends the "# Detailed Partition Information" and "# Storage Information" sections.
private def describeFormattedDetailedPartitionInfo(
tableIdentifier: TableIdentifier,
table: CatalogTable,
partition: CatalogTablePartition,
buffer: ArrayBuffer[Row]): Unit = {
append(buffer, "", "", "")
append(buffer, "# Detailed Partition Information", "", "")
append(buffer, "Database", table.database, "")
append(buffer, "Table", tableIdentifier.table, "")
partition.toLinkedHashMap.foreach(s => append(buffer, s._1, s._2, ""))
append(buffer, "", "", "")
append(buffer, "# Storage Information", "", "")
table.bucketSpec match {
case Some(spec) =>
spec.toLinkedHashMap.foreach(s => append(buffer, s._1, s._2, ""))
case _ =>
}
table.storage.toLinkedHashMap.foreach(s => append(buffer, s._1, s._2, ""))
}
// Appends one row per column; `header` adds the "# col_name ..." header row first.
private def describeSchema(
schema: StructType,
buffer: ArrayBuffer[Row],
header: Boolean): Unit = {
if (header) {
append(buffer, s"# ${output.head.name}", output(1).name, output(2).name)
}
schema.foreach { column =>
append(buffer, column.name, column.dataType.simpleString, column.getComment().orNull)
}
}
// Helper to push a single three-column output row.
private def append(
buffer: ArrayBuffer[Row], column: String, dataType: String, comment: String): Unit = {
buffer += Row(column, dataType, comment)
}
}
/**
* A command to list the info for a column, including name, data type, comment and column stats.
*
* The syntax of using this command in SQL is:
* {{{
* DESCRIBE [EXTENDED|FORMATTED] table_name column_name;
* }}}
*/
case class DescribeColumnCommand(
table: TableIdentifier,
colNameParts: Seq[String],
isExtended: Boolean)
extends RunnableCommand {
// Two-column output: one (info_name, info_value) row per piece of column info.
override val output: Seq[Attribute] = {
Seq(
AttributeReference("info_name", StringType, nullable = false,
new MetadataBuilder().putString("comment", "name of the column info").build())(),
AttributeReference("info_value", StringType, nullable = false,
new MetadataBuilder().putString("comment", "value of the column info").build())()
)
}
override def run(sparkSession: SparkSession): Seq[Row] = {
val catalog = sparkSession.sessionState.catalog
val resolver = sparkSession.sessionState.conf.resolver
// Resolve the column against the analyzed plan so views and case sensitivity are handled.
val relation = sparkSession.table(table).queryExecution.analyzed
val colName = UnresolvedAttribute(colNameParts).name
val field = {
relation.resolve(colNameParts, resolver).getOrElse {
throw new AnalysisException(s"Column $colName does not exist")
}
}
if (!field.isInstanceOf[Attribute]) {
// If the field is not an attribute after `resolve`, then it's a nested field.
throw new AnalysisException(
s"DESC TABLE COLUMN command does not support nested data types: $colName")
}
val catalogTable = catalog.getTempViewOrPermanentTableMetadata(table)
// Column statistics are optional: absent entries render as the string "NULL" below.
val colStats = catalogTable.stats.map(_.colStats).getOrElse(Map.empty)
val cs = colStats.get(field.name)
val comment = if (field.metadata.contains("comment")) {
Option(field.metadata.getString("comment"))
} else {
None
}
val buffer = ArrayBuffer[Row](
Row("col_name", field.name),
Row("data_type", field.dataType.catalogString),
Row("comment", comment.getOrElse("NULL"))
)
if (isExtended) {
// Show column stats when EXTENDED or FORMATTED is specified.
buffer += Row("min", cs.flatMap(_.min.map(_.toString)).getOrElse("NULL"))
buffer += Row("max", cs.flatMap(_.max.map(_.toString)).getOrElse("NULL"))
buffer += Row("num_nulls", cs.flatMap(_.nullCount.map(_.toString)).getOrElse("NULL"))
buffer += Row("distinct_count",
cs.flatMap(_.distinctCount.map(_.toString)).getOrElse("NULL"))
buffer += Row("avg_col_len", cs.flatMap(_.avgLen.map(_.toString)).getOrElse("NULL"))
buffer += Row("max_col_len", cs.flatMap(_.maxLen.map(_.toString)).getOrElse("NULL"))
// A histogram, when present, expands into a header row plus one row per bin.
val histDesc = for {
c <- cs
hist <- c.histogram
} yield histogramDescription(hist)
buffer ++= histDesc.getOrElse(Seq(Row("histogram", "NULL")))
}
buffer
}
// Renders a histogram as a header row followed by one row per bin.
private def histogramDescription(histogram: Histogram): Seq[Row] = {
val header = Row("histogram",
s"height: ${histogram.height}, num_of_bins: ${histogram.bins.length}")
val bins = histogram.bins.zipWithIndex.map {
case (bin, index) =>
Row(s"bin_$index",
s"lower_bound: ${bin.lo}, upper_bound: ${bin.hi}, distinct_count: ${bin.ndv}")
}
header +: bins
}
}
/**
* A command for users to get tables in the given database.
* If a databaseName is not given, the current database will be used.
* The syntax of using this command in SQL is:
* {{{
* SHOW TABLES [(IN|FROM) database_name] [[LIKE] 'identifier_with_wildcards'];
* SHOW TABLE EXTENDED [(IN|FROM) database_name] LIKE 'identifier_with_wildcards'
* [PARTITION(partition_spec)];
* }}}
*/
case class ShowTablesCommand(
databaseName: Option[String],
tableIdentifierPattern: Option[String],
isExtended: Boolean = false,
partitionSpec: Option[TablePartitionSpec] = None) extends RunnableCommand {
// The result of SHOW TABLES/SHOW TABLE has three basic columns: database, tableName and
// isTemporary. If `isExtended` is true, append column `information` to the output columns.
override val output: Seq[Attribute] = {
val tableExtendedInfo = if (isExtended) {
AttributeReference("information", StringType, nullable = false)() :: Nil
} else {
Nil
}
AttributeReference("database", StringType, nullable = false)() ::
AttributeReference("tableName", StringType, nullable = false)() ::
AttributeReference("isTemporary", BooleanType, nullable = false)() :: tableExtendedInfo
}
override def run(sparkSession: SparkSession): Seq[Row] = {
// Since we need to return a Seq of rows, we will call getTables directly
// instead of calling tables in sparkSession.
val catalog = sparkSession.sessionState.catalog
val db = databaseName.getOrElse(catalog.getCurrentDatabase)
if (partitionSpec.isEmpty) {
// Show the information of tables.
val tables =
tableIdentifierPattern.map(catalog.listTables(db, _)).getOrElse(catalog.listTables(db))
tables.map { tableIdent =>
// Temporary views have no database, hence the empty-string fallback.
val database = tableIdent.database.getOrElse("")
val tableName = tableIdent.table
val isTemp = catalog.isTemporaryTable(tableIdent)
if (isExtended) {
val information = catalog.getTempViewOrPermanentTableMetadata(tableIdent).simpleString
Row(database, tableName, isTemp, s"$information\\n")
} else {
Row(database, tableName, isTemp)
}
}
} else {
// Show the information of partitions.
//
// Note: tableIdentifierPattern should be non-empty, otherwise a [[ParseException]]
// should have been thrown by the sql parser.
val tableIdent = TableIdentifier(tableIdentifierPattern.get, Some(db))
val table = catalog.getTableMetadata(tableIdent).identifier
val partition = catalog.getPartition(tableIdent, partitionSpec.get)
val database = table.database.getOrElse("")
val tableName = table.table
val isTemp = catalog.isTemporaryTable(table)
val information = partition.simpleString
// Single row describing exactly the requested partition.
Seq(Row(database, tableName, isTemp, s"$information\\n"))
}
}
}
/**
 * A command for users to list the properties for a table. If propertyKey is specified, the value
 * for the propertyKey is returned. If propertyKey is not specified, all the keys and their
 * corresponding values are returned.
 * The syntax of using this command in SQL is:
 * {{{
 *   SHOW TBLPROPERTIES table_name[('propertyKey')];
 * }}}
 */
case class ShowTablePropertiesCommand(table: TableIdentifier, propertyKey: Option[String])
  extends RunnableCommand {

  // Always a "value" column; a leading "key" column only when no specific key was requested.
  override val output: Seq[Attribute] = {
    val valueAttr = AttributeReference("value", StringType, nullable = false)() :: Nil
    if (propertyKey.isEmpty) {
      AttributeReference("key", StringType, nullable = false)() :: valueAttr
    } else {
      valueAttr
    }
  }

  override def run(sparkSession: SparkSession): Seq[Row] = {
    val sessionCatalog = sparkSession.sessionState.catalog
    // Temporary views carry no table properties, so the result is empty.
    if (sessionCatalog.isTemporaryTable(table)) {
      Seq.empty[Row]
    } else {
      val metadata = sparkSession.sessionState.catalog.getTableMetadata(table)
      propertyKey match {
        case Some(key) =>
          // A missing key yields a descriptive message in place of the value.
          val value = metadata
            .properties
            .getOrElse(key, s"Table ${metadata.qualifiedName} does not have property: $key")
          Seq(Row(value))
        case None =>
          metadata.properties.toSeq.map { case (k, v) => Row(k, v) }
      }
    }
  }
}
/**
 * A command to list the column names for a table.
 *
 * The syntax of using this command in SQL is:
 * {{{
 *   SHOW COLUMNS (FROM | IN) table_identifier [(FROM | IN) database];
 * }}}
 */
case class ShowColumnsCommand(
    databaseName: Option[String],
    tableName: TableIdentifier) extends RunnableCommand {

  // Single output column holding each column name.
  override val output: Seq[Attribute] = {
    AttributeReference("col_name", StringType, nullable = false)() :: Nil
  }

  override def run(sparkSession: SparkSession): Seq[Row] = {
    val sessionCatalog = sparkSession.sessionState.catalog
    val resolver = sparkSession.sessionState.conf.resolver
    // An explicit FROM/IN database must agree (per the session resolver) with any
    // database already qualified on the table name; otherwise they conflict.
    val resolvedTable = databaseName match {
      case Some(db) if tableName.database.exists(!resolver(_, db)) =>
        throw new AnalysisException(
          s"SHOW COLUMNS with conflicting databases: '$db' != '${tableName.database.get}'")
      case Some(db) => TableIdentifier(tableName.identifier, Some(db))
      case None => tableName
    }
    val metadata = sessionCatalog.getTempViewOrPermanentTableMetadata(resolvedTable)
    metadata.schema.map(field => Row(field.name))
  }
}
/**
 * A command to list the partition names of a table. If the partition spec is specified,
 * partitions that match the spec are returned. [[AnalysisException]] exception is thrown under
 * the following conditions:
 *
 * 1. If the command is called for a non partitioned table.
 * 2. If the partition spec refers to the columns that are not defined as partitioning columns.
 *
 * The syntax of using this command in SQL is:
 * {{{
 *   SHOW PARTITIONS [db_name.]table_name [PARTITION(partition_spec)]
 * }}}
 */
case class ShowPartitionsCommand(
    tableName: TableIdentifier,
    spec: Option[TablePartitionSpec]) extends RunnableCommand {

  // Single output column: the partition name string.
  override val output: Seq[Attribute] = {
    AttributeReference("partition", StringType, nullable = false)() :: Nil
  }

  override def run(sparkSession: SparkSession): Seq[Row] = {
    val sessionCatalog = sparkSession.sessionState.catalog
    val tableMeta = sessionCatalog.getTableMetadata(tableName)
    val tableIdentWithDB = tableMeta.identifier.quotedString

    // Reject unsupported targets: views and tables without partition columns.
    if (tableMeta.tableType == VIEW) {
      throw new AnalysisException(s"SHOW PARTITIONS is not allowed on a view: $tableIdentWithDB")
    }
    if (tableMeta.partitionColumnNames.isEmpty) {
      throw new AnalysisException(
        s"SHOW PARTITIONS is not allowed on a table that is not partitioned: $tableIdentWithDB")
    }
    DDLUtils.verifyPartitionProviderIsHive(sparkSession, tableMeta, "SHOW PARTITIONS")

    // Every column referenced in the spec must be a declared partitioning column.
    spec.foreach { partSpec =>
      val badColumns = partSpec.keySet.filterNot(tableMeta.partitionColumnNames.contains)
      if (badColumns.nonEmpty) {
        val badCols = badColumns.mkString("[", ", ", "]")
        throw new AnalysisException(
          s"Non-partitioning column(s) $badCols are specified for SHOW PARTITIONS")
      }
    }

    sessionCatalog.listPartitionNames(tableName, spec).map(Row(_))
  }
}
case class ShowCreateTableCommand(table: TableIdentifier) extends RunnableCommand {
override val output: Seq[Attribute] = Seq(
AttributeReference("createtab_stmt", StringType, nullable = false)()
)
override def run(sparkSession: SparkSession): Seq[Row] = {
val catalog = sparkSession.sessionState.catalog
val tableMetadata = catalog.getTableMetadata(table)
// TODO: unify this after we unify the CREATE TABLE syntax for hive serde and data source table.
val stmt = if (DDLUtils.isDatasourceTable(tableMetadata)) {
showCreateDataSourceTable(tableMetadata)
} else {
showCreateHiveTable(tableMetadata)
}
Seq(Row(stmt))
}
private def showCreateHiveTable(metadata: CatalogTable): String = {
def reportUnsupportedError(features: Seq[String]): Unit = {
throw new AnalysisException(
s"Failed to execute SHOW CREATE TABLE against table/view ${metadata.identifier}, " +
"which is created by Hive and uses the following unsupported feature(s)\\n" +
features.map(" - " + _).mkString("\\n")
)
}
if (metadata.unsupportedFeatures.nonEmpty) {
reportUnsupportedError(metadata.unsupportedFeatures)
}
val builder = StringBuilder.newBuilder
val tableTypeString = metadata.tableType match {
case EXTERNAL => " EXTERNAL TABLE"
case VIEW => " VIEW"
case MANAGED => " TABLE"
case t =>
throw new IllegalArgumentException(
s"Unknown table type is found at showCreateHiveTable: $t")
}
builder ++= s"CREATE$tableTypeString ${table.quotedString}"
if (metadata.tableType == VIEW) {
if (metadata.schema.nonEmpty) {
builder ++= metadata.schema.map(_.name).mkString("(", ", ", ")")
}
builder ++= metadata.viewText.mkString(" AS\\n", "", "\\n")
} else {
showHiveTableHeader(metadata, builder)
showHiveTableNonDataColumns(metadata, builder)
showHiveTableStorageInfo(metadata, builder)
showHiveTableProperties(metadata, builder)
}
builder.toString()
}
private def showHiveTableHeader(metadata: CatalogTable, builder: StringBuilder): Unit = {
val columns = metadata.schema.filterNot { column =>
metadata.partitionColumnNames.contains(column.name)
}.map(_.toDDL)
if (columns.nonEmpty) {
builder ++= columns.mkString("(", ", ", ")\\n")
}
metadata
.comment
.map("COMMENT '" + escapeSingleQuotedString(_) + "'\\n")
.foreach(builder.append)
}
private def showHiveTableNonDataColumns(metadata: CatalogTable, builder: StringBuilder): Unit = {
if (metadata.partitionColumnNames.nonEmpty) {
val partCols = metadata.partitionSchema.map(_.toDDL)
builder ++= partCols.mkString("PARTITIONED BY (", ", ", ")\\n")
}
if (metadata.bucketSpec.isDefined) {
val bucketSpec = metadata.bucketSpec.get
builder ++= s"CLUSTERED BY (${bucketSpec.bucketColumnNames.mkString(",")})\\n"
if (bucketSpec.sortColumnNames.nonEmpty) {
builder ++= s"SORTED BY (${bucketSpec.sortColumnNames.map(_ + " ASC").mkString(", ")})\\n"
}
builder ++= s"INTO ${bucketSpec.numBuckets} BUCKETS\\n"
}
}
private def showHiveTableStorageInfo(metadata: CatalogTable, builder: StringBuilder): Unit = {
val storage = metadata.storage
storage.serde.foreach { serde =>
builder ++= s"ROW FORMAT SERDE '$serde'\\n"
val serdeProps = metadata.storage.properties.map {
case (key, value) =>
s"'${escapeSingleQuotedString(key)}' = '${escapeSingleQuotedString(value)}'"
}
builder ++= serdeProps.mkString("WITH SERDEPROPERTIES (\\n ", ",\\n ", "\\n)\\n")
}
if (storage.inputFormat.isDefined || storage.outputFormat.isDefined) {
builder ++= "STORED AS\\n"
storage.inputFormat.foreach { format =>
builder ++= s" INPUTFORMAT '${escapeSingleQuotedString(format)}'\\n"
}
storage.outputFormat.foreach { format =>
builder ++= s" OUTPUTFORMAT '${escapeSingleQuotedString(format)}'\\n"
}
}
if (metadata.tableType == EXTERNAL) {
storage.locationUri.foreach { uri =>
builder ++= s"LOCATION '$uri'\\n"
}
}
}
private def showHiveTableProperties(metadata: CatalogTable, builder: StringBuilder): Unit = {
if (metadata.properties.nonEmpty) {
val props = metadata.properties.map { case (key, value) =>
s"'${escapeSingleQuotedString(key)}' = '${escapeSingleQuotedString(value)}'"
}
builder ++= props.mkString("TBLPROPERTIES (\\n ", ",\\n ", "\\n)\\n")
}
}
private def showCreateDataSourceTable(metadata: CatalogTable): String = {
val builder = StringBuilder.newBuilder
builder ++= s"CREATE TABLE ${table.quotedString} "
showDataSourceTableDataColumns(metadata, builder)
showDataSourceTableOptions(metadata, builder)
showDataSourceTableNonDataColumns(metadata, builder)
builder.toString()
}
private def showDataSourceTableDataColumns(
metadata: CatalogTable, builder: StringBuilder): Unit = {
val columns = metadata.schema.fields.map(_.toDDL)
builder ++= columns.mkString("(", ", ", ")\\n")
}
/**
 * Appends the USING clause and, when any exist, an OPTIONS clause built from
 * the storage properties plus (for external tables only) the table location.
 */
private def showDataSourceTableOptions(metadata: CatalogTable, builder: StringBuilder): Unit = {
  builder ++= s"USING ${metadata.provider.get}\\n"
  val propertyOptions = metadata.storage.properties.map {
    case (key, value) => s"${quoteIdentifier(key)} '${escapeSingleQuotedString(value)}'"
  }
  // If it's a managed table, omit PATH option. Spark SQL always creates external table
  // when the table creation DDL contains the PATH option.
  val pathOption = metadata.storage.locationUri.flatMap { location =>
    if (metadata.tableType == MANAGED) {
      None
    } else {
      Some(s"path '${escapeSingleQuotedString(CatalogUtils.URIToString(location))}'")
    }
  }
  val allOptions = propertyOptions ++ pathOption
  if (allOptions.nonEmpty) {
    builder ++= "OPTIONS (\\n"
    builder ++= allOptions.mkString(" ", ",\\n ", "\\n")
    builder ++= ")\\n"
  }
}
/**
 * Appends the PARTITIONED BY clause (if any partition columns) and the
 * CLUSTERED BY / SORTED BY / INTO ... BUCKETS clauses (if a bucket spec with
 * at least one bucket column is present).
 */
private def showDataSourceTableNonDataColumns(
    metadata: CatalogTable, builder: StringBuilder): Unit = {
  val partitionCols = metadata.partitionColumnNames
  if (partitionCols.nonEmpty) {
    builder ++= s"PARTITIONED BY ${partitionCols.mkString("(", ", ", ")")}\\n"
  }
  // Bucketing is only rendered when the spec names at least one bucket column.
  for (spec <- metadata.bucketSpec if spec.bucketColumnNames.nonEmpty) {
    builder ++= s"CLUSTERED BY ${spec.bucketColumnNames.mkString("(", ", ", ")")}\\n"
    if (spec.sortColumnNames.nonEmpty) {
      builder ++= s"SORTED BY ${spec.sortColumnNames.mkString("(", ", ", ")")}\\n"
    }
    builder ++= s"INTO ${spec.numBuckets} BUCKETS\\n"
  }
}
}
| rikima/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala | Scala | apache-2.0 | 41,556 |
package is.hail.rvd
import is.hail.annotations.{Region, RegionValue, SafeRow, WritableRegionValue}
import is.hail.types.virtual.Type
import is.hail.utils._
/**
 * Summary statistics for a single RVD partition: its index, row count, the
 * min/max key values, a random sample of keys, and the sortedness level
 * observed while scanning (see RVDPartitionInfo.{UNSORTED, TSORTED, KSORTED}).
 */
case class RVDPartitionInfo(
  partitionIndex: Int,
  size: Int,
  min: Any,
  max: Any,
  // min, max: RegionValue[kType]
  samples: Array[Any],
  sortedness: Int,
  contextStr: String
) {
  // Closed interval [min, max] spanning the partition's key range.
  val interval = Interval(min, max, true, true)

  // Human-readable summary of the partition statistics.
  // NOTE(review): parameter `t` is unused here — presumably intended for
  // type-aware rendering of min/max; confirm before removing.
  def pretty(t: Type): String = {
    s"partitionIndex=$partitionIndex,size=$size,min=$min,max=$max,samples=${samples.mkString(",")},sortedness=$sortedness"
  }
}
object RVDPartitionInfo {
  // Sortedness levels, ordered weakest to strongest:
  // unsorted < sorted by the partition-key prefix < sorted by the full key.
  final val UNSORTED = 0
  final val TSORTED = 1
  final val KSORTED = 2

  /**
   * Scans one partition and computes its RVDPartitionInfo: row count, min/max
   * key, up to `sampleSize` randomly sampled keys, and the strongest
   * sortedness level that held throughout the scan.
   *
   * `it` yields row offsets (Long pointers into the producer's region) and
   * must be non-empty. `producerContext`'s region is cleared after each row
   * is consumed, so every value kept across iterations is deep-copied into
   * regions owned by a local context.
   */
  def apply(
    typ: RVDType,
    partitionKey: Int,
    sampleSize: Int,
    partitionIndex: Int,
    it: Iterator[Long],
    seed: Int,
    producerContext: RVDContext
  ): RVDPartitionInfo = {
    using(RVDContext.default) { localctx =>
      val kPType = typ.kType
      // Ordering on the partition-key prefix (first `partitionKey` key fields).
      val pkOrd = typ.copy(key = typ.key.take(partitionKey)).kOrd
      // Running min, max, and previous-row key; each lives in its own fresh
      // region so it survives producerContext.region.clear() below.
      val minF = WritableRegionValue(kPType, localctx.freshRegion())
      val maxF = WritableRegionValue(kPType, localctx.freshRegion())
      val prevF = WritableRegionValue(kPType, localctx.freshRegion())
      assert(it.hasNext)
      // Seed min/max/prev with the first row.
      val f0 = it.next()
      minF.set(f0, deepCopy = true)
      maxF.set(f0, deepCopy = true)
      prevF.set(f0, deepCopy = true)
      // Assume fully key-sorted until a violation is seen; only downgrade.
      var sortedness = KSORTED
      var contextStr = ""
      val rng = new java.util.Random(seed)
      val samples = new Array[WritableRegionValue](sampleSize)
      var i = 0
      if (sampleSize > 0) {
        samples(0) = WritableRegionValue(kPType, f0, localctx.freshRegion())
        i += 1
      }
      producerContext.region.clear()
      while (it.hasNext) {
        val f = it.next()
        // Downgrade sortedness on out-of-order keys: a partition-key
        // violation means UNSORTED; otherwise at most TSORTED.
        if (sortedness > UNSORTED && typ.kOrd.lt(f, prevF.value.offset)) {
          if (pkOrd.lt(f, prevF.value.offset)) {
            val curr = Region.pretty(typ.kType, f)
            val prev = prevF.pretty
            log.info(s"unsorted: $curr, $prev")
            contextStr = s"CURRENT=$curr, PREV=$prev"
            sortedness = UNSORTED
          } else if (sortedness > TSORTED) {
            val curr = Region.pretty(typ.kType, f)
            val prev = prevF.pretty
            log.info(s"partition-key-sorted: $curr, $prev")
            contextStr = s"CURRENT=$curr, PREV=$prev"
            sortedness = sortedness.min(TSORTED)
          }
        }
        if (typ.kOrd.lt(f, minF.value.offset))
          minF.set(f, deepCopy = true)
        if (typ.kOrd.gt(f, maxF.value.offset))
          maxF.set(f, deepCopy = true)
        prevF.set(f, deepCopy = true)
        // Reservoir sampling: fill the reservoir, then replace uniformly.
        // NOTE(review): classic Algorithm R draws from nextInt(i + 1);
        // nextInt(i) yields a slightly biased sample — presumably acceptable
        // for these heuristics, but confirm.
        if (i < sampleSize)
          samples(i) = WritableRegionValue(kPType, f, localctx.freshRegion())
        else {
          val j = if (i > 0) rng.nextInt(i) else 0
          if (j < sampleSize)
            samples(j).set(f, deepCopy = true)
        }
        producerContext.region.clear()
        i += 1
      }
      // Convert off-heap region values to safe JVM objects before returning.
      val safe: RegionValue => Any = SafeRow(kPType, _)
      RVDPartitionInfo(partitionIndex, i,
        safe(minF.value), safe(maxF.value),
        Array.tabulate[Any](math.min(i, sampleSize))(i => safe(samples(i).value)),
        sortedness,
        contextStr)
    }
  }
}
| cseed/hail | hail/src/main/scala/is/hail/rvd/RVDPartitionInfo.scala | Scala | mit | 3,247 |
package com.twitter.finagle.benchmark
import java.net.SocketAddress
import com.twitter.finagle.{Group, ServiceFactory, Service}
import com.twitter.finagle.client.DefaultClient
import com.twitter.finagle.stats.StatsReceiver
import com.twitter.util.{Await, Future}
import com.google.caliper.{SimpleBenchmark, Param}
/**
 * Produces an endpointer whose services simply echo each request back to the
 * caller; the socket address and stats receiver are ignored.
 */
object EchoBridge {
  def apply[Req]: ((SocketAddress, StatsReceiver) => ServiceFactory[Req, Req]) = {
    (addr, stats) =>
      ServiceFactory { () =>
        // A fresh echo service per factory application, as before.
        Future.value(new Service[Req, Req] {
          def apply(request: Req) = Future.value(request)
        })
      }
  }
}
// A DefaultClient wired to the in-process echo endpointer, so benchmarks
// exercise the full default client stack without real network I/O.
object TestClient extends DefaultClient[Int, Int](
  name = "test",
  endpointer = EchoBridge[Int]
)
/**
 * Caliper benchmark measuring request round-trip time through a client built
 * with the full default stack, backed by the in-process echo endpointer.
 */
class DefaultClientBenchmark extends SimpleBenchmark {
  // Number of (fake) endpoints in the client's load-balancer group,
  // parameterized by Caliper.
  @Param(Array("1", "10")) val nconns: Int = 1
  // Placeholder addresses; the echo endpointer ignores them.
  val addrs = (0 until nconns) map { _ => new SocketAddress {} }
  val clientWithDefaultStack = TestClient.newService(Group(addrs: _*))
  // Caliper invokes this with its chosen rep count `n`. The `while` loop is
  // deliberate to keep iteration overhead out of the measured path; the
  // explicit `: Unit =` replaces deprecated Scala procedure syntax.
  def timeClientWithDefaultStack(n: Int): Unit = {
    var i = 0
    while (i < n) {
      // Block on each response so every iteration measures a full round trip.
      Await.ready(clientWithDefaultStack(i))
      i += 1
    }
  }
}
| travisbrown/finagle | finagle-benchmark/src/main/scala/com/twitter/finagle/benchmark/DefaultClient.scala | Scala | apache-2.0 | 1,037 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package iht.views.application.gifts
import iht.forms.ApplicationForms._
import iht.models.application.gifts.AllGifts
import iht.testhelpers.{CommonBuilder, TestHelper}
import iht.utils.{CommonHelper, DeceasedInfoHelper}
import iht.views.application.{CancelComponent, SubmittableApplicationPageBehaviour}
import iht.views.html.application.gift.seven_years_given_in_last_7_years
import play.api.data.Form
import play.twirl.api.HtmlFormat.Appendable
/**
 * View test for the "gifts given away in the 7 years before death" page,
 * driven entirely by the shared SubmittableApplicationPageBehaviour: this
 * class only wires up the fixture data, expected page text, form target,
 * and cancel link for that behaviour suite.
 */
class SevenYearsGivenInLast7YearsViewTest extends SubmittableApplicationPageBehaviour[AllGifts] {
  // Registration fixture: married deceased with a fixed IHT reference.
  val regDetails = CommonBuilder.buildRegistrationDetails.copy(ihtReference = Some("ABC1A1A1A"),
    deceasedDetails = Some(CommonBuilder.buildDeceasedDetails.copy(
      maritalStatus = Some(TestHelper.MaritalStatusMarried))),
    deceasedDateOfDeath = Some(CommonBuilder.buildDeceasedDateOfDeath))

  val fakeRequest = createFakeRequest(isAuthorised = false)

  // Page and browser titles share the same message key.
  override def pageTitle = messagesApi("iht.estateReport.gifts.givenAwayIn7YearsBeforeDeath")

  override def browserTitle = messagesApi("iht.estateReport.gifts.givenAwayIn7YearsBeforeDeath")

  // Every guidance paragraph/bullet expected to appear on the page.
  override def guidance = guidance(
    Set(
      messagesApi("page.iht.application.gifts.lastYears.question", DeceasedInfoHelper.getDeceasedNameOrDefaultString(regDetails)),
      messagesApi("page.iht.application.gifts.lastYears.description.p1"),
      messagesApi("iht.estateReport.assets.money.lowerCaseInitial"),
      messagesApi("iht.estateReport.gifts.stocksAndSharesListed"),
      messagesApi("page.iht.application.gifts.lastYears.description.e3"),
      messagesApi("page.iht.application.gifts.lastYears.description.e4"),
      messagesApi("page.iht.application.gifts.lastYears.description.p3", DeceasedInfoHelper.getDeceasedNameOrDefaultString(regDetails))
    )
  )

  // Form submits to the SevenYearsGivenInLast7Years controller.
  override def formTarget = Some(iht.controllers.application.gifts.routes.SevenYearsGivenInLast7YearsController.onSubmit())

  // Cancel returns to the gifts overview, anchored at this question's id.
  override def cancelComponent = Some(
    CancelComponent(
      iht.controllers.application.gifts.routes.GiftsOverviewController.onPageLoad(),
      messagesApi("page.iht.application.gifts.return.to.givenAwayBy",
        CommonHelper.getOrException(regDetails.deceasedDetails).name),
      TestHelper.GiftsSevenYearsQuestionID
    )
  )

  override def linkHash = TestHelper.GiftsSevenYearsQuestionID

  override def form: Form[AllGifts] = giftSevenYearsGivenInLast7YearsForm

  lazy val sevenYearsGivenInLast7YearsView: seven_years_given_in_last_7_years = app.injector.instanceOf[seven_years_given_in_last_7_years]

  // Renders the template under test from a bound form.
  override def formToView: Form[AllGifts] => Appendable =
    form =>
      sevenYearsGivenInLast7YearsView(form, regDetails)

  "SevenYearsGivenInLast7Years Page" must {
    behave like applicationPageWithErrorSummaryBox()
  }
}
| hmrc/iht-frontend | test/iht/views/application/gifts/SevenYearsGivenInLast7YearsViewTest.scala | Scala | apache-2.0 | 3,364 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.connector
import java.util
import org.scalatest.BeforeAndAfter
import org.apache.spark.sql.{DataFrame, QueryTest, SaveMode}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.{NoSuchTableException, TableAlreadyExistsException}
import org.apache.spark.sql.connector.catalog._
import org.apache.spark.sql.connector.expressions.Transform
import org.apache.spark.sql.internal.SQLConf.V2_SESSION_CATALOG
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.util.CaseInsensitiveStringMap
/**
 * DataFrame-API insert/saveAsTable tests run against a V2 session catalog
 * backed by in-memory tables. Inherits the shared INSERT behavior from
 * InsertIntoTests and the catalog setup/teardown from SessionCatalogTest.
 */
class DataSourceV2DataFrameSessionCatalogSuite
  extends InsertIntoTests(supportsDynamicOverwrite = true, includeSQLOnlyTests = false)
  with SessionCatalogTest[InMemoryTable, InMemoryTableSessionCatalog] {

  // Inserts via the DataFrameWriter; `mode` may be null, meaning "use default".
  override protected def doInsert(tableName: String, insert: DataFrame, mode: SaveMode): Unit = {
    val dfw = insert.write.format(v2Format)
    if (mode != null) {
      dfw.mode(mode)
    }
    dfw.insertInto(tableName)
  }

  // Checks the table contents through all four equivalent read paths.
  override protected def verifyTable(tableName: String, expected: DataFrame): Unit = {
    checkAnswer(spark.table(tableName), expected)
    checkAnswer(sql(s"SELECT * FROM $tableName"), expected)
    checkAnswer(sql(s"SELECT * FROM default.$tableName"), expected)
    checkAnswer(sql(s"TABLE $tableName"), expected)
  }

  override protected val catalogAndNamespace: String = ""

  // saveAsTable must resolve the bare name to the catalog table, not the
  // same-named temp view, so only the table receives the appended rows.
  test("saveAsTable: Append mode should not fail if the table already exists " +
    "and a same-name temp view exist") {
    withTable("same_name") {
      withTempView("same_name") {
        val format = spark.sessionState.conf.defaultDataSourceName
        sql(s"CREATE TABLE same_name(id LONG) USING $format")
        spark.range(10).createTempView("same_name")
        spark.range(20).write.format(v2Format).mode(SaveMode.Append).saveAsTable("same_name")
        checkAnswer(spark.table("same_name"), spark.range(10).toDF())
        checkAnswer(spark.table("default.same_name"), spark.range(20).toDF())
      }
    }
  }

  // Same resolution rule for Overwrite: the temp view must be left intact.
  test("saveAsTable with mode Overwrite should not fail if the table already exists " +
    "and a same-name temp view exist") {
    withTable("same_name") {
      withTempView("same_name") {
        sql(s"CREATE TABLE same_name(id LONG) USING $v2Format")
        spark.range(10).createTempView("same_name")
        spark.range(20).write.format(v2Format).mode(SaveMode.Overwrite).saveAsTable("same_name")
        checkAnswer(spark.table("same_name"), spark.range(10).toDF())
        checkAnswer(spark.table("default.same_name"), spark.range(20).toDF())
      }
    }
  }

  // The writer's `path` option must surface as the table's `location`
  // property, and the format as its `provider` property.
  test("saveAsTable passes path and provider information properly") {
    val t1 = "prop_table"
    withTable(t1) {
      spark.range(20).write.format(v2Format).option("path", "abc").saveAsTable(t1)
      val cat = spark.sessionState.catalogManager.v2SessionCatalog.asInstanceOf[TableCatalog]
      val tableInfo = cat.loadTable(Identifier.of(Array.empty, t1))
      assert(tableInfo.properties().get("location") === "abc")
      assert(tableInfo.properties().get("provider") === v2Format)
    }
  }
}
// Minimal TableProvider used only as a `format` marker in these tests;
// it is never expected to actually resolve a table.
class InMemoryTableProvider extends TableProvider {
  override def getTable(options: CaseInsensitiveStringMap): Table = {
    throw new UnsupportedOperationException("D'oh!")
  }
}
/**
 * Test session catalog that materializes tables as [[InMemoryTable]]s and
 * supports in-place ALTER TABLE with data carried over.
 */
class InMemoryTableSessionCatalog extends TestV2SessionCatalogBase[InMemoryTable] {
  /** Creates a fresh in-memory table with the requested layout. */
  override def newTable(
      name: String,
      schema: StructType,
      partitions: Array[Transform],
      properties: util.Map[String, String]): InMemoryTable = {
    new InMemoryTable(name, schema, partitions, properties)
  }

  /**
   * Applies `changes` to an existing table and swaps in the updated copy,
   * preserving its rows. Throws NoSuchTableException if the table is absent.
   */
  override def alterTable(ident: Identifier, changes: TableChange*): Table = {
    val qualified = fullIdentifier(ident)
    val existing = Option(tables.get(qualified))
      .getOrElse(throw new NoSuchTableException(ident))

    val updatedProps = CatalogV2Util.applyPropertiesChanges(existing.properties, changes)
    val updatedSchema = CatalogV2Util.applySchemaChanges(existing.schema, changes)
    // Refuse a change set that would drop every remaining column.
    if (updatedSchema.fields.isEmpty) {
      throw new IllegalArgumentException(s"Cannot drop all fields")
    }

    val replacement =
      new InMemoryTable(existing.name, updatedSchema, existing.partitioning, updatedProps)
        .withData(existing.data)
    tables.put(qualified, replacement)
    replacement
  }
}
/**
 * Shared harness for suites that exercise a V2 session catalog: installs the
 * catalog before each test, clears its tables and unsets the config after,
 * and provides common saveAsTable behavior tests. Subclasses supply
 * `verifyTable` to define how table contents are checked.
 */
private [connector] trait SessionCatalogTest[T <: Table, Catalog <: TestV2SessionCatalogBase[T]]
  extends QueryTest
  with SharedSparkSession
  with BeforeAndAfter {

  // Looks up a registered catalog plugin by name.
  protected def catalog(name: String): CatalogPlugin = {
    spark.sessionState.catalogManager.catalog(name)
  }

  // Format string used as the V2 source marker in writes.
  protected val v2Format: String = classOf[InMemoryTableProvider].getName

  // Catalog implementation installed as the session catalog for each test.
  protected val catalogClassName: String = classOf[InMemoryTableSessionCatalog].getName

  before {
    spark.conf.set(V2_SESSION_CATALOG.key, catalogClassName)
  }

  override def afterEach(): Unit = {
    super.afterEach()
    // Drop all in-memory tables and restore the default session catalog.
    catalog("session").asInstanceOf[Catalog].clearTables()
    spark.conf.unset(V2_SESSION_CATALOG.key)
  }

  // Subclass hook: assert that `tableName` currently holds `expected`.
  protected def verifyTable(tableName: String, expected: DataFrame): Unit

  import testImplicits._

  test("saveAsTable: v2 table - table doesn't exist and default mode (ErrorIfExists)") {
    val t1 = "tbl"
    val df = Seq((1L, "a"), (2L, "b"), (3L, "c")).toDF("id", "data")
    df.write.format(v2Format).saveAsTable(t1)
    verifyTable(t1, df)
  }

  test("saveAsTable: v2 table - table doesn't exist and append mode") {
    val t1 = "tbl"
    val df = Seq((1L, "a"), (2L, "b"), (3L, "c")).toDF("id", "data")
    df.write.format(v2Format).mode("append").saveAsTable(t1)
    verifyTable(t1, df)
  }

  // Append must create the catalog table even when a temp view shadows the name.
  test("saveAsTable: Append mode should not fail if the table not exists " +
    "but a same-name temp view exist") {
    withTable("same_name") {
      withTempView("same_name") {
        spark.range(10).createTempView("same_name")
        spark.range(20).write.format(v2Format).mode(SaveMode.Append).saveAsTable("same_name")
        assert(
          spark.sessionState.catalog.tableExists(TableIdentifier("same_name", Some("default"))))
      }
    }
  }

  test("saveAsTable: v2 table - table exists") {
    val t1 = "tbl"
    val df = Seq((1L, "a"), (2L, "b"), (3L, "c")).toDF("id", "data")
    spark.sql(s"CREATE TABLE $t1 (id bigint, data string) USING $v2Format")
    // Default ErrorIfExists mode must reject writing to an existing table.
    intercept[TableAlreadyExistsException] {
      df.select("id", "data").write.format(v2Format).saveAsTable(t1)
    }
    df.write.format(v2Format).mode("append").saveAsTable(t1)
    verifyTable(t1, df)
    // Check that appends are by name
    df.select('data, 'id).write.format(v2Format).mode("append").saveAsTable(t1)
    verifyTable(t1, df.union(df))
  }

  test("saveAsTable: v2 table - table overwrite and table doesn't exist") {
    val t1 = "tbl"
    val df = Seq((1L, "a"), (2L, "b"), (3L, "c")).toDF("id", "data")
    df.write.format(v2Format).mode("overwrite").saveAsTable(t1)
    verifyTable(t1, df)
  }

  test("saveAsTable: v2 table - table overwrite and table exists") {
    val t1 = "tbl"
    val df = Seq((1L, "a"), (2L, "b"), (3L, "c")).toDF("id", "data")
    spark.sql(s"CREATE TABLE $t1 USING $v2Format AS SELECT 'c', 'd'")
    df.write.format(v2Format).mode("overwrite").saveAsTable(t1)
    verifyTable(t1, df)
  }

  // Overwrite targets the catalog table; the shadowing temp view must survive.
  test("saveAsTable: Overwrite mode should not drop the temp view if the table not exists " +
    "but a same-name temp view exist") {
    withTable("same_name") {
      withTempView("same_name") {
        spark.range(10).createTempView("same_name")
        spark.range(20).write.format(v2Format).mode(SaveMode.Overwrite).saveAsTable("same_name")
        assert(spark.sessionState.catalog.getTempView("same_name").isDefined)
        assert(
          spark.sessionState.catalog.tableExists(TableIdentifier("same_name", Some("default"))))
      }
    }
  }

  test("saveAsTable: v2 table - ignore mode and table doesn't exist") {
    val t1 = "tbl"
    val df = Seq((1L, "a"), (2L, "b"), (3L, "c")).toDF("id", "data")
    df.write.format(v2Format).mode("ignore").saveAsTable(t1)
    verifyTable(t1, df)
  }

  // Ignore mode must leave the pre-existing table contents untouched.
  test("saveAsTable: v2 table - ignore mode and table exists") {
    val t1 = "tbl"
    val df = Seq((1L, "a"), (2L, "b"), (3L, "c")).toDF("id", "data")
    spark.sql(s"CREATE TABLE $t1 USING $v2Format AS SELECT 'c', 'd'")
    df.write.format(v2Format).mode("ignore").saveAsTable(t1)
    verifyTable(t1, Seq(("c", "d")).toDF("id", "data"))
  }
}
| bdrillard/spark | sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2DataFrameSessionCatalogSuite.scala | Scala | apache-2.0 | 9,312 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.online.joins
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.physical.{Partitioning, UnspecifiedDistribution}
import org.apache.spark.sql.execution.joins._
import org.apache.spark.sql.execution.{BinaryNode, SparkPlan}
import org.apache.spark.sql.hive.online._
import org.apache.spark.util.collection.CompactBuffer2
import scala.concurrent._
import scala.concurrent.duration._
import scala.language.existentials
/**
* Multi-time broadcast hash join.
* Remarks:
* 1. Now we only handle the case with AlmostFixed build side.
* We assume "almost-fixed" means that the first iteration will see all the keys.
* Therefore, we have the following remarks.
* 2. We keep track of all the keys with at least one false flag from the build side,
* and throw exception if we see new keys out of this set in later iterations.
* 3. We cache from the stream side the tuples with true flags
* but joined with at least one false-flagged build-side tuple.
* Stream-side cache is saved in state.
* 4. Build-side cache is saved in broadcast.
* 5. We refresh both build-side and stream-side caches.
*/
case class MTBroadcastHashJoin(
    leftCacheFilter: Option[Attribute],
    rightCacheFilter: Option[Attribute],
    streamRefresh: RefreshInfo,
    buildRefresh: RefreshInfo,
    leftKeys: Seq[Expression],
    rightKeys: Seq[Expression],
    buildSide: BuildSide,
    left: SparkPlan,
    right: SparkPlan)(
    @transient val controller: OnlineDataFrame,
    @transient val trace: List[Int] = -1 :: Nil,
    opId: OpId = OpId.newOpId)
  extends BinaryNode with HashJoin with Stateful
  with IteratorRefresher with HashedRelationRefresher {

  // Output rows keep the streamed side's partitioning.
  override def outputPartitioning: Partitioning = streamedPlan.outputPartitioning

  override def requiredChildDistribution =
    UnspecifiedDistribution :: UnspecifiedDistribution :: Nil

  // Orient the per-side cache filters to (build, streamed) based on which
  // side is broadcast.
  protected val (buildCacheFilter, streamedCacheFilter) = buildSide match {
    case BuildLeft => (leftCacheFilter, rightCacheFilter)
    case BuildRight => (rightCacheFilter, leftCacheFilter)
  }

  // Inner hash join of the streamed iterator against the (refreshed) build-side
  // hash relation. Lazily pulls one streamed row at a time and walks its match
  // buffer via currentMatchPosition.
  protected def hashJoin2(
      streamIter: Iterator[Row],
      hashedRelation: HashedRelation2): Iterator[Row] = {
    new Iterator[Row] {
      private[this] var currentStreamedRow: Row = _
      private[this] var currentHashMatches: CompactBuffer2[Row] = _
      // -1 means "no current match buffer"; otherwise the next index to emit.
      private[this] var currentMatchPosition: Int = -1

      // Mutable per row objects.
      private[this] val joinRow = new JoinedRow2
      private[this] val joinKeys = streamSideKeyGenerator()

      override final def hasNext: Boolean =
        (currentMatchPosition != -1 && currentMatchPosition < currentHashMatches.size) ||
          (streamIter.hasNext && fetchNext())

      // Emits the next joined row, ordering the two sides to match the
      // logical left/right of the join.
      override final def next() = {
        val ret = buildSide match {
          case BuildRight => joinRow(currentStreamedRow, currentHashMatches(currentMatchPosition))
          case BuildLeft => joinRow(currentHashMatches(currentMatchPosition), currentStreamedRow)
        }
        currentMatchPosition += 1
        ret
      }

      /**
       * Searches the streamed iterator for the next row that has at least one match in hashtable.
       *
       * @return true if the search is successful, and false if the streamed iterator runs out of
       *         tuples.
       */
      private final def fetchNext(): Boolean = {
        currentHashMatches = null
        currentMatchPosition = -1
        while (currentHashMatches == null && streamIter.hasNext) {
          currentStreamedRow = streamIter.next()
          // Rows with any null join key never match and are skipped.
          if (!joinKeys(currentStreamedRow).anyNull) {
            currentHashMatches = hashedRelation.get(joinKeys.currentValue)
          }
        }

        if (currentHashMatches == null) {
          false
        } else {
          currentMatchPosition = 0
          true
        }
      }
    }
  }

  // Broadcast wait limit; a negative configured value means wait forever.
  val timeout = {
    val timeoutValue = sqlContext.conf.broadcastTimeout
    if (timeoutValue < 0) {
      Duration.Inf
    } else {
      timeoutValue.seconds
    }
  }

  // Collects and hashes the build side asynchronously. On the first batch the
  // hashed relation is broadcast and registered with the controller; on later
  // batches the first batch's broadcast is reused (the build side is assumed
  // "almost fixed" — see the class comment), and seeing any new build-side
  // keys is reported as an integrity error.
  @transient
  private lazy val broadcastFuture = future {
    // Note that we use .execute().collect() because we don't want to convert data to Scala types
    val input: Array[Row] = buildPlan.execute().map(_.copy()).collect()
    val predicate = buildCacheFilter match {
      case Some(filter) => newPredicate(filter, buildPlan.output)
      case None => (_: Row) => true
    }
    val hashed = HashedRelation2(input.iterator, buildSideKeyGenerator, predicate, input.length)
    prevBatches.lastOption match {
      case None =>
        val broadcast = sparkContext.broadcast(
          CombinedHashedRelation(hashed, null, refreshHashedRelation))
        controller.broadcasts((opId, currentBatch)) = broadcast
        broadcast
      case Some(bId) =>
        // TODO: fix this integrity error by supporting join whose both branches may grow
        if (!hashed.keySet().isEmpty) {
          controller.getWatcher += -1
          logError(s"Integrity Error in MTBroadcastHashJoin(Op $opId, Batch $currentBatch)")
        }
        controller.broadcasts((opId, bId)).asInstanceOf[Broadcast[HashedRelation2]]
    }
  }(BroadcastHashJoin.broadcastHashJoinExecutionContext)

  override def doExecute() = {
    // Blocks until the build side is collected and broadcast (or timeout).
    val broadcastRelation = Await.result(broadcastFuture, timeout)

    streamedPlan.execute().mapPartitions { streamedIter =>
      new Iterator[Row] {
        // Refresh the broadcast relation per partition before joining.
        private[this] val iterator =
          hashJoin2(streamedIter, refreshHashedRelation()(broadcastRelation.value))
        override def hasNext: Boolean = iterator.hasNext
        override def next(): Row = iterator.next()
      }
    }
  }

  // Curried constructor args must survive copy() on this case class.
  override protected final def otherCopyArgs = controller :: trace :: opId :: Nil

  override def simpleString = s"${super.simpleString} $opId"

  // Stateful hook: rebuild this operator for the next mini-batch, kicking off
  // its broadcast eagerly before returning.
  override def newBatch(newTrace: List[Int]): SparkPlan = {
    val join = MTBroadcastHashJoin(
      leftCacheFilter, rightCacheFilter, streamRefresh, buildRefresh,
      leftKeys, rightKeys, buildSide, left, right)(controller, newTrace, opId)
    join.broadcastFuture
    join
  }
}
| andrewor14/iolap | sql/hive/src/main/scala/org/apache/spark/sql/hive/online/joins/MTBroadcastHashJoin.scala | Scala | apache-2.0 | 6,949 |
/*******************************************************************************
Copyright (c) 2012-2014, S-Core, KAIST.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
***************************************************************************** */
package kr.ac.kaist.jsaf.analysis.typing
import kr.ac.kaist.jsaf.Shell
import kr.ac.kaist.jsaf.analysis.cfg._
import kr.ac.kaist.jsaf.analysis.typing.domain._
import kr.ac.kaist.jsaf.analysis.typing.{AccessHelper=>AH}
import kr.ac.kaist.jsaf.analysis.typing.{SemanticsExpr => SE}
import kr.ac.kaist.jsaf.scala_src.useful.WorkTrait
import scala.collection.immutable.HashMap
import kr.ac.kaist.jsaf.analysis.typing.domain.Context
import kr.ac.kaist.jsaf.analysis.typing.domain.State
import kr.ac.kaist.jsaf.analysis.typing.domain.Heap
import kr.ac.kaist.jsaf.analysis.typing.models.ModelManager
import kr.ac.kaist.jsaf.analysis.typing.AddressManager._
/**
 * Computes a def/use set (DUSet) for every reachable CFG node: for each node,
 * the set of (location, property) pairs it defines and the set it uses. The
 * result drives sparse analysis. Nodes are processed as work items through
 * Shell.workManager, possibly on multiple threads; `dusetLock` guards the
 * shared `duset` map.
 */
class Access(cfg: CFG, cg: Map[CFGInst, Set[FunctionId]], state_org: State) {
  // Guards all mutation of `duset` (AccessWork.doit may run concurrently).
  val dusetLock: AnyRef = new AnyRef()
  var duset: DUSet = HashMap()
  def result = duset

  // Builds the analysis state for function `fid` by swapping that function's
  // merged pure-local object into the single pure-local location.
  def getState(fid: FunctionId): State = {
    val ph = state_org._1
    val h = ph.update(SinglePureLocalLoc, ph(cfg.getMergedPureLocal(fid)))
    State(h, state_org._2)
  }

  def process(): Unit = process(false)

  // Enqueues one AccessWork per reachable node and blocks until all complete.
  def process(quiet: Boolean): Unit = {
    // Functions reachable through the call graph, seeded with global code.
    val reachableFuncs = cg.foldLeft(Set(cfg.getGlobalFId))((s, kv) => s ++ kv._2)
    val programNodes = reachableFuncs.foldLeft(List[Node]())((S, fid) => S ++ cfg.getReachableNodes(fid))
    if (!quiet)
      System.out.println("# Reachable Nodes: " + programNodes.length);
    // Initialize WorkManager
    if(Shell.params.opt_MultiThread) Shell.workManager.initialize() // Multi-thread
    else Shell.workManager.initialize(null, 1) // Single-thread
    // Push works
    for(node <- programNodes) Shell.workManager.pushWork(new AccessWork(cfg, node))
    // Wait until all works are finished.
    Shell.workManager.waitFinishEvent()
    // Deinitialize WorkManager
    Shell.workManager.deinitialize()
    if (!quiet)
      System.err.println(" The size of du: "+ duset.foldLeft(0)((i, pair) => i + (pair._2._1.toSet.size) + (pair._2._2.toSet.size)))
  }

  // One unit of work: compute the (def, use) location-property sets for a
  // single CFG node (Entry / Exit / ExitExc / instruction block) and merge
  // them into `duset` under the lock.
  class AccessWork(cfg:CFG, node: Node) extends WorkTrait {
    override def doit(): Unit = {
      val state = getState(node._1)
      val du: (Node, (LPSet, LPSet)) = cfg.getCmd(node) match {
        case Entry => {
          // Entry node: defines bindings for arguments and local variables;
          // uses the arguments object and the enclosing environment.
          val (fid, l) = node
          val h = state._1
          val ctx = state._2
          val x_argvars = cfg.getArgVars(fid)
          val x_localvars = cfg.getLocalVars(fid)
          val lset_arg = h(SinglePureLocalLoc)(cfg.getArgumentsName(fid))._1._1._2
          val env = h(SinglePureLocalLoc)("@env")._2._2
          var i = 0
          // def
          val LPd_1 = x_argvars.foldLeft(LPBot)((lp, x) => {
            lp ++ AH.CreateMutableBinding_def(h, env, x)
          })
          val LPd_2 = x_localvars.foldLeft(LPBot)((lp, x) => {
            lp ++ AH.CreateMutableBinding_def(h, env, x)
          })
          val LPd_3 = LPSet(Set((ContextLoc, "3"), (ContextLoc, "4")))
          val LPd_4 = LPSet((SinglePureLocalLoc, cfg.getArgumentsName(fid)))
          val LPd_5 = env.foldLeft(LPBot)((lp, l) => lp + ((l, "@outer")))
          val LPd = LPd_1 ++ LPd_2 ++ LPd_3 ++ LPd_4 ++ LPd_5
          // use
          val LPu_1 = LPSet((SinglePureLocalLoc, cfg.getArgumentsName(fid)))
          val LPu_2 = x_argvars.foldLeft(LPBot)((lp, x) => {
            // Each argument variable reads property "i" (its position) from
            // every possible arguments object.
            val lp_3 = lset_arg.foldLeft(lp)((lp_2, l_arg) => {
              lp_2 ++ AH.Proto_use(h, l_arg, AbsString.alpha(i.toString))
            })
            i = i + 1
            val lp_4 = AH.CreateMutableBinding_use(h, env, x)
            lp_3 ++ lp_4
          })
          val LPu_3 = x_localvars.foldLeft(LPBot)((lp, x) => {
            lp ++ AH.CreateMutableBinding_use(h, env, x)
          })
          val LPu = LPu_1 ++ LPu_2 ++ LPu_3
          (node, ((LPd, LPu)))
        }
        case Exit => {
          // Exit node: uses @return (plus __result for global code) and the
          // context; #PureLocal must not be passed between functions, so
          // @temp and the context entries are "held" as defs.
          val LPu_1 = LPSet((SinglePureLocalLoc, "@return"))
          val LPu_2 =
            if (node._1 == cfg.getGlobalFId)
              LPSet((GlobalLoc, "__result"))
            else
              LPBot
          val LPu_3 = LPSet(Set((ContextLoc, "3"), (ContextLoc, "4")))
          val LPu = LPu_1 ++ LPu_2 ++ LPu_3
          // Note: #PureLocal must not be passed between functions.
          val hold_purelocal = LPSet((SinglePureLocalLoc, "@temp"))
          val hold_context = LPSet(Set((ContextLoc, "3"), (ContextLoc, "4")))
          (node, ((hold_purelocal ++ hold_context, LPu)))
        }
        case ExitExc => {
          // Exceptional exit: like Exit, but uses the exception bindings.
          val LPu_1 = LPSet((SinglePureLocalLoc, "@exception"))
          val LPu_2 = LPSet((SinglePureLocalLoc, "@exception_all"))
          val LPu_3 = LPSet(Set((ContextLoc, "3"), (ContextLoc, "4")))
          val LPu = LPu_1 ++ LPu_2 ++ LPu_3
          // Note: #PureLocal must not be passed between functions.
          val hold_purelocal = LPSet((SinglePureLocalLoc, "@temp"))
          val hold_context = LPSet(Set((ContextLoc, "3"), (ContextLoc, "4")))
          (node, ((hold_purelocal ++ hold_context, LPu)))
        }
        case Block(insts) => {
          // Instruction block: accumulate per-instruction def/use sets via
          // Access.I_def / Access.I_use.
          val du = insts.foldLeft((LPBot, LPBot))((S, i) => {
            val returnVar =
              cfg.getReturnVar(node) match {
                case Some(id) => AH.VarStore_def(state._1, state._1(SinglePureLocalLoc)("@env")._2._2, id)
                case None => LPBot
              }
            val defset = Access.I_def(cfg, i, state._1, state._2) ++ returnVar
            val useset = Access.I_use(cfg, i, state._1, state._2)// -- S._1
            // if an instruction can make an exception state, defsets of following instructions must be included in useset.
            // now, we always merge defsets to useset(sound but not efficient).
            (S._1 ++ defset, S._2 ++ useset ++ defset)
          })
          (node, du)
        }
      }

      dusetLock.synchronized {
        duset += (du._1 -> du._2)

        // after-call node
        if (cfg.getAftercalls.contains(node)) {
          // Notes
          //   1. Both of #Context and #PureLocal are defined by an edge transfer function.
          //   2. @temp stands for all the properties in #PureLocal.
          duset += (node -> (du._2._1 ++ LPSet(Set((SinglePureLocalLoc, "@temp"), (ContextLoc, "3"), (ContextLoc, "4"))), du._2._2))
        } else if (cfg.getAftercatches.contains(node)) {
          // Notes
          //   1. Both of #Context and #PureLocal are defined by an edge transfer function.
          //   2. @temp stands for all the properties in #PureLocal.
          duset += (node -> (du._2._1 ++ LPSet(Set((SinglePureLocalLoc, "@temp"), (ContextLoc, "3"), (ContextLoc, "4"))), du._2._2))
        }
      }
    }
  }
}
object Access {
def I_def(cfg:CFG, i: CFGInst, h: Heap, ctx: Context): LPSet = {
i match {
case CFGAlloc(_, _, x, e, a_new) => {
val l_r = addrToLoc(a_new, Recent)
val es = e match {
case None => ExceptionBot
case Some(proto) => {
val (_,es_) = SE.V(proto, h, ctx)
es_
}
}
val LP_1 = AH.Oldify_def(h,ctx,a_new)
val LP_2 = AH.NewObject_def.foldLeft(LPBot)((S,p) => S + ((l_r, p)))
val LP_3 = AH.VarStore_def(h,h(SinglePureLocalLoc)("@env")._2._2,x)
val LP_4 = AH.RaiseException_def(es)
LP_1 ++ LP_2 ++ LP_3 ++ LP_4
}
case CFGAllocArray(_, _, x, n, a_new) => {
val l_r = addrToLoc(a_new, Recent)
val LP_1 = AH.Oldify_def(h,ctx,a_new)
val LP_2 = AH.NewArrayObject_def.foldLeft(LPBot)((S,p) => S + ((l_r, p)))
val LP_3 = AH.VarStore_def(h,h(SinglePureLocalLoc)("@env")._2._2,x)
LP_1 ++ LP_2 ++ LP_3
}
case CFGAllocArg(_, _, x, n, a_new) => {
val l_r = addrToLoc(a_new, Recent)
val LP_1 = AH.Oldify_def(h,ctx,a_new)
val LP_2 = AH.NewArgObject_def.foldLeft(LPBot)((S,p) => S + ((l_r, p)))
val LP_3 = AH.VarStore_def(h,h(SinglePureLocalLoc)("@env")._2._2,x)
LP_1 ++ LP_2 ++ LP_3
}
case CFGExprStmt(_, _, x, e) => {
val (v,es) = SE.V(e, h, ctx)
val LP_1 = AH.VarStore_def(h,h(SinglePureLocalLoc)("@env")._2._2,x)
val LP_2 = AH.RaiseException_def(es)
LP_1 ++ LP_2
}
case CFGDelete(_, _, x_1, expr) =>
expr match {
case CFGVarRef(_, x_2) => {
val lset_base = Helper.LookupBase(h, x_2)
val ax_2 = AbsString.alpha(x_2)
val LP_1 = AH.VarStore_def(h,h(SinglePureLocalLoc)("@env")._2._2,x_1)
val LP_2 = lset_base.foldLeft(LPBot)((S,l_base) =>
S ++ AH.Delete_def(h,l_base, ax_2))
LP_1 ++ LP_2
}
case _ => {
val (v, es) = SE.V(expr, h, ctx)
val LP_1 = AH.VarStore_def(h,h(SinglePureLocalLoc)("@env")._2._2,x_1)
val LP_2 = AH.RaiseException_def(es)
LP_1 ++ LP_2
}
}
case CFGDeleteProp(_, _, x, e_1, e_2) => {
// lset must not be empty because obj is coming through <>toObject.
val lset = SE.V(e_1, h, ctx)._1._2
val (v, es) = SE.V(e_2, h, ctx)
val sset = Helper.toStringSet(Helper.toPrimitive_better(h, v))
val LP_1 = AH.VarStore_def(h,h(SinglePureLocalLoc)("@env")._2._2,x)
val LP_2 =
lset.foldLeft(LPBot)((S_1, l) => {
sset.foldLeft(S_1)((S_2, s) => {
S_2 ++ AH.Delete_def(h,l,s)
})
})
val LP_3 = AH.RaiseException_def(es)
LP_1 ++ LP_2 ++ LP_3
}
case CFGStore(_, _, e_1, e_2, e_3) => {
// TODO: toStringSet should be used in more optimized way
val (lpset1, es_1) = {
val (v_index, es_index) = SE.V(e_2, h, ctx)
if (v_index <= ValueBot) (LPBot, es_index)
else {
val (v_rhs, es_rhs) = SE.V(e_3, h, ctx)
if (v_rhs <= ValueBot) (LPBot, es_index ++ es_rhs)
else {
// lset must not be empty because obj is coming through <>toObject.
val lset = SE.V(e_1, h, ctx)._1._2
// interate over set of strings for index
val sset = Helper.toStringSet(Helper.toPrimitive_better(h, v_index))
val (lpset2, es_2) = sset.foldLeft(LPBot, es_index ++ es_rhs)((res, s) => {
// non-array objects
val lset_narr = lset.filter(l => (BoolFalse <= Helper.IsArray(h, l)) && BoolTrue <= Helper.CanPut(h, l, s))
// array objects
val lset_arr = lset.filter(l => (BoolTrue <= Helper.IsArray(h, l)) && BoolTrue <= Helper.CanPut(h, l, s))
// store for non-array object
val LP_narr = lset_narr.foldLeft(LPBot)((_lpset, l) => _lpset ++ AH.PropStore_def(h, l, s))
// 15.4.5.1 [[DefineOwnProperty]] of Array
val (lpset_arr, ex) = lset_arr.foldLeft((LPBot, ExceptionBot))((_lpex, l) => {
// 3. s is length
val (lpset_length, ex_len) =
if (AbsString.alpha("length") <= s) {
val v_newLen = Value(Operator.ToUInt32(v_rhs))
val n_oldLen = h(l)("length")._1._1._1._4 // number
val b_g = (n_oldLen < v_newLen._1._4)
val b_eq = (n_oldLen === v_newLen._1._4)
val b_canputLen = Helper.CanPut(h, l, AbsString.alpha("length"))
// 3.d
val n_value = Helper.toNumber(v_rhs._1) + Helper.toNumber(Helper.objToPrimitive(v_rhs._2, "Number"))
val ex_len =
if (BoolFalse <= (n_value === v_newLen._1._4)) Set[Exception](RangeError)
else Set[Exception]()
val h_normal =
if (BoolTrue <= (n_value === v_newLen._1._4)) {
// 3.f
val LP1 =
if ((BoolTrue <= b_g || BoolTrue <= b_eq) && BoolTrue <= b_canputLen)
AH.PropStore_def(h, l, AbsString.alpha("length"))
else LPBot
// 3.j, 3.l
val LP2 =
if (BoolFalse <= b_g && BoolTrue <= b_canputLen) {
val _LP1 = AH.PropStore_def(h, l, AbsString.alpha("length"))
(v_newLen._1._4.getSingle, n_oldLen.getSingle) match {
case (Some(n1), Some(n2)) =>
(n1.toInt until n2.toInt).foldLeft(_LP1)((__lpset, i) =>
__lpset ++ AH.Delete_def(h, l, AbsString.alpha(i.toString)))
case _ =>
if (v_newLen._1._4 <= NumBot || n_oldLen <= NumBot) LPBot
else _LP1 ++ AH.Delete_def(h, l, NumStr)
}
}
else LPBot
LP1 ++ LP2
}
else
LPBot
(h_normal, ex_len)
}
else
(LPBot, ExceptionBot)
// 4. s is array index
val lpset_index =
if (BoolTrue <= Helper.IsArrayIndex(s)) {
val n_oldLen = h(l)("length")._1._1._1._4 // number
val n_index = Operator.ToUInt32(Value(Helper.toNumber(PValue(s))))
val b_g = (n_oldLen < n_index)
val b_eq = (n_oldLen === n_index)
val b_canputLen = Helper.CanPut(h, l, AbsString.alpha("length"))
// 4.c
val LP1 =
if (BoolTrue <= (n_index < n_oldLen)) AH.PropStore_def(h, l, s)
else LPBot
// 4.e
val LP2 =
if ((BoolTrue <= b_g || BoolTrue <= b_eq) && BoolTrue <= b_canputLen)
AH.PropStore_def(h, l, s) ++ AH.PropStore_def(h, l, AbsString.alpha("length"))
else LPBot
LP1 ++ LP2
}
else LPBot
// 5. other
val lpset_normal =
if (s != AbsString.alpha("length") && BoolFalse <= Helper.IsArrayIndex(s))
AH.PropStore_def(h, l, s)
else LPBot
(_lpex._1 ++ lpset_length ++ lpset_index ++ lpset_normal, _lpex._2 ++ ex_len)
})
(res._1 ++ LP_narr ++ lpset_arr, res._2 ++ ex)
})
(lpset2, es_2)
}
}
}
val LP2 = AH.RaiseException_def(es_1)
lpset1 ++ LP2
}
case CFGFunExpr(_, _, x_1, None, fid, a_new1, a_new2, None) => {
val l_r1 = addrToLoc(a_new1, Recent)
val l_r2 = addrToLoc(a_new2, Recent)
val LP_1 = AH.Oldify_def(h,ctx,a_new1)
val LP_2 = AH.Oldify_def(h,ctx,a_new2)
val LP_3 = AH.NewFunctionObject_def.foldLeft(LPBot)((S,p) => S + ((l_r1, p)))
val LP_4 = AH.NewObject_def.foldLeft(LPBot)((S,p) => S + ((l_r2, p)))
val LP_5 = LPSet((l_r2, "constructor"))
val LP_6 = AH.VarStore_def(h,h(SinglePureLocalLoc)("@env")._2._2,x_1)
LP_1 ++ LP_2 ++ LP_3 ++ LP_4 ++ LP_5 ++ LP_6
}
case CFGFunExpr(_, _, x_1, Some(name), fid, a_new1, a_new2, Some(a_new3)) => {
val x_2 = name.getText
val l_r1 = addrToLoc(a_new1, Recent)
val l_r2 = addrToLoc(a_new2, Recent)
val l_r3 = addrToLoc(a_new3, Recent)
val LP_1 = AH.Oldify_def(h,ctx,a_new1)
val LP_2 = AH.Oldify_def(h,ctx,a_new2)
val LP_3 = AH.Oldify_def(h,ctx,a_new3)
val LP_4 = AH.NewFunctionObject_def.foldLeft(LPBot)((S,p) => S + ((l_r1, p)))
val LP_5 = AH.NewObject_def.foldLeft(LPBot)((S,p) => S + ((l_r2, p)))
val LP_6 = LPSet((l_r2, "constructor"))
val LP_7 = AH.NewDeclEnvRecord_def.foldLeft(LPBot)((S,p) => S + ((l_r3, p)))
val LP_8 = LPSet((l_r3, x_2))
val LP_9 = AH.VarStore_def(h,h(SinglePureLocalLoc)("@env")._2._2,x_1)
LP_1 ++ LP_2 ++ LP_3 ++ LP_4 ++ LP_5 ++ LP_6 ++ LP_7 ++ LP_8 ++ LP_9
}
case CFGConstruct(_, _, e_1, e_2, e_3, a_new, b_new) => {
// exception handling
val (v_1, es_1) = SE.V(e_1, h, ctx)
val cond = v_1._2.exists((l) => BoolFalse <= Helper.HasConstruct(h,l))
val es_2 =
if (cond) {
Set(TypeError)
} else {
ExceptionBot
}
val es_3 =
if (v_1._1 </ PValueBot) {
Set(TypeError)
} else {
ExceptionBot
}
val es = es_1 ++ es_2 ++ es_3
val v_arg = SE.V(e_3, h, ctx)._1
val LP_1 = AH.Oldify_def(h,ctx,a_new)
val LP_2 = v_arg._2.foldLeft(LPBot)((S,l) => S + ((l, "callee")))
val LP_3 = AH.RaiseException_def(es)
LP_1 ++ LP_2 ++ LP_3
}
case CFGCall(_, _, e_1, e_2, e_3, a_new, b_new) => {
// exception handling
val (v_1, es_1) = SE.V(e_1, h, ctx)
val cond = v_1._2.exists((l) => BoolFalse <= Helper.IsCallable(h,l))
val es_2 =
if (cond) {
Set(TypeError)
} else {
ExceptionBot
}
val es_3 =
if (v_1._1 </ PValueBot) {
Set(TypeError)
} else {
ExceptionBot
}
val es = es_1 ++ es_2 ++ es_3
val v_arg = SE.V(e_3, h, ctx)._1
val LP_1 = AH.Oldify_def(h,ctx,a_new)
val LP_2 = v_arg._2.foldLeft(LPBot)((S,l) => S + ((l, "callee")))
val LP_3 = AH.RaiseException_def(es)
LP_1 ++ LP_2 ++ LP_3
}
case CFGAssert(_, info, expr, _) => {
V_use(expr, h, ctx)
}
case CFGCatch(_, _, name) => {
val LP_1 = AH.CreateMutableBinding_def(h,h(SinglePureLocalLoc)("@env")._2._2, name)
val LP_2 = LPSet((SinglePureLocalLoc, "@exception"))
LP_1 ++ LP_2
}
case CFGReturn(_, _, expr) => {
val LP_1 = expr match {
case None => LPBot
case Some(e) => {
val (v,es) = SE.V(e, h, ctx)
AH.RaiseException_def(es)
}
}
val LP_2 = LPSet((SinglePureLocalLoc, "@return"))
LP_1 ++ LP_2
}
case CFGThrow(_, _, expr) => {
val (v,es) = SE.V(expr, h, ctx)
val LP_1 = LPSet(Set((SinglePureLocalLoc, "@exception"),(SinglePureLocalLoc, "@exception_all")))
val LP_2 = AH.RaiseException_def(es)
LP_1 ++ LP_2
}
case CFGInternalCall(_, _, lhs, fun, arguments, loc) => {
(fun.toString, arguments, loc) match {
case ("<>Global<>toObject", List(expr), Some(a_new)) => {
val (v,es) = SE.V(expr, h, ctx)
val LP_1 = AH.VarStore_def(h,h(SinglePureLocalLoc)("@env")._2._2,lhs)
val LP_2 = AH.toObject_def(h,ctx,v,a_new)
val LP_3 = AH.RaiseException_def(es)
LP_1 ++ LP_2 ++ LP_3
}
case ("<>Global<>isObject", List(expr), None) => {
val (v,es) = SE.V(expr, h, ctx)
val LP_1 = AH.VarStore_def(h,h(SinglePureLocalLoc)("@env")._2._2,lhs)
val LP_2 = AH.RaiseException_def(es)
LP_1 ++ LP_2
}
case ("<>Global<>toNumber", List(expr), None) => {
val (v,es) = SE.V(expr, h, ctx)
val LP_1 = AH.VarStore_def(h,h(SinglePureLocalLoc)("@env")._2._2,lhs)
val LP_2 = AH.RaiseException_def(es)
LP_1 ++ LP_2
}
case ("<>Global<>getBase", List(expr_2), None) => {
AH.VarStore_def(h,h(SinglePureLocalLoc)("@env")._2._2,lhs)
}
case ("<>Global<>iteratorInit", List(expr), Some(a_new)) => {
if (Config.defaultForinUnrollingCount == 0) {
LPBot
} else {
val l_new = addrToLoc(a_new, Recent)
val LP_1 = LPSet(Set((l_new, "index"), (l_new, "length")))
val LP_2 = AH.absPair(h, l_new, AbsString.NumTop)
val LP_3 = AH.VarStore_def(h, h(SinglePureLocalLoc)("@env")._2._2, lhs)
LP_1 ++ LP_2 ++ LP_3
}
}
case ("<>Global<>iteratorHasNext", List(expr_2, expr_3), None) => {
if (Config.defaultForinUnrollingCount == 0) {
AH.VarStore_def(h,h(SinglePureLocalLoc)("@env")._2._2,lhs)
} else {
AH.VarStore_def(h,h(SinglePureLocalLoc)("@env")._2._2,lhs)
}
}
case ("<>Global<>iteratorNext", List(expr_2, expr_3), None) => {
if (Config.defaultForinUnrollingCount == 0) {
AH.VarStore_def(h,h(SinglePureLocalLoc)("@env")._2._2,lhs)
} else {
val (v_iter, _) = SE.V(expr_3, h, ctx)
val lset = v_iter._2
val LP_1 = lset.foldLeft(LPBot)((lp, l) => lp + ((l, "index")))
val LP_2 = AH.VarStore_def(h, h(SinglePureLocalLoc)("@env")._2._2, lhs)
LP_1 ++ LP_2
}
}
case _ => {
if (!Config.quietMode)
System.out.println(fun.toString)
throw new NotYetImplemented()
}
}
}
case CFGAPICall(_, model, fun, args) => {
val def_map = ModelManager.getModel(model).getDefMap()
def_map.get(fun) match {
case Some(f) =>
f(h, ctx, cfg, fun, args, cfg.findEnclosingNode(i)._1)
case None =>
if (!Config.quietMode)
System.err.println("* Warning: def. info. of the API function '"+fun+"' are not defined.")
LPBot
}
}
case CFGAsyncCall(_, _, model, call_type, addr1, addr2, addr3) => {
ModelManager.getModel(model).asyncDef(h, ctx, cfg, call_type, List(addr1, addr2, addr3))
}
case _ => LPBot
}
}
/**
 * Computes the use-set of instruction `i`: the set of (location, property)
 * pairs of the abstract heap `h` that the transfer function for `i` may read
 * in context `ctx`. Each case mirrors the corresponding `*_def` computation
 * (see the sibling definition above) but collects reads via the `*_use`
 * helpers of `AH` and via `V_use` for sub-expressions.
 *
 * NOTE(review): nearly every case additionally reads the "@env" slot of the
 * single pure-local object, since variable lookup/store goes through the
 * current lexical environment.
 */
def I_use(cfg:CFG, i: CFGInst, h: Heap, ctx: Context): LPSet = {
  i match {
    // Object allocation: reads the optional prototype expression, the
    // address being oldified, the store target and exception slots.
    case CFGAlloc(_, _, x, e, a_new) => {
      val l_r = addrToLoc(a_new, Recent)  // NOTE(review): l_r is computed but unused here
      val es = e match {
        case None => ExceptionBot
        case Some(proto) => {
          val (_,es_) = SE.V(proto, h, ctx)
          es_
        }
      }
      val LP_1 = AH.Oldify_use(h,ctx,a_new)
      val LP_2 =
        e match {
          case None => LPBot
          case Some(e) => V_use(e, h, ctx)
        }
      val LP_3 = AH.VarStore_use(h,h(SinglePureLocalLoc)("@env")._2._2,x)
      val LP_4 = AH.RaiseException_use(es)
      LP_1 ++ LP_2 ++ LP_3 ++ LP_4 + ((SinglePureLocalLoc, "@env"))
    }
    // Array-literal allocation: reads the oldified address and store target.
    case CFGAllocArray(_, _, x, n, a_new) => {
      val LP_1 = AH.Oldify_use(h,ctx,a_new)
      val LP_2 = AH.VarStore_use(h,h(SinglePureLocalLoc)("@env")._2._2,x)
      LP_1 ++ LP_2 + ((SinglePureLocalLoc, "@env"))
    }
    // Arguments-object allocation: identical read pattern to CFGAllocArray.
    case CFGAllocArg(_, _, x, n, a_new) => {
      val LP_1 = AH.Oldify_use(h,ctx,a_new)
      val LP_2 = AH.VarStore_use(h,h(SinglePureLocalLoc)("@env")._2._2,x)
      LP_1 ++ LP_2 + ((SinglePureLocalLoc, "@env"))
    }
    // Expression statement `x = e`: reads everything `e` reads, plus the
    // store target and the exception slots for exceptions raised by `e`.
    case CFGExprStmt(_, _, x, e) => {
      val (v,es) = SE.V(e, h, ctx)
      val LP_1 = AH.VarStore_use(h,h(SinglePureLocalLoc)("@env")._2._2,x)
      val LP_2 = V_use(e,h,ctx)
      val LP_3 = AH.RaiseException_use(es)
      LP_1 ++ LP_2 ++ LP_3 + ((SinglePureLocalLoc, "@env"))
    }
    // `delete` statement: a variable reference deletes a binding from its
    // base objects; any other operand only evaluates the expression.
    case CFGDelete(_, _, x_1, expr) => {
      expr match {
        case CFGVarRef(_, x_2) => {
          val lset_base = Helper.LookupBase(h, x_2)
          val ax_2 = AbsString.alpha(x_2)
          val LP_1 = AH.VarStore_use(h,h(SinglePureLocalLoc)("@env")._2._2,x_1)
          val LP_2 = AH.LookupBase_use(h,h(SinglePureLocalLoc)("@env")._2._2,x_2)
          val LP_3 = lset_base.foldLeft(LPBot)((S,l_base) =>
            S ++ AH.Delete_use(h,l_base, ax_2))
          LP_1 ++ LP_2 ++ LP_3 + ((SinglePureLocalLoc, "@env"))
        }
        case _ => {
          val (v, es) = SE.V(expr, h, ctx)
          val LP_1 = AH.VarStore_use(h,h(SinglePureLocalLoc)("@env")._2._2,x_1)
          val LP_2 = V_use(expr,h,ctx)
          val LP_3 = AH.RaiseException_use(es)
          LP_1 ++ LP_2 ++ LP_3 + ((SinglePureLocalLoc, "@env"))
        }
      }
    }
    // `delete obj[prop]`: reads both sub-expressions, the deletion targets
    // over every (location, string) combination, and primitive conversion.
    case CFGDeleteProp(_, _, x, e_1, e_2) => {
      // lset must not be empty because obj is coming through <>toObject.
      val lset = SE.V(e_1, h, ctx)._1._2
      val (v, es) = SE.V(e_2, h, ctx)
      val sset = Helper.toStringSet(Helper.toPrimitive_better(h, v))
      val LP_1 = AH.VarStore_use(h,h(SinglePureLocalLoc)("@env")._2._2,x)
      val LP_2 = V_use(e_1, h, ctx)
      val LP_3 = V_use(e_2, h, ctx)
      val LP_4 =
        lset.foldLeft(LPBot)((S_1, l) => {
          sset.foldLeft(S_1)((S_2, s) => {
            S_2 ++ AH.Delete_use(h,l,s)
          })
        })
      val LP_5 = AH.RaiseException_use(es)
      val LP_6 = AH.toPrimitive_use(h, v)
      LP_1 ++ LP_2 ++ LP_3 ++ LP_4 ++ LP_5 ++ LP_6 + ((SinglePureLocalLoc, "@env"))
    }
    // Property store `e_1[e_2] = e_3`: the most involved case. Separately
    // accounts for writable targets, array vs non-array receivers, the
    // special "length" handling of arrays, and a possible RangeError.
    case CFGStore(_, _, e_1, e_2, e_3) => {
      // TODO: toStringSet should be used in more optimized way
      val (v_rhs, es_rhs) = SE.V(e_3, h, ctx)
      val lset = SE.V(e_1, h, ctx)._1._2
      val (v_index, es_index) = SE.V(e_2, h, ctx)
      val sset = Helper.toStringSet(Helper.toPrimitive_better(h, v_index))
      // receivers on which at least one index string is writable
      val tset =
        lset.filter((l) => {
          sset.exists((s) => {
            BoolTrue <= Helper.CanPut(h,l,s) // BoolFalse <= IsArray(h,l)
          })
        })
      val LP_1 =
        tset.foldLeft(LPBot)((S_1,l) => {
          sset.foldLeft(S_1)((S_2,s) => {
            S_2 ++ AH.PropStore_use(h,l,s)
          })
        })
      val LP_2 = V_use(e_1, h, ctx)
      val LP_3 = V_use(e_2, h, ctx)
      val LP_4 = V_use(e_3, h, ctx)
      // reads performed by the CanPut / IsArray tests themselves
      val LP_5 =
        lset.foldLeft(LPBot)((S_1,l) => {
          sset.foldLeft(S_1)((S_2,s) => {
            S_2 ++ AH.CanPut_use(h, l, s)
          }) ++ AH.IsArray_use(h,l)
        })
      // non-array objects
      val lset_narr =
        lset.filter(l => {
          (BoolFalse <= Helper.IsArray(h, l)) &&
          sset.exists(s => BoolTrue <= Helper.CanPut(h, l, s))
        })
      val LP_narr =
        lset_narr.foldLeft(LPBot)((S_1,l) => {
          sset.foldLeft(S_1)((S_2,s) => {
            S_2 ++ AH.PropStore_use(h,l,s)
          })
        })
      // array objects
      val lset_arr =
        lset.filter(l => {
          (BoolTrue <= Helper.IsArray(h, l)) &&
          sset.exists(s => BoolTrue <= Helper.CanPut(h, l, s))
        })
      // arrays additionally read/adjust their "length" property and may
      // delete indexed elements (15.4.5.1 [[DefineOwnProperty]])
      val LP_arr =
        lset_arr.foldLeft(LPBot)((S_1,l) => {
          sset.foldLeft(S_1)((S_2,s) => {
            S_2 ++ AH.PropStore_use(h, l, s)
          }) ++
          AH.PropStore_use(h, l, AbsString.alpha("length")) ++
          AH.CanPut_use(h, l, AbsString.alpha("length")) ++
          LPSet((l, "length")) ++
          AH.Delete_use(h, l, NumStr)
        })
      // RangeError
      val n_value = Helper.toNumber(v_rhs._1) + Helper.toNumber(Helper.objToPrimitive(v_rhs._2, "Number"))
      val v_newLen = Value(Operator.ToUInt32(v_rhs))
      val es_len =
        if (BoolFalse <= (n_value === v_newLen._1._4)) Set[Exception](RangeError)
        else Set[Exception]()
      val LP_6 = AH.RaiseException_use(es_index ++ es_rhs ++ es_len)
      val LP_7 = AH.toPrimitive_use(h, v_index)
      LP_1 ++ LP_2 ++ LP_3 ++ LP_4 ++ LP_5 ++ LP_6 ++ LP_7 ++ LP_narr ++ LP_arr ++
      LPSet(Set((SinglePureLocalLoc, "@env"), (SinglePureLocalLoc, "@this")))
    }
    // Anonymous function expression: reads oldified addresses and the target.
    case CFGFunExpr(_, _, x_1, None, fid, a_new1, a_new2, None) => {
      val LP_1 = AH.Oldify_use(h,ctx,a_new1)
      val LP_2 = AH.Oldify_use(h,ctx,a_new2)
      val LP_3 = AH.VarStore_use(h, h(SinglePureLocalLoc)("@env")._2._2, x_1)
      LP_1 ++ LP_2 ++ LP_3 + ((SinglePureLocalLoc, "@env"))
    }
    // Named function expression: as above, with a third address for the
    // declarative environment record that binds the function's own name.
    case CFGFunExpr(_, _, x_1, Some(name), fid, a_new1, a_new2, Some(a_new3)) => {
      val LP_1 = AH.Oldify_use(h,ctx,a_new1)
      val LP_2 = AH.Oldify_use(h,ctx,a_new2)
      val LP_3 = AH.Oldify_use(h,ctx,a_new3)
      val LP_4 = AH.VarStore_use(h, h(SinglePureLocalLoc)("@env")._2._2, x_1)
      LP_1 ++ LP_2 ++ LP_3 ++ LP_4 + ((SinglePureLocalLoc, "@env"))
    }
    // `new f(args)`: reads the callee's [[Construct]] checks, the `this`
    // resolution, the callee/argument objects, and the whole pure-local
    // object (weak-updated on call edges) plus context slots "3"/"4".
    case CFGConstruct(_, _, e_1, e_2, e_3, a_new, b_new) => {
      val (v_1, es_1) = SE.V(e_1, h, ctx)
      val v_arg = SE.V(e_3, h, ctx)._1
      val lset_f = v_1._2.filter((l) => BoolTrue <= Helper.HasConstruct(h,l))
      // exception handling
      val cond = v_1._2.exists((l) => BoolFalse <= Helper.HasConstruct(h,l))
      val es_2 =
        if (cond) {
          Set(TypeError)
        } else {
          ExceptionBot
        }
      val es_3 =
        if (v_1._1 </ PValueBot) {
          Set(TypeError)
        } else {
          ExceptionBot
        }
      val es = es_1 ++ es_2 ++ es_3
      val LP_1 = AH.Oldify_use(h, ctx, a_new)
      val LP_2 = V_use(e_1,h,ctx)
      val LP_3 = V_use(e_2,h,ctx)
      val LP_4 = V_use(e_3,h,ctx)
      val LP_5 = v_1._2.foldLeft(LPBot)((S, l) => S ++ AH.HasConstruct_use(h,l))
      val LP_6 = AH.getThis_use(h, SE.V(e_2, h, ctx)._1)
      val LP_7 = lset_f.foldLeft(LPBot)((S, l_f) => S + ((l_f, "@construct")))
      val LP_8 = v_arg._2.foldLeft(LPBot)((S, l) => S + ((l, "callee")))
      val LP_9 = AH.RaiseException_use(es)
      // because of PureLocal object is weak updated in edges, all the element are needed
      val LP_10 = h(SinglePureLocalLoc).getAllProps.foldLeft(LPBot)((S, kv) => S + ((SinglePureLocalLoc, kv)))
      val LP_11 = LPSet(Set((ContextLoc, "3"), (ContextLoc, "4")))
      LP_1 ++ LP_2 ++ LP_3 ++ LP_4 ++ LP_5 ++ LP_6 ++ LP_7 ++ LP_8 ++ LP_9 ++ LP_10 ++ LP_11
    }
    // `f(args)`: parallel to CFGConstruct, but keyed on [[Call]]
    // (IsCallable / "@function") instead of [[Construct]].
    case CFGCall(_, _, e_1, e_2, e_3, a_new, b_new) => {
      val (v_1, es_1) = SE.V(e_1, h, ctx)
      val v_arg = SE.V(e_3, h, ctx)._1
      val lset_f = v_1._2.filter((l) => BoolTrue <= Helper.IsCallable(h,l))
      // exception handling
      val cond = v_1._2.exists((l) => BoolFalse <= Helper.IsCallable(h,l))
      val es_2 =
        if (cond) {
          Set(TypeError)
        } else {
          ExceptionBot
        }
      val es_3 =
        if (v_1._1 </ PValueBot) {
          Set(TypeError)
        } else {
          ExceptionBot
        }
      val es = es_1 ++ es_2 ++ es_3
      val LP_1 = AH.Oldify_use(h, ctx, a_new)
      val LP_2 = V_use(e_1,h,ctx)
      val LP_3 = V_use(e_2,h,ctx)
      val LP_4 = V_use(e_3,h,ctx)
      val LP_5 = v_1._2.foldLeft(LPBot)((S, l) => S ++ AH.IsCallable_use(h,l))
      val LP_6 = AH.getThis_use(h, SE.V(e_2, h, ctx)._1)
      val LP_7 = lset_f.foldLeft(LPBot)((S, l_f) => S + ((l_f, "@function")))
      val LP_8 = v_arg._2.foldLeft(LPBot)((S, l) => S + ((l, "callee")))
      val LP_9 = AH.RaiseException_use(es)
      // because of PureLocal object is weak updated in edges, all the element are needed
      val LP_10 = h(SinglePureLocalLoc).getAllProps.foldLeft(LPBot)((S, kv) => S + ((SinglePureLocalLoc, kv)))
      val LP_11 = LPSet(Set((ContextLoc, "3"), (ContextLoc, "4")))
      LP_1 ++ LP_2 ++ LP_3 ++ LP_4 ++ LP_5 ++ LP_6 ++ LP_7 ++ LP_8 ++ LP_9 ++ LP_10 ++ LP_11
    }
    // Assertion: reads the condition expression plus the global "@class".
    case CFGAssert(_, info, expr, _) => {
      V_use(expr, h, ctx) + ((GlobalLoc, "@class"))
    }
    // Catch clause: binds `name` to the pending exception.
    case CFGCatch(_, _, name) => {
      val LP_1 = AH.CreateMutableBinding_use(h,h(SinglePureLocalLoc)("@env")._2._2, name)
      val LP_2 = LPSet(Set((SinglePureLocalLoc, "@exception_all"), (SinglePureLocalLoc, "@exception")))
      LP_1 ++ LP_2 + ((SinglePureLocalLoc, "@env"))
    }
    // Return: reads the optional return expression and "@return".
    case CFGReturn(_, _, expr) => {
      val LP = expr match {
        case None => LPBot
        case Some(e) => {
          val (v,es) = SE.V(e, h, ctx)
          val LP_1 = V_use(e, h, ctx)
          val LP_2 = AH.RaiseException_use(es)
          LP_1 ++ LP_2
        }
      }
      LP + ((SinglePureLocalLoc, "@return"))
    }
    // Throw: reads the thrown expression and "@exception_all".
    case CFGThrow(_, _, e) => {
      val (v,es) = SE.V(e, h, ctx)
      val LP_1 = V_use(e, h, ctx)
      val LP_2 = AH.RaiseException_use(es)
      LP_1 ++ LP_2 + ((SinglePureLocalLoc, "@exception_all"))
    }
    // Compiler-internal calls; dispatch on the function name string.
    case CFGInternalCall(_, _, lhs, fun, arguments, loc) => {
      (fun.toString, arguments, loc) match {
        case ("<>Global<>toObject", List(expr), Some(a_new)) => {
          val (v,es) = SE.V(expr, h, ctx)
          val LP_1 = V_use(expr, h, ctx)
          val LP_2 = AH.VarStore_use(h, h(SinglePureLocalLoc)("@env")._2._2, lhs)
          val LP_3 = AH.toObject_use(h, ctx, v, a_new)
          val LP_4 = AH.RaiseException_use(es)
          LP_1 ++ LP_2 ++ LP_3 ++ LP_4 + ((SinglePureLocalLoc, "@env"))
        }
        case ("<>Global<>isObject", List(expr), None) => {
          val (v,es) = SE.V(expr, h, ctx)
          val LP_1 = V_use(expr, h, ctx)
          val LP_2 = AH.VarStore_use(h, h(SinglePureLocalLoc)("@env")._2._2, lhs)
          val LP_3 = AH.RaiseException_use(es)
          LP_1 ++ LP_2 ++ LP_3 + ((SinglePureLocalLoc, "@env"))
        }
        case ("<>Global<>toNumber", List(expr), None) => {
          val (v,es) = SE.V(expr, h, ctx)
          val LP_1 = V_use(expr, h, ctx)
          val LP_2 = AH.VarStore_use(h, h(SinglePureLocalLoc)("@env")._2._2, lhs)
          val LP_3 = AH.RaiseException_use(es)
          val LP_4 = AH.toPrimitive_use(h, v)
          LP_1 ++ LP_2 ++ LP_3 ++ LP_4 + ((SinglePureLocalLoc, "@env"))
        }
        case ("<>Global<>getBase", List(expr_2), None) => {
          // NOTE(review): unchecked downcast — getBase is presumably only
          // ever emitted with a variable-reference argument.
          val x_2 = expr_2.asInstanceOf[CFGVarRef].id
          val LP_1 = AH.VarStore_use(h, h(SinglePureLocalLoc)("@env")._2._2, lhs)
          val LP_2 = AH.LookupBase_use(h, h(SinglePureLocalLoc)("@env")._2._2, x_2)
          LP_1 ++ LP_2 + ((SinglePureLocalLoc, "@env"))
        }
        // for-in iterator intrinsics: only read when unrolling is enabled
        case ("<>Global<>iteratorInit", List(expr), Some(a_new)) => {
          if (Config.defaultForinUnrollingCount == 0) {
            LPBot
          } else {
            val (v,_) = SE.V(expr, h, ctx)
            // drop undef/null components before the toObject conversion
            val v_obj = Value(PValue(UndefBot, NullBot, v._1._3, v._1._4, v._1._5), v._2)
            val (v_1, h_1, _, _) = Helper.toObject(h, ctx, v_obj, a_new)
            val lset = v_1._2
            val LP_1 = V_use(expr, h, ctx)
            val LP_2 = AH.toObject_use(h, ctx, v_obj, a_new)
            val LP_3 = AH.CollectProps_use(h_1, lset)
            val LP_4 = AH.VarStore_use(h, h(SinglePureLocalLoc)("@env")._2._2, lhs)
            LP_1 ++ LP_2 ++ LP_3 ++ LP_4
          }
        }
        case ("<>Global<>iteratorHasNext", List(expr_2, expr_3), None) => {
          if (Config.defaultForinUnrollingCount == 0) {
            AH.VarStore_use(h, h(SinglePureLocalLoc)("@env")._2._2, lhs) + ((SinglePureLocalLoc, "@env"))
          } else {
            val (v,_) = SE.V(expr_3, h, ctx)
            val lset = v._2
            val LP_1 = V_use(expr_3, h, ctx)
            val LP_2 = lset.foldLeft(LPBot)((lp, l) => lp ++ AH.absPair(h, l, AbsString.NumTop) + ((l, "index")))
            val LP_3 = AH.VarStore_use(h, h(SinglePureLocalLoc)("@env")._2._2, lhs)
            LP_1 ++ LP_2 ++ LP_3
          }
        }
        case ("<>Global<>iteratorNext", List(expr_2, expr_3), None) => {
          if (Config.defaultForinUnrollingCount == 0) {
            AH.VarStore_use(h, h(SinglePureLocalLoc)("@env")._2._2, lhs) + ((SinglePureLocalLoc, "@env"))
          } else {
            val (v,_) = SE.V(expr_3, h, ctx)
            val lset = v._2
            val LP_1 = V_use(expr_3, h, ctx)
            val LP_2 = lset.foldLeft(LPBot)((lp, l) => lp ++ AH.absPair(h, l, AbsString.NumTop) + ((l, "index")))
            val LP_3 = AH.VarStore_use(h, h(SinglePureLocalLoc)("@env")._2._2, lhs)
            LP_1 ++ LP_2 ++ LP_3
          }
        }
        // Unknown internal call: fail loudly so missing cases are noticed.
        case _ => {
          if (!Config.quietMode)
            System.out.println(fun.toString)
          throw new NotYetImplemented()
        }
      }
    }
    // Modeled API call: delegate to the model's registered use function;
    // warn (but do not fail) when no use information is registered.
    case CFGAPICall(_, model, fun, args) => {
      val use_map = ModelManager.getModel(model).getUseMap()
      use_map.get(fun) match {
        case Some(f) =>
          f(h, ctx, cfg, fun, args, cfg.findEnclosingNode(i)._1)
        case None =>
          if (!Config.quietMode)
            System.err.println("* Warning: use. info. of the API function '"+fun+"' are not defined.")
          LPBot
      }
    }
    // Modeled asynchronous call: delegate entirely to the model.
    case CFGAsyncCall(_, _, model, call_type, addr1, addr2, addr3) => {
      ModelManager.getModel(model).asyncUse(h, ctx, cfg, call_type, List(addr1, addr2, addr3))
    }
    case _ => LPBot
  }
}
/**
 * Computes the use-set of expression `e`: the (location, property) pairs of
 * heap `h` that evaluating `e` may read in context `ctx`. Recurses over
 * sub-expressions; only `instanceof`, `in`, `typeof` and property loads
 * contribute reads beyond their operands.
 */
def V_use(e: CFGExpr, h: Heap, ctx: Context): LPSet = {
  e match {
    // Variable reference: reads "@env" and the binding's lookup chain.
    case CFGVarRef(info, id) => {
      LPSet((SinglePureLocalLoc, "@env")) ++ AH.Lookup_use(h,h(SinglePureLocalLoc)("@env")._2._2,id)
    }
    case CFGBin(info, e_1, op, e_2) => {
      op.getText match {
        // `e_1 instanceof e_2`: reads [[Construct]] slots, the callee's
        // "prototype" and the inheritance chains between both operands.
        case "instanceof" =>
          val (v_1,es_1) = SE.V(e_1,h,ctx)
          val (v_2,es_2) = SE.V(e_2,h,ctx)
          val lset_1 = v_1._2
          val lset_2 = v_2._2
          val lset_3 = lset_2.filter((l) => BoolTrue <= Helper.HasConstruct(h, l))
          val aproto = AbsString.alpha("prototype")
          val v_proto = lset_3.foldLeft(ValueBot)((v, l) => v + Helper.Proto(h,l,aproto))
          val lset_4 = v_proto._2
          val LP_1 = lset_2.foldLeft(LPBot)((S, l) => S + ((l, "@construct")))
          val LP_2 = lset_3.foldLeft(LPBot)((S, l) => S ++ AH.Proto_use(h,l,aproto))
          val LP_3 = lset_1.foldLeft(LPBot)((S1, l_1) =>
            S1 ++ lset_4.foldLeft(LPBot)((S2, l_2) => S2 ++ AH.inherit_use(h,l_1,l_2)))
          val LP_4 = V_use(e_1, h, ctx)
          val LP_5 = V_use(e_2, h, ctx)
          LP_1 ++ LP_2 ++ LP_3 ++ LP_4 ++ LP_5
        // `e_1 in e_2`: reads the HasProperty chain of every candidate
        // object for the converted key string.
        case "in" => {
          val (v_1,es_1) = SE.V(e_1,h,ctx)
          val (v_2,es_2) = SE.V(e_2,h,ctx)
          val s = Helper.toString(Helper.toPrimitive_better(h, v_1))
          val LP_1 = V_use(e_1, h, ctx)
          val LP_2 = V_use(e_2, h, ctx)
          val LP_3 = v_2._2.foldLeft(LPBot)((S, l) => S ++ AH.HasProperty_use(h,l,s))
          val LP_4 = AH.toPrimitive_use(h, v_1)
          LP_1 ++ LP_2 ++ LP_3 ++ LP_4
        }
        case _ => V_use(e_1, h, ctx) ++ V_use(e_2, h, ctx)
      }
    }
    // Unary operators: `typeof` additionally reads the type-tag sources.
    // NOTE(review): the inner `e` shadows the method parameter.
    case CFGUn(info, op, e) => {
      op.getText match {
        case "typeof" =>
          val (v,es) = SE.V(e,h,ctx)
          val LP = AH.TypeTag_use(h,v)
          V_use(e, h, ctx) ++ LP
        case _ => V_use(e, h, ctx)
      }
    }
    // Property load `e_1[e_2]`: reads the prototype chain of every
    // (location, string) combination plus primitive conversion of the key.
    case CFGLoad(info, e_1, e_2) => {
      val lset = SE.V(e_1,h,ctx)._1._2
      val (v, es) = SE.V(e_2,h,ctx)
      val sset = Helper.toStringSet(Helper.toPrimitive_better(h, v))
      val LP_1 =
        lset.foldLeft(LPBot)((S_1, l) => {
          sset.foldLeft(S_1)((S_2, s) => {
            S_2 ++ AH.Proto_use(h,l,s)
          })
        })
      val LP_2 = AH.toPrimitive_use(h, v)
      LP_1 ++ LP_2 ++ V_use(e_1, h, ctx) ++ V_use(e_2, h, ctx)
    }
    // `this`: reads only the pure-local "@this" slot.
    case CFGThis(_) => LPSet((SinglePureLocalLoc, "@this"))
    case _ => LPBot
  }
}
/**
 * Collects every (location, property) pair on which the two heaps differ.
 * A location present in only one heap contributes all of that object's
 * properties; a location present in both is compared property-wise.
 */
def heap_diff(h1: Heap, h2: Heap): LPSet = {
  val locs = h1.map.keySet ++ h2.map.keySet
  locs.foldLeft(LPBot)((acc, loc) => {
    val inFirst = h1.map.contains(loc)
    val inSecond = h2.map.contains(loc)
    val delta =
      if (inFirst && inSecond) acc ++ obj_diff(loc, h1(loc), h2(loc))
      else if (inFirst) acc ++ h1(loc).getAllProps.foldLeft(LPBot)((s, prop) => s + ((loc, prop)))
      else if (inSecond) acc ++ h2(loc).getAllProps.foldLeft(LPBot)((s, prop) => s + ((loc, prop)))
      else LPBot
    acc ++ delta
  })
}
/**
 * Collects the properties of location `l` on which objects `o1` and `o2`
 * disagree, either in the property value or in the absent-tag, in either
 * lattice direction.
 */
def obj_diff(l: Loc, o1: Obj, o2: Obj): LPSet = {
  val props = o1.getAllProps ++ o2.getAllProps
  props.foldLeft(LPBot)((acc, prop) => {
    val pv1 = o1(prop)
    val pv2 = o2(prop)
    val abs1 = o1.domIn(prop)
    val abs2 = o2.domIn(prop)
    val valueDiffers = (pv1 </ pv2) || (pv2 </ pv1)
    val absentDiffers = (abs1 </ abs2) || (abs2 </ abs1)
    if (valueDiffers || absentDiffers) acc + ((l, prop)) else acc
  })
}
/** Checks whether the two heaps agree once both are restricted to `defset`. */
def heap_check(h_org: Heap, h_res: Heap, defset: LocSet): Boolean = {
  val restrictedOrg = h_org.restrict(defset)
  val restrictedRes = h_res.restrict(defset)
  restrictedOrg == restrictedRes
}
/** Overload of heap_check that restricts by a (location, property) set. */
def heap_check(h_org: Heap, h_res: Heap, defset: LPSet): Boolean = {
  val restrictedOrg = h_org.restrict(defset)
  val restrictedRes = h_res.restrict(defset)
  restrictedOrg == restrictedRes
}
}
| darkrsw/safe | src/main/scala/kr/ac/kaist/jsaf/analysis/typing/Access.scala | Scala | bsd-3-clause | 41,104 |
/**
* Copyright (C) 2010-2011 LShift Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.lshift.diffa.participants
import net.lshift.diffa.kernel.participants.{DownstreamMemoryParticipant}
import org.joda.time.DateTime
import org.apache.commons.codec.digest.DigestUtils
import net.lshift.diffa.participant.changes.ChangeEvent
import scala.collection.JavaConversions._
/**
* An implementation of the DownstreamParticipant using the MemoryParticipant base, whereby the body is the version
* of an entity.
*/
class DownstreamWebParticipant(val epName:String, val agentRoot:String, val domain:String)
    extends DownstreamMemoryParticipant(DigestUtils.md5Hex, DigestUtils.md5Hex)
    with WebParticipant {

  /** Stores the entity locally and then publishes a change event carrying its version. */
  override def addEntity(id: String, someDate:DateTime, someString:String, lastUpdated: DateTime, body: String) = {
    super.addEntity(id, someDate, someString, lastUpdated, body)
    val attributes = Map("someDate" -> someDate.toString(), "someString" -> someString)
    val event = ChangeEvent.forChange(id, dvsnGen(body), lastUpdated, attributes)
    changesClient.onChangeEvent(event)
  }

  /** Removes the entity locally and then publishes a change event with a null version. */
  override def removeEntity(id: String) = {
    super.removeEntity(id)
    val event = ChangeEvent.forChange(id, null, new DateTime)
    changesClient.onChangeEvent(event)
  }
}
package monocle.function
import monocle._
import monocle.generic.GenericInstances
import scala.annotation.nowarn
@nowarn
class ReverseExample extends MonocleSuite with GenericInstances {

  test("reverse creates an Iso from a List to its reversed version") {
    assertEquals((List(1, 2, 3) applyIso reverse).get, List(3, 2, 1))
  }

  test("reverse creates an Iso from a tuple to its reversed version") {
    assertEquals(((1, 'b') applyIso reverse).get, (('b', 1)))
    assertEquals(((1, 'b', true) applyIso reverse).get, ((true, 'b', 1)))
    assertEquals(((1, 'b', true, 5.4, "plop", 7L) applyIso reverse).get, ((7L, "plop", 5.4, true, 'b', 1)))
    // tuples with more than six elements rely on the shapeless-backed instances
    assertEquals(
      ((1, 'b', true, 5.4, "plop", 7L, false) applyIso reverse).get,
      ((false, 7L, "plop", 5.4, true, 'b', 1))
    )
  }

  test("reverse creates an Iso from a String to its reversed version") {
    assertEquals(("Hello" applyIso reverse).get, "olleH")
  }

  test("reverse creates an Iso from a Vector to its reversed version") {
    assertEquals((Vector(1, 2, 3) applyIso reverse).get, Vector(3, 2, 1))
  }
}
| julien-truffaut/Monocle | example/src/test/scala/monocle/function/ReverseExample.scala | Scala | mit | 1,144 |
package com.catinthedark.models
import com.catinthedark.lib.network.Message
// Base class for game protocol messages.
// NOTE(review): the concrete messages below extend Message directly, not
// GameMessage — confirm whether they were meant to extend GameMessage.
sealed class GameMessage extends Message

// Carries the id of the client that disconnected.
case class EnemyDisconnectedMessage(clientId: String) extends Message
// Carries the id of the client for which the game started.
case class GameStartedMessage(clientId: String) extends Message
// Carries the final game state when a round ends.
case class RoundEndsMessage(gameStateModel: GameStateModel) extends Message
// Client handshake carrying the player's name.
case class HelloMessage(name: String) extends Message
// Server handshake reply carrying the assigned client id.
case class ServerHelloMessage(clientId: String) extends Message
// Movement update: velocity components, facing angle and a state name string.
case class MoveMessage(speedX: Float, speedY: Float, angle: Float, stateName: String) extends Message
// Periodic snapshot of the full game state.
case class GameStateMessage(gameStateModel: GameStateModel) extends Message
// Requests playback of the named sound.
case class SoundMessage(soundName: String) extends Message
// Brick throw event: origin coordinates, throw force and angle.
case class ThrowBrickMessage(x: Float, y: Float, force: Float, angle: Float) extends Message
| cat-in-the-dark/old48_36_game | core/src/main/scala/com/catinthedark/models/Messages.scala | Scala | mit | 777 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import org.apache.spark.sql.execution._
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types.StructType
class ExplainSuite extends QueryTest with SharedSparkSession {
import testImplicits._
var originalValue: String = _
protected override def beforeAll(): Unit = {
  super.beforeAll()
  // Remember the current adaptive-execution setting so afterAll can restore
  // it, then turn adaptive execution off for the duration of this suite.
  originalValue = spark.conf.get(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key)
  spark.conf.set(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key, "false")
}
protected override def afterAll(): Unit = {
  // Restore the adaptive-execution setting captured in beforeAll.
  spark.conf.set(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key, originalValue)
  super.afterAll()
}
/**
 * Captures the text that `df.explain` prints for the given mode and masks
 * the numeric expression-id suffixes (e.g. "#123" -> "#x") so the result is
 * stable across runs.
 */
private def getNormalizedExplain(df: DataFrame, mode: ExplainMode): String = {
  val captured = new java.io.ByteArrayOutputStream()
  Console.withOut(captured) {
    df.explain(mode.name)
  }
  captured.toString.replaceAll("#\\\\d+", "#x")
}
/**
 * Get the explain from a DataFrame and run the specified action on it.
 */
private def withNormalizedExplain(df: DataFrame, mode: ExplainMode)(f: String => Unit) = {
  val normalized = getNormalizedExplain(df, mode)
  f(normalized)
}
/**
 * Get the explain by running the sql. The explain mode should be part of the
 * sql text itself.
 */
private def withNormalizedExplain(queryText: String)(f: String => Unit) = {
  // Capture what `show(false)` prints, normalize expression ids, then
  // hand the text to the caller's assertion block.
  val captured = new java.io.ByteArrayOutputStream()
  Console.withOut(captured) {
    sql(queryText).show(false)
  }
  val normalized = captured.toString.replaceAll("#\\\\d+", "#x")
  f(normalized)
}
/**
 * Runs the plan and makes sure the plans contains all of the keywords.
 */
private def checkKeywordsExistsInExplain(
    df: DataFrame, mode: ExplainMode, keywords: String*): Unit = {
  withNormalizedExplain(df, mode) { normalizedOutput =>
    keywords.foreach { keyword =>
      assert(normalizedOutput.contains(keyword))
    }
  }
}
// Convenience overload that checks the keywords against the extended-mode
// explain output.
private def checkKeywordsExistsInExplain(df: DataFrame, keywords: String*): Unit = {
  checkKeywordsExistsInExplain(df, ExtendedMode, keywords: _*)
}
// The RDD name set via setName should surface in the scan node of the plan.
test("SPARK-23034 show rdd names in RDD scan nodes (Dataset)") {
  val rddWithName = spark.sparkContext.parallelize(Row(1, "abc") :: Nil).setName("testRdd")
  val df = spark.createDataFrame(rddWithName, StructType.fromDDL("c0 int, c1 string"))
  checkKeywordsExistsInExplain(df, keywords = "Scan ExistingRDD testRdd")
}

// Same as above, but for a DataFrame built from a case-class RDD.
test("SPARK-23034 show rdd names in RDD scan nodes (DataFrame)") {
  val rddWithName = spark.sparkContext.parallelize(ExplainSingleData(1) :: Nil).setName("testRdd")
  val df = spark.createDataFrame(rddWithName)
  checkKeywordsExistsInExplain(df, keywords = "Scan testRdd")
}

// A cached DataFrame's explain should mention InMemoryRelation and the
// storage level, not the full cached plan.
test("SPARK-24850 InMemoryRelation string representation does not include cached plan") {
  val df = Seq(1).toDF("a").cache()
  checkKeywordsExistsInExplain(df,
    keywords = "InMemoryRelation", "StorageLevel(disk, memory, deserialized, 1 replicas)")
}
// every/some/any are rewritten to min/max during optimization; the
// optimized plan in the explain output should show the rewritten form.
test("optimized plan should show the rewritten aggregate expression") {
  withTempView("test_agg") {
    sql(
      """
        |CREATE TEMPORARY VIEW test_agg AS SELECT * FROM VALUES
        | (1, true), (1, false),
        | (2, true),
        | (3, false), (3, null),
        | (4, null), (4, null),
        | (5, null), (5, true), (5, false) AS test_agg(k, v)
      """.stripMargin)

    // simple explain of queries having every/some/any aggregates. Optimized
    // plan should show the rewritten aggregate expression.
    val df = sql("SELECT k, every(v), some(v), any(v) FROM test_agg GROUP BY k")
    checkKeywordsExistsInExplain(df,
      "Aggregate [k#x], [k#x, min(v#x) AS every(v)#x, max(v#x) AS some(v)#x, " +
      "max(v#x) AS any(v)#x]")
  }
}
// Inline VALUES tables should appear as LocalRelation children of the join.
test("explain inline tables cross-joins") {
  val df = sql(
    """
      |SELECT * FROM VALUES ('one', 1), ('three', null)
      | CROSS JOIN VALUES ('one', 1), ('three', null)
    """.stripMargin)
  checkKeywordsExistsInExplain(df,
    "Join Cross",
    ":- LocalRelation [col1#x, col2#x]",
    "+- LocalRelation [col1#x, col2#x]")
}

// range() (case-insensitive) should appear as Range operators in the plan.
test("explain table valued functions") {
  checkKeywordsExistsInExplain(sql("select * from RaNgE(2)"), "Range (0, 2, step=1, splits=None)")
  checkKeywordsExistsInExplain(sql("SELECT * FROM range(3) CROSS JOIN range(3)"),
    "Join Cross",
    ":- Range (0, 3, step=1, splits=None)",
    "+- Range (0, 3, step=1, splits=None)")
}
// Nested || (Concat) chains should be flattened into a single concat call
// by the optimizer, both with and without concatBinaryAsString.
test("explain string functions") {
  // Check if catalyst combine nested `Concat`s
  val df1 = sql(
    """
      |SELECT (col1 || col2 || col3 || col4) col
      | FROM (SELECT id col1, id col2, id col3, id col4 FROM range(10))
    """.stripMargin)
  checkKeywordsExistsInExplain(df1,
    "Project [concat(cast(id#xL as string), cast(id#xL as string), cast(id#xL as string)" +
    ", cast(id#xL as string)) AS col#x]")

  // Check if catalyst combine nested `Concat`s if concatBinaryAsString=false
  withSQLConf(SQLConf.CONCAT_BINARY_AS_STRING.key -> "false") {
    // mixed string/binary operands, fully parenthesized nesting
    val df2 = sql(
      """
        |SELECT ((col1 || col2) || (col3 || col4)) col
        |FROM (
        | SELECT
        | string(id) col1,
        | string(id + 1) col2,
        | encode(string(id + 2), 'utf-8') col3,
        | encode(string(id + 3), 'utf-8') col4
        | FROM range(10)
        |)
      """.stripMargin)
    checkKeywordsExistsInExplain(df2,
      "Project [concat(cast(id#xL as string), cast((id#xL + 1) as string), " +
      "cast(encode(cast((id#xL + 2) as string), utf-8) as string), " +
      "cast(encode(cast((id#xL + 3) as string), utf-8) as string)) AS col#x]")

    // right-nested variant
    val df3 = sql(
      """
        |SELECT (col1 || (col3 || col4)) col
        |FROM (
        | SELECT
        | string(id) col1,
        | encode(string(id + 2), 'utf-8') col3,
        | encode(string(id + 3), 'utf-8') col4
        | FROM range(10)
        |)
      """.stripMargin)
    checkKeywordsExistsInExplain(df3,
      "Project [concat(cast(id#xL as string), " +
      "cast(encode(cast((id#xL + 2) as string), utf-8) as string), " +
      "cast(encode(cast((id#xL + 3) as string), utf-8) as string)) AS col#x]")
  }
}
test("check operator precedence") {
  // We follow Oracle operator precedence in the table below that lists the levels
  // of precedence among SQL operators from high to low:
  // ---------------------------------------------------------------------------------------
  // Operator                                          Operation
  // ---------------------------------------------------------------------------------------
  // +, -                                              identity, negation
  // *, /                                              multiplication, division
  // +, -, ||                                          addition, subtraction, concatenation
  // =, !=, <, >, <=, >=, IS NULL, LIKE, BETWEEN, IN   comparison
  // NOT                                               logical negation
  // AND                                               conjunction
  // OR                                                disjunction
  // ---------------------------------------------------------------------------------------
  // Each assertion checks the constant-folded result embedded in the optimized Project,
  // which pins down how the operators were grouped by the parser.
  checkKeywordsExistsInExplain(sql("select 'a' || 1 + 2"),
    "Project [null AS (CAST(concat(a, CAST(1 AS STRING)) AS DOUBLE) + CAST(2 AS DOUBLE))#x]")
  checkKeywordsExistsInExplain(sql("select 1 - 2 || 'b'"),
    "Project [-1b AS concat(CAST((1 - 2) AS STRING), b)#x]")
  checkKeywordsExistsInExplain(sql("select 2 * 4 + 3 || 'b'"),
    "Project [11b AS concat(CAST(((2 * 4) + 3) AS STRING), b)#x]")
  checkKeywordsExistsInExplain(sql("select 3 + 1 || 'a' || 4 / 2"),
    "Project [4a2.0 AS concat(concat(CAST((3 + 1) AS STRING), a), " +
      "CAST((CAST(4 AS DOUBLE) / CAST(2 AS DOUBLE)) AS STRING))#x]")
  checkKeywordsExistsInExplain(sql("select 1 == 1 OR 'a' || 'b' == 'ab'"),
    "Project [true AS ((1 = 1) OR (concat(a, b) = ab))#x]")
  checkKeywordsExistsInExplain(sql("select 'a' || 'c' == 'ac' AND 2 == 3"),
    "Project [false AS ((concat(a, c) = ac) AND (2 = 3))#x]")
}
// ifnull/nullif/nvl/nvl2 are rewritten to coalesce or simplified away; reading from
// range() keeps the expressions alive so constant folding does not erase them.
test("explain for these functions; use range to avoid constant folding") {
  val df = sql("select ifnull(id, 'x'), nullif(id, 'x'), nvl(id, 'x'), nvl2(id, 'x', 'y') " +
    "from range(2)")
  checkKeywordsExistsInExplain(df,
    "Project [coalesce(cast(id#xL as string), x) AS ifnull(`id`, 'x')#x, " +
      "id#xL AS nullif(`id`, 'x')#xL, coalesce(cast(id#xL as string), x) AS nvl(`id`, 'x')#x, " +
      "x AS nvl2(`id`, 'x', 'y')#x]")
}
// SPARK-26659: the CTAS command node used to appear twice in the simple explain; assert
// the (normalized) output matches the Create*TableAsSelectCommand pattern exactly once.
test("SPARK-26659: explain of DataWritingCommandExec should not contain duplicate cmd.nodeName") {
  withTable("temptable") {
    val df = sql("create table temptable using parquet as select * from range(2)")
    withNormalizedExplain(df, SimpleMode) { normalizedOutput =>
      assert("Create\\\\w*?TableAsSelectCommand".r.findAllMatchIn(normalizedOutput).length == 1)
    }
  }
}
// With dynamic partition pruning enabled (and broadcast reuse disabled so the pruning
// filter materializes as a real subquery), EXPLAIN FORMATTED should show:
//  - the hosting subquery section once,
//  - the dynamicpruningexpression partition filter once,
//  - the df2 scan location twice (probe side appears in both join and subquery) and
//    the df1 scan location once.
test("explain formatted - check presence of subquery in case of DPP") {
  // Note: the original had a second, identical nested withTable("df1", "df2"); it was
  // redundant (withTable drops with IF EXISTS semantics) and has been removed.
  withTable("df1", "df2") {
    withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true",
      SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "false",
      SQLConf.EXCHANGE_REUSE_ENABLED.key -> "false") {
      // Fact-like table: 1000 rows partitioned by k.
      spark.range(1000).select(col("id"), col("id").as("k"))
        .write
        .partitionBy("k")
        .format("parquet")
        .mode("overwrite")
        .saveAsTable("df1")
      // Dimension-like table: 100 rows partitioned by k.
      spark.range(100)
        .select(col("id"), col("id").as("k"))
        .write
        .partitionBy("k")
        .format("parquet")
        .mode("overwrite")
        .saveAsTable("df2")
      val sqlText =
        """
          |EXPLAIN FORMATTED SELECT df1.id, df2.k
          |FROM df1 JOIN df2 ON df1.k = df2.k AND df2.id < 2
          |""".stripMargin
      val expected_pattern1 =
        "Subquery:1 Hosting operator id = 1 Hosting Expression = k#xL IN subquery#x"
      val expected_pattern2 =
        "PartitionFilters: \\\\[isnotnull\\\\(k#xL\\\\), dynamicpruningexpression\\\\(k#xL " +
          "IN subquery#x\\\\)\\\\]"
      val expected_pattern3 =
        "Location: InMemoryFileIndex \\\\[.*org.apache.spark.sql.ExplainSuite" +
          "/df2/.*, ... 99 entries\\\\]"
      val expected_pattern4 =
        "Location: InMemoryFileIndex \\\\[.*org.apache.spark.sql.ExplainSuite" +
          "/df1/.*, ... 999 entries\\\\]"
      withNormalizedExplain(sqlText) { normalizedOutput =>
        assert(expected_pattern1.r.findAllMatchIn(normalizedOutput).length == 1)
        assert(expected_pattern2.r.findAllMatchIn(normalizedOutput).length == 1)
        assert(expected_pattern3.r.findAllMatchIn(normalizedOutput).length == 2)
        assert(expected_pattern4.r.findAllMatchIn(normalizedOutput).length == 1)
      }
    }
  }
}
// Exercises every ExplainMode through Dataset.explain: SimpleMode shows only the physical
// plan; ExtendedMode shows all four plan sections; CostMode adds statistics; CodegenMode
// shows generated code; FormattedMode shows the numbered-operator layout.
test("Support ExplainMode in Dataset.explain") {
  val df1 = Seq((1, 2), (2, 3)).toDF("k", "v1")
  val df2 = Seq((2, 3), (1, 1)).toDF("k", "v2")
  val testDf = df1.join(df2, "k").groupBy("k").agg(count("v1"), sum("v1"), avg("v2"))
  val simpleExplainOutput = getNormalizedExplain(testDf, SimpleMode)
  assert(simpleExplainOutput.startsWith("== Physical Plan =="))
  // SimpleMode must not leak the logical-plan sections.
  Seq("== Parsed Logical Plan ==",
    "== Analyzed Logical Plan ==",
    "== Optimized Logical Plan ==").foreach { planType =>
    assert(!simpleExplainOutput.contains(planType))
  }
  checkKeywordsExistsInExplain(
    testDf,
    ExtendedMode,
    "== Parsed Logical Plan ==" ::
      "== Analyzed Logical Plan ==" ::
      "== Optimized Logical Plan ==" ::
      "== Physical Plan ==" ::
      Nil: _*)
  checkKeywordsExistsInExplain(
    testDf,
    CostMode,
    "Statistics(sizeInBytes=" ::
      Nil: _*)
  checkKeywordsExistsInExplain(
    testDf,
    CodegenMode,
    "WholeStageCodegen subtrees" ::
      "Generated code:" ::
      Nil: _*)
  checkKeywordsExistsInExplain(
    testDf,
    FormattedMode,
    "* LocalTableScan (1)" ::
      "(1) LocalTableScan [codegen id :" ::
      Nil: _*)
}
// queryExecution.explainString(mode) must agree with the Dataset-level explain for every
// mode (after normalizing expression ids), and an unknown mode name must be rejected.
test("Dataset.toExplainString has mode as string") {
  val df = spark.range(10).toDF
  def assertExplainOutput(mode: ExplainMode): Unit = {
    assert(df.queryExecution.explainString(mode).replaceAll("#\\\\d+", "#x").trim ===
      getNormalizedExplain(df, mode).trim)
  }
  assertExplainOutput(SimpleMode)
  assertExplainOutput(ExtendedMode)
  assertExplainOutput(CodegenMode)
  assertExplainOutput(CostMode)
  assertExplainOutput(FormattedMode)
  val errMsg = intercept[IllegalArgumentException] {
    ExplainMode.fromString("unknown")
  }.getMessage
  assert(errMsg.contains("Unknown explain mode: unknown"))
}
}
case class ExplainSingleData(id: Int)
| darionyaphet/spark | sql/core/src/test/scala/org/apache/spark/sql/ExplainSuite.scala | Scala | apache-2.0 | 13,952 |
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.index.utils
import org.apache.commons.codec.binary.Base64
import org.geotools.factory.Hints
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.index.conf.QueryHints.RichHints
import org.locationtech.geomesa.utils.collection.CloseableIterator
import org.locationtech.geomesa.utils.geotools.GeometryUtils
import org.locationtech.geomesa.utils.interop.SimpleFeatureTypes
import org.locationtech.geomesa.utils.io.CloseWithLogging
import org.locationtech.geomesa.utils.stats.{Stat, StatSerializer}
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
/**
 * Helpers for (de)serializing Stat results carried inside simple features, and for
 * merging per-server partial stats into a single client-side result.
 */
object KryoLazyStatsUtils {
  // Schema of the single feature returned by reduceFeatures: the encoded/JSON stat plus
  // a placeholder geometry attribute.
  val StatsSft: SimpleFeatureType = SimpleFeatureTypes.createType("stats:stats", "stats:String,geom:Geometry")
  /**
   * Encodes a stat as a base64 string.
   *
   * Note: the serializer is created once and captured by the returned closure, so the
   * returned function is cheap to invoke repeatedly for the same schema.
   *
   * @param sft simple feature type of underlying schema
   * @return function to encode a stat as a base64 string
   */
  def encodeStat(sft: SimpleFeatureType): (Stat) => String = {
    val serializer = StatSerializer(sft)
    (stat) => Base64.encodeBase64URLSafeString(serializer.serialize(stat))
  }
  /**
   * Decodes a stat string from a result simple feature.
   *
   * @param sft simple feature type of the underlying schema
   * @return function to convert an encoded string to a stat
   */
  def decodeStat(sft: SimpleFeatureType): (String) => Stat = {
    val serializer = StatSerializer(sft)
    (encoded) => serializer.deserialize(Base64.decodeBase64(encoded))
  }
  /**
   * Reduces computed simple features which contain stat information into one on the client
   *
   * The input iterator is always closed, even if decoding or summing throws.
   *
   * @param features iterator of features received per tablet server from query
   * @param hints query hints that the stats are being run against
   * @return aggregated iterator of features
   */
  def reduceFeatures(sft: SimpleFeatureType,
                     hints: Hints)
                    (features: CloseableIterator[SimpleFeature]): CloseableIterator[SimpleFeature] = {
    // stats are computed against the transformed schema, if a transform was requested
    val statSft = hints.getTransformSchema.getOrElse(sft)
    val sum = try {
      if (features.isEmpty) {
        // create empty stat based on the original input so that we always return something
        Stat(statSft, hints.getStatsQuery)
      } else {
        val decode = decodeStat(statSft)
        // fold the remaining partial stats into the first one via Stat's += operator
        val sum = decode(features.next.getAttribute(0).asInstanceOf[String])
        while (features.hasNext) {
          sum += decode(features.next.getAttribute(0).asInstanceOf[String])
        }
        sum
      }
    } finally {
      CloseWithLogging(features)
    }
    // return either the base64-encoded form or the json form, depending on the hint
    val stats = if (hints.isStatsEncode) { encodeStat(statSft)(sum) } else { sum.toJson }
    CloseableIterator(Iterator(new ScalaSimpleFeature(StatsSft, "stat", Array(stats, GeometryUtils.zeroPoint))))
  }
}
| ronq/geomesa | geomesa-index-api/src/main/scala/org/locationtech/geomesa/index/utils/KryoLazyStatsUtils.scala | Scala | apache-2.0 | 3,266 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.nn.mkldnn
import com.intel.analytics.bigdl.mkl.DataType
import com.intel.analytics.bigdl.dllib.nn.abstractnn.Activity
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.utils.T
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
/**
 * Manages memory-layout reorder operations between MKL-DNN memory formats.
 *
 * Reorders are registered per (source memory instance, target format) pair and deduplicated;
 * a reference count tracks how many registrations share the same reorder so that, at
 * inference time, the reorder is executed only on the first use per "round" and its cached
 * output is reused for the remaining uses.
 */
private[mkldnn] class ReorderManager() (implicit owner: MemoryOwner) {
  // (MemoryFormatId, TargetFormat) -> Reorder
  val reorders = mutable.HashMap[(Int, MemoryData), ReorderMemory]()
  // ReorderId -> RefCount
  val refCounts = mutable.HashMap[Int, Int]()
  // ReorderId -> number of uses in the current inference pass (reset when it reaches RefCount)
  val useCounts = mutable.HashMap[Int, Int]()
  private var runtime: MklDnnRuntime = _
  // Registers a reorder from `from` to `to` if their formats differ; reuses (and ref-counts)
  // an existing reorder for the same (source instance, target format) pair.
  def register(from: MemoryData, to: MemoryData): Unit = {
    require(runtime != null, "Please call setRuntime first")
    // identity hash is used as the key: the same format object, not an equal one
    val mId = System.identityHashCode(from)
    if (needReorder(from, to)) {
      if (reorders.contains((mId, to))) {
        refCounts(System.identityHashCode(reorders((mId, to)))) += 1
      } else {
        val reorder = ReorderMemory(to)
        reorder.setRuntime(runtime)
        reorder.initFwdPrimitives(Array(from), Phase.InferencePhase)
        reorders((mId, to)) = reorder
        val reorderId = System.identityHashCode(reorder)
        refCounts(reorderId) = 1
        useCounts(reorderId) = 0
      }
    }
  }
  // Must be called before register(); the runtime is handed to each created reorder.
  def setRuntime(runtime: MklDnnRuntime): Unit = {
    this.runtime = runtime
  }
  // Applies any registered reorders to `output`, which is a single tensor when `from` has
  // one element and a table of tensors (1-based) otherwise.
  def infer(from: Array[MemoryData], to: Array[MemoryData], output: Activity)
  : Activity = {
    if (from.length == 1) {
      require(output.isTensor, "output activity should be a tensor")
      inferTensor(from(0), to(0), output.asInstanceOf[Tensor[Float]])
    } else {
      require(output.toTable.length() == from.length,
        "output activity length doesn't match")
      val outputTable = T()
      var i = 0
      while(i < from.length) {
        outputTable(i + 1) = inferTensor(from(i), to(i), output.toTable(i + 1))
        i += 1
      }
      outputTable
    }
  }
  // Runs the reorder for (from, to) on the first use of the current pass and returns its
  // cached output on subsequent uses; passes `output` through unchanged if no reorder
  // was registered for the pair.
  private def inferTensor(from: MemoryData, to : MemoryData, output: Tensor[Float])
  : Tensor[Float] = {
    val mId = System.identityHashCode(from)
    if (reorders.contains((mId, to))) {
      val reorder = reorders((mId, to))
      val reorderId = System.identityHashCode(reorder)
      val result = if (useCounts(reorderId) == 0) {
        reorder.forward(output).asInstanceOf[Tensor[Float]]
      } else {
        reorder.output.asInstanceOf[Tensor[Float]]
      }
      useCounts(reorderId) += 1
      // once every registered consumer has read the output, reset for the next pass
      if (useCounts(reorderId) == refCounts(reorderId)) {
        useCounts(reorderId) = 0
      }
      result
    } else {
      output
    }
  }
  // Decides whether a reorder is required: heap<->native always is; heap<->heap and
  // native<->native only when layouts differ (with an S8<->U8 exception for native).
  private def needReorder(from: MemoryData, to: MemoryData): Boolean = {
    from match {
      case h: HeapData =>
        to match {
          case hh: HeapData => h.layout != hh.layout
          case nn: NativeData => true
          case _ => throw new UnsupportedOperationException("Not support such memory format")
        }
      case n: NativeData =>
        to match {
          case hh: HeapData => true
          case nn: NativeData =>
            // we will skip the S8 to U8 reorder
            val doNotReorderIt = n.layout == nn.layout && (
              n.dataType == nn.dataType || // the same data type
                (n.dataType == DataType.S8 && nn.dataType == DataType.U8) || // skip the u8 -> s8
                (n.dataType == DataType.U8 && nn.dataType == DataType.S8)) // skip the s8->u8
            !doNotReorderIt
          case _ => throw new UnsupportedOperationException("Not support such memory format")
        }
      case _ => throw new UnsupportedOperationException("Not support such memory format")
    }
  }
  // Resources are owned by the implicit MemoryOwner, so there is nothing to release here.
  def release(): Unit = { }
}
| intel-analytics/BigDL | scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/nn/mkldnn/ReorderManager.scala | Scala | apache-2.0 | 4,330 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.util.Properties
import java.util.concurrent.{TimeoutException, TimeUnit}
import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference}
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet}
import scala.concurrent.Future
import scala.concurrent.duration.{Duration, SECONDS}
import scala.language.existentials
import scala.reflect.ClassTag
import org.scalactic.TripleEquals
import org.scalatest.Assertions.AssertionsHelper
import org.scalatest.concurrent.Eventually._
import org.scalatest.time.SpanSugar._
import org.apache.spark._
import org.apache.spark.TaskState._
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.util.{CallSite, ThreadUtils, Utils}
/**
* Tests for the entire scheduler code -- DAGScheduler, TaskSchedulerImpl, TaskSets,
* TaskSetManagers.
*
* Test cases are configured by providing a set of jobs to submit, and then simulating interaction
* with spark's executors via a mocked backend (eg., task completion, task failure, executors
* disconnecting, etc.).
*/
abstract class SchedulerIntegrationSuite[T <: MockBackend: ClassTag] extends SparkFunSuite
  with LocalSparkContext {

  // Scheduler components under test; populated by setupScheduler() for each test.
  var taskScheduler: TestTaskScheduler = null
  var scheduler: DAGScheduler = null
  var backend: T = _

  override def beforeEach(): Unit = {
    if (taskScheduler != null) {
      taskScheduler.runningTaskSets.clear()
    }
    results.clear()
    failure = null
    backendException.set(null)
    super.beforeEach()
  }

  // NOTE(review): unlike beforeEach, this does not null-check taskScheduler/backend/scheduler;
  // it will NPE if a test never called setupScheduler — confirm every test does.
  override def afterEach(): Unit = {
    super.afterEach()
    taskScheduler.stop()
    backend.stop()
    scheduler.stop()
  }

  // Creates a SparkContext whose master string ("mock[<BackendClass>]") routes through
  // MockExternalClusterManager, then wires the mock backend and test task scheduler together.
  def setupScheduler(conf: SparkConf): Unit = {
    conf.setAppName(this.getClass().getSimpleName())
    val backendClassName = implicitly[ClassTag[T]].runtimeClass.getName()
    conf.setMaster(s"mock[${backendClassName}]")
    sc = new SparkContext(conf)
    backend = sc.schedulerBackend.asInstanceOf[T]
    taskScheduler = sc.taskScheduler.asInstanceOf[TestTaskScheduler]
    taskScheduler.initialize(sc.schedulerBackend)
    scheduler = new DAGScheduler(sc, taskScheduler)
    taskScheduler.setDAGScheduler(scheduler)
  }

  // Convenience overload with no extra SparkConf entries.
  def testScheduler(name: String)(testBody: => Unit): Unit = {
    testScheduler(name, Seq())(testBody)
  }

  // Registers a test that first builds the scheduler stack with the given extra conf entries.
  def testScheduler(name: String, extraConfs: Seq[(String, String)])(testBody: => Unit): Unit = {
    test(name) {
      val conf = new SparkConf()
      extraConfs.foreach{ case (k, v) => conf.set(k, v)}
      setupScheduler(conf)
      testBody
    }
  }

  /**
   * A map from partition to results for all tasks of a job when you call this test framework's
   * [[submit]] method. Two important considerations:
   *
   * 1. If there is a job failure, results may or may not be empty. If any tasks succeed before
   * the job has failed, they will get included in `results`. Instead, check for job failure by
   * checking [[failure]]. (Also see `assertDataStructuresEmpty()`)
   *
   * 2. This only gets cleared between tests. So you'll need to do special handling if you submit
   * more than one job in one test.
   */
  val results = new HashMap[Int, Any]()

  /**
   * If a call to [[submit]] results in a job failure, this will hold the exception, else it will
   * be null.
   *
   * As with [[results]], this only gets cleared between tests, so care must be taken if you are
   * submitting more than one job in one test.
   */
  var failure: Throwable = _

  /**
   * When we submit dummy Jobs, this is the compute function we supply.
   */
  private val jobComputeFunc: (TaskContext, scala.Iterator[_]) => Any = {
    (context: TaskContext, it: Iterator[(_)]) =>
      throw new RuntimeException("jobComputeFunc shouldn't get called in this mock")
  }

  /** Submits a job to the scheduler, and returns a future which does a bit of error handling. */
  protected def submit(
      rdd: RDD[_],
      partitions: Array[Int],
      func: (TaskContext, Iterator[_]) => _ = jobComputeFunc): Future[Any] = {
    val waiter: JobWaiter[Any] = scheduler.submitJob(rdd, func, partitions.toSeq, CallSite("", ""),
      (index, res) => results(index) = res, new Properties())
    import scala.concurrent.ExecutionContext.Implicits.global
    waiter.completionFuture.recover { case ex =>
      failure = ex
    }
  }

  /**
   * Helper to run a few common asserts after a job has completed, in particular some internal
   * datastructures for bookkeeping. This only does a very minimal check for whether the job
   * failed or succeeded -- often you will want extra asserts on [[results]] or [[failure]].
   */
  protected def assertDataStructuresEmpty(noFailure: Boolean = true): Unit = {
    if (noFailure) {
      if (failure != null) {
        // if there is a job failure, it can be a bit hard to tease the job failure msg apart
        // from the test failure msg, so we do a little extra formatting
        val msg =
          raw"""
          | There was a failed job.
          | ----- Begin Job Failure Msg -----
          | ${Utils.exceptionString(failure)}
          | ----- End Job Failure Msg ----
          """.
            stripMargin
        fail(msg)
      }
      // When a job fails, we terminate before waiting for all the task end events to come in,
      // so there might still be a running task set. So we only check these conditions
      // when the job succeeds.
      // When the final task of a taskset completes, we post
      // the event to the DAGScheduler event loop before we finish processing in the taskscheduler
      // thread. It's possible the DAGScheduler thread processes the event, finishes the job,
      // and notifies the job waiter before our original thread in the task scheduler finishes
      // handling the event and marks the taskset as complete. So its ok if we need to wait a
      // *little* bit longer for the original taskscheduler thread to finish up to deal w/ the race.
      eventually(timeout(1 second), interval(10 millis)) {
        assert(taskScheduler.runningTaskSets.isEmpty)
      }
      assert(!backend.hasTasks)
    } else {
      assert(failure != null)
    }
    assert(scheduler.activeJobs.isEmpty)
    assert(backendException.get() == null)
  }

  /**
   * Looks at all shuffleMapOutputs that are dependencies of the given RDD, and makes sure
   * they are all registered
   */
  def assertMapOutputAvailable(targetRdd: MockRDD): Unit = {
    val shuffleIds = targetRdd.shuffleDeps.map{_.shuffleId}
    val nParts = targetRdd.numPartitions
    for {
      shuffleId <- shuffleIds
      reduceIdx <- (0 until nParts)
    } {
      val statuses = taskScheduler.mapOutputTracker.getMapSizesByExecutorId(shuffleId, reduceIdx)
      // really we should have already thrown an exception rather than fail either of these
      // asserts, but just to be extra defensive let's double check the statuses are OK
      assert(statuses != null)
      assert(statuses.nonEmpty)
    }
  }

  /** models a stage boundary with a single dependency, like a shuffle */
  def shuffle(nParts: Int, input: MockRDD): MockRDD = {
    val partitioner = new HashPartitioner(nParts)
    val shuffleDep = new ShuffleDependency[Int, Int, Nothing](input, partitioner)
    new MockRDD(sc, nParts, List(shuffleDep))
  }

  /** models a stage boundary with multiple dependencies, like a join */
  def join(nParts: Int, inputs: MockRDD*): MockRDD = {
    val partitioner = new HashPartitioner(nParts)
    val shuffleDeps = inputs.map { inputRDD =>
      new ShuffleDependency[Int, Int, Nothing](inputRDD, partitioner)
    }
    new MockRDD(sc, nParts, shuffleDeps)
  }

  // Set by the mock-backend thread when backendFunc throws; checked by
  // assertDataStructuresEmpty and awaitJobTermination.
  val backendException = new AtomicReference[Exception](null)

  /**
   * Helper which makes it a little easier to setup a test, which starts a mock backend in another
   * thread, responding to tasks with your custom function. You also supply the "body" of your
   * test, where you submit jobs to your backend, wait for them to complete, then check
   * whatever conditions you want. Note that this is *not* safe to all bad backends --
   * in particular, your `backendFunc` has to return quickly, it can't throw errors, (instead
   * it should send back the right TaskEndReason)
   */
  def withBackend[T](backendFunc: () => Unit)(testBody: => T): T = {
    val backendContinue = new AtomicBoolean(true)
    val backendThread = new Thread("mock backend thread") {
      override def run(): Unit = {
        while (backendContinue.get()) {
          if (backend.hasTasksWaitingToRun) {
            try {
              backendFunc()
            } catch {
              case ex: Exception =>
                // Try to do a little error handling around exceptions that might occur here --
                // otherwise it can just look like a TimeoutException in the test itself.
                logError("Exception in mock backend:", ex)
                backendException.set(ex)
                backendContinue.set(false)
                throw ex
            }
          } else {
            Thread.sleep(10)
          }
        }
      }
    }
    try {
      backendThread.start()
      testBody
    } finally {
      backendContinue.set(false)
      backendThread.join()
    }
  }

  /**
   * Helper to do a little extra error checking while waiting for the job to terminate. Primarily
   * just does a little extra error handling if there is an exception from the backend.
   */
  def awaitJobTermination(jobFuture: Future[_], duration: Duration): Unit = {
    try {
      ThreadUtils.awaitReady(jobFuture, duration)
    } catch {
      case te: TimeoutException if backendException.get() != null =>
        val msg = raw"""
        | ----- Begin Backend Failure Msg -----
        | ${Utils.exceptionString(backendException.get())}
        | ----- End Backend Failure Msg ----
        """.
          stripMargin
        fail(s"Future timed out after ${duration}, likely because of failure in backend: $msg")
    }
  }
}
/**
* Helper for running a backend in integration tests, does a bunch of the book-keeping
* so individual tests can focus on just responding to tasks. Individual tests will use
* [[beginTask]], [[taskSuccess]], and [[taskFailed]].
*/
private[spark] abstract class MockBackend(
    conf: SparkConf,
    val taskScheduler: TaskSchedulerImpl) extends SchedulerBackend with Logging {

  // Periodically revive offers to allow delay scheduling to work
  private val reviveThread =
    ThreadUtils.newDaemonSingleThreadScheduledExecutor("driver-revive-thread")
  private val reviveIntervalMs = conf.getTimeAsMs("spark.scheduler.revive.interval", "10ms")

  /**
   * Test backends should call this to get a task that has been assigned to them by the scheduler.
   * Each task should be responded to with either [[taskSuccess]] or [[taskFailed]].
   */
  def beginTask(): (TaskDescription, Task[_]) = {
    synchronized {
      // take from the end of the buffer (cheap removal) and mark the task as running
      val toRun = assignedTasksWaitingToRun.remove(assignedTasksWaitingToRun.size - 1)
      runningTasks += toRun._1.taskId
      toRun
    }
  }

  /**
   * Tell the scheduler the task completed successfully, with the given result. Also
   * updates some internal state for this mock.
   */
  def taskSuccess(task: TaskDescription, result: Any): Unit = {
    val ser = env.serializer.newInstance()
    val resultBytes = ser.serialize(result)
    val directResult = new DirectTaskResult(resultBytes, Seq()) // no accumulator updates
    taskUpdate(task, TaskState.FINISHED, directResult)
  }

  /**
   * Tell the scheduler the task failed, with the given state and result (probably ExceptionFailure
   * or FetchFailed). Also updates some internal state for this mock.
   */
  def taskFailed(task: TaskDescription, exc: Exception): Unit = {
    taskUpdate(task, TaskState.FAILED, new ExceptionFailure(exc, Seq()))
  }

  // Overload for failing with an explicit TaskFailedReason (e.g. FetchFailed).
  def taskFailed(task: TaskDescription, reason: TaskFailedReason): Unit = {
    taskUpdate(task, TaskState.FAILED, reason)
  }

  // Serializes the result, reports the status to the task scheduler, and -- for terminal
  // states -- returns the task's cores to the executor and revives offers.
  def taskUpdate(task: TaskDescription, state: TaskState, result: Any): Unit = {
    val ser = env.serializer.newInstance()
    val resultBytes = ser.serialize(result)
    // statusUpdate is safe to call from multiple threads, its protected inside taskScheduler
    taskScheduler.statusUpdate(task.taskId, state, resultBytes)
    if (TaskState.isFinished(state)) {
      synchronized {
        runningTasks -= task.taskId
        executorIdToExecutor(task.executorId).freeCores += taskScheduler.CPUS_PER_TASK
        freeCores += taskScheduler.CPUS_PER_TASK
      }
      reviveOffers()
    }
  }

  // protected by this
  private val assignedTasksWaitingToRun = new ArrayBuffer[(TaskDescription, Task[_])](10000)
  // protected by this
  private val runningTasks = HashSet[Long]()

  // True while any task is either assigned-but-unstarted or running.
  def hasTasks: Boolean = synchronized {
    assignedTasksWaitingToRun.nonEmpty || runningTasks.nonEmpty
  }

  def hasTasksWaitingToRun: Boolean = {
    assignedTasksWaitingToRun.nonEmpty
  }

  override def start(): Unit = {
    reviveThread.scheduleAtFixedRate(new Runnable {
      override def run(): Unit = Utils.tryLogNonFatalError {
        reviveOffers()
      }
    }, 0, reviveIntervalMs, TimeUnit.MILLISECONDS)
  }

  override def stop(): Unit = {
    reviveThread.shutdown()
  }

  val env = SparkEnv.get

  /** Accessed by both scheduling and backend thread, so should be protected by this. */
  var freeCores: Int = _

  /**
   * Accessed by both scheduling and backend thread, so should be protected by this.
   * Most likely the only thing that needs to be protected are the inidividual ExecutorTaskStatus,
   * but for simplicity in this mock just lock the whole backend.
   */
  def executorIdToExecutor: Map[String, ExecutorTaskStatus]

  // Builds a WorkerOffer for every executor that still has free cores.
  private def generateOffers(): IndexedSeq[WorkerOffer] = {
    executorIdToExecutor.values.filter { exec =>
      exec.freeCores > 0
    }.map { exec =>
      WorkerOffer(executorId = exec.executorId, host = exec.host,
        cores = exec.freeCores)
    }.toIndexedSeq
  }

  /**
   * This is called by the scheduler whenever it has tasks it would like to schedule, when a tasks
   * completes (which will be in a result-getter thread), and by the reviveOffers thread for delay
   * scheduling.
   */
  override def reviveOffers(): Unit = {
    // Need a lock on the entire scheduler to protect freeCores -- otherwise, multiple threads
    // may make offers at the same time, though they are using the same set of freeCores.
    taskScheduler.synchronized {
      val newTaskDescriptions = taskScheduler.resourceOffers(generateOffers()).flatten
      // get the task now, since that requires a lock on TaskSchedulerImpl, to prevent individual
      // tests from introducing a race if they need it.
      val newTasks = newTaskDescriptions.map { taskDescription =>
        val taskSet = taskScheduler.taskIdToTaskSetManager(taskDescription.taskId).taskSet
        val task = taskSet.tasks(taskDescription.index)
        (taskDescription, task)
      }
      newTasks.foreach { case (taskDescription, _) =>
        executorIdToExecutor(taskDescription.executorId).freeCores -= taskScheduler.CPUS_PER_TASK
      }
      freeCores -= newTasks.size * taskScheduler.CPUS_PER_TASK
      assignedTasksWaitingToRun ++= newTasks
    }
  }

  override def killTask(
      taskId: Long, executorId: String, interruptThread: Boolean, reason: String): Unit = {
    // We have to implement this b/c of SPARK-15385.
    // Its OK for this to be a no-op, because even if a backend does implement killTask,
    // it really can only be "best-effort" in any case, and the scheduler should be robust to that.
    // And in fact its reasonably simulating a case where a real backend finishes tasks in between
    // the time when the scheduler sends the msg to kill tasks, and the backend receives the msg.
  }
}
/**
* A very simple mock backend that can just run one task at a time.
*/
// Concrete mock backend exposing exactly one core on a single driver-local "executor",
// so the scheduler can only run one task at a time.
private[spark] class SingleCoreMockBackend(
    conf: SparkConf,
    taskScheduler: TaskSchedulerImpl) extends MockBackend(conf, taskScheduler) {

  val cores = 1

  override def defaultParallelism(): Int = conf.getInt("spark.default.parallelism", cores)

  freeCores = cores
  val localExecutorId = SparkContext.DRIVER_IDENTIFIER
  val localExecutorHostname = "localhost"

  // The single executor, keyed by the driver's executor id.
  override val executorIdToExecutor: Map[String, ExecutorTaskStatus] = Map(
    localExecutorId -> new ExecutorTaskStatus(localExecutorHostname, localExecutorId, freeCores)
  )
}
case class ExecutorTaskStatus(host: String, executorId: String, var freeCores: Int)
// An RDD that exists only for its lineage: it declares partitions and shuffle
// dependencies for the scheduler to plan around, but can never actually be computed.
class MockRDD(
    sc: SparkContext,
    val numPartitions: Int,
    val shuffleDeps: Seq[ShuffleDependency[Int, Int, Nothing]]
  ) extends RDD[(Int, Int)](sc, shuffleDeps) with Serializable {

  MockRDD.validate(numPartitions, shuffleDeps)

  // Tasks are never really executed in these tests, so compute must be unreachable.
  override def compute(split: Partition, context: TaskContext): Iterator[(Int, Int)] =
    throw new RuntimeException("should not be reached")
  override def getPartitions: Array[Partition] = {
    (0 until numPartitions).map(i => new Partition {
      override def index: Int = i
    }).toArray
  }
  override def getPreferredLocations(split: Partition): Seq[String] = Nil
  override def toString: String = "MockRDD " + id
}
object MockRDD extends AssertionsHelper with TripleEquals {
  /**
   * Sanity-checks that every supplied shuffle dependency has a non-null partitioner
   * producing the expected number of output partitions. This guards the *test setup*
   * (a mis-built MockRDD), not Spark's own behavior.
   */
  def validate(numPartitions: Int, dependencies: Seq[ShuffleDependency[_, _, _]]): Unit = {
    for (dep <- dependencies) {
      val part = dep.partitioner
      assert(part != null)
      assert(part.numPartitions === numPartitions)
    }
  }
}
/** Simple cluster manager that wires up our mock backend. */
/** Simple cluster manager that wires up our mock backend. */
private class MockExternalClusterManager extends ExternalClusterManager {

  // Matches master URLs of the form mock[<backend class name>].
  val MOCK_REGEX = """mock\\[(.*)\\]""".r

  def canCreate(masterURL: String): Boolean = MOCK_REGEX.findFirstIn(masterURL).isDefined

  def createTaskScheduler(
      sc: SparkContext,
      masterURL: String): TaskScheduler = {
    new TestTaskScheduler(sc)
  }

  // Reflectively instantiates the backend class named inside mock[...], passing the
  // (SparkConf, TaskSchedulerImpl) constructor arguments.
  def createSchedulerBackend(
      sc: SparkContext,
      masterURL: String,
      scheduler: TaskScheduler): SchedulerBackend = {
    masterURL match {
      case MOCK_REGEX(backendClassName) =>
        val backendClass = Utils.classForName(backendClassName)
        val ctor = backendClass.getConstructor(classOf[SparkConf], classOf[TaskSchedulerImpl])
        ctor.newInstance(sc.getConf, scheduler).asInstanceOf[SchedulerBackend]
    }
  }

  def initialize(scheduler: TaskScheduler, backend: SchedulerBackend): Unit = {
    scheduler.asInstanceOf[TaskSchedulerImpl].initialize(backend)
  }
}
/** TaskSchedulerImpl that just tracks a tiny bit more state to enable checks in tests. */
/** TaskSchedulerImpl that just tracks a tiny bit more state to enable checks in tests. */
class TestTaskScheduler(sc: SparkContext) extends TaskSchedulerImpl(sc) {

  /** Set of TaskSets the DAGScheduler has requested executed. */
  val runningTaskSets = HashSet[TaskSet]()

  // Record the task set before delegating, so tests can observe in-flight task sets.
  override def submitTasks(taskSet: TaskSet): Unit = {
    runningTaskSets += taskSet
    super.submitTasks(taskSet)
  }

  // Remove the task set before delegating, mirroring submitTasks.
  override def taskSetFinished(manager: TaskSetManager): Unit = {
    runningTaskSets -= manager.taskSet
    super.taskSetFinished(manager)
  }
}
/**
 * Some very basic tests just to demonstrate the use of the test framework (and verify that it
 * works).
 */
class BasicSchedulerIntegrationSuite extends SchedulerIntegrationSuite[SingleCoreMockBackend] {

  /**
   * Very simple one stage job. Backend successfully completes each task, one by one
   */
  testScheduler("super simple job") {
    def runBackend(): Unit = {
      // FIX: renamed misspelled local "taskDescripition" -> "taskDescription".
      val (taskDescription, _) = backend.beginTask()
      backend.taskSuccess(taskDescription, 42)
    }
    withBackend(runBackend _) {
      val jobFuture = submit(new MockRDD(sc, 10, Nil), (0 until 10).toArray)
      val duration = Duration(1, SECONDS)
      awaitJobTermination(jobFuture, duration)
    }
    // Every partition should have produced the constant result 42.
    assert(results === (0 until 10).map { _ -> 42 }.toMap)
    assertDataStructuresEmpty()
  }

  /**
   * 5 stage job, diamond dependencies.
   *
   *   a ----> b ----> d --> result
   *    \--> c --/
   *
   * Backend successfully completes each task
   */
  testScheduler("multi-stage job") {

    // Each shuffle writes as many output partitions as its downstream RDD has.
    def shuffleIdToOutputParts(shuffleId: Int): Int = {
      shuffleId match {
        case 0 => 10
        case 1 => 20
        case _ => 30
      }
    }

    val a = new MockRDD(sc, 2, Nil)
    val b = shuffle(10, a)
    val c = shuffle(20, a)
    val d = join(30, b, c)

    def runBackend(): Unit = {
      val (taskDescription, task) = backend.beginTask()

      // make sure the required map output is available
      task.stageId match {
        case 4 => assertMapOutputAvailable(d)
        case _ =>
        // we can't check for the output for the two intermediate stages, unfortunately,
        // b/c the stage numbering is non-deterministic, so stage number alone doesn't tell
        // us what to check
      }

      (task.stageId, task.stageAttemptId, task.partitionId) match {
        case (stage, 0, _) if stage < 4 =>
          // Map stage: report a map status sized for the downstream shuffle.
          val shuffleId =
            scheduler.stageIdToStage(stage).asInstanceOf[ShuffleMapStage].shuffleDep.shuffleId
          backend.taskSuccess(taskDescription,
            DAGSchedulerSuite.makeMapStatus("hostA", shuffleIdToOutputParts(shuffleId)))
        case (4, 0, partition) =>
          // Result stage: return a value derived from the partition id.
          backend.taskSuccess(taskDescription, 4321 + partition)
      }
    }

    withBackend(runBackend _) {
      val jobFuture = submit(d, (0 until 30).toArray)
      val duration = Duration(1, SECONDS)
      awaitJobTermination(jobFuture, duration)
    }
    assert(results === (0 until 30).map { idx => idx -> (4321 + idx) }.toMap)
    assertDataStructuresEmpty()
  }

  /**
   * 2 stage job, with a fetch failure. Make sure that:
   * (a) map output is available whenever we run stage 1
   * (b) we get a second attempt for stage 0 & stage 1
   */
  testScheduler("job with fetch failure") {
    val input = new MockRDD(sc, 2, Nil)
    val shuffledRdd = shuffle(10, input)
    val shuffleId = shuffledRdd.shuffleDeps.head.shuffleId

    // Records which attempt numbers we saw for each stage, for the final check.
    val stageToAttempts = new HashMap[Int, HashSet[Int]]()

    def runBackend(): Unit = {
      val (taskDescription, task) = backend.beginTask()
      stageToAttempts.getOrElseUpdate(task.stageId, new HashSet()) += task.stageAttemptId

      // We cannot check if shuffle output is available, because the failed fetch will clear the
      // shuffle output. Then we'd have a race, between the already-started task from the first
      // attempt, and when the failure clears out the map output status.

      (task.stageId, task.stageAttemptId, task.partitionId) match {
        case (0, _, _) =>
          backend.taskSuccess(taskDescription, DAGSchedulerSuite.makeMapStatus("hostA", 10))
        case (1, 0, 0) =>
          // First attempt of partition 0 in the result stage fails its fetch,
          // forcing a retry of both stages.
          val fetchFailed = FetchFailed(
            DAGSchedulerSuite.makeBlockManagerId("hostA"), shuffleId, 0, 0, "ignored")
          backend.taskFailed(taskDescription, fetchFailed)
        case (1, _, partition) =>
          backend.taskSuccess(taskDescription, 42 + partition)
      }
    }
    withBackend(runBackend _) {
      val jobFuture = submit(shuffledRdd, (0 until 10).toArray)
      val duration = Duration(1, SECONDS)
      awaitJobTermination(jobFuture, duration)
    }
    assertDataStructuresEmpty()
    assert(results === (0 until 10).map { idx => idx -> (42 + idx) }.toMap)
    // Both stages must have been attempted exactly twice.
    assert(stageToAttempts === Map(0 -> Set(0, 1), 1 -> Set(0, 1)))
  }

  /** Every task fails; after the retry limit the whole job must fail. */
  testScheduler("job failure after 4 attempts") {
    def runBackend(): Unit = {
      val (taskDescription, _) = backend.beginTask()
      backend.taskFailed(taskDescription, new RuntimeException("test task failure"))
    }
    withBackend(runBackend _) {
      val jobFuture = submit(new MockRDD(sc, 10, Nil), (0 until 10).toArray)
      val duration = Duration(1, SECONDS)
      awaitJobTermination(jobFuture, duration)
      assert(failure.getMessage.contains("test task failure"))
    }
    assertDataStructuresEmpty(noFailure = false)
  }
}
| aokolnychyi/spark | core/src/test/scala/org/apache/spark/scheduler/SchedulerIntegrationSuite.scala | Scala | apache-2.0 | 24,824 |
package simulations
import math.random
import scala.util.Random
/* Code Lost
use https://github.com/robertberry/Principles-of-Reactive-Programming-Homework instead
*/
object Utils {
  /**
   * Mathematical modulus: unlike Scala's `%`, the result is always in
   * [0, m) for positive m, even when n is negative.
   */
  def mod(n: Int, m: Int) = {
    val remainder = n % m
    if (remainder < 0) remainder + m else remainder
  }

  /** Creates a neighbours function for a grid of given rows and cols */
  def makeNeighbours(rows: Int, cols: Int) =
    (row: Int, col: Int) =>
      // The four von Neumann neighbours, wrapped toroidally onto the grid.
      List(
        (row + 1, col),
        (row - 1, col),
        (row, col + 1),
        (row, col - 1)
      ).map { case (r, c) => (mod(r, rows), mod(c, cols)) }
}
/**
 * Stochastic epidemic simulation on a toroidal grid of rooms (Coursera
 * "Principles of Reactive Programming" assignment). People move between
 * adjacent rooms on a randomized schedule and may become infected, sick,
 * immune or dead according to the rates below. All state is mutable and
 * driven by the inherited Simulator's `afterDelay` event queue.
 */
class EpidemySimulator extends Simulator {

  val NumberOfPersons = 300
  val InfectionPrevalence = 0.01
  // Number of people infected at simulation start (1% of the population).
  val NumberOfInitiallyInfected = (NumberOfPersons * InfectionPrevalence).toInt
  val ChanceOfDying = 0.25
  val TransmissabilityRate = 0.4
  val AirTrafficChance = 0.01
  val VaccinationRate = 0.05
  val NumberVaccinated = (NumberOfPersons * VaccinationRate).toInt

  // Uniform random integer in [0, i).
  def randomBelow(i: Int) = (random * i).toInt
  // Uniform random integer in [low, high).
  def randInt(low: Int, high: Int) = randomBelow(high - low) + low
  /** Returns true with n chance (n being a number between 0.0 and 1.0) */
  def chance(n: Double) = Random.nextDouble() < n
  // Uniformly random element of a non-empty list.
  def choice[A](xs: List[A]) = xs(randomBelow(xs.length))

  protected[simulations] object SimConfig {
    val population: Int = 300
    val roomRows: Int = 8
    val roomColumns: Int = 8
    // Feature toggles for the assignment's "what if" scenarios.
    val airTraffic = false
    val reduceMobility = false
    val chosenFew = false
  }

  import SimConfig._

  // When the "chosenFew" scenario is on, the last NumberVaccinated ids are vaccinated.
  val persons: List[Person] =
    ((1 to NumberOfPersons) map { id =>
      new Person(id, vaccinated = chosenFew && id >= NumberOfPersons - NumberVaccinated)
    }).toList

  // Seed the epidemic with the first few people.
  persons.take(NumberOfInitiallyInfected).foreach(_.infect)

  val neighbourCoords = Utils.makeNeighbours(SimConfig.roomRows, SimConfig.roomColumns)

  def personsAtCoord(row: Int, col: Int) = persons.filter(person => person.row == row && person.col == col)

  // A room looks safe if nobody there is *visibly* infected (sick or dead)...
  def okToMoveTo(row: Int, col: Int) =
    !personsAtCoord(row, col).exists(_.visiblyInfected)

  // ...but actual infection risk depends on anyone there being infected at all.
  def isActuallyOk(row: Int, col: Int) =
    !personsAtCoord(row, col).exists(_.infected)

  class Person (val id: Int, val vaccinated: Boolean = false) {
    var infected = false
    var sick = false
    var immune = false
    var dead = false

    // demonstrates random number generation
    var row: Int = randomBelow(roomRows)
    var col: Int = randomBelow(roomColumns)

    // What other people can observe about this person's health.
    def visiblyInfected = sick || dead

    // Disease timeline (in simulated days, relative to infection): sick after 6;
    // at day 14 either die (ChanceOfDying) or, 2 days later, become immune and,
    // 4 days after day 14, become healthy again. Vaccinated people never catch it.
    def infect {
      if (!vaccinated) {
        infected = true
        afterDelay(6) {
          sick = true
        }
        afterDelay(14) {
          if (chance(ChanceOfDying)) {
            dead = true
          } else {
            afterDelay(2) {
              if (!dead) {
                /** This basically to pass the test suite - so far as I can reason about the program (and it's difficult
                  * with all the mutable state), I don't think this would ever happen in a normal run
                  */
                sick = false
                immune = true
              }
            }
            afterDelay(4) {
              if (!dead) {
                immune = false
                infected = false
              }
            }
          }
        }
      }
    }

    //
    // to complete with simulation logic
    //

    // Schedule the next move 1-4 days out; in the reduced-mobility scenario
    // everyone moves half as often, the visibly infected a quarter as often.
    def moveWithinDelay {
      val baseDelay = randInt(1, 5)
      val delayInDays = if (reduceMobility) {
        if (visiblyInfected) {
          baseDelay * 4
        } else {
          baseDelay * 2
        }
      } else {
        baseDelay
      }
      afterDelay(delayInDays) {
        move
      }
    }

    // Relocate to (newRow, newCol), possibly catching the disease from the
    // destination room's occupants on the way in.
    private def moveAndInfect(newRow: Int, newCol: Int) {
      /** Calculate whether gets infected */
      if (!isActuallyOk(newRow, newCol) && !infected && chance(TransmissabilityRate)) {
        infect
      }
      row = newRow
      col = newCol
    }

    // "Air traffic": jump to a uniformly random room anywhere on the grid.
    private def hopAPlain {
      val newRow = randomBelow(roomRows)
      val newCol = randomBelow(roomColumns)
      moveAndInfect(newRow, newCol)
    }

    // Move to a random adjacent room with no visibly infected occupants;
    // stay put if all four neighbours look unsafe.
    private def moveToAdjacent {
      val newPosition = neighbourCoords(row, col) filter { case ((r, c)) => okToMoveTo(r, c) } match {
        case Nil => None
        case xs => Some(choice(xs))
      }
      newPosition foreach {
        case (newRow, newCol) => moveAndInfect(newRow, newCol)
      }
    }

    // One movement step; dead people stop moving (and stop rescheduling).
    def move {
      if (!dead) {
        if (airTraffic && chance(AirTrafficChance)) {
          hopAPlain
        } else {
          moveToAdjacent
        }
        moveWithinDelay
      }
    }

    // Kick off this person's movement loop at construction time.
    moveWithinDelay
  }
}
/*
* Copyright (c) 2015 Alpine Data Labs
* All rights reserved.
*/
package com.alpine.json
/**
 * @author Jenny Thompson
 *         6/10/15
 */
object JsonTestUtil {
  /**
   * Serializes `p` with both the pretty and the compact Gson (with type
   * hints), optionally printing both forms, then asserts that every
   * (JSON form, Gson instance) pairing deserializes back to a value equal
   * to the original — a full round-trip cross-check.
   */
  def testJsonization(p: Any, printJson: Boolean = false): Unit = {
    val prettyGson = JsonUtil.prettyGsonWithTypeHints
    val compactGson = JsonUtil.compactGsonWithTypeHints

    val pJson: String = prettyGson.toJson(p)
    if (printJson) {
      println("Pretty json is:")
      println(pJson)
    }

    val cJson = compactGson.toJson(p)
    if (printJson) {
      println()
      println("Compact json is:")
      println(cJson)
    }

    // Each serialized form must round-trip through either Gson instance.
    for {
      json <- Seq(pJson, cJson)
      gson <- Seq(prettyGson, compactGson)
    } assert(p == gson.fromJson(json, p.getClass))
  }
}
| holdenk/PluginSDK | alpine-model-api/src/test/scala/com/alpine/json/JsonTestUtil.scala | Scala | apache-2.0 | 845 |
/*
* Copyright 2001-2013 Stephen Colebourne
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.joda.time.field
import org.joda.time.DateTimeFieldType
import org.joda.time.DurationField
/**
 * Precise datetime field, composed of two precise duration fields.
 * <p>
 * This DateTimeField is useful for defining DateTimeFields that are composed
 * of precise durations, like time of day fields. If either duration field is
 * imprecise, then an {@link ImpreciseDateTimeField} may be used instead.
 * <p>
 * PreciseDateTimeField is thread-safe and immutable.
 *
 * @param type  the field type this field uses
 * @param unit  precise unit duration, like "seconds()".
 * @param range precise range duration, preferably a multiple of the unit,
 *              like "minutes()".
 * @throws IllegalArgumentException if either duration field is imprecise
 * @throws IllegalArgumentException if unit milliseconds is less than one
 *                                  or effective value range is less than two.
 * @author Brian S O'Neill
 * @author Stephen Colebourne
 * @since 1.0
 * @see ImpreciseDateTimeField
 */
@SerialVersionUID(-5586801265774496376L)
class PreciseDateTimeField(`type`: DateTimeFieldType, unit: DurationField, range: DurationField)
  extends PreciseDurationDateTimeField(`type`, unit) {

  // FIX: the machine-converted original declared a no-arg primary constructor,
  // reassigned `final val`s from an auxiliary constructor and called the
  // superclass via `super`(...) — none of which compiles in Scala. The
  // three-argument primary constructor below preserves the Joda-Time API.

  if (!range.isPrecise) {
    throw new IllegalArgumentException("Range duration field must be precise")
  }

  /** The maximum range in the correct units (range duration / unit duration). */
  private final val iRange: Int = {
    val rangeMillis: Long = range.getUnitMillis
    val computed = (rangeMillis / getUnitMillis).toInt
    if (computed < 2) {
      throw new IllegalArgumentException("The effective range must be at least 2")
    }
    computed
  }

  private final val iRangeField: DurationField = range

  /**
   * Get the amount of fractional units from the specified time instant.
   *
   * @param instant the milliseconds from 1970-01-01T00:00:00Z to query
   * @return the amount of fractional units extracted from the input.
   */
  def get(instant: Long): Int = {
    if (instant >= 0) {
      ((instant / getUnitMillis) % iRange).toInt
    } else {
      // Negative instants: shift by one before dividing so truncation toward
      // zero still yields a value in [0, iRange).
      iRange - 1 + (((instant + 1) / getUnitMillis) % iRange).toInt
    }
  }

  /**
   * Add to the component of the specified time instant, wrapping around
   * within that component if necessary.
   *
   * @param instant the milliseconds from 1970-01-01T00:00:00Z to add to
   * @param amount  the amount of units to add (can be negative).
   * @return the updated time instant.
   */
  override def addWrapField(instant: Long, amount: Int): Long = {
    val thisValue: Int = get(instant)
    val wrappedValue: Int = FieldUtils.getWrappedValue(thisValue, amount, getMinimumValue, getMaximumValue)
    instant + (wrappedValue - thisValue) * getUnitMillis
  }

  /**
   * Set the specified amount of units to the specified time instant.
   *
   * @param instant the milliseconds from 1970-01-01T00:00:00Z to set in
   * @param value   value of units to set.
   * @return the updated time instant.
   * @throws IllegalArgumentException if value is too large or too small.
   */
  override def set(instant: Long, value: Int): Long = {
    FieldUtils.verifyValueBounds(this, value, getMinimumValue, getMaximumValue)
    instant + (value - get(instant)) * iUnitMillis
  }

  /**
   * Returns the range duration of this field. For example, if this field
   * represents "minute of hour", then the range duration field is an hours.
   *
   * @return the range duration of this field, or null if field has no range
   */
  def getRangeDurationField: DurationField = iRangeField

  /**
   * Get the maximum value for the field.
   *
   * @return the maximum value
   */
  def getMaximumValue: Int = iRange - 1

  /**
   * Returns the range of the field in the field's units.
   * <p>
   * For example, 60 for seconds per minute. The field is allowed values
   * from 0 to range - 1.
   *
   * @return unit range
   */
  def getRange: Int = iRange
}
package org.moe.interpreter.guts
import org.moe.interpreter._
import org.moe.runtime._
import org.moe.runtime.nativeobjects._
import org.moe.ast._
/**
 * Interpreter rules for Moe's object system: compilation of class, method and
 * attribute declarations, plus evaluation of class-related expressions
 * (class access, super calls, attribute access/assignment, method calls).
 */
object Classes extends Utils {

  // Compile-time handling of declaration AST nodes.
  def declaration (i: MoeInterpreter, r: MoeRuntime): PartialFunction[(MoeEnvironment, AST), MoeObject] = {

    // Class declaration: resolve the superclass (default: core class "Any"),
    // register the new class in the current package, then compile the class
    // body in a child environment whose "current class" is the new class.
    case (env, ClassDeclarationNode(name, superclass, body, version, authority)) => {
      val pkg = getCurrentPackage(env)
      val superclass_class: Option[MoeClass] = superclass.map(
        r.lookupClass(_, pkg).getOrElse(
          throw new MoeErrors.ClassNotFound(superclass.getOrElse(""))
        )
      ).orElse(r.getCoreClassFor("Any"))

      val klass = new MoeClass(
        name,
        version,
        authority,
        superclass_class
      )
      klass.setAssociatedType(Some(MoeClassType(r.getCoreClassFor("Class"))))

      pkg.addClass(klass)

      val klass_env = new MoeEnvironment(Some(env))
      klass_env.setCurrentClass(klass)

      i.compile(klass_env, body)

      klass
    }

    // Submethod declaration: compile the signature, verify the body only uses
    // declared variables, and attach the submethod to the current class.
    case (env, SubMethodDeclarationNode(name, signature, body)) => {
      val klass = getCurrentClass(env)
      val sig = i.compile(env, signature).asInstanceOf[MoeSignature]
      throwForUndeclaredVars(env, sig, body)
      val method = new MoeMethod(
        name = name,
        signature = sig,
        declaration_env = env,
        body = (e) => i.evaluate(e, body)
      )
      klass.addSubMethod(method)
      method
    }

    // Method declaration: identical to submethods, but registered as a
    // regular (inheritable) method on the current class.
    case (env, MethodDeclarationNode(name, signature, body)) => {
      val klass = getCurrentClass(env)
      val sig = i.compile(env, signature).asInstanceOf[MoeSignature]
      throwForUndeclaredVars(env, sig, body)
      val method = new MoeMethod(
        name = name,
        signature = sig,
        declaration_env = env,
        body = (e) => i.evaluate(e, body)
      )
      klass.addMethod(method)
      method
    }

    // Attribute declaration: the default value expression is captured lazily
    // (as a thunk) and evaluated per-instance when the attribute is built.
    case (env, AttributeDeclarationNode(name, expression)) => {
      val klass = getCurrentClass(env)
      val attr_default = () => i.evaluate(env, expression)
      val attr = new MoeAttribute(name, Some(attr_default))
      klass.addAttribute(attr)
      attr
    }
  }

  // Runtime evaluation of class-related expression AST nodes.
  def apply (i: MoeInterpreter, r: MoeRuntime): PartialFunction[(MoeEnvironment, AST), MoeObject] = {

    // Bare class name: look it up relative to the current package.
    case (env, ClassAccessNode(name)) => r.lookupClass(
      name,
      getCurrentPackage(env)
    ).getOrElse(
      throw new MoeErrors.ClassNotFound(name)
    )

    // super(): re-dispatch the currently executing method (same name, same
    // args, same invocant — taken from the top stack frame) on the superclass.
    case (env, SuperCallNode()) => {
      val superclass = getCurrentClass(env).getSuperclass.getOrElse(
        throw new MoeErrors.SuperclassNotFound("__SUPER__")
      )
      val stack_frame = i.peakCallStack
      val meth_name = stack_frame.getCode.getName
      val args = stack_frame.getArgs
      val invocant = stack_frame.getCurrentInvocant.getOrElse(
        throw new MoeErrors.InvocantNotFound("super()")
      )
      val meth = superclass.getMethod(meth_name).getOrElse(
        throw new MoeErrors.MethodNotFound(superclass.getName + "::" + meth_name)
      )
      i.pushCallStack(new MoeStackFrame(meth, args, env, Some(invocant)))
      val result = invocant.callMethod(meth, args)
      i.popCallStack
      result
    }

    // Attribute read: the attribute must exist on the current class, and the
    // slot value is read from the current invocant.
    case (env, AttributeAccessNode(name)) => {
      val klass = getCurrentClass(env)
      val attr = klass.getAttribute(name).getOrElse(throw new MoeErrors.AttributeNotFound(name))
      val invocant = env.getCurrentInvocant
      invocant match {
        case Some(invocant: MoeOpaque) => invocant.getValue(name).getOrElse(
          throw new MoeErrors.InstanceValueNotFound(name)
        )
        case _ => throw new MoeErrors.UnexpectedType(invocant.getOrElse("(undef)").toString)
      }
    }

    // Attribute write: type-check the value against the attribute's sigil/type
    // before storing it in the invocant's slot.
    case (env, AttributeAssignmentNode(name, expression)) => {
      val klass = getCurrentClass(env)
      val attr = klass.getAttribute(name).getOrElse(throw new MoeErrors.AttributeNotFound(name))
      val expr = i.evaluate(env, expression)

      if (!MoeType.checkType(name, expr)) throw new MoeErrors.IncompatibleType(
        "the container (" + name + ") is not compatible with " + expr.getAssociatedType.get.getName
      )

      env.getCurrentInvocant match {
        case Some(invocant: MoeOpaque) => invocant.setValue(name, expr)
        case Some(invocant) => throw new MoeErrors.UnexpectedType(invocant.toString)
        case None => throw new MoeErrors.MoeException("Attribute default already declared")
      }
      expr
    }

    // Parallel attribute assignment, e.g. ($.x, $.y) = (1, 2): values are
    // zipped onto names and each is type-checked and stored; the last
    // evaluated expression is the node's result.
    case (env, MultiAttributeAssignmentNode(names, expressions)) => {
      val klass = getCurrentClass(env)
      val evaled_expressions = expressions.map(i.evaluate(env, _))

      env.getCurrentInvocant match {
        case Some(invocant: MoeOpaque) => {
          zipVars(
            r,
            names,
            evaled_expressions,
            {
              case (name, value) => {
                if (!MoeType.checkType(name, value)) throw new MoeErrors.IncompatibleType(
                  "the container (" + name + ") is not compatible with " + value.getAssociatedType.get.getName
                )
                klass.getAttribute(name).getOrElse(throw new MoeErrors.AttributeNotFound(name))
                invocant.setValue(name, value)
              }
            }
          )
        }
        case Some(invocant) => throw new MoeErrors.UnexpectedType(invocant.toString)
        case None => throw new MoeErrors.MoeException("Attribute default already declared")
      }

      evaled_expressions.last
    }

    // Method call: resolve the method (regular first, then submethod) on the
    // invocant's class, push a stack frame, dispatch, pop, return the result.
    case (env, MethodCallNode(invocant, method_name, args)) => {
      val invocant_object = i.evaluate(env, invocant)
      invocant_object match {
        case obj: MoeObject => {
          val klass = obj.getAssociatedClass.getOrElse(throw new MoeErrors.ClassNotFound("__CLASS__"))
          val meth = klass.getMethod(method_name).getOrElse(
            klass.getSubMethod(method_name).getOrElse(
              throw new MoeErrors.MethodNotFound(method_name)
            )
          )
          val evaluated_args = args.map(i.evaluate(env, _))
          i.pushCallStack(new MoeStackFrame(meth, evaluated_args, env, Some(obj)))
          val result = obj.callMethod(meth, evaluated_args)
          i.popCallStack
          result
        }
        case _ => throw new MoeErrors.MoeException("Object expected")
      }
    }
  }
}
class HelloClass {
  /** Prints a marker line showing that instance code ran. */
  def inClass(): Unit = {
    println("In class")
  }
}
object HelloObjectPlusClass {
  // Runs once, when the object is first initialized.
  println("In object")

  /** Entry point: demonstrates object initialization vs. instance code. */
  def main(args: Array[String]): Unit = {
    println("In main")
    new HelloClass().inClass()
  }
}
| beqa2323/learntosolveit | languages/scala/HelloObjectPlusClass.scala | Scala | bsd-3-clause | 254 |
package scalaFP
// The @meta.typeclass macro annotation generates companion machinery
// (e.g. the `_Oclosed` / `_Cotraversing` single-method traits and the
// `Closed(...)` apply used below).
@meta.typeclass
trait Closed[P[_,_]] {
  // Lift P[A, B] through a Grate optic into P[S, T].
  def oclosed[A, B, S, T](p: P[A, B])(g: Grate[A, B, S, T]): P[S, T]
  // Lift P[A, B] over any container F that has a Naperian instance.
  def cotraversing[A, B, F[_]: Naperian](p: P[A, B]): P[F[A], F[B]]
  // Every Closed is also a Profunctor; this is the witness.
  val toProfunctor: Profunctor[P]
}
object Closed {
  // Build a full Closed instance from just an `oclosed` implementation:
  // `cotraversing` and `dimap` are derived by applying `oclosed` to the
  // corresponding optics (a Grate and an Iso respectively).
  def fromOclosed[P[_, _]](oclosed: _Oclosed[P]): Closed[P] = {
    val x = new _Cotraversing[P] with Profunctor._Dimap[P] {
      def cotraversing[A, B, F[_]: Naperian](p: P[A, B]): P[F[A], F[B]] = oclosed(p)(Grate.cotraversing)
      def dimap[A, B, C, D](p: P[A, B])(f: C => A, g: B => D): P[C, D] = oclosed(p)(Iso(f, g))
    }
    Closed(oclosed, x, Profunctor.fromDimap(x))
  }

  /** Potentially very inefficient since it uses tabulate */
  // Inverse construction: recover `oclosed` from `cotraversing` by running the
  // profunctor through the function-reader container (S => A) => ? and then
  // re-indexing with the grate's `tabulate`.
  def fromCotraversingProfunctor[P[_,_]](cotraversing: _Cotraversing[P], profunctor: Profunctor[P]): Closed[P] = {
    val x = new _Oclosed[P] {
      def oclosed[A, B, S, T](p: P[A, B])(g: Grate[A, B, S, T]): P[S, T] =
        profunctor.dimap(cotraversing[A, B, (S => A) => ?](p))(s => sa => sa(s), g.tabulate)
    }
    Closed(x, cotraversing, profunctor)
  }
}
trait ClosedModule {
  /** Syntax enrichment: adds `oclosed` and `cotraversing` methods to any
    * `P[A, B]` for which a `Closed[P]` instance is in implicit scope. */
  implicit class ClosedSyntax[P[_, _], A, B](val self: P[A, B])(implicit P: Closed[P]) {
    def oclosed[S, T](g: Grate[A, B, S, T]): P[S, T] = P.oclosed(self)(g)
    def cotraversing[F[_]: Naperian]: P[F[A], F[B]] = P.cotraversing(self)
  }
}
| aaronvargo/scalaFP | base/src/main/scala/Closed.scala | Scala | bsd-3-clause | 1,322 |
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.sparta.plugin.output.cassandra
import java.io.{Serializable => JSerializable}
import com.stratio.sparta.sdk.pipeline.output.Output._
import com.stratio.sparta.sdk.pipeline.output.{Output, SaveModeEnum}
import com.stratio.sparta.sdk.properties.ValidatingPropertyMap._
import org.apache.spark.sql._
/**
 * Sparta output plugin that writes DataFrames to Cassandra via the
 * spark-cassandra connector. Keyspace and cluster come from the plugin
 * properties, falling back to "sparta" / "default".
 */
class CassandraOutput(name: String, properties: Map[String, JSerializable]) extends Output(name, properties) {

  // Cassandra limits table names; see getTableName below.
  val MaxTableNameLength = 48

  val keyspace = properties.getString("keyspace", "sparta")
  val cluster = properties.getString("cluster", "default")

  override def supportedSaveModes: Seq[SaveModeEnum.Value] =
    Seq(SaveModeEnum.Append, SaveModeEnum.ErrorIfExists, SaveModeEnum.Ignore, SaveModeEnum.Overwrite)

  override def save(dataFrame: DataFrame, saveMode: SaveModeEnum.Value, options: Map[String, String]): Unit = {
    val tableNameVersioned = getTableName(getTableNameFromOptions(options).toLowerCase)
    validateSaveMode(saveMode)

    val connectorOptions =
      Map("table" -> tableNameVersioned, "keyspace" -> keyspace, "cluster" -> cluster) ++ getCustomProperties

    dataFrame.write
      .format("org.apache.spark.sql.cassandra")
      .mode(getSparkSaveMode(saveMode))
      .options(connectorOptions)
      .save()
  }

  /** Truncates the table name so it fits within Cassandra's length limit. */
  def getTableName(table: String): String =
    if (table.length > MaxTableNameLength - 3) table.take(MaxTableNameLength - 3) else table
}
object CassandraOutput {

  final val DefaultHost = "127.0.0.1"
  final val DefaultPort = "9042"

  /**
   * Builds the Spark properties needed by the Cassandra connector: any
   * user-supplied "sparkProperties" plus the connection host/port (with
   * sensible local defaults).
   */
  def getSparkConfiguration(configuration: Map[String, JSerializable]): Seq[(String, String)] = {
    val connectionHost = configuration.getString("connectionHost", DefaultHost)
    val connectionPort = configuration.getString("connectionPort", DefaultPort)

    getSparkCassandraProperties(configuration) ++ Seq(
      "spark.cassandra.connection.host" -> connectionHost,
      "spark.cassandra.connection.port" -> connectionPort
    )
  }

  /** Extracts key/value pairs from the optional "sparkProperties" JSON blob. */
  private def getSparkCassandraProperties(configuration: Map[String, JSerializable]): Seq[(String, String)] = {
    if (configuration.contains("sparkProperties")) {
      val conObj = configuration.getMapFromJsoneyString("sparkProperties")
      conObj.map { propKeyPair =>
        (propKeyPair("sparkPropertyKey"), propKeyPair("sparkPropertyValue"))
      }
    } else {
      Seq()
    }
  }
}
| fjsc/sparta | plugins/src/main/scala/com/stratio/sparta/plugin/output/cassandra/CassandraOutput.scala | Scala | apache-2.0 | 3,083 |
package org.jetbrains.plugins.scala
package lang
package psi
package impl
package search
import com.intellij.openapi.progress.ProgressManager
import com.intellij.openapi.util.Comparing
import com.intellij.openapi.util.text.StringUtil
import com.intellij.psi.PsiClass
import com.intellij.psi.search.searches.DirectClassInheritorsSearch
import com.intellij.psi.search.{GlobalSearchScope, LocalSearchScope}
import com.intellij.psi.util.PsiUtil
import com.intellij.util.{Processor, QueryExecutor}
import org.jetbrains.plugins.scala.extensions.{PsiElementExt, inReadAction}
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScNewTemplateDefinition
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScObject, ScTemplateDefinition, ScTypeDefinition}
import org.jetbrains.plugins.scala.lang.psi.stubs.util.ScalaStubsUtil
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
/**
 * User: Alexander Podkhalyuzin
 * Date: 24.10.2008
 */
class ScalaDirectClassInheritorsSearcher extends QueryExecutor[PsiClass, DirectClassInheritorsSearch.SearchParameters] {
  // Feeds direct inheritors of the requested class to `consumer`. Returns
  // false as soon as the consumer rejects an element, true otherwise.
  def execute(queryParameters: DirectClassInheritorsSearch.SearchParameters, consumer: Processor[PsiClass]): Boolean = {
    val clazz = queryParameters.getClassToProcess

    // Intersect the requested scope with the class's own use scope; if the
    // result is not a GlobalSearchScope there is nothing to search, so we
    // return true (search trivially complete).
    val scope = inReadAction {
      val useScope = clazz.getUseScope match {
        case _: LocalSearchScope => clazz.containingScalaFile.map(GlobalSearchScope.fileScope)
        case global: GlobalSearchScope => Some(global)
        case _ => None
      }
      ScalaPsiUtil.intersectScopes(queryParameters.getScope, useScope) match {
        case x: GlobalSearchScope => x
        case _ => return true
      }
    }

    // Inheritors without a usable qualified name (anonymous / new-template
    // definitions) are collected separately; the rest are grouped by a
    // kind-qualified id so same-named classes from different jars can be
    // disambiguated below.
    val anonymousClasses = new ArrayBuffer[PsiClass]()
    val map = new mutable.HashMap[String, ArrayBuffer[PsiClass]]()

    def add(clazz: PsiClass): Unit = {
      val id = inReadAction {
        clazz match {
          case o: ScObject => s"object:${o.qualifiedName}"
          case c: ScTypeDefinition => s"class:${c.qualifiedName}"
          case n: ScNewTemplateDefinition =>
            anonymousClasses += n
            return
          case _ =>
            val qualName = clazz.getQualifiedName
            if (qualName == null) {
              anonymousClasses += clazz
              return
            } else qualName
        }
      }
      val buffer = map.getOrElseUpdate(id, new ArrayBuffer[PsiClass]())
      buffer += clazz
    }

    val candidates: Seq[ScTemplateDefinition] = inReadAction {
      if (!clazz.isValid) return true
      ScalaStubsUtil.getClassInheritors(clazz, scope)
    }

    // Keep only candidates that really are *direct* inheritors.
    for (candidate <- candidates if candidate.showAsInheritor) {
      ProgressManager.checkCanceled()
      if (inReadAction { candidate.isInheritor(clazz, deep = false) }) add(candidate)
    }

    if (map.nonEmpty) {
      // For each group of same-named inheritors, prefer the one living in the
      // same jar as the base class; otherwise pick the one whose jar path
      // shares the longest prefix with the base class's jar.
      def getJarFile(clazz: PsiClass) = inReadAction { PsiUtil.getJarFile(clazz) }
      val clazzJar = getJarFile(clazz)
      for ((_, sameNameInheritors) <- map) {
        ProgressManager.checkCanceled()
        sameNameInheritors.find { inheritor =>
          ProgressManager.checkCanceled()
          Comparing.equal(getJarFile(inheritor), clazzJar)
        } match {
          case Some(inheritor) =>
            if (!consumer.process(inheritor)) return false
          case _ =>
            val closestClass = sameNameInheritors.maxBy { inheritor =>
              StringUtil.commonPrefixLength(getJarFile(inheritor).getCanonicalPath, clazzJar.getCanonicalPath)
            }
            if (!consumer.process(closestClass)) return false
        }
      }
    }

    // Anonymous classes are reported only when the caller asked for them.
    if (anonymousClasses.nonEmpty && queryParameters.includeAnonymous()) {
      for (clazz <- anonymousClasses) {
        if (!consumer.process(clazz)) return false
      }
    }
    true
  }
}
/*
* Copyright (c) 2015 Daniel Higuero.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.spark.batch.examples
import org.apache.log4j.Level
import org.apache.log4j.Logger
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import scala.io.StdIn
/**
 * Spark skeleton application: counts how many of 1000 random doubles
 * exceed 0.5, then waits so the Spark UI can be inspected.
 */
object SparkSimpleApp {
  def main(args: Array[String]): Unit = {
    // Keep Spark's own logging quiet so the example output stays readable.
    Logger.getLogger("org").setLevel(Level.ERROR)
    Logger.getLogger("akka").setLevel(Level.ERROR)

    // Local-mode Spark context with two worker threads.
    val conf: SparkConf = new SparkConf().setMaster("local[2]").setAppName("Skeleton App")
    val sc: SparkContext = new SparkContext(conf)

    // A thousand random doubles, distributed as an RDD.
    val numbers: List[Double] = List.fill(1000)(Math.random())
    val numbersRdd: RDD[Double] = sc.parallelize(numbers)

    // Count the values above 0.5 (an action, so it triggers execution).
    val numFiltered: Long = numbersRdd.filter(_ > 0.5).count()

    println("Number of filtered elements: " + numFiltered)
    println("Open http://localhost:4040 in your browser and check the status.")

    // Block so the user can browse the Spark UI before the context dies.
    StdIn.readLine("Press enter to finish")
  }
}
| dhiguero/spark-exercises | src/main/scala/org/spark/batch/examples/SparkSimpleApp.scala | Scala | apache-2.0 | 1,860 |
/* __ *\\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2002-2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\\___/_/ |_/____/_/ | | **
** |/ **
\\* */
// GENERATED CODE: DO NOT EDIT. See scala.Function0 for timestamp.
package scala
/** A tuple of 22 elements; the canonical representation of a [[scala.Product22]].
*
* @constructor Create a new tuple with 22 elements. Note that it is more idiomatic to create a Tuple22 via `(t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15, t16, t17, t18, t19, t20, t21, t22)`
* @param _1 Element 1 of this Tuple22
* @param _2 Element 2 of this Tuple22
* @param _3 Element 3 of this Tuple22
* @param _4 Element 4 of this Tuple22
* @param _5 Element 5 of this Tuple22
* @param _6 Element 6 of this Tuple22
* @param _7 Element 7 of this Tuple22
* @param _8 Element 8 of this Tuple22
* @param _9 Element 9 of this Tuple22
* @param _10 Element 10 of this Tuple22
* @param _11 Element 11 of this Tuple22
* @param _12 Element 12 of this Tuple22
* @param _13 Element 13 of this Tuple22
* @param _14 Element 14 of this Tuple22
* @param _15 Element 15 of this Tuple22
* @param _16 Element 16 of this Tuple22
* @param _17 Element 17 of this Tuple22
* @param _18 Element 18 of this Tuple22
* @param _19 Element 19 of this Tuple22
* @param _20 Element 20 of this Tuple22
* @param _21 Element 21 of this Tuple22
* @param _22 Element 22 of this Tuple22
*/
@deprecatedInheritance("Tuples will be made final in a future version.", "2.11.0")
case class Tuple22[+T1, +T2, +T3, +T4, +T5, +T6, +T7, +T8, +T9, +T10, +T11, +T12, +T13, +T14, +T15, +T16, +T17, +T18, +T19, +T20, +T21, +T22](_1: T1, _2: T2, _3: T3, _4: T4, _5: T5, _6: T6, _7: T7, _8: T8, _9: T9, _10: T10, _11: T11, _12: T12, _13: T13, _14: T14, _15: T15, _16: T16, _17: T17, _18: T18, _19: T19, _20: T20, _21: T21, _22: T22)
  extends Product22[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22]
{
  // Renders as "(e1,e2,...,e22)" with no spaces, matching the other TupleN
  // classes (this file is emitted by the scala library code generator).
  override def toString() = "(" + _1 + "," + _2 + "," + _3 + "," + _4 + "," + _5 + "," + _6 + "," + _7 + "," + _8 + "," + _9 + "," + _10 + "," + _11 +
    "," + _12 + "," + _13 + "," + _14 + "," + _15 + "," + _16 + "," + _17 + "," + _18 + "," + _19 + "," + _20 + "," + _21 + "," + _22 + ")"
}
| mdemarne/scalahost | tests/src/test/resources/ScalaToMeta/Tuple22/Original.scala | Scala | bsd-3-clause | 2,729 |
package org.abhijitsarkar.feign.domain
import net.jcazevedo.moultingyaml.{DefaultYamlProtocol, YamlFormat, YamlNumber, YamlString, YamlValue}
import org.abhijitsarkar.feign.api.domain.{Delay, DelayStrategy}
import org.slf4j.LoggerFactory
import scala.util.{Success, Try}
/**
 * @author Abhijit Sarkar
 */
object DelayYamlProtocol extends DefaultYamlProtocol {
  val logger = LoggerFactory.getLogger(DelayYamlProtocol.getClass)

  /** Reads a Delay from YAML; writing is intentionally unimplemented. */
  implicit object DelayYamlFormat extends YamlFormat[Delay] {
    // Serialization is not supported; invoking this throws NotImplementedError.
    override def write(obj: Delay): YamlValue = ???

    override def read(yaml: YamlValue): Delay = {
      // Maps a string onto a known DelayStrategy name; unknown or null
      // strategies collapse to None instead of failing.
      def resolveDelayStrategy(s: String): Option[String] = {
        Try(Option(s).map(DelayStrategy.withName)) match {
          case Success(x) => x.map(_.toString)
          case _ => None
        }
      }
      // NOTE(review): moultingyaml's YamlNumber wraps a BigDecimal, so
      // `x.asInstanceOf[Int]` below likely throws ClassCastException at
      // runtime; the surrounding Try then logs "Defaulting delay." and falls
      // back to Delay(). Confirm against the moultingyaml version in use —
      // `x.intValue` would be the safe extraction if so.
      Try(yaml.asYamlObject.getFields(
        YamlString("delayMillis"),
        YamlString("delayStrategy")
      ) match {
        case Seq(YamlNumber(x), YamlString(y)) => new Delay(Some(x.asInstanceOf[Int].toLong), resolveDelayStrategy(y))
        case Seq(YamlNumber(x)) => new Delay(delayMillis = Some(x.asInstanceOf[Int].toLong), None)
        case Seq(YamlString(x)) => new Delay(delayStrategy = resolveDelayStrategy(x))
      }).recoverWith { case ex => logger.warn("Defaulting delay.", ex); throw ex }
        // Any failure above (missing fields, bad types) yields the default Delay.
        .getOrElse(Delay())
    }
  }
}
| abhijitsarkar/feign | feign-domain/src/main/scala/org/abhijitsarkar/feign/domain/DelayYamlProtocol.scala | Scala | apache-2.0 | 1,365 |
package controllers.update
import play.api.mvc.Action
import play.api.mvc.Controller
import oldModel.OldUserDao
import scala.concurrent.ExecutionContext.Implicits.global
import oldModel.OldUser
import services.dao.UserDao
import scala.util.Failure
import scala.util.Success
import java.util.ArrayList
object UpdateController extends Controller {
  /**
   * One-shot migration endpoint: converts every `OldUser` to the new `User`
   * model by deleting the user keyed by its first account id and re-adding
   * the converted record, then responds with a plain-text summary.
   *
   * NOTE(review): `addedUser`/`nonAddedUser` are mutated from nested Future
   * callbacks on the global execution context while the response is built
   * concurrently — the counts are racy. Also, `f` only awaits findAll+map,
   * not the nested delete/add futures, so the summary can be rendered before
   * any user has actually been migrated — confirm whether exact counts matter.
   */
  def updateUser() = Action { implicit request =>
    var addedUser = 0
    var nonAddedUser = 0
    var sizeAllUsers = 0
    val f = OldUserDao.findAll.map { allUsers =>
      sizeAllUsers = allUsers.size
      allUsers.map { oldUser =>
        val user = OldUser.toUser(oldUser)
        // NOTE(review): assumes every old user has at least one account;
        // `accounts.head` throws on an empty list — verify against the data.
        UserDao.delete(oldUser.accounts.head.id).onComplete {
          case Failure(e) => nonAddedUser = nonAddedUser+1
          case Success(_) => {
            // Only re-add after the stale record was removed successfully.
            UserDao.add(user).onComplete {
              case Failure(e) => nonAddedUser = nonAddedUser+1
              case Success(_) => addedUser = addedUser+1
            }
          }
        }
      }
    }
    // Async (pre-Play-2.2 API): completes the HTTP response once `f` finishes.
    Async {
      f.map { n =>
        val sb = new StringBuilder
        sb ++= "Added : "
        sb ++= addedUser.toString
        sb ++= "/"
        sb ++= sizeAllUsers.toString
        sb ++= "\\n\\n"
        var i = 0; // (unused)
        if(nonAddedUser > 0) {
          sb ++= "non added users : "
          sb ++= nonAddedUser.toString
          sb ++= "\\n\\n"
        }
        Ok(sb.toString)
      }
    }
  }
} | Froggies/Skimbo | app/controllers/update/UpdateController.scala | Scala | agpl-3.0 | 1,429 |
/*
Copyright 2015 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding.db.macros.impl
import scala.language.experimental.macros
import scala.reflect.macros.Context
import scala.util.Try
import com.twitter.scalding.macros.impl.CaseClassFieldSetter
/**
 * Emits trees that bind a case-class field value into a `java.sql.Statement`
 * style container. The incoming `idx` is zero-based while JDBC parameter
 * indexes are one-based, hence the `+ 1` adjustment throughout.
 */
private[macros] object JdbcFieldSetter extends CaseClassFieldSetter {
  override def absent(c: Context)(idx: Int, container: c.TermName): c.Tree = {
    import c.universe._
    // A missing value is bound as SQL NULL via setObject.
    q"""$container.setObject($idx + 1, null)"""
  }

  override def default(c: Context)(idx: Int, container: c.TermName, fieldValue: c.Tree): c.Tree = {
    import c.universe._
    // Fallback path: let the driver infer the SQL type from the runtime value.
    q"""$container.setObject($idx + 1, $fieldValue)"""
  }

  override def from(c: Context)(fieldType: c.Type, idx: Int, container: c.TermName, fieldValue: c.Tree): Try[c.Tree] = Try {
    import c.universe._
    // jdbc Statement indexes are one-based; splice the adjusted literal.
    val oneBased = idx + 1

    // Table of supported primitive field types and their typed setters.
    val typedSetters: Seq[(Type, Tree)] = Seq(
      typeOf[String] -> q"$container.setString",
      typeOf[Boolean] -> q"$container.setBoolean",
      typeOf[Short] -> q"$container.setShort",
      typeOf[Int] -> q"$container.setInt",
      typeOf[Long] -> q"$container.setLong",
      typeOf[Float] -> q"$container.setFloat",
      typeOf[Double] -> q"$container.setDouble"
    )

    typedSetters
      .collectFirst { case (tpe, setter) if fieldType =:= tpe => q"""$setter($oneBased, $fieldValue)""" }
      .getOrElse(sys.error(s"Unsupported primitive type ${fieldType}"))
  }
}
| sriramkrishnan/scalding | scalding-db/src/main/scala/com/twitter/scalding/db/macros/impl/JdbcFieldSetter.scala | Scala | apache-2.0 | 2,205 |
/*
* Copyright (C) 2009-2018 Lightbend Inc. <https://www.lightbend.com>
*/
package play.sbt.test
import sbt.Keys.{ ivyScala, sbtPlugin }
import sbt.AutoPlugin
/**
 * sbt 0.13 compatibility shim: forces `overrideScalaVersion` on the project's
 * Ivy Scala configuration to track whether the project is an sbt plugin.
 */
private[test] trait MediatorWorkaroundPluginCompat extends AutoPlugin {
  override def projectSettings = Seq(
    ivyScala := {
      val isSbtPlugin = sbtPlugin.value
      // Reads the previous value of ivyScala and rewrites only the flag.
      ivyScala.value.map(is => is.copy(overrideScalaVersion = isSbtPlugin))
    }
  )
}
| Shenker93/playframework | framework/src/sbt-plugin/src/main/scala-sbt-0.13/play/sbt/test/MediatorWorkaroundPluginCompat.scala | Scala | apache-2.0 | 371 |
import org.apache.spark._
import org.apache.spark.graphx._
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD
/**
 * Single-source shortest paths over a weighted directed graph, implemented as
 * a hand-rolled Pregel loop on GraphX `mapReduceTriplets` supersteps.
 *
 * Vertex 1 is the source (distance 0); all other vertices start at
 * Int.MaxValue/2 ("infinity" chosen so that adding an edge weight cannot
 * overflow). Every 50 inner iterations the graph is written to HDFS as object
 * files and reloaded — presumably to truncate the RDD lineage between rounds;
 * TODO confirm that intent.
 */
object ShortestPath {
  def main(args: Array[String]) {
    //Spark Context
    val tmpSC = new SparkContext
    // Read the file describing the graph. NOTE(review): the original comment
    // said "graph.txt" but the hard-coded path actually reads binaryTree.txt.
    //File Format should be:
    //SrcVertex DestVertex EdgeWeight
    var graphFile = tmpSC.textFile("/user/ubuntu/GoogleGraphShortestPath/binaryTree.txt")
    //Extract edge and vertex information from file. Note that this file cannot contain comments/empty lines
    var tmpEdges = graphFile.map(s => Edge(s.split("\\\\s+")(0).toLong, s.split("\\\\s+")(1).toLong, s.split("\\\\s+")(2).toInt))
    //Get all source vertices from 1st column
    var tmpSrcVertices = graphFile.map(s => (s.split("\\\\s+")(0).toLong, 1))
    //Get destination vertices from 2nd column
    var tmpDstVertices = graphFile.map(s => (s.split("\\\\s+")(1).toLong, 1))
    //Combine above 2 RDDs to remove duplicate vertices
    var tmpVertices = tmpSrcVertices.union(tmpDstVertices).reduceByKey( (value1, value2) => 1 )
    //Create graph using the edge, vertex RDDs
    var tmpGraph = Graph(tmpVertices, tmpEdges)
    // Initialize vertex attributes with tentative distances: 0 for the source
    // (vertex id 1), "infinity" (Int.MaxValue/2) for everyone else.
    var initGraph = tmpGraph.mapVertices((vid, weight) => if (vid==1) 0 else (Int.MaxValue/2))
    //Pregel superstep: send message to destination vertex in map function and combine all recieved messages in reduce function
    var superStep0: RDD[(VertexId, Int)] = initGraph.mapReduceTriplets(et => Iterator((et.dstId, (et.srcAttr+et.attr))), (value1, value2) => math.min(value1,value2))
    //Count total number of messages
    var messageCount: Long = superStep0.count()
    var prevGraph: Graph[Int, Int] = null
    var innerCount: Int = 0;
    var outerCount: Int = 0;
    // Checkpoint iteration 0 so the loop below can reload from a clean lineage.
    initGraph.vertices.saveAsObjectFile("/user/ubuntu/GoogleGraphShortestPath/verticesObject_0")
    initGraph.edges.saveAsObjectFile("/user/ubuntu/GoogleGraphShortestPath/edgesObject_0")
    // Outer loop: reload the last checkpoint, run up to 50 supersteps, and
    // checkpoint again. Terminates when no vertex receives a message
    // (distances have converged) or the iteration cap is hit.
    while ((messageCount > 0) && (outerCount < 1000)) { //(outerCount < (Int.MaxValue/2))) {
      val iterVertices = tmpSC.objectFile[(VertexId, Int)]("/user/ubuntu/GoogleGraphShortestPath/verticesObject_"+outerCount)
      val iterEdges = tmpSC.objectFile[Edge[Int]]("/user/ubuntu/GoogleGraphShortestPath/edgesObject_"+outerCount)
      var iterGraph = Graph(iterVertices, iterEdges)
      innerCount = 0
      while ((messageCount > 0) && (innerCount < 50)) {
        //Update vertex value if recieved message value is lesser than original
        //Variable newVertices only contains the set of vertices which recieved atleast 1 message
        val newVertices = iterGraph.vertices.innerJoin(superStep0)((Vid, oldattr, newattr) => if (newattr < oldattr) newattr else oldattr)
        newVertices.cache()
        //println("*********Message*************InnerJoin")
        //Re-create the graph with new values
        prevGraph = iterGraph
        iterGraph = iterGraph.outerJoinVertices(newVertices){ (Vid, old, newOpt) => newOpt.getOrElse(old)}
        iterGraph.cache()
        //println("*********Message************OuterJoin")
        //Re-run Pregel superstep restricting vertex set to newVertices
        val oldSuperStep0 = superStep0
        superStep0 = iterGraph.mapReduceTriplets(et => Iterator((et.dstId, (et.srcAttr+et.attr))), (value1, value2) => math.min(value1,value2), Option(newVertices, EdgeDirection.Out))
        superStep0.cache()
        //println("*********Message***********SuperStep")
        //Count total number of messages
        messageCount = superStep0.count()
        innerCount += 1;
        //println("Message count is: "+messageCount)
        //println("Iteration updated")
        //println("Iteration count is: "+(innerCount+outerCount))
        // Drop the previous iteration's cached RDDs to bound memory usage.
        newVertices.unpersist(blocking=false)
        prevGraph.unpersistVertices(blocking=false)
        prevGraph.edges.unpersist(blocking=false)
        oldSuperStep0.unpersist(blocking=false)
      }
      //Print answer
      outerCount += 50
      //println("Outer Iteration count is: "+outerCount)
      // Persist this round's result as the next round's starting checkpoint.
      iterGraph.vertices.saveAsObjectFile("/user/ubuntu/GoogleGraphShortestPath/verticesObject_"+outerCount)
      iterGraph.edges.saveAsObjectFile("/user/ubuntu/GoogleGraphShortestPath/edgesObject_"+outerCount)
      iterGraph.vertices.unpersist(blocking=false)
      iterGraph.edges.unpersist(blocking=false)
      iterVertices.unpersist(blocking=false)
      iterEdges.unpersist(blocking=false)
    }
  }
}
| azeemshaikh38/PregelShortestPath | src/main/scala/ShortestPath.scala | Scala | apache-2.0 | 5,781 |
package mesosphere.marathon
import java.util.concurrent.CountDownLatch
import java.util.{Timer, TimerTask}
import akka.Done
import akka.actor.{ActorRef, ActorSystem}
import akka.stream.Materializer
import akka.stream.scaladsl.Sink
import akka.util.Timeout
import com.google.common.util.concurrent.AbstractExecutionThreadService
import com.typesafe.scalalogging.StrictLogging
import javax.inject.{Inject, Named}
import mesosphere.marathon.MarathonSchedulerActor._
import mesosphere.marathon.core.deployment.{DeploymentManager, DeploymentPlan, DeploymentStepInfo}
import mesosphere.marathon.core.election.{ElectionCandidate, ElectionService}
import mesosphere.marathon.core.group.GroupManager
import mesosphere.marathon.core.heartbeat._
import mesosphere.marathon.core.leadership.LeadershipCoordinator
import mesosphere.marathon.core.storage.store.PersistenceStore
import mesosphere.marathon.state.{AbsolutePathId, AppDefinition, Timestamp}
import mesosphere.marathon.storage.migration.Migration
import mesosphere.util.PromiseActor
import org.apache.mesos.SchedulerDriver
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.util.Failure
/**
 * PrePostDriverCallback is implemented by callback receivers which have to listen for driver
 * start/stop events.
 *
 * Callbacks are invoked around the Mesos SchedulerDriver lifecycle:
 * `preDriverStarts` after leader election and migration but before the driver
 * is created, and `postDriverTerminates` once the driver's run loop returned.
 */
trait PrePostDriverCallback {
  /**
   * Will get called _before_ the driver is running, but after migration.
   *
   * @return a future that is awaited (with a timeout) during leadership start;
   *         the driver is not created until it completes.
   */
  def preDriverStarts: Future[Unit]
  /**
   * Will get called _after_ the driver terminated.
   *
   * @return a future that is awaited before driver teardown is considered done.
   */
  def postDriverTerminates: Future[Unit]
}
/**
 * DeploymentService provides methods to deploy plans.
 */
// TODO (AD): do we need this trait?
trait DeploymentService {
  /**
   * Deploy a plan.
   * @param plan the plan to deploy.
   * @param force only one deployment can be applied at a time. With this flag
   * one can control, to stop a current deployment and start a new one.
   * @return a failed future if the deployment failed.
   */
  def deploy(plan: DeploymentPlan, force: Boolean = false): Future[Done]

  /** Lists the step info of every deployment currently in progress. */
  def listRunningDeployments(): Future[Seq[DeploymentStepInfo]]
}
/**
 * Wrapper class for the scheduler.
 *
 * Ties together leader election, state migration, the Mesos SchedulerDriver
 * lifecycle and periodic background work (task reconciliation and app
 * scaling). `run()` offers leadership and then blocks; only after winning the
 * election (`startLeadership`) is a driver created and run. Losing leadership
 * or shutting down stops the driver; a driver that dies on its own triggers
 * leadership abdication.
 */
class MarathonSchedulerService @Inject() (
    persistenceStore: PersistenceStore[_, _, _],
    leadershipCoordinator: LeadershipCoordinator,
    config: MarathonConf,
    electionService: ElectionService,
    prePostDriverCallbacks: Seq[PrePostDriverCallback],
    groupManager: GroupManager,
    driverFactory: SchedulerDriverFactory,
    system: ActorSystem,
    migration: Migration,
    deploymentManager: DeploymentManager,
    @Named("schedulerActor") schedulerActor: ActorRef,
    heartbeatMonitor: MesosHeartbeatMonitor
)(implicit mat: Materializer)
    extends AbstractExecutionThreadService
    with ElectionCandidate
    with DeploymentService
    with StrictLogging {

  import scala.concurrent.ExecutionContext.Implicits.global

  implicit val zkTimeout = config.zkTimeoutDuration

  // Keeps run() alive; counted down only by triggerShutdown().
  val isRunningLatch = new CountDownLatch(1)

  // Time to wait before trying to reconcile app tasks after driver starts
  val reconciliationInitialDelay =
    Duration(config.reconciliationInitialDelay(), MILLISECONDS)

  // Interval between task reconciliation operations
  val reconciliationInterval =
    Duration(config.reconciliationInterval(), MILLISECONDS)

  // Time to wait before trying to scale apps after driver starts
  val scaleAppsInitialDelay =
    Duration(config.scaleAppsInitialDelay(), MILLISECONDS)

  // Interval between attempts to scale apps
  val scaleAppsInterval =
    Duration(config.scaleAppsInterval(), MILLISECONDS)

  // Replaced with a fresh Timer whenever leadership is lost (see stopLeadership).
  private[mesosphere] var timer = newTimer()

  // This is a little ugly as we are using a mutable variable. But drivers can't
  // be reused (i.e. once stopped they can't be started again. Thus,
  // we have to allocate a new driver before each run or after each stop.
  var driver: Option[SchedulerDriver] = None

  implicit val timeout: Timeout = 5.seconds

  protected def newTimer() = new Timer("marathonSchedulerTimer")

  /**
   * Deploy a plan by forwarding it to the MarathonSchedulerActor.
   * The returned future fails with the underlying cause if the
   * deployment could not be started.
   */
  def deploy(plan: DeploymentPlan, force: Boolean = false): Future[Done] = {
    logger.debug(s"Forwarding new deployment plan with planId=${plan.id}, force=$force to the MarathonSchedulerActor")
    val future: Future[Any] = PromiseActor.askWithoutTimeout(system, schedulerActor, Deploy(plan, force))
    future.map {
      case DeploymentStarted(_) => Done
      case DeploymentFailed(_, t) => throw t
    }
  }

  /** Fire-and-forget cancellation of a running deployment. */
  def cancelDeployment(plan: DeploymentPlan): Unit =
    schedulerActor ! CancelDeployment(plan)

  // Blocks up to zkTimeoutDuration while streaming the app's stored versions.
  def listAppVersions(appId: AbsolutePathId): Seq[Timestamp] =
    Await.result(groupManager.appVersions(appId).map(Timestamp(_)).runWith(Sink.seq), config.zkTimeoutDuration)

  def listRunningDeployments(): Future[Seq[DeploymentStepInfo]] =
    deploymentManager.list()

  // Blocks up to zkTimeoutDuration to resolve a specific stored app version.
  def getApp(appId: AbsolutePathId, version: Timestamp): Option[AppDefinition] = {
    Await.result(groupManager.appVersion(appId, version.toOffsetDateTime), config.zkTimeoutDuration)
  }

  //Begin Service interface

  override def startUp(): Unit = {
    logger.info("Starting up")
    super.startUp()
  }

  override def run(): Unit = {
    logger.info("Beginning run")

    // The first thing we do is offer our leadership.
    electionService.offerLeadership(this)

    // Block on the latch which will be countdown only when shutdown has been
    // triggered. This is to prevent run()
    // from exiting.
    scala.concurrent.blocking {
      isRunningLatch.await()
    }

    logger.info("Completed run")
  }

  override def triggerShutdown(): Unit =
    synchronized {
      logger.info("Shutdown triggered")

      electionService.abdicateLeadership()

      stopDriver()

      logger.info("Cancelling timer")
      timer.cancel()

      // The countdown latch blocks run() from exiting. Counting down the latch removes the block.
      logger.info("Removing the blocking of run()")
      isRunningLatch.countDown()

      super.triggerShutdown()
    }

  private[this] def stopDriver(): Unit =
    synchronized {
      // many are the assumptions concerning when this is invoked. see startLeadership, stopLeadership,
      // triggerShutdown.
      logger.info("Stopping driver")

      // Stopping the driver will cause the driver run() method to return.
      driver.foreach(_.stop(true)) // failover = true

      // signals that the driver was stopped manually (as opposed to crashing mid-process)
      driver = None
    }

  //End Service interface

  //Begin ElectionCandidate interface

  override def startLeadership(): Unit =
    synchronized {
      logger.info("As new leader running the driver")

      // allow interactions with the persistence store
      persistenceStore.markOpen()

      // Before reading to and writing from the storage, let's ensure that
      // no stale values are read from the persistence store.
      // Although in case of ZK it is done at the time of creation of CuratorZK,
      // it is better to be safe than sorry.
      Await.result(persistenceStore.sync(), Duration.Inf)

      refreshCachesAndDoMigration()

      // run all pre-driver callbacks
      logger.info(s"""Call preDriverStarts callbacks on ${prePostDriverCallbacks.mkString(", ")}""")
      Await.result(
        Future.sequence(prePostDriverCallbacks.map(_.preDriverStarts)),
        config.onElectedPrepareTimeout().millis
      )
      logger.info("Finished preDriverStarts callbacks")

      // start all leadership coordination actors
      Await.result(leadershipCoordinator.prepareForStart(), config.maxActorStartupTime().milliseconds)

      // create new driver
      driver = Some(driverFactory.createDriver())

      // start timers
      schedulePeriodicOperations()

      // We have to start the Heartbeat monitor even before we're successfully registered, since in rare occasions driver
      // can hang forever trying to connect to Mesos (or doing some other driver work). In this case we also want
      // to suicide after not receiving any messages for a while.
      driver.foreach(heartbeatMonitor.activate(_))

      // The following block asynchronously runs the driver. Note that driver.run()
      // blocks until the driver has been stopped (or aborted).
      Future {
        scala.concurrent.blocking {
          driver.foreach(_.run())
        }
      } onComplete { result =>
        synchronized {
          logger.info(s"Driver future completed with result=$result.")
          result match {
            case Failure(t) => logger.error("Exception while running driver", t)
            case _ =>
          }

          // ONLY do this if there's some sort of driver crash: avoid invoking abdication logic if
          // the driver was stopped via stopDriver. stopDriver only happens when
          // 1. we're being terminated (and have already abdicated)
          // 2. we've lost leadership (no need to abdicate if we've already lost)
          driver.foreach { _ =>
            electionService.abdicateLeadership()
          }

          driver = None

          logger.info(s"Call postDriverRuns callbacks on ${prePostDriverCallbacks.mkString(", ")}")
          Await.result(Future.sequence(prePostDriverCallbacks.map(_.postDriverTerminates)), config.zkTimeoutDuration)
          logger.info("Finished postDriverRuns callbacks")
        }
      }
    }

  private def refreshCachesAndDoMigration(): Unit = {
    // We might not need to invalidate the group cache before migration, but it doesn't hurt. After migration we
    // certainly want to make sure the migrated state is reloaded
    // refresh group repository cache
    Await.result(groupManager.invalidateGroupCache(), Duration.Inf)

    // execute tasks, only the leader is allowed to
    migration.migrate()

    // refresh group repository again - migration or restore might changed zk state, this needs to be re-loaded
    Await.result(groupManager.invalidateAndRefreshGroupCache(), Duration.Inf)
  }

  override def stopLeadership(): Unit =
    synchronized {
      // invoked by election service upon loss of leadership (state transitioned to Idle)
      logger.info("Lost leadership")

      // disallow any interaction with the persistence storage
      persistenceStore.markClosed()

      leadershipCoordinator.stop()

      // Swap in a fresh timer before cancelling the old one so periodic tasks
      // scheduled by a subsequent leadership term start from a clean slate.
      val oldTimer = timer
      timer = newTimer()
      oldTimer.cancel()

      driver.foreach { driverInstance =>
        heartbeatMonitor.deactivate(driverInstance)
        // Our leadership has been defeated. Thus, stop the driver.
        stopDriver()
      }
    }

  //End ElectionDelegate interface

  private def schedulePeriodicOperations(): Unit =
    synchronized {
      timer.schedule(
        new TimerTask {
          def run(): Unit = {
            if (electionService.isLeader) {
              schedulerActor ! ScaleRunSpecs
            } else logger.info("Not leader therefore not scaling apps")
          }
        },
        scaleAppsInitialDelay.toMillis,
        scaleAppsInterval.toMillis
      )

      timer.schedule(
        new TimerTask {
          def run(): Unit = {
            if (electionService.isLeader) {
              schedulerActor ! ReconcileTasks
              schedulerActor ! ReconcileHealthChecks
            } else logger.info("Not leader therefore not reconciling tasks")
          }
        },
        reconciliationInitialDelay.toMillis,
        reconciliationInterval.toMillis
      )
    }
}
| mesosphere/marathon | src/main/scala/mesosphere/marathon/MarathonSchedulerService.scala | Scala | apache-2.0 | 11,436 |
package myscalc.calc
import myscalc.variables.Variables
/**
 * Sentinel expression representing an undefined value.
 *
 * An undefined value must never be evaluated further: `advance` aborts with
 * an error (the Japanese message translates to "advance was called on an
 * undefined value"), `hasFinished` always reports completion, and the
 * printable representation is the literal "Undef".
 */
case class Undef() extends Base {
  // Stepping an undefined value is a programming error; fail fast.
  override def advance(va: Variables) = sys.error("未定義にadvanceされた")
  override def hasFinished(va: Variables): Boolean = true
  override def string: String = "Undef"
}
| soukouki/myscalc | src/main/scala/calc/Undef.scala | Scala | mit | 271 |
package com.metebalci
// lens l : C <=> A
// C: set of concrete
// A: set of abstract views
// you can think of C as source and A as target
// or C as source and A as view
// or database and view
// or json and field
// or memory model and UI representation
// or xml and pretty printed xml
// C <== connected structures ==> A
// A lens C <=> A: `g` extracts the abstract view A from a concrete value C,
// `p` writes a view back into a concrete value. Both must be total functions.
case class Lens[C, A](
  g: C => A,
  p: (A, C) => C
) {

  /** Extract the view A out of the concrete value c (get: C => A). */
  def get(c: C): A = g(c)

  /** Write the view a back into the concrete value c (put: AxC => C). */
  def put(a: A, c: C): C = p(a, c)

  /**
   * Sequential composition: focus first through this lens (C => A), then
   * through `that` (A => Z), yielding a lens from C directly to Z.
   */
  def andThen[Z](that: Lens[A, Z]): Lens[C, Z] = {
    val composedGet = (c: C) => that.get(this.get(c))
    val composedPut = (z: Z, c: C) => {
      val view = this.get(c)               // A extracted from c
      val updatedView = that.put(z, view)  // z written into the A view
      this.put(updatedView, c)             // updated view written back into c
    }
    Lens[C, Z](composedGet, composedPut)
  }

  /** PutGet law / acceptability: view updates translate exactly to the source. */
  def isAcceptable(a: A, c: C) = get(put(a, c)) == a

  /** GetPut law / stability: writing back an unchanged view changes nothing. */
  def isStable(c: C) = put(get(c), c) == c

  /**
   * PutPut law / forgetfulness: a second put completely overwrites the first.
   * Usually too restrictive in practice.
   */
  def isForgetful(a1: A, a2: A, c: C) = put(a1, put(a2, c)) == put(a1, c)

  /** Well-behaved ("reasonable to use") = acceptable + stable. */
  def isWellBehaved(a1: A, a2: A, c: C) =
    isAcceptable(a1, c) && isStable(c)

  /** Very well-behaved = well-behaved + forgetful. */
  def isVeryWellBehaved(a1: A, a2: A, c: C) =
    isWellBehaved(a1, a2, c) && isForgetful(a1, a2, c)

  /** Bijectivity: put ignores the original source. Too strong for most uses. */
  def isBijective(a: A, c1: C, c2: C) = put(a, c1) == put(a, c2)

  /**
   * Generalisation of put: read the view, transform it with f, write it back.
   * modify(_ => v, t) is equivalent to put(v, t).
   */
  def modify(f: A => A, t: C): C = {
    val transformed = f(get(t))
    put(transformed, t)
  }

  /** Store-comonad style formulation: a (reader, writer) pair focused on t. */
  def store(t: C): (Unit => A, A => C) =
    ((_: Unit) => get(t), (a: A) => put(a, t))
}
| metebalci/experiment-lenses-scala | src/main/scala/lens.scala | Scala | gpl-2.0 | 2,353 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import org.scalatest.prop.PropertyChecks
import org.scalatest.exceptions.TestFailedException
import Matchers._
import org.scalactic.Prettifier
/**
 * Exercises ScalaTest's `be` matcher for equality on `Any`: structural array
 * comparison, positive/negative matches, `and`/`or` logical combinations with
 * and without `not`, the exact failure messages of the produced
 * TestFailedExceptions, custom implicit `Prettifier` support and (on the JVM)
 * serializability of the thrown exception.
 */
class ShouldBeAnySpec extends FunSpec with PropertyChecks with ReturnsNormallyThrowsAssertion {

  // Checking for equality with "be"
  describe("The be token") {

    it("should compare arrays structurally") {
      Array(1, 2) should be (Array(1, 2))
    }

    it("should call .deep on an array in either left or ride sides") {
      Array(1, 2) should be (List(1, 2))
      List(1, 2) should be (Array(1, 2))
    }

    it("should do nothing when equal") {
      1 should be (1)
      // 1 shouldBe 1

      // objects should equal themselves
      forAll((s: String) => s should be (s))
      forAll((i: Int) => i should be (i))

      // a string should equal another string with the same value
      forAll((s: String) => s should be (new String(s)))
    }

    it("should do nothing when not equal and used with not") {
      1 should not { be (2) }
      1 should not be (2)

      // unequal objects should not equal each other
      forAll((s: String, t: String) => if (s != t) s should not { be (t) } else succeed)
      forAll((s: String, t: String) => if (s != t) s should not be (t) else succeed)
    }

    it("should do nothing when equal and used in a logical-and expression") {
      1 should (be (1) and be (2 - 1))
    }

    it("should do nothing when equal and used in multi-part logical expressions") {

      // Just to make sure these work strung together
      1 should (be (1) and be (1) and be (1) and be (1))
      1 should (be (1) and be (1) or be (1) and be (1) or be (1))
      1 should (
        be (1) and
        be (1) or
        be (1) and
        be (1) or
        be (1)
      )
    }

    it("should do nothing when equal and used in a logical-or expression") {
      1 should { be (1) or be (2 - 1) }
    }

    it("should do nothing when not equal and used in a logical-and expression with not") {
      1 should { not { be (2) } and not { be (3 - 1) }}
      1 should { not be (2) and (not be (3 - 1)) }
      1 should (not be (2) and not be (3 - 1))
    }

    it("should do nothing when not equal and used in a logical-or expression with not") {
      1 should { not { be (2) } or not { be (3 - 1) }}
      1 should { not be (2) or (not be (3 - 1)) }
      1 should (not be (2) or not be (3 - 1))
    }

    it("should throw an assertion error when not equal") {
      val caught1 = intercept[TestFailedException] {
        1 should be (2)
      }
      assert(caught1.getMessage === "1 was not equal to 2")

      // unequal objects used with "a should equal (b)" should throw an TestFailedException
      forAll((s: String, t: String) => if (s != t) assertThrows[TestFailedException](s should be (t)) else succeed)

      val caught2 = intercept[TestFailedException] {
        1 should not (not be (2))
      }
      assert(caught2.getMessage === "1 was not equal to 2")

      val s: String = null
      val caught3 = intercept[TestFailedException] {
        s should be ("hi")
      }
      assert(caught3.getMessage === "null was not equal to \\"hi\\"")
    }

    it("should throw an assertion error when equal but used with should not") {
      val caught1 = intercept[TestFailedException] {
        1 should not { be (1) }
      }
      assert(caught1.getMessage === "1 was equal to 1")

      val caught2 = intercept[TestFailedException] {
        1 should not be (1)
      }
      assert(caught2.getMessage === "1 was equal to 1")

      // the same object used with "a should not { equal (a) } should throw TestFailedException
      forAll((s: String) => assertThrows[TestFailedException](s should not { be (s) }))
      forAll((i: Int) => assertThrows[TestFailedException](i should not { be (i) }))
      forAll((s: String) => assertThrows[TestFailedException](s should not be (s)))
      forAll((i: Int) => assertThrows[TestFailedException](i should not be (i)))

      // two different strings with the same value used with "s should not { be (t) } should throw TestFailedException
      forAll((s: String) => assertThrows[TestFailedException](s should not { be (new String(s)) }))
      forAll((s: String) => assertThrows[TestFailedException](s should not be (new String(s))))

      val caught3 = intercept[TestFailedException] {
        1 should not (not (not be (1)))
      }
      assert(caught3.getMessage === "1 was equal to 1")
    }

    it("should throw an assertion error when not equal and used in a logical-and expression") {
      val caught = intercept[TestFailedException] {
        1 should { be (5) and be (2 - 1) }
      }
      assert(caught.getMessage === "1 was not equal to 5")
    }

    it("should throw an assertion error when not equal and used in a logical-or expression") {
      val caught = intercept[TestFailedException] {
        1 should { be (5) or be (5 - 1) }
      }
      assert(caught.getMessage === "1 was not equal to 5, and 1 was not equal to 4")
    }

    it("should throw an assertion error when equal and used in a logical-and expression with not") {
      val caught1 = intercept[TestFailedException] {
        1 should { not { be (1) } and not { be (3 - 1) }}
      }
      assert(caught1.getMessage === "1 was equal to 1")

      val caught2 = intercept[TestFailedException] {
        1 should { not be (1) and (not be (3 - 1)) }
      }
      assert(caught2.getMessage === "1 was equal to 1")

      val caught3 = intercept[TestFailedException] {
        1 should (not be (1) and not be (3 - 1))
      }
      assert(caught3.getMessage === "1 was equal to 1")

      val caught4 = intercept[TestFailedException] {
        1 should { not { be (2) } and not { be (1) }}
      }
      assert(caught4.getMessage === "1 was not equal to 2, but 1 was equal to 1")

      val caught5 = intercept[TestFailedException] {
        1 should { not be (2) and (not be (1)) }
      }
      assert(caught5.getMessage === "1 was not equal to 2, but 1 was equal to 1")

      val caught6 = intercept[TestFailedException] {
        1 should (not be (2) and not be (1))
      }
      assert(caught6.getMessage === "1 was not equal to 2, but 1 was equal to 1")
    }

    it("should throw an assertion error when equal and used in a logical-or expression with not") {
      val caught1 = intercept[TestFailedException] {
        1 should { not { be (1) } or not { be (2 - 1) }}
      }
      assert(caught1.getMessage === "1 was equal to 1, and 1 was equal to 1")

      val caught2 = intercept[TestFailedException] {
        1 should { not be (1) or { not be (2 - 1) }}
      }
      assert(caught2.getMessage === "1 was equal to 1, and 1 was equal to 1")

      val caught3 = intercept[TestFailedException] {
        1 should (not be (1) or not be (2 - 1))
      }
      assert(caught3.getMessage === "1 was equal to 1, and 1 was equal to 1")
    }

    it("should use custom implicit Prettifier when it is in scope") {
      implicit val customPrettifier =
        Prettifier {
          case s: String => "!!! " + s + " !!!"
          case other => Prettifier.default(other)
        }
      val e = intercept[TestFailedException] {
        "test 1" should be ("test 2")
      }
      assert(e.message == Some("!!! test [1] !!! was not equal to !!! test [2] !!!"))
    }

    // SKIP-SCALATESTJS,NATIVE-START
    it("should produce TestFailedExceptions that can be serialized") {
      import scala.util.Try
      val result = Try(1 shouldBe 2)
      val baos = new java.io.ByteArrayOutputStream
      val oos = new java.io.ObjectOutputStream(baos)
      oos.writeObject(result) // Should not throw an exeption
    }
    // SKIP-SCALATESTJS,NATIVE-END
  }
}
| dotty-staging/scalatest | scalatest-test/src/test/scala/org/scalatest/ShouldBeAnySpec.scala | Scala | apache-2.0 | 8,354 |
package com.twitter.finagle.builder
import com.twitter.conversions.time._
import com.twitter.finagle._
import com.twitter.finagle.client.Transporter.Credentials
import com.twitter.finagle.client.{DefaultPool, StackClient, StdStackClient}
import com.twitter.finagle.client.{StackBasedClient, Transporter}
import com.twitter.finagle.factory.{BindingFactory, TimeoutFactory}
import com.twitter.finagle.filter.ExceptionSourceFilter
import com.twitter.finagle.loadbalancer.LoadBalancerFactory
import com.twitter.finagle.netty3.Netty3Transporter
import com.twitter.finagle.service.FailFastFactory.FailFast
import com.twitter.finagle.service._
import com.twitter.finagle.ssl.Ssl
import com.twitter.finagle.stats.{NullStatsReceiver, StatsReceiver}
import com.twitter.finagle.tracing.{NullTracer, TraceInitializerFilter}
import com.twitter.finagle.transport.Transport
import com.twitter.finagle.util._
import com.twitter.util
import com.twitter.util.{Duration, Future, NullMonitor, Time, Try}
import java.net.{InetSocketAddress, SocketAddress}
import java.util.concurrent.atomic.AtomicBoolean
import java.util.logging.Level
import javax.net.ssl.SSLContext
import org.jboss.netty.channel.{Channel, ChannelFactory}
import scala.annotation.implicitNotFound
/**
 * Factory for [[com.twitter.finagle.builder.ClientBuilder]] instances
 */
object ClientBuilder {
  // Builder type-state: cluster, codec and host-connection-limit all configured.
  type Complete[Req, Rep] =
    ClientBuilder[Req, Rep, ClientConfig.Yes, ClientConfig.Yes, ClientConfig.Yes]
  // Builder type-state: destination/cluster not yet configured.
  type NoCluster[Req, Rep] =
    ClientBuilder[_, _, Nothing, ClientConfig.Yes, ClientConfig.Yes]
  // Builder type-state: codec not yet configured.
  type NoCodec =
    ClientBuilder[_, _, ClientConfig.Yes, Nothing, ClientConfig.Yes]

  def apply() = new ClientBuilder()

  /**
   * Used for Java access.
   */
  def get() = apply()

  /**
   * Provides a typesafe `build` for Java.
   */
  def safeBuild[Req, Rep](builder: Complete[Req, Rep]): Service[Req, Rep] =
    builder.build()(ClientConfigEvidence.FullyConfigured)

  /**
   * Provides a typesafe `buildFactory` for Java.
   */
  def safeBuildFactory[Req, Rep](builder: Complete[Req, Rep]): ServiceFactory[Req, Rep] =
    builder.buildFactory()(ClientConfigEvidence.FullyConfigured)

  /**
   * Returns a [[com.twitter.finagle.client.StackClient]] which is equivalent to a
   * `ClientBuilder` configured with the same codec; that is, given
   * {{{
   * val cb = ClientBuilder()
   *   .dest(dest)
   *   .name(name)
   *   .codec(codec)
   *
   * val sc = ClientBuilder.stackClientOfCodec(codec)
   * }}}
   * then the following are equivalent
   * {{{
   * cb.build()
   * sc.newService(dest, name)
   * }}}
   * and the following are also equivalent
   * {{{
   * cb.buildFactory()
   * sc.newClient(dest, name)
   * }}}
   */
  def stackClientOfCodec[Req, Rep](
    codecFactory: CodecFactory[Req, Rep]#Client
  ): StackClient[Req, Rep] =
    ClientBuilderClient(CodecClient[Req, Rep](codecFactory))
}
/**
 * Type-state markers and ClientBuilder-specific Stack params, plus the
 * historical default parameter values used by `ClientBuilder`.
 */
object ClientConfig {
  // Phantom marker used by ClientBuilder's type parameters to track which
  // required pieces of configuration have been supplied.
  sealed trait Yes
  type FullySpecified[Req, Rep] = ClientConfig[Req, Rep, Yes, Yes, Yes]

  val DefaultName = "client"

  // Placeholder client whose service always fails; used as the initial
  // client before a codec/stack has been configured.
  private case class NilClient[Req, Rep](
    stack: Stack[ServiceFactory[Req, Rep]] = StackClient.newStack[Req, Rep],
    params: Stack.Params = DefaultParams
  ) extends StackBasedClient[Req, Rep] {

    def withParams(ps: Stack.Params) = copy(params = ps)
    def transformed(t: Stack.Transformer) = copy(stack = t(stack))

    def newService(dest: Name, label: String): Service[Req, Rep] =
      newClient(dest, label).toService

    def newClient(dest: Name, label: String): ServiceFactory[Req, Rep] =
      ServiceFactory(() => Future.value(Service.mk[Req, Rep](_ => Future.exception(
        new Exception("unimplemented")))))
  }

  def nilClient[Req, Rep]: StackBasedClient[Req, Rep] = NilClient[Req, Rep]()

  // params specific to ClientBuilder

  // Destination the built client connects to.
  case class DestName(name: Name) {
    def mk(): (DestName, Stack.Param[DestName]) =
      (this, DestName.param)
  }
  object DestName {
    implicit val param = Stack.Param(DestName(Name.empty))
  }

  // Total (end-to-end, including retries) request timeout.
  case class GlobalTimeout(timeout: Duration) {
    def mk(): (GlobalTimeout, Stack.Param[GlobalTimeout]) =
      (this, GlobalTimeout.param)
  }
  object GlobalTimeout {
    implicit val param = Stack.Param(GlobalTimeout(Duration.Top))
  }

  // Retry policy applied by the builder; defaults to never retrying.
  case class Retries(policy: RetryPolicy[Try[Nothing]]) {
    def mk(): (Retries, Stack.Param[Retries]) =
      (this, Retries.param)
  }
  object Retries {
    implicit val param = Stack.Param(Retries(RetryPolicy.Never))
  }

  // Whether builder-created resources are daemonized.
  case class Daemonize(onOrOff: Boolean) {
    def mk(): (Daemonize, Stack.Param[Daemonize]) =
      (this, Daemonize.param)
  }
  object Daemonize {
    implicit val param = Stack.Param(Daemonize(true))
  }

  // Creates a per-client Monitor from its label.
  case class MonitorFactory(mFactory: String => util.Monitor) {
    def mk(): (MonitorFactory, Stack.Param[MonitorFactory]) =
      (this, MonitorFactory.param)
  }
  object MonitorFactory {
    implicit val param = Stack.Param(MonitorFactory(_ => NullMonitor))
  }

  // historical defaults for ClientBuilder
  val DefaultParams = Stack.Params.empty +
    param.Stats(NullStatsReceiver) +
    param.Label(DefaultName) +
    DefaultPool.Param(low = 1, high = Int.MaxValue,
      bufferSize = 0, idleTime = 5.seconds, maxWaiters = Int.MaxValue) +
    param.Tracer(NullTracer) +
    param.Monitor(NullMonitor) +
    param.Reporter(NullReporterFactory) +
    Daemonize(false)
}
// Compile-time proof that all three required builder fields are specified;
// `build()`/`buildFactory()` demand an implicit instance, and the only one
// available is `FullyConfigured`, i.e. all phantom types are `Yes`. The
// @implicitNotFound message is what users see on an incomplete builder.
@implicitNotFound("Builder is not fully configured: Cluster: ${HasCluster}, Codec: ${HasCodec}, HostConnectionLimit: ${HasHostConnectionLimit}")
private[builder] trait ClientConfigEvidence[HasCluster, HasCodec, HasHostConnectionLimit]

private[builder] object ClientConfigEvidence {
  implicit object FullyConfigured extends ClientConfigEvidence[ClientConfig.Yes, ClientConfig.Yes, ClientConfig.Yes]
}
/**
 * Pure phantom-type marker: this class carries no data and is never
 * instantiated; it exists only so the type parameters can record, at the
 * type level, which required builder fields have been set.
 *
 * TODO: do we really need to specify HasCodec? -- it's implied in a
 * way by the proper Req, Rep.
 *
 * Note: these are documented in ClientBuilder, as that is where they
 * are accessed by the end-user.
 */
private[builder] final class ClientConfig[Req, Rep, HasCluster, HasCodec, HasHostConnectionLimit]
/**
* A builder of Finagle [[com.twitter.finagle.Client Clients]].
*
* Please see the
* [[http://twitter.github.io/finagle/guide/FAQ.html#configuring-finagle6 Finagle user guide]]
* for information on a newer set of client-construction APIs introduced in Finagle v6.
*
* {{{
* val client = ClientBuilder()
* .codec(Http)
* .hosts("localhost:10000,localhost:10001,localhost:10003")
* .hostConnectionLimit(1)
* .tcpConnectTimeout(1.second) // max time to spend establishing a TCP connection.
* .retries(2) // (1) per-request retries
* .reportTo(new OstrichStatsReceiver) // export host-level load data to ostrich
* .logger(Logger.getLogger("http"))
* .build()
* }}}
*
* The `ClientBuilder` requires the definition of `cluster`, `codec`,
* and `hostConnectionLimit`. In Scala, these are statically type
* checked, and in Java the lack of any of the above causes a runtime
* error.
*
* The `build` method uses an implicit argument to statically
* typecheck the builder (to ensure completeness, see above). The Java
* compiler cannot provide such implicit, so we provide a separate
* function in Java to accomplish this. Thus, the Java code for the
* above is
*
* {{{
* Service<HttpRequest, HttpResponse> service =
* ClientBuilder.safeBuild(
* ClientBuilder.get()
* .codec(new Http())
* .hosts("localhost:10000,localhost:10001,localhost:10003")
* .hostConnectionLimit(1)
* .tcpConnectTimeout(1.second)
* .retries(2)
* .reportTo(new OstrichStatsReceiver())
* .logger(Logger.getLogger("http")))
* }}}
*
* Alternatively, using the `unsafeBuild` method on `ClientBuilder`
* verifies the builder dynamically, resulting in a runtime error
* instead of a compiler error.
*
* =Defaults=
*
* The following defaults are applied to clients constructed via ClientBuilder,
* unless overridden with the corresponding method. These defaults were chosen
* carefully so as to work well for most use cases.
*
* Commonly-configured options:
*
* - `connectTimeout`: [[com.twitter.util.Duration.Top Duration.Top]]
* - `tcpConnectTimeout`: 1 second
* - `requestTimeout`: [[com.twitter.util.Duration.Top Duration.Top]]
* - `timeout`: [[com.twitter.util.Duration.Top Duration.Top]]
* - `hostConnectionLimit`: `Int.MaxValue`
* - `hostConnectionCoresize`: 0
* - `hostConnectionIdleTime`: [[com.twitter.util.Duration.Top Duration.Top]]
* - `hostConnectionMaxWaiters`: `Int.MaxValue`
* - `failFast`: true
* - `failureAccrualParams`, `failureAccrualFactory`:
* `numFailures` = 5, `markDeadFor` = 5 seconds
*
* Advanced options:
*
* ''Before changing any of these, make sure that you know exactly how they will
* affect your application -- these options are typically only changed by expert
* users.''
*
* - `keepAlive`: Unspecified, in which case the
* [[http://docs.oracle.com/javase/7/docs/api/java/net/StandardSocketOptions.html?is-external=true#SO_KEEPALIVE Java default]]
* of `false` is used
* - `readerIdleTimeout`: [[com.twitter.util.Duration.Top Duration.Top]]
* - `writerIdleTimeout`: [[com.twitter.util.Duration.Top Duration.Top]]
* - `hostConnectionMaxIdleTime`: [[com.twitter.util.Duration.Top Duration.Top]]
* - `hostConnectionMaxLifeTime`: [[com.twitter.util.Duration.Top Duration.Top]]
* - `sendBufferSize`, `recvBufferSize`: OS-defined default value
*/
class ClientBuilder[Req, Rep, HasCluster, HasCodec, HasHostConnectionLimit] private[finagle](
  client: StackBasedClient[Req, Rep]
) {
  import ClientConfig._
  import com.twitter.finagle.param._

  // Convenient aliases.
  type FullySpecifiedConfig = FullySpecified[Req, Rep]
  type ThisConfig = ClientConfig[Req, Rep, HasCluster, HasCodec, HasHostConnectionLimit]
  type This = ClientBuilder[Req, Rep, HasCluster, HasCodec, HasHostConnectionLimit]

  // Starts from the no-op nil client; a real client is substituted once a
  // codec or stack is configured.
  private[builder] def this() = this(ClientConfig.nilClient)

  override def toString() = "ClientBuilder(%s)".format(params)

  // Rebuilds the builder around a (possibly differently typed) underlying
  // client, re-typing the phantom evidence parameters as requested.
  private def copy[Req1, Rep1, HasCluster1, HasCodec1, HasHostConnectionLimit1](
    client: StackBasedClient[Req1, Rep1]
  ): ClientBuilder[Req1, Rep1, HasCluster1, HasCodec1, HasHostConnectionLimit1] =
    new ClientBuilder(client)

  // Sets a single stack param on the underlying client; most public
  // configuration methods bottom out here.
  private def configured[P: Stack.Param, HasCluster1, HasCodec1, HasHostConnectionLimit1](
    param: P
  ): ClientBuilder[Req, Rep, HasCluster1, HasCodec1, HasHostConnectionLimit1] =
    copy(client.configured(param))

  // The current parameter map of the underlying client.
  def params: Stack.Params = client.params

  /**
   * Specify the set of hosts to connect this client to. Requests
   * will be load balanced across these. This is a shorthand form for
   * specifying a cluster.
   *
   * One of the {{hosts}} variations or direct specification of the
   * cluster (via {{cluster}}) is required.
   *
   * @param hostnamePortCombinations comma-separated "host:port"
   * string.
   */
  def hosts(
    hostnamePortCombinations: String
  ): ClientBuilder[Req, Rep, Yes, HasCodec, HasHostConnectionLimit] = {
    val addresses = InetSocketAddressUtil.parseHosts(hostnamePortCombinations)
    hosts(addresses)
  }

  /**
   * A variant of {{hosts}} that takes a sequence of
   * [[java.net.SocketAddress]] instead.
   */
  def hosts(
    addrs: Seq[SocketAddress]
  ): ClientBuilder[Req, Rep, Yes, HasCodec, HasHostConnectionLimit] =
    dest(Name.bound(addrs:_*))

  /**
   * A convenience method for specifying a one-host
   * [[java.net.SocketAddress]] client.
   */
  def hosts(
    address: SocketAddress
  ): ClientBuilder[Req, Rep, Yes, HasCodec, HasHostConnectionLimit] =
    hosts(Seq(address))

  /**
   * The logical destination of requests dispatched through this
   * client, as evaluated by a resolver. If the name evaluates a
   * label, this replaces the builder's current name.
   */
  def dest(
    addr: String
  ): ClientBuilder[Req, Rep, Yes, HasCodec, HasHostConnectionLimit] = {
    Resolver.evalLabeled(addr) match {
      case (n, "") => dest(n)
      case (n, l) =>
        // Only adopt the resolved label when the builder has no name yet or
        // the label is genuinely distinct from the raw address string.
        val Label(label) = params[Label]
        val cb =
          if (label.isEmpty || l != addr)
            this.name(l)
          else
            this
        cb.dest(n)
    }
  }

  /**
   * The logical destination of requests dispatched through this
   * client.
   */
  def dest(
    name: Name
  ): ClientBuilder[Req, Rep, Yes, HasCodec, HasHostConnectionLimit] =
    configured(DestName(name))

  /**
   * The base [[com.twitter.finagle.Dtab]] used to interpret logical
   * destinations for this client. (This is given as a function to
   * permit late initialization of [[com.twitter.finagle.Dtab.base]].)
   */
  def baseDtab(baseDtab: () => Dtab): This =
    configured(BindingFactory.BaseDtab(baseDtab))

  /**
   * Specify a cluster directly. A
   * [[com.twitter.finagle.builder.Cluster]] defines a dynamic
   * mechanism for specifying a set of endpoints to which this client
   * remains connected.
   */
  def cluster(
    cluster: Cluster[SocketAddress]
  ): ClientBuilder[Req, Rep, Yes, HasCodec, HasHostConnectionLimit] =
    group(Group.fromCluster(cluster))

  /**
   * A variant of {{cluster}} that takes a [[com.twitter.finagle.Group]];
   * the group is converted to a bound [[com.twitter.finagle.Name]].
   */
  def group(
    group: Group[SocketAddress]
  ): ClientBuilder[Req, Rep, Yes, HasCodec, HasHostConnectionLimit] =
    dest(Name.fromGroup(group))

  /**
   * Specify a load balancer. The load balancer implements
   * a strategy for choosing one from a set of hosts to service a request
   */
  def loadBalancer(loadBalancer: LoadBalancerFactory): This =
    configured(LoadBalancerFactory.Param(loadBalancer))

  /**
   * Specify the codec. The codec implements the network protocol
   * used by the client, and consequently determines the `Req` and `Rep`
   * type variables. One of the codec variations is required.
   */
  def codec[Req1, Rep1](
    codec: Codec[Req1, Rep1]
  ): ClientBuilder[Req1, Rep1, HasCluster, Yes, HasHostConnectionLimit] =
    this.codec(Function.const(codec)(_))
      .configured(ProtocolLibrary(codec.protocolLibraryName))

  /**
   * A variation of `codec` that supports codec factories. This is
   * used by codecs that need dynamic construction, but should be
   * transparent to the user.
   */
  def codec[Req1, Rep1](
    codecFactory: CodecFactory[Req1, Rep1]
  ): ClientBuilder[Req1, Rep1, HasCluster, Yes, HasHostConnectionLimit] =
    this.codec(codecFactory.client)
      .configured(ProtocolLibrary(codecFactory.protocolLibraryName))

  /**
   * A variation of codec for codecs that support only client-codecs.
   */
  def codec[Req1, Rep1](
    codecFactory: CodecFactory[Req1, Rep1]#Client
  ): ClientBuilder[Req1, Rep1, HasCluster, Yes, HasHostConnectionLimit] =
    copy(CodecClient[Req1, Rep1](codecFactory).withParams(params))

  /**
   * Overrides the stack and [[com.twitter.finagle.Client]] that will be used
   * by this builder.
   *
   * @param client A `StackBasedClient` representation of a
   * [[com.twitter.finagle.Client]]. `client` is materialized with the state of
   * configuration when `build` is called. There is no guarantee that all
   * builder parameters will be used by the resultant `Client`; it is up to the
   * discretion of `client` itself and the protocol implementation. For example,
   * the Mux protocol has no use for most connection pool parameters (e.g.
   * `hostConnectionLimit`). Thus when configuring
   * [[com.twitter.finagle.ThriftMux]] clients (via [[stack(ThriftMux.client)]]),
   * such connection pool parameters will not be applied.
   */
  def stack[Req1, Rep1](
    client: StackBasedClient[Req1, Rep1]
  ): ClientBuilder[Req1, Rep1, HasCluster, Yes, Yes] = {
    // The builder's accumulated params take precedence over the supplied
    // client's own params.
    copy(client.withParams(client.params ++ params))
  }

  @deprecated("Use tcpConnectTimeout instead", "5.0.1")
  def connectionTimeout(duration: Duration): This = tcpConnectTimeout(duration)

  /**
   * Specify the TCP connection timeout.
   */
  def tcpConnectTimeout(duration: Duration): This =
    configured(Transporter.ConnectTimeout(duration))

  /**
   * The request timeout is the time given to a *single* request (if
   * there are retries, they each get a fresh request timeout). The
   * timeout is applied only after a connection has been acquired.
   * That is: it is applied to the interval between the dispatch of
   * the request and the receipt of the response.
   */
  def requestTimeout(duration: Duration): This =
    configured(TimeoutFilter.Param(duration))

  /**
   * The connect timeout is the timeout applied to the acquisition of
   * a Service. This includes both queueing time (eg. because we
   * cannot create more connections due to `hostConnectionLimit` and
   * there are more than `hostConnectionLimit` requests outstanding)
   * as well as physical connection time. Futures returned from
   * `factory()` will always be satisfied within this timeout.
   *
   * This timeout is also used for name resolution, separately from
   * queueing and physical connection time, so in the worst case the
   * time to acquire a service may be double the given duration before
   * timing out.
   */
  def connectTimeout(duration: Duration): This =
    configured(TimeoutFactory.Param(duration))

  /**
   * Total request timeout. This timeout is applied from the issuance
   * of a request (through `service(request)`) until the
   * satisfaction of that reply future. No request will take longer
   * than this.
   *
   * Applicable only to service-builds (`build()`)
   */
  def timeout(duration: Duration): This =
    configured(GlobalTimeout(duration))

  /**
   * Apply TCP keepAlive (`SO_KEEPALIVE` socket option).
   */
  def keepAlive(value: Boolean): This =
    configured(params[Transport.Liveness].copy(keepAlive = Some(value)))

  /**
   * The maximum time a connection may have received no data.
   */
  def readerIdleTimeout(duration: Duration): This =
    configured(params[Transport.Liveness].copy(readTimeout = duration))

  /**
   * The maximum time a connection may not have sent any data.
   */
  def writerIdleTimeout(duration: Duration): This =
    configured(params[Transport.Liveness].copy(writeTimeout = duration))

  /**
   * Report stats to the given `StatsReceiver`. This will report
   * verbose global statistics and counters, that in turn may be
   * exported to monitoring applications.
   *
   * @note Per hosts statistics will '''NOT''' be exported to this receiver
   *
   * @see [[ClientBuilder.reportHostStats]]
   */
  def reportTo(receiver: StatsReceiver): This =
    configured(Stats(receiver))

  /**
   * Report per host stats to the given `StatsReceiver`.
   * The statsReceiver will be scoped per client, like this:
   * client/connect_latency_ms_max/0.0.0.0:64754
   */
  def reportHostStats(receiver: StatsReceiver): This =
    configured(LoadBalancerFactory.HostStats(receiver))

  /**
   * Give a meaningful name to the client. Required.
   */
  def name(value: String): This =
    configured(Label(value))

  /**
   * The maximum number of connections that are allowed per host.
   * Required. Finagle guarantees to never have more active
   * connections than this limit.
   */
  def hostConnectionLimit(value: Int): ClientBuilder[Req, Rep, HasCluster, HasCodec, Yes] =
    configured(params[DefaultPool.Param].copy(high = value))

  /**
   * The core size of the connection pool: the pool is not shrunk below this limit.
   */
  def hostConnectionCoresize(value: Int): This =
    configured(params[DefaultPool.Param].copy(low = value))

  /**
   * The amount of time a connection is allowed to linger (when it
   * otherwise would have been closed by the pool) before being
   * closed.
   */
  def hostConnectionIdleTime(timeout: Duration): This =
    configured(params[DefaultPool.Param].copy(idleTime = timeout))

  /**
   * The maximum queue size for the connection pool.
   */
  def hostConnectionMaxWaiters(nWaiters: Int): This =
    configured(params[DefaultPool.Param].copy(maxWaiters = nWaiters))

  /**
   * The maximum time a connection is allowed to linger unused.
   */
  def hostConnectionMaxIdleTime(timeout: Duration): This =
    configured(params[ExpiringService.Param].copy(idleTime = timeout))

  /**
   * The maximum time a connection is allowed to exist, regardless of occupancy.
   */
  def hostConnectionMaxLifeTime(timeout: Duration): This =
    configured(params[ExpiringService.Param].copy(lifeTime = timeout))

  /**
   * Experimental option to buffer `size` connections from the pool.
   * The buffer is fast and lock-free, reducing contention for
   * services with very high requests rates. The buffer size should
   * be sized roughly to the expected concurrency. Buffers sized by
   * power-of-twos may be faster due to the use of modular
   * arithmetic.
   *
   * @note This will be integrated into the mainline pool, at
   * which time the experimental option will go away.
   */
  def expHostConnectionBufferSize(size: Int): This =
    configured(params[DefaultPool.Param].copy(bufferSize = size))

  /**
   * Retry (some) failed requests up to `value - 1` times.
   *
   * Retries are only done if the request failed with something
   * known to be safe to retry. This includes [[WriteException WriteExceptions]]
   * and [[Failure]]s that are marked [[Failure.Restartable restartable]].
   *
   * @param value the maximum number of attempts (including retries) that
   * can be made.
   * - A value of `1` means one attempt and no retries
   * on failure.
   * - A value of `2` means one attempt and then a
   * single retry if the failure is known to be safe to retry.
   *
   * @note The failures seen in the client will '''not include'''
   * application level failures. This is particularly important for
   * codecs that include exceptions, such as `Thrift`.
   *
   * This is only applicable to service-builds (`build()`).
   *
   * @see [[com.twitter.finagle.service.RetryPolicy.tries]]
   */
  def retries(value: Int): This =
    retryPolicy(RetryPolicy.tries(value))

  /**
   * Retry failed requests according to the given [[RetryPolicy]].
   *
   * @note The failures seen in the client will '''not include'''
   * application level failures. This is particularly important for
   * codecs that include exceptions, such as `Thrift`.
   *
   * This is only applicable to service-builds (`build()`).
   */
  def retryPolicy(value: RetryPolicy[Try[Nothing]]): This =
    configured(Retries(value))

  /**
   * Sets the TCP send buffer size.
   */
  def sendBufferSize(value: Int): This =
    configured(params[Transport.BufferSizes].copy(send = Some(value)))

  /**
   * Sets the TCP recv buffer size.
   */
  def recvBufferSize(value: Int): This =
    configured(params[Transport.BufferSizes].copy(recv = Some(value)))

  /**
   * Use the given channel factory instead of the default. Note that
   * when using a non-default ChannelFactory, finagle can't
   * meaningfully reference count factory usage, and so the caller is
   * responsible for calling `releaseExternalResources()`.
   */
  def channelFactory(cf: ChannelFactory): This =
    configured(Netty3Transporter.ChannelFactory(cf))

  /**
   * Encrypt the connection with SSL. Hostname verification will be
   * provided against the given hostname.
   */
  def tls(hostname: String): This = {
    // The engine is chosen per-endpoint: a port-aware engine for inet
    // addresses, a default client engine otherwise.
    configured((Transport.TLSClientEngine(Some({
      case inet: InetSocketAddress => Ssl.client(hostname, inet.getPort)
      case _ => Ssl.client()
    }))))
      .configured(Transporter.TLSHostname(Some(hostname)))
  }

  /**
   * Encrypt the connection with SSL. The Engine to use can be passed into the client.
   * This allows the user to use client certificates
   * No SSL Hostname Validation is performed
   */
  def tls(sslContext: SSLContext): This =
    configured((Transport.TLSClientEngine(Some({
      case inet: InetSocketAddress => Ssl.client(sslContext, inet.getHostName, inet.getPort)
      case _ => Ssl.client(sslContext)
    }))))

  /**
   * Encrypt the connection with SSL. The Engine to use can be passed into the client.
   * This allows the user to use client certificates
   * SSL Hostname Validation is performed, on the passed in hostname
   */
  def tls(sslContext: SSLContext, hostname: Option[String]): This =
    configured((Transport.TLSClientEngine(Some({
      case inet: InetSocketAddress => Ssl.client(sslContext, hostname.getOrElse(inet.getHostName), inet.getPort)
      case _ => Ssl.client(sslContext)
    }))))
      .configured(Transporter.TLSHostname(hostname))

  /**
   * Do not perform TLS validation. Probably dangerous.
   */
  def tlsWithoutValidation(): This =
    configured(Transport.TLSClientEngine(Some({
      case inet: InetSocketAddress => Ssl.clientWithoutCertificateValidation(inet.getHostName, inet.getPort)
      case _ => Ssl.clientWithoutCertificateValidation()
    })))

  /**
   * Make connections via the given HTTP proxy.
   * If this is defined concurrently with socksProxy, the order in which they are applied is undefined.
   */
  def httpProxy(httpProxy: SocketAddress): This =
    configured(params[Transporter.HttpProxy].copy(sa = Some(httpProxy)))

  /**
   * For the http proxy use these [[Credentials]] for authentication.
   */
  def httpProxyUsernameAndPassword(credentials: Credentials): This =
    configured(params[Transporter.HttpProxy].copy(credentials = Some(credentials)))

  @deprecated("Use socksProxy(socksProxy: Option[SocketAddress])", "2014-12-02")
  def socksProxy(socksProxy: SocketAddress): This =
    configured(params[Transporter.SocksProxy].copy(sa = Some(socksProxy)))

  /**
   * Make connections via the given SOCKS proxy.
   * If this is defined concurrently with httpProxy, the order in which they are applied is undefined.
   */
  def socksProxy(socksProxy: Option[SocketAddress]): This =
    configured(params[Transporter.SocksProxy].copy(sa = socksProxy))

  /**
   * For the socks proxy use this username for authentication.
   * socksPassword and socksProxy must be set as well
   */
  def socksUsernameAndPassword(credentials: (String,String)): This =
    configured(params[Transporter.SocksProxy].copy(credentials = Some(credentials)))

  /**
   * Specifies a tracer that receives trace events.
   * See [[com.twitter.finagle.tracing]] for details.
   */
  @deprecated("Use tracer() instead", "7.0.0")
  def tracerFactory(factory: com.twitter.finagle.tracing.Tracer.Factory): This =
    tracer(factory())

  // API compatibility method
  @deprecated("Use tracer() instead", "7.0.0")
  def tracerFactory(t: com.twitter.finagle.tracing.Tracer): This =
    tracer(t)

  /**
   * Specifies a tracer that receives trace events.
   * See [[com.twitter.finagle.tracing]] for details.
   */
  def tracer(t: com.twitter.finagle.tracing.Tracer): This =
    configured(Tracer(t))

  /**
   * Specifies a factory that, given the client's label, produces the
   * [[com.twitter.util.Monitor]] used to handle exceptions.
   */
  def monitor(mFactory: String => com.twitter.util.Monitor): This =
    configured(MonitorFactory(mFactory))

  /**
   * Log very detailed debug information to the given logger.
   */
  def logger(logger: java.util.logging.Logger): This =
    configured(Logger(logger))

  /**
   * Use the given parameters for failure accrual. The first parameter
   * is the number of *successive* failures that are required to mark
   * a host failed. The second parameter specifies how long the host
   * is dead for, once marked.
   *
   * To completely disable [[FailureAccrualFactory]] use `noFailureAccrual`.
   */
  def failureAccrualParams(pair: (Int, Duration)): This = {
    val (numFailures, markDeadFor) = pair
    configured(FailureAccrualFactory.Param(numFailures, () => markDeadFor))
  }

  /**
   * Disables [[FailureAccrualFactory]].
   *
   * To replace the [[FailureAccrualFactory]] use `failureAccrualFactory`.
   */
  def noFailureAccrual: This =
    configured(FailureAccrualFactory.Disabled)

  /**
   * Completely replaces the [[FailureAccrualFactory]] from the underlying stack
   * with the [[ServiceFactoryWrapper]] returned from the given function `factory`.
   *
   * To completely disable [[FailureAccrualFactory]] use `noFailureAccrual`.
   */
  def failureAccrualFactory(factory: util.Timer => ServiceFactoryWrapper): This =
    configured(FailureAccrualFactory.Replaced(factory))

  @deprecated(
    "No longer experimental: Use failFast()." +
    "The new default value is true, so replace .expFailFast(true) with nothing at all",
    "5.3.10")
  def expFailFast(onOrOff: Boolean): This =
    failFast(onOrOff)

  /**
   * Marks a host dead on connection failure. The host remains dead
   * until we successfully connect. Intermediate connection attempts
   * *are* respected, but host availability is turned off during the
   * reconnection period.
   */
  def failFast(onOrOff: Boolean): This =
    configured(FailFast(onOrOff))

  /**
   * When true, the client is daemonized. As with java threads, a
   * process can exit only when all remaining clients are daemonized.
   * False by default.
   */
  def daemon(daemonize: Boolean): This =
    configured(Daemonize(daemonize))

  /**
   * Provide an alternative to putting all request exceptions under
   * a "failures" stat. Typical implementations may report any
   * cancellations or validation errors separately so success rate
   * considers only valid non cancelled requests.
   *
   * @param exceptionStatsHandler function to record failure details.
   */
  def exceptionCategorizer(exceptionStatsHandler: stats.ExceptionStatsHandler): This =
    configured(ExceptionStatsHandler(exceptionStatsHandler))

  /*** BUILD ***/

  // This is only used for client alterations outside of the stack.
  // a more ideal usage would be to retrieve the stats param inside your specific module
  // instead of using this statsReceiver as it keeps the params closer to where they're used
  private[finagle] lazy val statsReceiver = {
    val Stats(sr) = params[Stats]
    val Label(label) = params[Label]
    sr.scope(label)
  }

  /**
   * Construct a ServiceFactory. This is useful for stateful protocols
   * (e.g., those that support transactions or authentication).
   */
  def buildFactory()(
    implicit THE_BUILDER_IS_NOT_FULLY_SPECIFIED_SEE_ClientBuilder_DOCUMENTATION:
      ClientConfigEvidence[HasCluster, HasCodec, HasHostConnectionLimit]
  ): ServiceFactory[Req, Rep] = {
    val Label(label) = params[Label]
    val DestName(dest) = params[DestName]
    ClientBuilderClient.newClient(client, dest, label)
  }

  @deprecated("Used for ABI compat", "5.0.1")
  def buildFactory(
    THE_BUILDER_IS_NOT_FULLY_SPECIFIED_SEE_ClientBuilder_DOCUMENTATION:
      ThisConfig =:= FullySpecifiedConfig
  ): ServiceFactory[Req, Rep] = buildFactory()(
    new ClientConfigEvidence[HasCluster, HasCodec, HasHostConnectionLimit]{})

  /**
   * Construct a Service.
   */
  def build()(
    implicit THE_BUILDER_IS_NOT_FULLY_SPECIFIED_SEE_ClientBuilder_DOCUMENTATION:
      ClientConfigEvidence[HasCluster, HasCodec, HasHostConnectionLimit]
  ): Service[Req, Rep] = {
    val Label(label) = params[Label]
    val DestName(dest) = params[DestName]
    ClientBuilderClient.newService(client, dest, label)
  }

  @deprecated("Used for ABI compat", "5.0.1")
  def build(
    THE_BUILDER_IS_NOT_FULLY_SPECIFIED_SEE_ClientBuilder_DOCUMENTATION:
      ThisConfig =:= FullySpecifiedConfig
  ): Service[Req, Rep] = build()(
    new ClientConfigEvidence[HasCluster, HasCodec, HasHostConnectionLimit]{})

  // Runtime completeness check used by the unsafe builds: only the
  // destination is validated here; the cast supplies the type-level evidence.
  private[this] def validated = {
    if (!params.contains[DestName])
      throw new IncompleteSpecification("No destination was specified")
    this.asInstanceOf[ClientBuilder[Req, Rep, Yes, Yes, Yes]]
  }

  /**
   * Construct a Service, with runtime checks for builder
   * completeness.
   */
  def unsafeBuild(): Service[Req, Rep] =
    validated.build()

  /**
   * Construct a ServiceFactory, with runtime checks for builder
   * completeness.
   */
  def unsafeBuildFactory(): ServiceFactory[Req, Rep] =
    validated.buildFactory()
}
/**
 * A [[com.twitter.finagle.client.StackClient]] which adds the
 * filters historically included in `ClientBuilder` clients.
 */
private case class ClientBuilderClient[Req, Rep](
  client: StackClient[Req, Rep]
) extends StackClient[Req, Rep] {
  // Params and stack are pure delegation; the historical filters are added
  // by the companion's newClient/newService at construction time.
  def params = client.params
  def withParams(ps: Stack.Params) = copy(client.withParams(ps))
  def stack = client.stack
  def withStack(stack: Stack[ServiceFactory[Req, Rep]]) = copy(client.withStack(stack))

  def newClient(dest: Name, label: String) =
    ClientBuilderClient.newClient(client, dest, label)

  def newService(dest: Name, label: String) =
    ClientBuilderClient.newService(client, dest, label)
}
private object ClientBuilderClient {
  import ClientConfig._
  import com.twitter.finagle.param._

  // Applies the ClientBuilder-configured retry policy; a no-op module when
  // the policy is the `RetryPolicy.Never` default.
  private class RetryingFilterModule[Req, Rep]
      extends Stack.Module3[Stats, Retries, Timer, ServiceFactory[Req, Rep]] {
    override val role = new Stack.Role("ClientBuilder RetryingFilter")
    override val description = "Application-configured retries"

    override def make(
      statsP: Stats,
      retriesP: Retries,
      timerP: Timer,
      next: ServiceFactory[Req, Rep]
    ) = {
      val Stats(statsReceiver) = statsP
      val Retries(policy) = retriesP
      val Timer(timer) = timerP
      if (policy eq RetryPolicy.Never) next
      else {
        val retries = new RetryingFilter[Req, Rep](policy, timer, statsReceiver)
        retries andThen next
      }
    }
  }

  // Records per-attempt request stats under the "tries" scope.
  private class StatsFilterModule[Req, Rep]
      extends Stack.Module2[Stats, ExceptionStatsHandler, ServiceFactory[Req, Rep]] {
    override val role = new Stack.Role("ClientBuilder StatsFilter")
    override val description = "Record request stats scoped to 'tries'"

    override def make(
      statsP: Stats,
      exceptionStatsHandlerP: ExceptionStatsHandler,
      next: ServiceFactory[Req, Rep]
    ) = {
      val Stats(statsReceiver) = statsP
      val ExceptionStatsHandler(categorizer) = exceptionStatsHandlerP
      val stats = new StatsFilter[Req, Rep](statsReceiver.scope("tries"), categorizer)
      stats andThen next
    }
  }

  // Applies the end-to-end `timeout(...)` setting; a no-op module when the
  // timeout is left at the `Duration.Top` default.
  private class GlobalTimeoutModule[Req, Rep]
      extends Stack.Module2[GlobalTimeout, Timer, ServiceFactory[Req, Rep]] {
    override val role = new Stack.Role("ClientBuilder GlobalTimeoutFilter")
    override val description = "Application-configured global timeout"

    override def make(
      globalTimeoutP: GlobalTimeout,
      timerP: Timer,
      next: ServiceFactory[Req, Rep]
    ) = {
      val GlobalTimeout(timeout) = globalTimeoutP
      val Timer(timer) = timerP
      if (timeout == Duration.Top) next
      else {
        val exception = new GlobalRequestTimeoutException(timeout)
        val globalTimeout = new TimeoutFilter[Req, Rep](timeout, exception, timer)
        globalTimeout andThen next
      }
    }
  }

  // Tags exceptions with the client's label so failures can be attributed.
  private class ExceptionSourceFilterModule[Req, Rep]
      extends Stack.Module1[Label, ServiceFactory[Req, Rep]] {
    override val role = new Stack.Role("ClientBuilder ExceptionSourceFilter")
    override val description = "Exception source filter"

    override def make(
      labelP: Label,
      next: ServiceFactory[Req, Rep]
    ) = {
      val Label(label) = labelP
      val exceptionSource = new ExceptionSourceFilter[Req, Rep](label)
      exceptionSource andThen next
    }
  }

  /**
   * Materializes the client as a `ServiceFactory`, resolving the label-based
   * monitor, installing an ExitGuard for non-daemonized clients, and guarding
   * against repeated `close` calls.
   */
  def newClient[Req, Rep](
    client: StackBasedClient[Req, Rep],
    dest: Name,
    label: String
  ): ServiceFactory[Req, Rep] = {
    val params = client.params
    val Daemonize(daemon) = params[Daemonize]
    val Logger(logger) = params[Logger]
    val MonitorFactory(mFactory) = params[MonitorFactory]

    val clientParams = params + Monitor(mFactory(label))

    val factory = client.withParams(clientParams).newClient(dest, label)

    // Non-daemonized clients keep the process alive until closed.
    val exitGuard = if (!daemon) Some(ExitGuard.guard(s"client for '$label'")) else None

    new ServiceFactoryProxy[Req, Rep](factory) {
      private[this] val closed = new AtomicBoolean(false)
      override def close(deadline: Time): Future[Unit] = {
        if (!closed.compareAndSet(false, true)) {
          logger.log(Level.WARNING, "Close on ServiceFactory called multiple times!",
            new Exception/*stack trace please*/)
          return Future.exception(new IllegalStateException)
        }
        // Release the exit guard only once the underlying close completes.
        super.close(deadline) ensure {
          exitGuard.foreach(_.unguard())
        }
      }
    }
  }

  /**
   * Materializes the client as a `Service` by splicing the historical
   * ClientBuilder filters (per-try stats, retries, global timeout, exception
   * source) into the stack, then wrapping the resulting factory with
   * `FactoryToService` and a repeated-close guard.
   */
  def newService[Req, Rep](
    client0: StackBasedClient[Req, Rep],
    dest: Name,
    label: String
  ): Service[Req, Rep] = {
    val client =
      client0
        .transformed(new Stack.Transformer {
          def apply[Req, Rep](stack: Stack[ServiceFactory[Req, Rep]]) =
            // NOTE: insertion order matters — stats and retries sit just
            // above the requeue module; timeout and exception-source wrap
            // the entire stack.
            stack
              .insertBefore(Requeues.role, new StatsFilterModule[Req, Rep])
              .insertBefore(Requeues.role, new RetryingFilterModule[Req, Rep])
              .prepend(new GlobalTimeoutModule[Req, Rep])
              .prepend(new ExceptionSourceFilterModule[Req, Rep])
        })
        .configured(FactoryToService.Enabled(true))

    val factory = newClient(client, dest, label)
    val service: Service[Req, Rep] = new FactoryToService[Req, Rep](factory)

    new ServiceProxy[Req, Rep](service) {
      private[this] val released = new AtomicBoolean(false)
      override def close(deadline: Time): Future[Unit] = {
        if (!released.compareAndSet(false, true)) {
          val Logger(logger) = client.params[Logger]
          logger.log(java.util.logging.Level.WARNING, "Release on Service called multiple times!",
            new Exception/*stack trace please*/)
          return Future.exception(new IllegalStateException)
        }
        super.close(deadline)
      }
    }
  }
}
/**
 * A [[com.twitter.finagle.client.StackClient]] based on a
 * [[com.twitter.finagle.Codec]].
 */
private case class CodecClient[Req, Rep](
  codecFactory: CodecFactory[Req, Rep]#Client,
  stack: Stack[ServiceFactory[Req, Rep]] = StackClient.newStack[Req, Rep],
  params: Stack.Params = ClientConfig.DefaultParams
) extends StackClient[Req, Rep] {
  import com.twitter.finagle.param._

  def withParams(ps: Stack.Params) = copy(params = ps)
  def withStack(stack: Stack[ServiceFactory[Req, Rep]]) = copy(stack = stack)

  /**
   * Materializes the codec for this label and adapts the generic client
   * stack to it: connection preparation, service-factory preparation, and
   * trace initialization are all taken from the codec.
   */
  def newClient(dest: Name, label: String): ServiceFactory[Req, Rep] = {
    val codec = codecFactory(ClientCodecConfig(label))

    // Wraps the codec's connection-preparation phase so its latency is
    // recorded under "codec_connection_preparation_latency_ms".
    val prepConn = new Stack.Module1[Stats, ServiceFactory[Req, Rep]] {
      val role = StackClient.Role.prepConn
      val description = "Connection preparation phase as defined by a Codec"
      def make(_stats: Stats, next: ServiceFactory[Req, Rep]) = {
        val Stats(stats) = _stats
        val underlying = codec.prepareConnFactory(next)
        new ServiceFactoryProxy(underlying) {
          val stat = stats.stat("codec_connection_preparation_latency_ms")
          override def apply(conn: ClientConnection) = {
            val begin = Time.now
            super.apply(conn) ensure {
              stat.add((Time.now - begin).inMilliseconds)
            }
          }
        }
      }
    }

    val clientStack = {
      val stack0 = stack
        .replace(StackClient.Role.prepConn, prepConn)
        .replace(StackClient.Role.prepFactory, (next: ServiceFactory[Req, Rep]) =>
          codec.prepareServiceFactory(next))
        .replace(TraceInitializerFilter.role, codec.newTraceInitializer)

      // disable failFast if the codec requests it or it is
      // disabled via the ClientBuilder parameter.
      val FailFast(failFast) = params[FailFast]
      if (!codec.failFastOk || !failFast) stack0.remove(FailFastFactory.role) else stack0
    }

    // Concrete StdStackClient bound to the codec's Netty3 pipeline,
    // transport, and dispatcher.
    case class Client(
      stack: Stack[ServiceFactory[Req, Rep]] = clientStack,
      params: Stack.Params = params
    ) extends StdStackClient[Req, Rep, Client] {
      protected def copy1(
        stack: Stack[ServiceFactory[Req, Rep]] = this.stack,
        params: Stack.Params = this.params): Client = copy(stack, params)

      protected type In = Any
      protected type Out = Any

      protected def newTransporter(): Transporter[Any, Any] = {
        val Stats(stats) = params[Stats]
        val newTransport = (ch: Channel) => codec.newClientTransport(ch, stats)
        Netty3Transporter[Any, Any](codec.pipelineFactory,
          params + Netty3Transporter.TransportFactory(newTransport))
      }

      protected def newDispatcher(transport: Transport[In, Out]) =
        codec.newClientDispatcher(transport, params)
    }

    Client().newClient(dest, label)
  }

  // not called
  def newService(dest: Name, label: String): Service[Req, Rep] = ???
}
| kingtang/finagle | finagle-core/src/main/scala/com/twitter/finagle/builder/ClientBuilder.scala | Scala | apache-2.0 | 40,508 |
package perm.tryfuture.exchange
import javafx.application.Application
import javafx.scene.Scene
import javafx.scene.chart.{LineChart, NumberAxis, XYChart}
import javafx.scene.layout.VBox
import javafx.scene.text.Text
import javafx.stage.Stage
object UI {
  /** JVM entry point: hands off to JavaFX, which instantiates [[UI]] reflectively. */
  def main(args: Array[String]): Unit = {
    Application.launch(classOf[UI], args: _*)
  }
}
/**
 * JavaFX front-end for the exchange demo: one line chart for buy/sell prices and
 * one for the count of positive news, both keyed by tick.
 *
 * NOTE(review): the setters below mutate live chart series. If StatisticsCollector
 * invokes them from a non-JavaFX thread (e.g. an actor), the updates should be
 * marshalled via Platform.runLater — TODO confirm the calling thread.
 */
class UI extends Application {
  private[this] val buysSeries = new XYChart.Series[Number, Number]
  private[this] val sellsSeries = new XYChart.Series[Number, Number]
  private[this] val newsSeries = new XYChart.Series[Number, Number]

  /**
   * Appends a (time, newsRate) point to the news series.
   * Returns the result of the underlying observable list's `add`.
   */
  def setNumberOfPositiveNews(time: Long, newsRate: Int): Boolean = {
    newsSeries.getData.add(new XYChart.Data(time, newsRate))
  }

  /** Appends the buy/sell rates for `tick` when present, and logs the tick to stdout. */
  def setRates(tick: Long, buyRateOpt: Option[BigDecimal], sellRateOpt: Option[BigDecimal]): Unit = {
    // foreach, not map: these calls are pure side effects and the mapped value was discarded.
    buyRateOpt.foreach(buyRate => buysSeries.getData.add(new XYChart.Data(tick, buyRate)))
    sellRateOpt.foreach(sellRate => sellsSeries.getData.add(new XYChart.Data(tick, sellRate)))
    println(s"$tick ${buyRateOpt.getOrElse("-")} ${sellRateOpt.getOrElse("-")}")
  }

  /**
   * Builds the scene (two stacked line charts plus a caption), starts the
   * statistics collector, and wires window-close to actor-system shutdown.
   */
  override def start(primaryStage: Stage): Unit = {
    primaryStage.setTitle("Akka stock exchange demo")
    val vbox = new VBox()
    buysSeries.setName("Buys")
    sellsSeries.setName("Sells")
    newsSeries.setName("News OK?")
    val xAxisPrice: NumberAxis = new NumberAxis
    val yAxisPrice: NumberAxis = new NumberAxis
    xAxisPrice.setLabel("Tick")
    yAxisPrice.setLabel("Price")
    val xAxisNews: NumberAxis = new NumberAxis
    val yAxisNews: NumberAxis = new NumberAxis
    xAxisNews.setLabel("Tick")
    yAxisNews.setLabel("# of OK news")
    val lineChartPrice: LineChart[Number, Number] = new LineChart[Number, Number](xAxisPrice, yAxisPrice)
    val lineChartNews: LineChart[Number, Number] = new LineChart[Number, Number](xAxisNews, yAxisNews)
    lineChartPrice.getData.add(buysSeries)
    lineChartPrice.getData.add(sellsSeries)
    lineChartNews.getData.add(newsSeries)
    vbox.getChildren.addAll(lineChartPrice, lineChartNews, new Text("Russia, Perm, 2018"))
    primaryStage.setScene(new Scene(vbox))
    // The collector pushes data back into this UI instance; tear down its actor
    // system when the window closes so the JVM can exit.
    val statisticsCollector = new StatisticsCollector(this)
    primaryStage.setOnCloseRequest(_ => statisticsCollector.actorSystem.terminate())
    primaryStage.show()
  }
}
| ipostanogov/akka-stock-exchange | src/main/scala/perm/tryfuture/exchange/UI.scala | Scala | mit | 2,296 |
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package docs.scaladsl.services
package implementhelloclient {
  import helloservice.HelloService
  import com.lightbend.lagom.scaladsl.server.LagomApplication
  import com.lightbend.lagom.scaladsl.server.LagomApplicationContext
  import play.api.libs.ws.ahc.AhcWSComponents
  // The //#implement-hello-client markers delimit the snippet that is included
  // verbatim in the published documentation; keep them and the code between them intact.
  //#implement-hello-client
  abstract class MyApplication(context: LagomApplicationContext)
    extends LagomApplication(context)
    with AhcWSComponents {
    lazy val helloService = serviceClient.implement[HelloService]
  }
  //#implement-hello-client
}
package helloconsumer {
  import akka.NotUsed
  import com.lightbend.lagom.scaladsl.api.Service
  import com.lightbend.lagom.scaladsl.api.ServiceCall
  import helloservice.HelloService
  import scala.concurrent.ExecutionContext
  import scala.concurrent.Future
  // Documentation example of a service that consumes another service's client.
  trait MyService extends Service {
    def sayHelloLagom: ServiceCall[NotUsed, String]
    override def descriptor = {
      import Service._
      named("myservice").withCalls(call(sayHelloLagom))
    }
  }
  // The //#hello-consumer markers delimit the snippet published in the manual.
  //#hello-consumer
  class MyServiceImpl(helloService: HelloService)(implicit ec: ExecutionContext) extends MyService {
    override def sayHelloLagom = ServiceCall { _ =>
      val result: Future[String] =
        helloService.sayHello.invoke("Lagom")
      result.map { response =>
        s"Hello service said: $response"
      }
    }
  }
  //#hello-consumer
}
package circuitbreakers {
  import com.lightbend.lagom.scaladsl.api.Descriptor
  import com.lightbend.lagom.scaladsl.api.Service
  import com.lightbend.lagom.scaladsl.api.ServiceCall
  // Documentation example: "hi" uses the default circuit breaker, while "hiAgain"
  // is tracked by the separately configured breaker identified as "hello2".
  trait HelloServiceWithCircuitBreaker extends Service {
    def sayHi: ServiceCall[String, String]
    def hiAgain: ServiceCall[String, String]
    // @formatter:off
    //#circuit-breaker
    import com.lightbend.lagom.scaladsl.api.CircuitBreaker
    def descriptor: Descriptor = {
      import Service._
      named("hello").withCalls(
        namedCall("hi", this.sayHi),
        namedCall("hiAgain", this.hiAgain)
          .withCircuitBreaker(CircuitBreaker.identifiedBy("hello2"))
      )
    }
    //#circuit-breaker
    // @formatter:on
  }
}
| ignasi35/lagom | docs/manual/scala/guide/services/code/ServiceClients.scala | Scala | apache-2.0 | 2,201 |
package com.dhruv
import java.io.InputStream
import java.util.Properties
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.mllib.feature.HashingTF
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.classification.NaiveBayes
import edu.stanford.nlp.ling.CoreAnnotations.{LemmaAnnotation, SentencesAnnotation, TokensAnnotation}
import edu.stanford.nlp.pipeline.{Annotation, StanfordCoreNLP}
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
object Train {

  /**
   * Trains a Naive Bayes sentiment classifier over a CSV of labelled tweets,
   * evaluates it on a held-out 20% split, prints sample predictions, and
   * optionally saves the model.
   *
   * Usage: Train <training file> [<model output dir>]
   *
   * The input is assumed to have a header row, with the label in column 1 and
   * the tweet text in column 3 (see toLabels) — TODO confirm against the dataset.
   */
  def main(args: Array[String]) {
    if (args.length == 0) {
      System.err.println("Usage: " + this.getClass.getSimpleName + " <training file> ")
      System.exit(1)
    }
    val sparkConf = new SparkConf().
      setAppName("Twitter Sentiment Analyzer")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    val sc = new SparkContext(sparkConf)
    // Currently unused below — kept for the future extensions described near
    // createNLPPipeline. Note the broadcast value is read straight back on the driver.
    val stopWords = sc.broadcast(loadStopWords("/stopwords.txt")).value
    val allData = sc.textFile(args(0))
    val header = allData.first()
    val data = allData.filter(x => x != header)
    val splits = data.randomSplit(Array(0.8, 0.2), seed = 11L)
    val training = splits(0)
    val test = splits(1)
    // Naive CSV split: (label, text). Breaks if the text itself contains commas.
    def toLabels(line: String) = {
      val words = line.split(',')
      (words(1), words(3))
    }
    val training_labeled = training.map(x => toLabels(x)).
      map(t => (t._1, Utils.featurize(t._2))).
      map(x => new LabeledPoint((x._1).toDouble, x._2))
    // Times a by-name block and prints the elapsed wall-clock milliseconds.
    def time[R](block: => R): R = {
      val t0 = System.nanoTime()
      val result = block // call-by-name
      val t1 = System.nanoTime()
      // Bug fix: ns / 1000 is microseconds; divide by 1,000,000 for the advertised "ms".
      println("\\n\\nElapsed time: " + (t1 - t0) / 1000000 + "ms")
      result
    }
    println("\\n\\n********* Training **********\\n\\n")
    val model = time { NaiveBayes.train(training_labeled, 1.0) }
    println("\\n\\n********* Testing **********\\n\\n")
    val testing_labeled = test.map(x => toLabels(x)).
      map(t => (t._1, Utils.featurize(t._2), t._2)).
      map(x => {
        val lp = new LabeledPoint((x._1).toDouble, x._2)
        (lp, x._3)
      })
    // NOTE(review): RDD transformations are lazy, so this `time` measures only
    // building the lineage; the real work happens at count()/take() below.
    val predictionAndLabel = time { testing_labeled.map(p => {
      val labeledPoint = p._1
      val text = p._2
      val features = labeledPoint.features
      val actual_label = labeledPoint.label
      val predicted_label = model.predict(features)
      (actual_label, predicted_label, text)
    }) }
    val accuracy = 1.0 * predictionAndLabel.filter(x => x._1 == x._2).count() / test.count()
    println("Training and Testing complete. Accuracy is = " + accuracy)
    println("\\nSome Predictions:\\n")
    predictionAndLabel.take(10).foreach( x => {
      println("---------------------------------------------------------------")
      println("Text = " + x._3)
      println("Actual Label = " + (if (x._1 == 1) "positive" else "negative"))
      println("Predicted Label = " + (if (x._2 == 1) "positive" else "negative"))
      println("----------------------------------------------------------------\\n\\n")
    } )
    if (args.length == 2) {
      // Bug fix: the model is written to args(1), but the old message reported
      // args(1) + "/" + accuracy — a path that was never written to.
      val savePath = args(1)
      model.save(sc, savePath)
      println("\\n\\n********* Model saved to: " + savePath + "\\n\\n")
    }
    sc.stop()
    println("\\n\\n********* Stopped Spark Context succesfully, exiting ********")
  }

  /**
   * Methods included for future extension. Some ideas:
   * - use stopwords.txt to weed out "the", "in", etc.
   * - lemmify text
   */
  def createNLPPipeline(): StanfordCoreNLP = {
    val props = new Properties()
    props.put("annotators", "tokenize, ssplit, pos, lemma")
    new StanfordCoreNLP(props)
  }

  /**
   * Lemmatizes `text`, keeping lowercased lemmas that are longer than two
   * characters, purely alphabetic, and not in `stopWords`.
   */
  def plainTextToLemmas(text: String, stopWords: Set[String], pipeline: StanfordCoreNLP)
    : Seq[String] = {
    val doc = new Annotation(text)
    pipeline.annotate(doc)
    val lemmas = new ArrayBuffer[String]()
    val sentences = doc.get(classOf[SentencesAnnotation])
    for (sentence <- sentences.asScala;
         token <- sentence.get(classOf[TokensAnnotation]).asScala) {
      val lemma = token.get(classOf[LemmaAnnotation])
      if (lemma.length > 2 && !stopWords.contains(lemma) && isOnlyLetters(lemma)) {
        lemmas += lemma.toLowerCase
      }
    }
    lemmas
  }

  /** True iff every character of `str` is a letter (vacuously true for ""). */
  def isOnlyLetters(str: String): Boolean = str.forall(Character.isLetter)

  /** Loads a classpath resource as a set of its lines, closing the underlying stream. */
  def loadStopWords(path: String): Set[String] = {
    val stream: InputStream = getClass.getResourceAsStream(path)
    val source = scala.io.Source.fromInputStream(stream)
    try source.getLines.toSet // materialize before the source is closed
    finally source.close()
  }
}
| DhruvKumar/spark-twitter-sentiment | src/main/scala/com/dhruv/Train.scala | Scala | apache-2.0 | 4,682 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.command.v2
import org.apache.spark.sql.{AnalysisException, Row}
import org.apache.spark.sql.execution.command
import org.apache.spark.sql.types.{StringType, StructType}
/**
* The class contains tests for the `SHOW TABLES` command to check V2 table catalogs.
*/
class ShowTablesSuite extends command.ShowTablesSuiteBase with CommandSuiteBase {
  // V2 catalogs have no implicit session namespace.
  override def defaultNamespace: Seq[String] = Nil
  // V2 SHOW TABLES output: (namespace, tableName) — no isTemporary column.
  override def showSchema: StructType = {
    new StructType()
      .add("namespace", StringType, nullable = false)
      .add("tableName", StringType, nullable = false)
  }
  override def getRows(showRows: Seq[ShowRow]): Seq[Row] = {
    showRows.map {
      case ShowRow(namespace, table, _) => Row(namespace, table)
    }
  }
  // The test fails for V1 catalog with the error:
  // org.apache.spark.sql.AnalysisException:
  // The namespace in session catalog must have exactly one name part: spark_catalog.n1.n2.db
  test("show tables in nested namespaces") {
    withTable(s"$catalog.n1.n2.db") {
      spark.sql(s"CREATE TABLE $catalog.n1.n2.db.table_name (id bigint, data string) $defaultUsing")
      runShowTablesSql(
        s"SHOW TABLES FROM $catalog.n1.n2.db",
        Seq(ShowRow("n1.n2.db", "table_name", false)))
    }
  }
  // The test fails for V1 catalog with the error:
  // org.apache.spark.sql.AnalysisException:
  // The namespace in session catalog must have exactly one name part: spark_catalog.table
  test("using v2 catalog with empty namespace") {
    withTable(s"$catalog.table") {
      spark.sql(s"CREATE TABLE $catalog.table (id bigint, data string) $defaultUsing")
      runShowTablesSql(s"SHOW TABLES FROM $catalog", Seq(ShowRow("", "table", false)))
    }
  }
  // The test fails for V1 catalog with the error:
  // org.apache.spark.sql.AnalysisException:
  // The namespace in session catalog must have exactly one name part: spark_catalog.ns1.ns2.tbl
  test("SHOW TABLE EXTENDED not valid v1 database") {
    // NOTE(review): the `namespace` parameter is unused here (and shadows the
    // outer `namespace` val below); the assertion only checks the v2 error message.
    def testV1CommandNamespace(sqlCommand: String, namespace: String): Unit = {
      val e = intercept[AnalysisException] {
        sql(sqlCommand)
      }
      assert(e.message.contains(s"SHOW TABLE EXTENDED is not supported for v2 tables"))
    }
    val namespace = s"$catalog.ns1.ns2"
    val table = "tbl"
    withTable(s"$namespace.$table") {
      sql(s"CREATE TABLE $namespace.$table (id bigint, data string) " +
        s"$defaultUsing PARTITIONED BY (id)")
      testV1CommandNamespace(s"SHOW TABLE EXTENDED FROM $namespace LIKE 'tb*'",
        namespace)
      testV1CommandNamespace(s"SHOW TABLE EXTENDED IN $namespace LIKE 'tb*'",
        namespace)
      testV1CommandNamespace("SHOW TABLE EXTENDED " +
        s"FROM $namespace LIKE 'tb*' PARTITION(id=1)",
        namespace)
      testV1CommandNamespace("SHOW TABLE EXTENDED " +
        s"IN $namespace LIKE 'tb*' PARTITION(id=1)",
        namespace)
    }
  }
  // TODO(SPARK-33393): Support SHOW TABLE EXTENDED in DSv2
  test("SHOW TABLE EXTENDED: an existing table") {
    val table = "people"
    withTable(s"$catalog.$table") {
      sql(s"CREATE TABLE $catalog.$table (name STRING, id INT) $defaultUsing")
      val errMsg = intercept[AnalysisException] {
        sql(s"SHOW TABLE EXTENDED FROM $catalog LIKE '*$table*'").collect()
      }.getMessage
      assert(errMsg.contains("SHOW TABLE EXTENDED is not supported for v2 tables"))
    }
  }
}
| witgo/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/ShowTablesSuite.scala | Scala | apache-2.0 | 4,215 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.util.concurrent.atomic.AtomicReference
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet}
import org.apache.spark.{ExecutorAllocationClient, SparkConf, SparkContext}
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config
import org.apache.spark.util.{Clock, SystemClock, Utils}
/**
* BlacklistTracker is designed to track problematic executors and nodes. It supports blacklisting
* executors and nodes across an entire application (with a periodic expiry). TaskSetManagers add
* additional blacklisting of executors and nodes for individual tasks and stages which works in
* concert with the blacklisting here.
*
* The tracker needs to deal with a variety of workloads, eg.:
*
* * bad user code -- this may lead to many task failures, but that should not count against
* individual executors
* * many small stages -- this may prevent a bad executor for having many failures within one
* stage, but still many failures over the entire application
* * "flaky" executors -- they don't fail every task, but are still faulty enough to merit
* blacklisting
*
* See the design doc on SPARK-8425 for a more in-depth discussion.
*
* THREADING: As with most helpers of TaskSchedulerImpl, this is not thread-safe. Though it is
* called by multiple threads, callers must already have a lock on the TaskSchedulerImpl. The
* one exception is [[nodeBlacklist()]], which can be called without holding a lock.
*/
private[scheduler] class BlacklistTracker (
  private val listenerBus: LiveListenerBus,
  conf: SparkConf,
  allocationClient: Option[ExecutorAllocationClient],
  clock: Clock = new SystemClock()) extends Logging {

  // Convenience constructor wiring the tracker to a SparkContext's bus and conf.
  def this(sc: SparkContext, allocationClient: Option[ExecutorAllocationClient]) = {
    this(sc.listenerBus, sc.conf, allocationClient)
  }

  // Fail fast on inconsistent blacklist configuration.
  BlacklistTracker.validateBlacklistConfs(conf)
  private val MAX_FAILURES_PER_EXEC = conf.get(config.MAX_FAILURES_PER_EXEC)
  private val MAX_FAILED_EXEC_PER_NODE = conf.get(config.MAX_FAILED_EXEC_PER_NODE)
  val BLACKLIST_TIMEOUT_MILLIS = BlacklistTracker.getBlacklistTimeout(conf)
  private val BLACKLIST_FETCH_FAILURE_ENABLED = conf.get(config.BLACKLIST_FETCH_FAILURE_ENABLED)

  /**
   * A map from executorId to information on task failures. Tracks the time of each task failure,
   * so that we can avoid blacklisting executors due to failures that are very far apart. We do not
   * actively remove from this as soon as tasks hit their timeouts, to avoid the time it would take
   * to do so. But it will not grow too large, because as soon as an executor gets too many
   * failures, we blacklist the executor and remove its entry here.
   */
  private val executorIdToFailureList = new HashMap[String, ExecutorFailureList]()
  val executorIdToBlacklistStatus = new HashMap[String, BlacklistedExecutor]()
  val nodeIdToBlacklistExpiryTime = new HashMap[String, Long]()
  /**
   * An immutable copy of the set of nodes that are currently blacklisted. Kept in an
   * AtomicReference to make [[nodeBlacklist()]] thread-safe.
   */
  private val _nodeBlacklist = new AtomicReference[Set[String]](Set())
  /**
   * Time when the next blacklist will expire. Used as a
   * shortcut to avoid iterating over all entries in the blacklist when none will have expired.
   */
  var nextExpiryTime: Long = Long.MaxValue
  /**
   * Mapping from nodes to all of the executors that have been blacklisted on that node. We do *not*
   * remove from this when executors are removed from spark, so we can track when we get multiple
   * successive blacklisted executors on one node. Nonetheless, it will not grow too large because
   * there cannot be many blacklisted executors on one node, before we stop requesting more
   * executors on that node, and we clean up the list of blacklisted executors once an executor has
   * been blacklisted for BLACKLIST_TIMEOUT_MILLIS.
   */
  val nodeToBlacklistedExecs = new HashMap[String, HashSet[String]]()

  /**
   * Un-blacklists executors and nodes that have been blacklisted for at least
   * BLACKLIST_TIMEOUT_MILLIS
   */
  def applyBlacklistTimeout(): Unit = {
    val now = clock.getTimeMillis()
    // quickly check if we've got anything to expire from blacklist -- if not, avoid doing any work
    if (now > nextExpiryTime) {
      // Apply the timeout to blacklisted nodes and executors
      val execsToUnblacklist = executorIdToBlacklistStatus.filter(_._2.expiryTime < now).keys
      if (execsToUnblacklist.nonEmpty) {
        // Un-blacklist any executors that have been blacklisted longer than the blacklist timeout.
        logInfo(s"Removing executors $execsToUnblacklist from blacklist because the blacklist " +
          s"for those executors has timed out")
        execsToUnblacklist.foreach { exec =>
          val status = executorIdToBlacklistStatus.remove(exec).get
          val failedExecsOnNode = nodeToBlacklistedExecs(status.node)
          listenerBus.post(SparkListenerExecutorUnblacklisted(now, exec))
          failedExecsOnNode.remove(exec)
          if (failedExecsOnNode.isEmpty) {
            nodeToBlacklistedExecs.remove(status.node)
          }
        }
      }
      val nodesToUnblacklist = nodeIdToBlacklistExpiryTime.filter(_._2 < now).keys
      if (nodesToUnblacklist.nonEmpty) {
        // Un-blacklist any nodes that have been blacklisted longer than the blacklist timeout.
        logInfo(s"Removing nodes $nodesToUnblacklist from blacklist because the blacklist " +
          s"has timed out")
        nodesToUnblacklist.foreach { node =>
          nodeIdToBlacklistExpiryTime.remove(node)
          listenerBus.post(SparkListenerNodeUnblacklisted(now, node))
        }
        _nodeBlacklist.set(nodeIdToBlacklistExpiryTime.keySet.toSet)
      }
      updateNextExpiryTime()
    }
  }

  // Recomputes nextExpiryTime as the minimum expiry over all blacklisted
  // executors and nodes (Long.MaxValue when nothing is blacklisted).
  private def updateNextExpiryTime(): Unit = {
    val execMinExpiry = if (executorIdToBlacklistStatus.nonEmpty) {
      executorIdToBlacklistStatus.map{_._2.expiryTime}.min
    } else {
      Long.MaxValue
    }
    val nodeMinExpiry = if (nodeIdToBlacklistExpiryTime.nonEmpty) {
      nodeIdToBlacklistExpiryTime.values.min
    } else {
      Long.MaxValue
    }
    nextExpiryTime = math.min(execMinExpiry, nodeMinExpiry)
  }

  // Asks the allocation client (when present) to force-kill `exec`, logging `msg`.
  private def killExecutor(exec: String, msg: String): Unit = {
    allocationClient match {
      case Some(a) =>
        logInfo(msg)
        a.killExecutors(Seq(exec), adjustTargetNumExecutors = false, countFailures = false,
          force = true)
      case None =>
        logInfo(s"Not attempting to kill blacklisted executor id $exec " +
          s"since allocation client is not defined.")
    }
  }

  // Kills a freshly blacklisted executor, but only if spark.blacklist.killBlacklistedExecutors is on.
  private def killBlacklistedExecutor(exec: String): Unit = {
    if (conf.get(config.BLACKLIST_KILL_ENABLED)) {
      killExecutor(exec,
        s"Killing blacklisted executor id $exec since ${config.BLACKLIST_KILL_ENABLED.key} is set.")
    }
  }

  private[scheduler] def killBlacklistedIdleExecutor(exec: String): Unit = {
    killExecutor(exec,
      s"Killing blacklisted idle executor id $exec because of task unschedulability and trying " +
      "to acquire a new executor.")
  }

  private def killExecutorsOnBlacklistedNode(node: String): Unit = {
    if (conf.get(config.BLACKLIST_KILL_ENABLED)) {
      allocationClient match {
        case Some(a) =>
          logInfo(s"Killing all executors on blacklisted host $node " +
            s"since ${config.BLACKLIST_KILL_ENABLED.key} is set.")
          // Idiom fix: `!cond` instead of `cond == false`.
          if (!a.killExecutorsOnHost(node)) {
            logError(s"Killing executors on node $node failed.")
          }
        case None =>
          logWarning(s"Not attempting to kill executors on blacklisted host $node " +
            s"since allocation client is not defined.")
      }
    }
  }

  def updateBlacklistForFetchFailure(host: String, exec: String): Unit = {
    if (BLACKLIST_FETCH_FAILURE_ENABLED) {
      // If we blacklist on fetch failures, we are implicitly saying that we believe the failure is
      // non-transient, and can't be recovered from (even if this is the first fetch failure,
      // stage is retried after just one failure, so we don't always get a chance to collect
      // multiple fetch failures).
      // If the external shuffle-service is on, then every other executor on this node would
      // be suffering from the same issue, so we should blacklist (and potentially kill) all
      // of them immediately.
      val now = clock.getTimeMillis()
      val expiryTimeForNewBlacklists = now + BLACKLIST_TIMEOUT_MILLIS
      if (conf.get(config.SHUFFLE_SERVICE_ENABLED)) {
        if (!nodeIdToBlacklistExpiryTime.contains(host)) {
          logInfo(s"blacklisting node $host due to fetch failure of external shuffle service")
          nodeIdToBlacklistExpiryTime.put(host, expiryTimeForNewBlacklists)
          listenerBus.post(SparkListenerNodeBlacklisted(now, host, 1))
          _nodeBlacklist.set(nodeIdToBlacklistExpiryTime.keySet.toSet)
          killExecutorsOnBlacklistedNode(host)
          updateNextExpiryTime()
        }
      } else if (!executorIdToBlacklistStatus.contains(exec)) {
        logInfo(s"Blacklisting executor $exec due to fetch failure")
        executorIdToBlacklistStatus.put(exec, BlacklistedExecutor(host, expiryTimeForNewBlacklists))
        // We hardcoded number of failure tasks to 1 for fetch failure, because there's no
        // reattempt for such failure.
        listenerBus.post(SparkListenerExecutorBlacklisted(now, exec, 1))
        updateNextExpiryTime()
        killBlacklistedExecutor(exec)
        val blacklistedExecsOnNode = nodeToBlacklistedExecs.getOrElseUpdate(host, HashSet[String]())
        blacklistedExecsOnNode += exec
      }
    }
  }

  def updateBlacklistForSuccessfulTaskSet(
      stageId: Int,
      stageAttemptId: Int,
      failuresByExec: HashMap[String, ExecutorFailuresInTaskSet]): Unit = {
    // if any tasks failed, we count them towards the overall failure count for the executor at
    // this point.
    val now = clock.getTimeMillis()
    failuresByExec.foreach { case (exec, failuresInTaskSet) =>
      val appFailuresOnExecutor =
        executorIdToFailureList.getOrElseUpdate(exec, new ExecutorFailureList)
      appFailuresOnExecutor.addFailures(stageId, stageAttemptId, failuresInTaskSet)
      appFailuresOnExecutor.dropFailuresWithTimeoutBefore(now)
      val newTotal = appFailuresOnExecutor.numUniqueTaskFailures
      val expiryTimeForNewBlacklists = now + BLACKLIST_TIMEOUT_MILLIS
      // If this pushes the total number of failures over the threshold, blacklist the executor.
      // If its already blacklisted, we avoid "re-blacklisting" (which can happen if there were
      // other tasks already running in another taskset when it got blacklisted), because it makes
      // some of the logic around expiry times a little more confusing. But it also wouldn't be a
      // problem to re-blacklist, with a later expiry time.
      if (newTotal >= MAX_FAILURES_PER_EXEC && !executorIdToBlacklistStatus.contains(exec)) {
        logInfo(s"Blacklisting executor id: $exec because it has $newTotal" +
          s" task failures in successful task sets")
        val node = failuresInTaskSet.node
        executorIdToBlacklistStatus.put(exec, BlacklistedExecutor(node, expiryTimeForNewBlacklists))
        listenerBus.post(SparkListenerExecutorBlacklisted(now, exec, newTotal))
        executorIdToFailureList.remove(exec)
        updateNextExpiryTime()
        killBlacklistedExecutor(exec)
        // In addition to blacklisting the executor, we also update the data for failures on the
        // node, and potentially put the entire node into a blacklist as well.
        val blacklistedExecsOnNode = nodeToBlacklistedExecs.getOrElseUpdate(node, HashSet[String]())
        blacklistedExecsOnNode += exec
        // If the node is already in the blacklist, we avoid adding it again with a later expiry
        // time.
        if (blacklistedExecsOnNode.size >= MAX_FAILED_EXEC_PER_NODE &&
            !nodeIdToBlacklistExpiryTime.contains(node)) {
          logInfo(s"Blacklisting node $node because it has ${blacklistedExecsOnNode.size} " +
            s"executors blacklisted: ${blacklistedExecsOnNode}")
          nodeIdToBlacklistExpiryTime.put(node, expiryTimeForNewBlacklists)
          listenerBus.post(SparkListenerNodeBlacklisted(now, node, blacklistedExecsOnNode.size))
          _nodeBlacklist.set(nodeIdToBlacklistExpiryTime.keySet.toSet)
          killExecutorsOnBlacklistedNode(node)
        }
      }
    }
  }

  def isExecutorBlacklisted(executorId: String): Boolean = {
    executorIdToBlacklistStatus.contains(executorId)
  }

  /**
   * Get the full set of nodes that are blacklisted. Unlike other methods in this class, this *IS*
   * thread-safe -- no lock required on a taskScheduler.
   */
  def nodeBlacklist(): Set[String] = {
    _nodeBlacklist.get()
  }

  def isNodeBlacklisted(node: String): Boolean = {
    nodeIdToBlacklistExpiryTime.contains(node)
  }

  def handleRemovedExecutor(executorId: String): Unit = {
    // We intentionally do not clean up executors that are already blacklisted in
    // nodeToBlacklistedExecs, so that if another executor on the same node gets blacklisted, we can
    // blacklist the entire node. We also can't clean up executorIdToBlacklistStatus, so we can
    // eventually remove the executor after the timeout. Despite not clearing those structures
    // here, we don't expect they will grow too big since you won't get too many executors on one
    // node, and the timeout will clear it up periodically in any case.
    executorIdToFailureList -= executorId
  }

  /**
   * Tracks all failures for one executor (that have not passed the timeout).
   *
   * In general we actually expect this to be extremely small, since it won't contain more than the
   * maximum number of task failures before an executor is failed (default 2).
   */
  private[scheduler] final class ExecutorFailureList extends Logging {
    private case class TaskId(stage: Int, stageAttempt: Int, taskIndex: Int)
    /**
     * All failures on this executor in successful task sets.
     */
    private var failuresAndExpiryTimes = ArrayBuffer[(TaskId, Long)]()
    /**
     * As an optimization, we track the min expiry time over all entries in failuresAndExpiryTimes
     * so its quick to tell if there are any failures with expiry before the current time.
     */
    private var minExpiryTime = Long.MaxValue

    def addFailures(
        stage: Int,
        stageAttempt: Int,
        failuresInTaskSet: ExecutorFailuresInTaskSet): Unit = {
      failuresInTaskSet.taskToFailureCountAndFailureTime.foreach {
        case (taskIdx, (_, failureTime)) =>
          val expiryTime = failureTime + BLACKLIST_TIMEOUT_MILLIS
          failuresAndExpiryTimes += ((TaskId(stage, stageAttempt, taskIdx), expiryTime))
          if (expiryTime < minExpiryTime) {
            minExpiryTime = expiryTime
          }
      }
    }

    /**
     * The number of unique tasks that failed on this executor. Only counts failures within the
     * timeout, and in successful tasksets.
     */
    def numUniqueTaskFailures: Int = failuresAndExpiryTimes.size

    def isEmpty: Boolean = failuresAndExpiryTimes.isEmpty

    /**
     * Apply the timeout to individual tasks. This is to prevent one-off failures that are very
     * spread out in time (and likely have nothing to do with problems on the executor) from
     * triggering blacklisting. However, note that we do *not* remove executors and nodes from
     * the blacklist as we expire individual task failures -- each have their own timeout. Eg.,
     * suppose:
     * * timeout = 10, maxFailuresPerExec = 2
     * * Task 1 fails on exec 1 at time 0
     * * Task 2 fails on exec 1 at time 5
     * --> exec 1 is blacklisted from time 5 - 15.
     * This is to simplify the implementation, as well as keep the behavior easier to understand
     * for the end user.
     */
    def dropFailuresWithTimeoutBefore(dropBefore: Long): Unit = {
      if (minExpiryTime < dropBefore) {
        var newMinExpiry = Long.MaxValue
        val newFailures = new ArrayBuffer[(TaskId, Long)]
        failuresAndExpiryTimes.foreach { case (task, expiryTime) =>
          if (expiryTime >= dropBefore) {
            newFailures += ((task, expiryTime))
            if (expiryTime < newMinExpiry) {
              newMinExpiry = expiryTime
            }
          }
        }
        failuresAndExpiryTimes = newFailures
        minExpiryTime = newMinExpiry
      }
    }

    override def toString(): String = {
      s"failures = $failuresAndExpiryTimes"
    }
  }
}
private[spark] object BlacklistTracker extends Logging {
  // Fallback blacklist timeout when neither the new nor the legacy conf is set.
  private val DEFAULT_TIMEOUT = "1h"

  /**
   * Returns true if the blacklist is enabled, based on checking the configuration in the following
   * order:
   * 1. Is it specifically enabled or disabled?
   * 2. Is it enabled via the legacy timeout conf?
   * 3. Default is off
   */
  def isBlacklistEnabled(conf: SparkConf): Boolean = {
    conf.get(config.BLACKLIST_ENABLED) match {
      case Some(enabled) =>
        enabled
      case None =>
        // if they've got a non-zero setting for the legacy conf, always enable the blacklist,
        // otherwise, use the default.
        val legacyKey = config.BLACKLIST_LEGACY_TIMEOUT_CONF.key
        conf.get(config.BLACKLIST_LEGACY_TIMEOUT_CONF).exists { legacyTimeout =>
          if (legacyTimeout == 0) {
            logWarning(s"Turning off blacklisting due to legacy configuration: $legacyKey == 0")
            false
          } else {
            logWarning(s"Turning on blacklisting due to legacy configuration: $legacyKey > 0")
            true
          }
        }
    }
  }

  /** Blacklist timeout in ms: new conf wins over the legacy conf, with a 1h default. */
  def getBlacklistTimeout(conf: SparkConf): Long = {
    conf.get(config.BLACKLIST_TIMEOUT_CONF).getOrElse {
      conf.get(config.BLACKLIST_LEGACY_TIMEOUT_CONF).getOrElse {
        Utils.timeStringAsMs(DEFAULT_TIMEOUT)
      }
    }
  }

  /**
   * Verify that blacklist configurations are consistent; if not, throw an exception. Should only
   * be called if blacklisting is enabled.
   *
   * The configuration for the blacklist is expected to adhere to a few invariants. Default
   * values follow these rules of course, but users may unwittingly change one configuration
   * without making the corresponding adjustment elsewhere. This ensures we fail-fast when
   * there are such misconfigurations.
   */
  def validateBlacklistConfs(conf: SparkConf): Unit = {
    // Reports a non-positive config value and aborts validation.
    def mustBePos(k: String, v: String): Unit = {
      throw new IllegalArgumentException(s"$k was $v, but must be > 0.")
    }
    Seq(
      config.MAX_TASK_ATTEMPTS_PER_EXECUTOR,
      config.MAX_TASK_ATTEMPTS_PER_NODE,
      config.MAX_FAILURES_PER_EXEC_STAGE,
      config.MAX_FAILED_EXEC_PER_NODE_STAGE,
      config.MAX_FAILURES_PER_EXEC,
      config.MAX_FAILED_EXEC_PER_NODE
    ).foreach { entry =>
      // Renamed from `config` to `entry`: the old name shadowed the imported
      // `config` package object used elsewhere in this method.
      val v = conf.get(entry)
      if (v <= 0) {
        mustBePos(entry.key, v.toString)
      }
    }
    val timeout = getBlacklistTimeout(conf)
    if (timeout <= 0) {
      // first, figure out where the timeout came from, to include the right conf in the message.
      conf.get(config.BLACKLIST_TIMEOUT_CONF) match {
        case Some(_) =>
          mustBePos(config.BLACKLIST_TIMEOUT_CONF.key, timeout.toString)
        case None =>
          mustBePos(config.BLACKLIST_LEGACY_TIMEOUT_CONF.key, timeout.toString)
      }
    }
    val maxTaskFailures = conf.get(config.TASK_MAX_FAILURES)
    val maxNodeAttempts = conf.get(config.MAX_TASK_ATTEMPTS_PER_NODE)
    if (maxNodeAttempts >= maxTaskFailures) {
      throw new IllegalArgumentException(s"${config.MAX_TASK_ATTEMPTS_PER_NODE.key} " +
        s"( = ${maxNodeAttempts}) was >= ${config.TASK_MAX_FAILURES.key} " +
        s"( = ${maxTaskFailures} ). Though blacklisting is enabled, with this configuration, " +
        s"Spark will not be robust to one bad node. Decrease " +
        s"${config.MAX_TASK_ATTEMPTS_PER_NODE.key}, increase ${config.TASK_MAX_FAILURES.key}, " +
        s"or disable blacklisting with ${config.BLACKLIST_ENABLED.key}")
    }
  }
}
private final case class BlacklistedExecutor(node: String, expiryTime: Long)
| pgandhi999/spark | core/src/main/scala/org/apache/spark/scheduler/BlacklistTracker.scala | Scala | apache-2.0 | 21,141 |
/* - Coeus web framework -------------------------
*
* Licensed under the Apache License, Version 2.0.
*
* Author: Spiros Tzavellas
*/
package com.tzavellas.coeus.core
/**
 * Map from a method name (Symbol) to its Handler, mutated with a copy-on-write
 * scheme: the underlying Map is immutable and the reference is @volatile, so
 * readers always see a consistent snapshot without locking.
 *
 * NOTE(review): each mutator does a non-atomic read-modify-write on the volatile
 * reference, so concurrent writers can lose updates — confirm that writes only
 * happen single-threaded (e.g. during configuration).
 */
private class MutableHandlerMap extends HandlerMap {

  @volatile
  private var handlers = Map[Symbol, Handler]()

  def hasHandlers = !handlers.isEmpty
  def isEmpty = handlers.isEmpty
  def isMethodAllowed(method: Symbol) = handlers.contains(method)
  def supportedMethods = handlers.keySet.map(_.name)
  def apply(method: Symbol) = handlers(method)

  // Adds or replaces the handler registered for `method`.
  def put(method: Symbol, handler: Handler) {
    handlers = handlers + (method -> handler)
  }

  // Copies all of `source`'s handlers, overwriting any shared keys.
  def putAll(source: MutableHandlerMap) {
    handlers = handlers ++ source.handlers
  }

  // Copies only those handlers from `source` whose method is not already registered here.
  def addMissingHandlers(source: MutableHandlerMap) {
    for (handler <- source.handlers if ! handlers.contains(handler._1))
      handlers = handlers + handler
  }

  override def toString = handlers.toString
} | sptz45/coeus | src/main/scala/com/tzavellas/coeus/core/MutableHandlerMap.scala | Scala | apache-2.0 | 935 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package services.mocks
import org.mockito.ArgumentMatchers.any
import org.mockito.Mockito.when
import org.mockito.stubbing.OngoingStubbing
import services.TimeService
import uk.gov.hmrc.time.workingdays.{BankHoliday, BankHolidaySet}
import java.time.{LocalDate, LocalDateTime}
/**
 * Mockito stubbing helpers for TimeService, pinning "now" to a caller-supplied
 * date so date-sensitive tests are deterministic.
 */
trait TimeServiceMock {

  import org.joda.time.{LocalDate => JodaLocalDate, LocalDateTime => JodaLocalDateTime}

  // Supplied by the mixing-in test suite (expected to be a Mockito mock).
  val mockTimeService: TimeService

  // Stubs every TimeService member relative to `date`.
  // NOTE(review): java.time and Joda values are bridged via their ISO string
  // representations (parse(x.toString)) — relies on the formats being compatible.
  def mockAllTimeService(date: LocalDateTime, minAdditionalDayInFuture: Int): OngoingStubbing[BankHolidaySet] = {
    val ldt = JodaLocalDateTime.parse(date.toString)
    val ld = ldt.toLocalDate

    when(mockTimeService.currentLocalDate)
      .thenReturn(ld)
    when(mockTimeService.currentDateTime)
      .thenReturn(ldt)
    when(mockTimeService.dayEndHour)
      .thenReturn(14)
    when(mockTimeService.getMinWorkingDayInFuture(any()))
      .thenReturn(LocalDate.parse(ld.plusDays(minAdditionalDayInFuture).toString))
    when(mockTimeService.addMonths(any()))
      .thenReturn(LocalDate.parse(ld.plusMonths(3).toString))
    when(mockTimeService.minusYears(any()))
      .thenReturn(LocalDate.parse(ld.minusYears(4).toString))
    // Fixed 2017/18 England-and-Wales bank-holiday calendar, so working-day
    // calculations are stable regardless of when the tests run.
    when(mockTimeService.bankHolidaySet)
      .thenReturn(BankHolidaySet("england-and-wales", List(
        BankHoliday(title = "Good Friday", date = new JodaLocalDate(2017, 4, 14)),
        BankHoliday(title = "Easter Monday", date = new JodaLocalDate(2017, 4, 17)),
        BankHoliday(title = "Early May bank holiday", date = new JodaLocalDate(2017, 5, 1)),
        BankHoliday(title = "Spring bank holiday", date = new JodaLocalDate(2017, 5, 29)),
        BankHoliday(title = "Summer bank holiday", date = new JodaLocalDate(2017, 8, 28)),
        BankHoliday(title = "Christmas Day", date = new JodaLocalDate(2017, 12, 25)),
        BankHoliday(title = "Boxing Day", date = new JodaLocalDate(2017, 12, 26)),
        BankHoliday(title = "New Year's Day", date = new JodaLocalDate(2018, 1, 1))
      )))
  }

  // Stubs only `today`, leaving the rest of the service untouched.
  def mockToday(todaysDate: LocalDate): OngoingStubbing[LocalDate] =
    when(mockTimeService.today).thenReturn(todaysDate)
}
| hmrc/vat-registration-frontend | test/services/mocks/TimeServiceMock.scala | Scala | apache-2.0 | 2,712 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding
import org.specs._
import org.apache.hadoop.conf.Configuration
/** Test job: reads two Tsv files as a single MultipleTsvFiles source and copies them to output0. */
class MultiTsvInputJob(args: Args) extends Job(args) {
  try {
    MultipleTsvFiles(List("input0", "input1"), ('query, 'queryStats)).read.write(Tsv("output0"))
  } catch {
    // Deliberate for this test fixture: print failures instead of propagating them.
    case e: Exception => e.printStackTrace()
  }
}
/** Test job: round-trips one SequenceFile and one WritableSequenceFile to matching outputs. */
class SequenceFileInputJob(args: Args) extends Job(args) {
  try {
    SequenceFile("input0").read.write(SequenceFile("output0"))
    WritableSequenceFile("input1", ('query, 'queryStats)).read.write(WritableSequenceFile("output1", ('query, 'queryStats)))
  } catch {
    // Deliberate for this test fixture: print failures instead of propagating them.
    case e: Exception => e.printStackTrace()
  }
}
/**
 * Specs for scalding file sources: multi-file Tsv input, (Writable)SequenceFile
 * round-trips, TextLine iteration, and pathIsGood validation against an on-disk
 * fixture tree (see the layout comment below).
 */
class FileSourceTest extends Specification {
  noDetailedDiffs()
  import Dsl._

  "A MultipleTsvFile Source" should {
    JobTest(new MultiTsvInputJob(_)).
      source(MultipleTsvFiles(List("input0", "input1"), ('query, 'queryStats)),
        List(("foobar", 1), ("helloworld", 2))).
      sink[(String, Int)](Tsv("output0")) {
        outBuf =>
          "take multiple Tsv files as input sources" in {
            outBuf.length must be_==(2)
            outBuf.toList must be_==(List(("foobar", 1), ("helloworld", 2)))
          }
      }
      .run
      .finish
  }

  "A WritableSequenceFile Source" should {
    JobTest(new SequenceFileInputJob(_)).
      source(SequenceFile("input0"),
        List(("foobar0", 1), ("helloworld0", 2))).
      source(WritableSequenceFile("input1", ('query, 'queryStats)),
        List(("foobar1", 1), ("helloworld1", 2))).
      sink[(String, Int)](SequenceFile("output0")) {
        outBuf =>
          "sequence file input" in {
            outBuf.length must be_==(2)
            outBuf.toList must be_==(List(("foobar0", 1), ("helloworld0", 2)))
          }
      }
      .sink[(String, Int)](WritableSequenceFile("output1", ('query, 'queryStats))) {
        outBuf =>
          "writable sequence file input" in {
            outBuf.length must be_==(2)
            outBuf.toList must be_==(List(("foobar1", 1), ("helloworld1", 2)))
          }
      }
      .run
      .finish
  }

  "TextLine.toIterator" should {
    "correctly read strings" in {
      // NOTE(review): depends on the repo-relative tutorial data file being present.
      TextLine("../tutorial/data/hello.txt").toIterator(Config.default, Local(true)).toList must be_==(
        List("Hello world", "Goodbye world"))
    }
  }

  /**
   * The layout of the test data looks like this:
   *
   * /test_data/2013/03 (dir with a single data file in it)
   * /test_data/2013/03/2013-03.txt
   *
   * /test_data/2013/04 (dir with a single data file and a _SUCCESS file)
   * /test_data/2013/04/2013-04.txt
   * /test_data/2013/04/_SUCCESS
   *
   * /test_data/2013/05 (empty dir)
   *
   * /test_data/2013/06 (dir with only a _SUCCESS file)
   * /test_data/2013/06/_SUCCESS
   */
  "default pathIsGood" should {
    import TestFileSource.pathIsGood

    "accept a directory with data in it" in {
      pathIsGood("test_data/2013/03/") must be_==(true)
      pathIsGood("test_data/2013/03/*") must be_==(true)
    }

    "accept a directory with data and _SUCCESS in it" in {
      pathIsGood("test_data/2013/04/") must be_==(true)
      pathIsGood("test_data/2013/04/*") must be_==(true)
    }

    "reject an empty directory" in {
      pathIsGood("test_data/2013/05/") must be_==(false)
      pathIsGood("test_data/2013/05/*") must be_==(false)
    }

    "reject a directory with only _SUCCESS when specified as a glob" in {
      pathIsGood("test_data/2013/06/*") must be_==(false)
    }

    "accept a directory with only _SUCCESS when specified without a glob" in {
      pathIsGood("test_data/2013/06/") must be_==(true)
    }
  }

  "success file source pathIsGood" should {
    import TestSuccessFileSource.pathIsGood

    "reject a directory with data in it but no _SUCCESS file" in {
      pathIsGood("test_data/2013/03/") must be_==(false)
      pathIsGood("test_data/2013/03/*") must be_==(false)
    }

    "accept a directory with data and _SUCCESS in it when specified as a glob" in {
      pathIsGood("test_data/2013/04/*") must be_==(true)
    }

    "reject a directory with data and _SUCCESS in it when specified without a glob" in {
      pathIsGood("test_data/2013/04/") must be_==(false)
    }

    "reject an empty directory" in {
      pathIsGood("test_data/2013/05/") must be_==(false)
      pathIsGood("test_data/2013/05/*") must be_==(false)
    }

    "reject a directory with only _SUCCESS when specified as a glob" in {
      pathIsGood("test_data/2013/06/*") must be_==(false)
    }

    "reject a directory with only _SUCCESS when specified without a glob" in {
      pathIsGood("test_data/2013/06/") must be_==(false)
    }
  }
}
/** Resolves the `scalding-core` module directory and the test-fixture root under it. */
object TestPath {

  /** Canonical path of the process working directory. */
  def getCurrentDirectory = new java.io.File(".").getCanonicalPath

  /** The working directory itself when it already is `scalding-core`, otherwise its `scalding-core` child. */
  def prefix = {
    val cwd = getCurrentDirectory
    if (cwd.split("/").last == "scalding-core") cwd else cwd + "/scalding-core"
  }

  /** Absolute root of the on-disk test filesystem fixtures. */
  val testfsPathRoot = prefix + "/src/test/resources/com/twitter/scalding/test_filesystem/"
}
/** Minimal FileSource exposing path validation against the fixture tree. */
object TestFileSource extends FileSource {
  import TestPath.testfsPathRoot

  // Not exercised by these tests; only pathIsGood behaviour matters here.
  override def hdfsPaths: Iterable[String] = Iterable.empty
  override def localPath: String = ""

  val conf = new Configuration()

  // Forwards to FileSource.pathIsGood (presumably non-public — hence this wrapper),
  // anchoring relative paths at the fixture root.
  def pathIsGood(p: String) = super.pathIsGood(testfsPathRoot + p, conf)
}
/** Like TestFileSource, but mixes in SuccessFileSource so _SUCCESS markers affect validity. */
object TestSuccessFileSource extends FileSource with SuccessFileSource {
  import TestPath.testfsPathRoot

  // Not exercised by these tests; only pathIsGood behaviour matters here.
  override def hdfsPaths: Iterable[String] = Iterable.empty
  override def localPath: String = ""

  val conf = new Configuration()

  // Forwards to the mixed-in pathIsGood, anchoring relative paths at the fixture root.
  def pathIsGood(p: String) = super.pathIsGood(testfsPathRoot + p, conf)
}
| zirpins/scalding | scalding-core/src/test/scala/com/twitter/scalding/FileSourceTest.scala | Scala | apache-2.0 | 6,237 |
import org.apache.spark.rdd.RDD
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization
import org.json4s.jackson.Serialization._
import scala.util.parsing.json.JSON
/**
 * Filters JSON-line records, dropping any record whose "text" contains one of the
 * record's own "nots" terms (case-insensitive).
 *
 * Fixes over the previous version:
 *  - `Option.asInstanceOf[Some[...]]` threw ClassCastException whenever the JSON
 *    failed to parse or a key was absent, making every `getOrElse` default dead
 *    code; extraction is now done with safe pattern matches.
 *  - `filterText` was a byte-for-byte copy of `filterTextNoJSONOut` plus a
 *    serialization step; it now delegates to it.
 */
object NotsFilter {

  // Parses one JSON line into an object map; empty map on parse failure or non-object input.
  private def parseJsonObject(line: String): Map[String, Any] =
    JSON.parseFull(line) match {
      case Some(m: Map[_, _]) => m.asInstanceOf[Map[String, Any]]
      case _ => Map.empty[String, Any]
    }

  // True when the record's "text" contains none of its lowercase "nots" terms.
  private def containsNoBlockedTerms(record: Map[String, Any]): Boolean = {
    val nots = record.get("nots") match {
      case Some(xs: List[_]) => xs.collect { case s: String => s.toLowerCase }
      case _ => List.empty[String]
    }
    val text = record.get("text") match {
      case Some(s: String) => s.toLowerCase
      case _ => ""
    }
    nots.forall(term => !text.contains(term))
  }

  /** Filters the input and re-serializes surviving records back to JSON strings. */
  def filterText(input: RDD[String]): RDD[String] =
    filterTextNoJSONOut(input).map { record =>
      implicit val formats = Serialization.formats(NoTypeHints)
      write(record)
    }

  /** Filters the input, returning surviving records as parsed maps. */
  def filterTextNoJSONOut(input: RDD[String]): RDD[Map[String, Any]] =
    input.map(parseJsonObject).filter(containsNoBlockedTerms)
}
| canademar/me_extractors | BRMDemo_MarchMeeting/src/main/scala/NotsFilter.scala | Scala | gpl-2.0 | 1,329 |
package playchat.api
// External imports
import akka.actor.Actor
import akka.actor.ActorRef
// Internal imports
import playchat.models._
/**
 * Base class for actors capable of originating and replying to «ham» messages.
 *
 * NOTE(review): the akka.actor imports above are unused in this file itself;
 * presumably kept for subclasses — confirm before removing.
 */
abstract class UserActor extends BaseActor
/**
* Copyright 2015 Thomson Reuters
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import akka.actor.ActorSystem
import akka.serialization.{Serializer, SerializationExtension, Serialization}
import cmwell.domain._
import cmwell.domain.FString
import cmwell.rts.{Rule, Publisher}
import k.grid.{GridConnection, Grid}
import scala.io.StdIn
/**
* Created by markz on 7/10/14.
*/
/**
 * Manual smoke-test publisher for the RTS grid: joins the local grid as a client,
 * publishes nine ObjectInfotons, and shuts down. Driven interactively via stdin.
 *
 * NOTE(review): extends App, so all of this runs during object initialization.
 */
object RTSPub extends App {
  // java -Done-jar.main.class=RTSPub -jar cmwell-rts-ng_2.10-1.2.1-SNAPSHOT-one-jar.jar
  val ip : String = "127.0.0.1"
  // start grid
  //Grid.roles = Set("publisher")
  Grid.setGridConnection(GridConnection(memberName = "rts", hostName = ip, seeds = Set("127.0.0.1:2551"), port = 0))
  Grid.joinClient
  //Grid.join(Set("127.0.0.1"),Set("subscriber"))
  Publisher.init
  // scalastyle:off
  println("Press enter publisher.")
  // scalastyle:on
  // Wait for the operator before publishing.
  StdIn.readLine()
  // scalastyle:off
  println("-----------------------------------------")
  // scalastyle:on
  // Publish nine sample infotons with fixed field data.
  (1 to 9).foreach{ i =>
    val m : Map[String , Set[FieldValue]]= Map("name" -> Set(FString("gal"), FString("yoav")), "types" -> Set(FString("123"), FInt(123)))
    val ii = ObjectInfoton("/cmt/cm/command-test/objinfo_" + i,"dc_test", None, m)
    Publisher.publish(Vector(ii))
  }
  // scalastyle:off
  println("-----------------------------------------")
  // scalastyle:on
  // Wait again so the operator can inspect subscribers before shutdown.
  StdIn.readLine()
  Grid.shutdown
}
| hochgi/CM-Well | server/cmwell-rts/src/test/scala/RTSPub.scala | Scala | apache-2.0 | 1,920 |
package com.daxin
// Empty demo class; the apply-method demonstration lives in the companion object.
class ApplyDemo {

}
//通常我们会在类的伴生对象中定义apply方法,当遇到类名(参数1,...参数n)时apply方法会被调用
// We usually define apply methods in a class's companion object; an apply method is
// invoked whenever the class name is applied to arguments: ClassName(arg1, ..., argN).
object ApplyDemo {

  // Where apply is defined: usually in the class's companion object.
  // When apply is invoked: on ClassName(arg1, ..., argN).
  def apply(): Unit = {
    println("this is def apply() ...")
  }

  // Overload invoked by ApplyDemo("...").
  def apply(name: String): Unit = {
    println("this is def apply(name:String) ...")
  }

  // Overload invoked by ApplyDemo("...", n).
  def apply(name: String, age: Int): Unit = {
    println("this is def apply(name:String,age:Int) ...")
  }

  def main(args: Array[String]): Unit = {
    // app1 is the ApplyDemo singleton object itself — no apply call happens here.
    val app1 = ApplyDemo
    // Prints the object's default representation (its hashcode-based toString).
    println("app1 : " + app1)

    // ApplyDemo() invokes apply().
    val app2 = ApplyDemo()
    println("app2 : " + app2)

    val app3 = ApplyDemo("Dax1n")
    // apply(name: String) was invoked.
    println("app3 : " + app3)

    val app4 = ApplyDemo("Dax1n", 18)
    // apply(name: String, age: Int) was invoked.
    println("app4 : " + app4)

    println("***************************************************************")

    // Calls the Array companion object's apply method:
    //   def apply(x: Int, xs: Int*): Array[Int]
    // so arr1 holds the single element 5 (the Array itself is new-ed inside apply).
    val arr1 = Array(5)
    println(arr1.toBuffer)

    // `new Array(5)` constructs an Array of length 5 containing 5 nulls.
    var arr2 = new Array(5)
  }
} | Dax1n/Scala | ApplyMethodDemo/src/com/daxin/ApplyDemo.scala | Scala | apache-2.0 | 2,130 |
package me.jeffshaw.scalaz.stream
import scalaz.concurrent.Task
import scalaz.stream.Process
/**
 * Adds iterator-based constructors to scalaz-stream's Process companion and io
 * objects via implicit conversions to the corresponding extension singletons.
 */
object IteratorConstructors {

  /**
   * Create a Process from an iterator. This should not be used directly,
   * because iterators are mutable.
   *
   * Both hasNext and next are wrapped in Task.delay, so the iterator is only
   * advanced when the process is actually run.
   */
  private [stream] def iteratorGo[O](iterator: Iterator[O]): Process[Task, O] = {
    val hasNext = Task delay { iterator.hasNext }
    val next = Task delay { iterator.next() }

    // Recursively emit one element per step until the iterator is exhausted.
    def go: Process[Task, O] = Process.await(hasNext) { hn => if (hn) Process.eval(next) ++ go else Process.halt }

    go
  }

  // Surfaces the extra constructors on `Process` itself.
  implicit def ProcessToProcessIteratorConstructors(x: Process.type): ProcessIteratorConstructors.type = {
    ProcessIteratorConstructors
  }

  // Surfaces the extra constructors on `scalaz.stream.io` itself.
  implicit def ProcessIoToProcessIoIteratorConstructors(x: scalaz.stream.io.type): ProcessIoIteratorConstructors.type = {
    ProcessIoIteratorConstructors
  }
}
| shawjef3/scalaz-stream-iterator | src/main/scala/me/jeffshaw/scalaz/stream/IteratorConstructors.scala | Scala | mit | 862 |
package org.scalajs.core.ir
import java.security.{MessageDigest, DigestOutputStream}
import java.io.{OutputStream, DataOutputStream}
import java.util.Arrays
import Trees._
import Types._
import Tags._
object Hashers {
/**
 * Returns a MethodDef equal to the input but carrying its SHA-1 hash; a MethodDef
 * that already has a hash is returned unchanged. The hash covers the position,
 * staticness, name, params, result type, body, and optimizer hints.
 */
def hashMethodDef(methodDef: MethodDef): MethodDef = {
  if (methodDef.hash.isDefined) methodDef
  else {
    val hasher = new TreeHasher()
    val MethodDef(static, name, args, resultType, body) = methodDef

    hasher.mixPos(methodDef.pos)
    hasher.mixBoolean(static)
    hasher.mixPropertyName(name)
    hasher.mixTrees(args)
    hasher.mixType(resultType)
    hasher.mixTree(body)
    hasher.mixInt(methodDef.optimizerHints.bits)

    val hash = hasher.finalizeHash()

    // Rebuild the node so the computed hash is attached alongside the hints.
    MethodDef(static, name, args, resultType, body)(
      methodDef.optimizerHints, Some(hash))(methodDef.pos)
  }
}
/** Hashes every MethodDef among `defs`, passing all other definitions through untouched. */
def hashDefs(defs: List[Tree]): List[Tree] =
  defs.map { d =>
    d match {
      case m: MethodDef => hashMethodDef(m)
      case other        => other
    }
  }
/** Hash the definitions in a ClassDef (where applicable), keeping hints and position intact. */
def hashClassDef(classDef: ClassDef): ClassDef = {
  classDef.copy(defs = hashDefs(classDef.defs))(
    classDef.optimizerHints)(classDef.pos)
}
/** True when the tree hashes match and, if `considerPos`, the position hashes match too. */
def hashesEqual(x: TreeHash, y: TreeHash, considerPos: Boolean): Boolean = {
  val treesMatch = Arrays.equals(x.treeHash, y.treeHash)
  val positionsMatch = !considerPos || Arrays.equals(x.posHash, y.posHash)
  treesMatch && positionsMatch
}
/**
 * Renders the hash as an uppercase-hex version string: the tree hash, followed by
 * the position hash when `considerPos` is set.
 */
def hashAsVersion(hash: TreeHash, considerPos: Boolean): String = {
  def hex(bytes: Array[Byte]): String =
    bytes.map(b => f"$b%02X").mkString

  if (considerPos) hex(hash.treeHash) + hex(hash.posHash)
  else hex(hash.treeHash)
}
/**
 * Accumulates SHA-1 digests over an IR tree. Tree content and source positions are
 * mixed into two SEPARATE digests, so callers can compare trees while ignoring
 * positions (see hashesEqual). The exact order of mix* calls defines the hash
 * format — do not reorder.
 */
private final class TreeHasher {
  private def newDigest = MessageDigest.getInstance("SHA-1")
  private def newDigestStream(digest: MessageDigest) = {
    // The inner OutputStream discards all bytes: only the digest side effect
    // of DigestOutputStream is wanted.
    val out = new OutputStream {
      def write(b: Int): Unit = ()
    }
    val digOut = new DigestOutputStream(out, digest)
    new DataOutputStream(digOut)
  }

  private[this] val treeDigest = newDigest
  private[this] val treeStream = newDigestStream(treeDigest)

  private[this] val posDigest = newDigest
  private[this] val posStream = newDigestStream(posDigest)

  // Finishes both digests; the hasher should not be reused afterwards.
  def finalizeHash(): TreeHash =
    new TreeHash(treeDigest.digest(), posDigest.digest())

  // Mixes the node's position, a node-kind tag, and then all of its children,
  // so structurally different trees produce different hashes.
  def mixTree(tree: Tree): Unit = {
    mixPos(tree.pos)
    tree match {
      case EmptyTree =>
        mixTag(TagEmptyTree)

      case VarDef(ident, vtpe, mutable, rhs) =>
        mixTag(TagVarDef)
        mixIdent(ident)
        mixType(vtpe)
        mixBoolean(mutable)
        mixTree(rhs)

      case ParamDef(ident, ptpe, mutable, rest) =>
        mixTag(TagParamDef)
        mixIdent(ident)
        mixType(ptpe)
        mixBoolean(mutable)
        /* TODO Remove this test in the next major release.
         * In 0.6.x we need this test so that the hash of a non-rest ParamDef
         * emitted in 0.6.3 format is the same as an (implicitly non-rest)
         * ParamDef emitted in 0.6.0 format.
         */
        if (rest)
          mixBoolean(rest)

      case Skip() =>
        mixTag(TagSkip)

      case Block(stats) =>
        mixTag(TagBlock)
        mixTrees(stats)

      case Labeled(label, tpe, body) =>
        mixTag(TagLabeled)
        mixIdent(label)
        mixType(tpe)
        mixTree(body)

      case Assign(lhs, rhs) =>
        mixTag(TagAssign)
        mixTree(lhs)
        mixTree(rhs)

      case Return(expr, label) =>
        mixTag(TagReturn)
        mixTree(expr)
        mixOptIdent(label)

      case If(cond, thenp, elsep) =>
        mixTag(TagIf)
        mixTree(cond)
        mixTree(thenp)
        mixTree(elsep)
        mixType(tree.tpe)

      case While(cond, body, label) =>
        mixTag(TagWhile)
        mixTree(cond)
        mixTree(body)
        mixOptIdent(label)

      case DoWhile(body, cond, label) =>
        mixTag(TagDoWhile)
        mixTree(body)
        mixTree(cond)
        mixOptIdent(label)

      case Try(block, errVar, handler, finalizer) =>
        mixTag(TagTry)
        mixTree(block)
        mixIdent(errVar)
        mixTree(handler)
        mixTree(finalizer)
        mixType(tree.tpe)

      case Throw(expr) =>
        mixTag(TagThrow)
        mixTree(expr)

      case Continue(label) =>
        mixTag(TagContinue)
        mixOptIdent(label)

      case Match(selector, cases, default) =>
        mixTag(TagMatch)
        mixTree(selector)
        cases foreach { case (patterns, body) =>
          mixTrees(patterns)
          mixTree(body)
        }
        mixTree(default)
        mixType(tree.tpe)

      case Debugger() =>
        mixTag(TagDebugger)

      case New(cls, ctor, args) =>
        mixTag(TagNew)
        mixType(cls)
        mixIdent(ctor)
        mixTrees(args)

      case LoadModule(cls) =>
        mixTag(TagLoadModule)
        mixType(cls)

      case StoreModule(cls, value) =>
        mixTag(TagStoreModule)
        mixType(cls)
        mixTree(value)

      case Select(qualifier, item) =>
        mixTag(TagSelect)
        mixTree(qualifier)
        mixIdent(item)
        mixType(tree.tpe)

      case Apply(receiver, method, args) =>
        mixTag(TagApply)
        mixTree(receiver)
        mixIdent(method)
        mixTrees(args)
        mixType(tree.tpe)

      case ApplyStatically(receiver, cls, method, args) =>
        mixTag(TagApplyStatically)
        mixTree(receiver)
        mixType(cls)
        mixIdent(method)
        mixTrees(args)
        mixType(tree.tpe)

      case ApplyStatic(cls, method, args) =>
        mixTag(TagApplyStatic)
        mixType(cls)
        mixIdent(method)
        mixTrees(args)
        mixType(tree.tpe)

      case UnaryOp(op, lhs) =>
        mixTag(TagUnaryOp)
        mixInt(op)
        mixTree(lhs)

      case BinaryOp(op, lhs, rhs) =>
        mixTag(TagBinaryOp)
        mixInt(op)
        mixTree(lhs)
        mixTree(rhs)

      case NewArray(tpe, lengths) =>
        mixTag(TagNewArray)
        mixType(tpe)
        mixTrees(lengths)

      case ArrayValue(tpe, elems) =>
        mixTag(TagArrayValue)
        mixType(tpe)
        mixTrees(elems)

      case ArrayLength(array) =>
        mixTag(TagArrayLength)
        mixTree(array)

      case ArraySelect(array, index) =>
        mixTag(TagArraySelect)
        mixTree(array)
        mixTree(index)
        mixType(tree.tpe)

      case RecordValue(tpe, elems) =>
        mixTag(TagRecordValue)
        mixType(tpe)
        mixTrees(elems)

      case IsInstanceOf(expr, cls) =>
        mixTag(TagIsInstanceOf)
        mixTree(expr)
        mixType(cls)

      case AsInstanceOf(expr, cls) =>
        mixTag(TagAsInstanceOf)
        mixTree(expr)
        mixType(cls)

      case Unbox(expr, charCode) =>
        mixTag(TagUnbox)
        mixTree(expr)
        mixInt(charCode)

      case GetClass(expr) =>
        mixTag(TagGetClass)
        mixTree(expr)

      case CallHelper(helper, args) =>
        mixTag(TagCallHelper)
        mixString(helper)
        mixTrees(args)
        mixType(tree.tpe)

      case JSNew(ctor, args) =>
        mixTag(TagJSNew)
        mixTree(ctor)
        mixTrees(args)

      case JSDotSelect(qualifier, item) =>
        mixTag(TagJSDotSelect)
        mixTree(qualifier)
        mixIdent(item)

      case JSBracketSelect(qualifier, item) =>
        mixTag(TagJSBracketSelect)
        mixTree(qualifier)
        mixTree(item)

      case JSFunctionApply(fun, args) =>
        mixTag(TagJSFunctionApply)
        mixTree(fun)
        mixTrees(args)

      case JSDotMethodApply(receiver, method, args) =>
        mixTag(TagJSDotMethodApply)
        mixTree(receiver)
        mixIdent(method)
        mixTrees(args)

      case JSBracketMethodApply(receiver, method, args) =>
        mixTag(TagJSBracketMethodApply)
        mixTree(receiver)
        mixTree(method)
        mixTrees(args)

      case JSSpread(items) =>
        mixTag(TagJSSpread)
        mixTree(items)

      case JSDelete(prop) =>
        mixTag(TagJSDelete)
        mixTree(prop)

      case JSUnaryOp(op, lhs) =>
        mixTag(TagJSUnaryOp)
        mixInt(op)
        mixTree(lhs)

      case JSBinaryOp(op, lhs, rhs) =>
        mixTag(TagJSBinaryOp)
        mixInt(op)
        mixTree(lhs)
        mixTree(rhs)

      case JSArrayConstr(items) =>
        mixTag(TagJSArrayConstr)
        mixTrees(items)

      case JSObjectConstr(fields) =>
        mixTag(TagJSObjectConstr)
        fields foreach { case (pn, value) =>
          mixPropertyName(pn)
          mixTree(value)
        }

      case JSEnvInfo() =>
        mixTag(TagJSEnvInfo)

      case Undefined() =>
        mixTag(TagUndefined)

      case UndefinedParam() =>
        mixTag(TagUndefinedParam)
        mixType(tree.tpe)

      case Null() =>
        mixTag(TagNull)

      case BooleanLiteral(value) =>
        mixTag(TagBooleanLiteral)
        mixBoolean(value)

      case IntLiteral(value) =>
        mixTag(TagIntLiteral)
        mixInt(value)

      case LongLiteral(value) =>
        mixTag(TagLongLiteral)
        mixLong(value)

      case FloatLiteral(value) =>
        mixTag(TagFloatLiteral)
        mixFloat(value)

      case DoubleLiteral(value) =>
        mixTag(TagDoubleLiteral)
        mixDouble(value)

      case StringLiteral(value) =>
        mixTag(TagStringLiteral)
        mixString(value)

      case ClassOf(cls) =>
        mixTag(TagClassOf)
        mixType(cls)

      case VarRef(ident) =>
        mixTag(TagVarRef)
        mixIdent(ident)
        mixType(tree.tpe)

      case This() =>
        mixTag(TagThis)
        mixType(tree.tpe)

      case Closure(captureParams, params, body, captureValues) =>
        mixTag(TagClosure)
        mixTrees(captureParams)
        mixTrees(params)
        mixTree(body)
        mixTrees(captureValues)

      case _ =>
        sys.error(s"Unable to hash tree of class ${tree.getClass}")
    }
  }

  def mixTrees(trees: List[Tree]): Unit =
    trees.foreach(mixTree)

  // Types are mixed as a tag plus the type's identifying components.
  def mixType(tpe: Type): Unit = tpe match {
    case AnyType     => mixTag(TagAnyType)
    case NothingType => mixTag(TagNothingType)
    case UndefType   => mixTag(TagUndefType)
    case BooleanType => mixTag(TagBooleanType)
    case IntType     => mixTag(TagIntType)
    case LongType    => mixTag(TagLongType)
    case FloatType   => mixTag(TagFloatType)
    case DoubleType  => mixTag(TagDoubleType)
    case StringType  => mixTag(TagStringType)
    case NullType    => mixTag(TagNullType)
    case NoType      => mixTag(TagNoType)

    case tpe: ClassType =>
      mixTag(TagClassType)
      mixString(tpe.className)

    case tpe: ArrayType =>
      mixTag(TagArrayType)
      mixString(tpe.baseClassName)
      mixInt(tpe.dimensions)

    case RecordType(fields) =>
      mixTag(TagRecordType)
      for (RecordType.Field(name, originalName, tpe, mutable) <- fields) {
        mixString(name)
        originalName.foreach(mixString)
        mixType(tpe)
        mixBoolean(mutable)
      }
  }

  def mixIdent(ident: Ident): Unit = {
    mixPos(ident.pos)
    mixString(ident.name)
    ident.originalName.foreach(mixString)
  }

  def mixOptIdent(optIdent: Option[Ident]): Unit = optIdent.foreach(mixIdent)

  def mixPropertyName(name: PropertyName): Unit = name match {
    case name: Ident         => mixIdent(name)
    case name: StringLiteral => mixTree(name)
  }

  // Positions go into their own digest so tree comparison can ignore them.
  def mixPos(pos: Position): Unit = {
    posStream.writeUTF(pos.source.toString)
    posStream.writeInt(pos.line)
    posStream.writeInt(pos.column)
  }

  @inline
  final def mixTag(tag: Int): Unit = mixInt(tag)

  @inline
  final def mixString(str: String): Unit = treeStream.writeUTF(str)

  @inline
  final def mixInt(i: Int): Unit = treeStream.writeInt(i)

  @inline
  final def mixLong(l: Long): Unit = treeStream.writeLong(l)

  @inline
  final def mixBoolean(b: Boolean): Unit = treeStream.writeBoolean(b)

  @inline
  final def mixFloat(f: Float): Unit = treeStream.writeFloat(f)

  @inline
  final def mixDouble(d: Double): Unit = treeStream.writeDouble(d)
}
}
| matthughes/scala-js | ir/src/main/scala/org/scalajs/core/ir/Hashers.scala | Scala | bsd-3-clause | 12,858 |
package org.jetbrains.plugins.scala
package codeInspection
package collections
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScExpression
import scala.collection.immutable.ArraySeq
/**
* @author Nikolay.Tropin
*/
/** Inspection that offers replacing `seq(0)` with `seq.head` (see [[ZeroIndexToHead]]). */
class ZeroIndexToHeadInspection extends OperationOnCollectionInspection {
  override def possibleSimplificationTypes: ArraySeq[SimplificationType] = ArraySeq(ZeroIndexToHead)
}
/** Simplification: `seq(0)` -> `seq.head` for Seqs that are not IndexedSeqs. */
object ZeroIndexToHead extends SimplificationType() {
  override def hint: String = ScalaInspectionBundle.message("replace.with.head")

  override def getSimplification(expr: ScExpression): Option[Simplification] = {
    expr match {
      // Matches `qual.apply(0)` / `qual(0)`; restricted to non-indexed Seqs, where
      // `head` is the idiomatic (and equally cheap) form.
      case qual`.apply`(literal("0")) if isSeq(qual) && !isIndexedSeq(qual) =>
        Some(replace(expr).withText(invocationText(qual, "head")).highlightFrom(qual))
      case _ => None
    }
  }
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/codeInspection/collections/ZeroIndexToHeadInspection.scala | Scala | apache-2.0 | 836 |
/*
* Copyright (c) 2014 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
// SBT
import sbt._
import Keys._
object BuildSettings {
// Basic settings for our app
lazy val basicSettings = Seq[Setting[_]](
organization := "com.snowplowanalytics",
version := "0.1.1",
description := "Kinesis Redshift sink",
resolvers ++= Dependencies.resolutionRepos
)
// Makes our SBT app settings available from within the app
lazy val scalifySettings = Seq(sourceGenerators in Compile <+= (sourceManaged in Compile, version, name, organization) map { (d, v, n, o) =>
val file = d / "settings.scala"
IO.write(file, """package com.snowplowanalytics.snowplow.storage.kinesis.redshift.generated
|object Settings {
| val organization = "%s"
| val version = "%s"
| val name = "%s"
|}
|""".stripMargin.format(o, v, n))
Seq(file)
})
// sbt-assembly settings for building a fat jar
import sbtassembly.AssemblyPlugin.autoImport._
lazy val sbtAssemblySettings = baseAssemblySettings ++ Seq(
// Name it as an executable
assemblyJarName in assembly := { s"${name.value}-${version.value}" },
assemblyExcludedJars in assembly <<= (fullClasspath in assembly) map { cp =>
val excludes = Set(
"junit-4.8.2.jar",
"jsp-2.1-6.1.14.jar",
"jasper-compiler-5.5.12.jar",
"jsp-api-2.1-6.1.14.jar",
"servlet-api-2.5-6.1.14.jar",
"commons-beanutils-1.7.0.jar",
"hadoop-lzo-0.4.19.jar",
"stax-api-1.0.1.jar",
"commons-beanutils-core-1.8.0.jar",
"minlog-1.2.jar",
"aws-java-sdk-1.6.10.jar"
)
cp filter { jar => excludes(jar.data.getName) }
},
assemblyMergeStrategy in assembly := {
case PathList("javax", "servlet", xs @ _*) => MergeStrategy.first
case PathList("org", "objectweb", "asm", xs @ _*) => MergeStrategy.first
case PathList(ps @ _*) if ps.last endsWith ".html" => MergeStrategy.first
case "application.conf" => MergeStrategy.concat
case x =>
val oldStrategy = (assemblyMergeStrategy in assembly).value
oldStrategy(x)
}
)
lazy val buildSettings = basicSettings ++ scalifySettings ++ sbtAssemblySettings
}
| jramos/snowplow | 4-storage/kinesis-redshift-sink/project/BuildSettings.scala | Scala | apache-2.0 | 2,953 |
package demo.akka.mapreduce.data
import scala.collection.mutable.HashMap
case class ReduceData(reduceDataMap: HashMap[String, Int]) | jianwu-github/akka-mapreduce-demo | src/main/scala/demo/akka/mapreduce/data/ReduceData.scala | Scala | cc0-1.0 | 133 |
/*
* Copyright 2020 Precog Data
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.frontend.logicalplan
import slamdata.Predef._
import quasar.{Func, TreeMatchers}
import quasar.common.data.Data
import quasar.common.SortDir
import quasar.std._, StdLib.structural._
import matryoshka._
import matryoshka.data.Fix
import org.scalacheck._
import pathy.Path._
import scalaz.{Free => _, _}, Scalaz._
import scalaz.scalacheck.ScalaCheckBinding._
import scalaz.scalacheck.ScalazProperties._
// Tests for the LogicalPlan optimizer:
//   - "simplify": inlining and elimination of let-bindings,
//   - "preferProjections": rewriting DeleteKey into explicit projections when the record shape is known,
//   - "Component": applicative laws for the Component functor.
class OptimizerSpec extends quasar.Qspec with LogicalPlanHelpers with TreeMatchers {

  // Shorthand for the LogicalPlan pattern functor used in annotations below.
  type LP[A] = LogicalPlan[A]

  "simplify" should {
    // A binding referenced exactly once is replaced by its definition and the let disappears.
    "inline trivial binding" in {
      optimizer.simplify(lpf.let('tmp0, read("foo"), lpf.free('tmp0))) must
        beTreeEqual(read("foo"))
    }

    // A binding referenced twice is preserved (inlining would duplicate the bound plan).
    "not inline binding that's used twice" in {
      optimizer.simplify(lpf.let('tmp0, read("foo"),
        makeObj(
          "bar" -> lpf.invoke2(MapProject, lpf.free('tmp0), lpf.constant(Data.Str("bar"))),
          "baz" -> lpf.invoke2(MapProject, lpf.free('tmp0), lpf.constant(Data.Str("baz")))))) must
        beTreeEqual(
          lpf.let('tmp0, read("foo"),
            makeObj(
              "bar" -> lpf.invoke2(MapProject, lpf.free('tmp0), lpf.constant(Data.Str("bar"))),
              "baz" -> lpf.invoke2(MapProject, lpf.free('tmp0), lpf.constant(Data.Str("baz"))))))
    }

    // Chained aliases (tmp1 = tmp0) collapse all the way down to the underlying read.
    "completely inline stupid lets" in {
      optimizer.simplify(lpf.let('tmp0, read("foo"), lpf.let('tmp1, lpf.free('tmp0), lpf.free('tmp1)))) must
        beTreeEqual(read("foo"))
    }

    // References resolve to the innermost (shadowing) binding of a rebound name.
    "inline correct value for shadowed binding" in {
      optimizer.simplify(lpf.let('tmp0, read("foo"),
        lpf.let('tmp0, read("bar"),
          makeObj(
            "bar" -> lpf.invoke2(MapProject, lpf.free('tmp0), lpf.constant(Data.Str("bar"))))))) must
        beTreeEqual(
          makeObj(
            "bar" -> lpf.invoke2(MapProject, read("bar"), lpf.constant(Data.Str("bar")))))
    }

    // Both the outer binding and the single-use shadowing binding are inlined.
    "inline a binding used once, then shadowed once" in {
      optimizer.simplify(lpf.let('tmp0, read("foo"),
        lpf.invoke2(MapProject, lpf.free('tmp0),
          lpf.let('tmp0, read("bar"),
            makeObj(
              "bar" -> lpf.invoke2(MapProject, lpf.free('tmp0), lpf.constant(Data.Str("bar")))))))) must
        beTreeEqual(
          lpf.invoke(MapProject, Func.Input2(
            read("foo"),
            makeObj(
              "bar" -> lpf.invoke2(MapProject, read("bar"), lpf.constant(Data.Str("bar")))))))
    }

    // The shadowing let survives here because its binding is used twice inside.
    "inline a binding used once, then shadowed twice" in {
      optimizer.simplify(lpf.let('tmp0, read("foo"),
        lpf.invoke2(MapProject, lpf.free('tmp0),
          lpf.let('tmp0, read("bar"),
            makeObj(
              "bar" -> lpf.invoke2(MapProject, lpf.free('tmp0), lpf.constant(Data.Str("bar"))),
              "baz" -> lpf.invoke2(MapProject, lpf.free('tmp0), lpf.constant(Data.Str("baz")))))))) must
        beTreeEqual(
          lpf.invoke(MapProject, Func.Input2(
            read("foo"),
            lpf.let('tmp0, read("bar"),
              makeObj(
                "bar" -> lpf.invoke2(MapProject, lpf.free('tmp0), lpf.constant(Data.Str("bar"))),
                "baz" -> lpf.invoke2(MapProject, lpf.free('tmp0), lpf.constant(Data.Str("baz"))))))))
    }

    // Single-use bindings ('tmp0, 'tmp2) are inlined; 'tmp1 is used twice and kept.
    "partially inline a more interesting case" in {
      optimizer.simplify(lpf.let('tmp0, read("person"),
        lpf.let('tmp1,
          makeObj(
            "name" -> lpf.invoke2(MapProject, lpf.free('tmp0), lpf.constant(Data.Str("name")))),
          lpf.let('tmp2,
            lpf.sort(
              lpf.free('tmp1),
              (lpf.invoke2(MapProject, lpf.free('tmp1), lpf.constant(Data.Str("name"))), SortDir.asc).wrapNel),
            lpf.free('tmp2))))) must
        beTreeEqual(
          lpf.let('tmp1,
            makeObj(
              "name" ->
                lpf.invoke2(MapProject, read("person"), lpf.constant(Data.Str("name")))),
            lpf.sort(
              lpf.free('tmp1),
              (lpf.invoke2(MapProject, lpf.free('tmp1), lpf.constant(Data.Str("name"))), SortDir.asc).wrapNel)))
    }
  }

  "preferProjections" should {
    // With no statically-known record shape there is nothing to rewrite.
    "ignore a delete with unknown shape" in {
      optimizer.preferProjections(
        lpf.invoke2(DeleteKey, lpf.read(file("zips")),
          lpf.constant(Data.Str("pop")))) must
        beTreeEqual[Fix[LP]](
          lpf.invoke2(DeleteKey, lpf.read(file("zips")),
            lpf.constant(Data.Str("pop"))))
    }

    // Deleting "pop" from a two-field object becomes a projection of the remaining field.
    "convert a delete after a projection" in {
      optimizer.preferProjections(
        lpf.let('meh, lpf.read(file("zips")),
          lpf.invoke2(DeleteKey,
            makeObj(
              "city" -> lpf.invoke2(MapProject, lpf.free('meh), lpf.constant(Data.Str("city"))),
              "pop" -> lpf.invoke2(MapProject, lpf.free('meh), lpf.constant(Data.Str("pop")))),
            lpf.constant(Data.Str("pop"))))) must
        beTreeEqual(
          lpf.let('meh, lpf.read(file("zips")),
            makeObj(
              "city" ->
                lpf.invoke2(MapProject,
                  makeObj(
                    "city" ->
                      lpf.invoke2(MapProject, lpf.free('meh), lpf.constant(Data.Str("city"))),
                    "pop" ->
                      lpf.invoke2(MapProject, lpf.free('meh), lpf.constant(Data.Str("pop")))),
                  lpf.constant(Data.Str("city"))))))
    }

    // The known shape propagates through a let-bound Free variable.
    "convert a delete when the shape is hidden by a Free" in {
      optimizer.preferProjections(
        lpf.let('meh, lpf.read(file("zips")),
          lpf.let('meh2,
            makeObj(
              "city" -> lpf.invoke2(MapProject, lpf.free('meh), lpf.constant(Data.Str("city"))),
              "pop" -> lpf.invoke2(MapProject, lpf.free('meh), lpf.constant(Data.Str("pop")))),
            makeObj(
              "orig" -> lpf.free('meh2),
              "cleaned" ->
                lpf.invoke2(DeleteKey, lpf.free('meh2), lpf.constant(Data.Str("pop"))))))) must
        beTreeEqual(
          lpf.let('meh, lpf.read(file("zips")),
            lpf.let('meh2,
              makeObj(
                "city" -> lpf.invoke2(MapProject, lpf.free('meh), lpf.constant(Data.Str("city"))),
                "pop" -> lpf.invoke2(MapProject, lpf.free('meh), lpf.constant(Data.Str("pop")))),
              makeObj(
                "orig" -> lpf.free('meh2),
                "cleaned" ->
                  makeObj(
                    "city" ->
                      lpf.invoke2(MapProject, lpf.free('meh2), lpf.constant(Data.Str("city"))))))))
    }
  }

  "Component" should {
    // Generators only ever produce NeitherCond values.
    implicit def componentArbitrary[A: Arbitrary]: Arbitrary[Component[Fix[LP], A]] =
      Arbitrary(Arbitrary.arbitrary[A]) ∘ (NeitherCond(_))

    implicit def ArbComponentInt: Arbitrary[Component[Fix[LP], Int]] =
      componentArbitrary[Int]

    implicit def ArbComponentInt2Int: Arbitrary[Component[Fix[LP], Int => Int]] =
      componentArbitrary[Int => Int]

    // FIXME: this test isn't really testing much at this point because
    // we cannot test the equality of two functions
    implicit def EqualComponent: Equal[Component[Fix[LP], Int]] = new Equal[Component[Fix[LP], Int]] {
      def equal(a1: Component[Fix[LP], Int], a2: Component[Fix[LP], Int]): Boolean = true
    }

    "obey applicative laws" in {
      applicative.laws[Component[Fix[LP], ?]]
    }
  }
}
| quasar-analytics/quasar | frontend/src/test/scala/quasar/frontend/logicalplan/OptimizerSpec.scala | Scala | apache-2.0 | 7,825 |
package org.jetbrains.plugins.scala
package lang
package completion
import com.intellij.codeInsight.completion._
import com.intellij.codeInsight.lookup._
import com.intellij.openapi.project.Project
import com.intellij.psi._
import com.intellij.psi.search.searches.ClassInheritorsSearch
import com.intellij.psi.search.{GlobalSearchScope, LocalSearchScope}
import com.intellij.psi.util.PsiTreeUtil.getContextOfType
import com.intellij.util.ProcessingContext
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.completion.handlers.ScalaConstructorInsertHandler
import org.jetbrains.plugins.scala.lang.completion.lookups.{PresentationExt, ScalaLookupItem}
import org.jetbrains.plugins.scala.lang.formatting.settings.ScalaCodeStyleSettings
import org.jetbrains.plugins.scala.lang.psi.api.base.ScReference
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScNewTemplateDefinition
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScObject
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.psi.types.api._
import org.jetbrains.plugins.scala.lang.psi.types.api.designator.ScDesignatorType
import org.jetbrains.plugins.scala.lang.psi.types.recursiveUpdate.ScSubstitutor
import scala.collection.mutable
import scala.jdk.CollectionConverters._
// Contributes SMART-completion items right after the `new` keyword: for each expected
// type of the `new` expression it suggests classes (via collectProps) that conform to it.
final class ScalaAfterNewCompletionContributor extends ScalaCompletionContributor {

  import ScalaAfterNewCompletionContributor._

  extend(
    CompletionType.SMART,
    afterNewKeywordPattern,
    new CompletionProvider[CompletionParameters] {

      override def addCompletions(parameters: CompletionParameters,
                                  context: ProcessingContext,
                                  result: CompletionResultSet): Unit = {
        val place = positionFromParameters(parameters)
        // Expected types of the enclosing `new ...` template definition.
        val (definition, types) = expectedTypes(place)

        // Candidate (type, class) props for every expected type, restricted to
        // classes accessible from the completion position.
        val propses = for {
          expectedType <- types
          prop <- collectProps(expectedType) {
            isAccessible(_)(place)
          }(definition.getProject)
        } yield prop

        if (propses.nonEmpty) {
          // Accounts for import aliases visible at the reference (e.g. `import a.{B => C}`).
          val renamesMap = createRenamesMap(place)
          for {
            prop <- propses
            lookupItem = prop.createLookupElement(renamesMap)
          } result.addElement(lookupItem)
        }
      }
    }
  )
}
object ScalaAfterNewCompletionContributor {

  // When `place` is right after the `new` keyword, returns a function that, given a
  // class, computes the LookupElementProps for it against the expected types at that
  // position; returns None when the pattern does not accept the position.
  def expectedTypeAfterNew(place: PsiElement, context: ProcessingContext): Option[PropsConstructor] =
    // todo: probably we need to remove all abstracts here according to variance
    if (afterNewKeywordPattern.accepts(place, context))
      Some {
        val (_, types) = expectedTypes(place)

        (clazz: PsiClass) => {
          val (actualType, hasSubstitutionProblem) = appropriateType(clazz, types)
          LookupElementProps(actualType, hasSubstitutionProblem, clazz)
        }
      }
    else
      None

  // Expected types of the enclosing `new` template definition, with abstract types
  // widened to their upper bounds.
  private def expectedTypes(place: PsiElement): (ScNewTemplateDefinition, Seq[ScType]) = {
    val definition = getContextOfType(place, classOf[ScNewTemplateDefinition])

    (definition, definition.expectedTypes().map {
      case ScAbstractType(_, _, upper) => upper
      case tp => tp
    })
  }

  // Original element name -> (element, alias it was renamed to at this reference).
  private[completion] type RenamesMap = Map[String, (PsiNamedElement, String)]

  private[completion] type PropsConstructor = PsiClass => LookupElementProps

  // Collects import renames among the variants reachable from the enclosing reference.
  private[completion] def createRenamesMap(element: PsiElement): RenamesMap =
    getContextOfType(element, false, classOf[ScReference]) match {
      case null =>
        Map.empty
      case ref =>
        ref.getVariants.flatMap {
          case ScalaLookupItem(item, element) =>
            item.isRenamed.map { name =>
              element.name -> (element -> name)
            }
          case _ => None
        }.toMap
    }

  // Best type for instantiating `clazz` against the expected types. The Boolean is
  // true when some type parameter could not be fully substituted.
  // NOTE(review): only a lone type parameter is matched against the expected types
  // (maybeParameter) — multi-parameter classes fall through to the default branch.
  private[this] def appropriateType(clazz: PsiClass, types: Seq[ScType]): (ScType, Boolean) = {
    val (designatorType, parameters) = classComponents(clazz)
    val maybeParameter = parameters match {
      case Seq(head) => Some(head)
      case _ => None
    }

    findAppropriateType(types, designatorType, maybeParameter).getOrElse {
      (fromParameters(designatorType, maybeParameter), parameters.nonEmpty)
    }
  }

  // Everything needed to render and insert one completion item for `class`
  // instantiated at `type` (with `substitutor` applied to its type arguments).
  private[completion] final case class LookupElementProps(`type`: ScType,
                                                          hasSubstitutionProblem: Boolean,
                                                          `class`: PsiClass,
                                                          substitutor: ScSubstitutor = ScSubstitutor.empty) {

    def createLookupElement(renamesMap: RenamesMap): LookupElement = {
      // Alias for this class's name, when the surrounding imports renamed it.
      val isRenamed = for {
        (`class`, name) <- renamesMap.get(`class`.name)
      } yield name

      createLookupElement(isRenamed)
    }

    def createLookupElement(isRenamed: Option[String]): LookupElement = {
      val name = `class`.name
      val renamedPrefix = isRenamed.fold("")(_ + " <= ")

      // Interfaces/abstract classes get a "{...}" tail, hinting an anonymous subclass body.
      val isInterface = `class`.isInterface || `class`.hasAbstractModifier
      val tailText = if (isInterface) " {...}" else ""

      // Renders the type-argument list given a per-type printer.
      // NOTE(review): `types.map(_)` relies on placeholder expansion over the whole
      // expression — confirm it produces "[T1, T2, ...]" for each argument as intended.
      val typeParametersEvaluator: (ScType => String) => String = `type` match {
        case ParameterizedType(_, types) => types.map(_).commaSeparated(Model.SquareBrackets)
        case _ => Function.const("")
      }

      val renderer = new LookupElementRenderer[LookupElement] {
        override def renderElement(ignore: LookupElement,
                                   presentation: LookupElementPresentation): Unit = {
          presentation.appendGrayedTailText(tailText)
          presentation.appendGrayedTailText(" ")
          presentation.appendGrayedTailText(`class`.getPresentation.getLocationString)

          presentation.setIcon(`class`)
          presentation.setStrikeout(`class`)

          val parametersText = typeParametersEvaluator(substitutor.andThen(_.presentableText(`class`)))
          presentation.setItemText(renamedPrefix + name + parametersText)
        }
      }

      val insertHandler = new ScalaConstructorInsertHandler(
        typeParametersEvaluator,
        hasSubstitutionProblem,
        isInterface,
        isRenamed.isDefined,
        ScalaCodeStyleSettings.getInstance(`class`.getProject).hasImportWithPrefix(`class`.qualifiedName)
      )

      // Auto-popup policy: always in tests; never for interfaces (a body must be typed).
      val policy = {
        import AutoCompletionPolicy._
        if (isUnitTestMode) ALWAYS_AUTOCOMPLETE
        else if (isInterface) NEVER_AUTOCOMPLETE
        else SETTINGS_DEPENDENT
      }

      LookupElementBuilder
        .createWithSmartPointer(isRenamed.getOrElse(name), `class`)
        .withRenderer(renderer)
        .withInsertHandler(insertHandler)
        .withAutoCompletionPolicy(policy)
    }
  }

  // The expected type itself plus all conforming inheritors of its class, deduplicated
  // by qualified name and filtered by `isAccessible`.
  private def collectProps(`type`: ScType)
                          (isAccessible: PsiClass => Boolean)
                          (implicit project: Project): Seq[LookupElementProps] = {
    val inheritors = `type`.extractClass.toSeq
      .flatMap(findInheritors)
      .filter { clazz =>
        clazz.name match {
          case null | "" => false
          case _ => true
        }
      }

    // Keep only inheritors whose (parameterized) type actually conforms to `type`.
    val substitutedInheritors = for {
      clazz <- inheritors
      (designatorType, parameters) = classComponents(clazz)
      (actualType, hasSubstitutionProblem) <- findAppropriateType(Seq(`type`), designatorType, parameters)
    } yield (actualType, hasSubstitutionProblem)

    val addedClasses = mutable.HashSet.empty[String]
    for {
      (actualType, hasSubstitutionProblem) <- (`type`, false) +: substitutedInheritors
      (extractedClass, extractedSubstitutor) <- extractValidClass(actualType)
      if addedClasses.add(extractedClass.qualifiedName) && isAccessible(extractedClass)
    } yield LookupElementProps(actualType, hasSubstitutionProblem, extractedClass, extractedSubstitutor)
  }

  private[this] def findInheritors(clazz: PsiClass)
                                  (implicit project: Project) = {
    // this change is important for Scala Worksheet/Script classes. Will not find inheritors, due to file copy.
    val searchScope = clazz.getUseScope match {
      case _: LocalSearchScope => GlobalSearchScope.allScope(project)
      case useScope => useScope
    }

    ClassInheritorsSearch.search(clazz, searchScope, true).asScala
  }

  // Rejects objects, primitive/special scala types, and non-static inner classes.
  private[this] def extractValidClass(`type`: ScType): Option[(PsiClass, ScSubstitutor)] = {
    val names = Set("scala.Boolean",
      "scala.Byte", "scala.Short", "scala.Int", "scala.Long",
      "scala.Float", "scala.Double",
      "scala.AnyVal", "scala.Char", "scala.Unit", "scala.Any")

    // filter base types (it's important for scala 2.9)
    // todo: filter inner classes smarter (how? don't forget deep inner classes)
    def isInvalid(clazz: PsiClass) =
      clazz.isInstanceOf[ScObject] || names.contains(clazz.qualifiedName) || (clazz.containingClass match {
        case null => false
        case _: ScObject => clazz.hasModifierPropertyScala("static")
        case _ => true
      })

    `type`.extractClassType.filterNot {
      case (clazz, _) => isInvalid(clazz)
    }
  }

  private[this] def classComponents(clazz: PsiClass): (ScDesignatorType, Seq[PsiTypeParameter]) =
    (ScDesignatorType(clazz), clazz.getTypeParameters.toSeq)

  // First expected type the (undefined-parameterized) designator conforms to, with the
  // conformance substitutor applied; the Boolean is true when some type argument is
  // still undefined after substitution.
  private[this] def findAppropriateType(types: Seq[ScType],
                                        designatorType: ScDesignatorType,
                                        parameters: Iterable[PsiTypeParameter]): Option[(ScType, Boolean)] = {
    if (types.isEmpty) return None

    val undefinedTypes = parameters.map(UndefinedType(_))
    val predefinedType = fromParametersTypes(designatorType, undefinedTypes)

    for (t <- types) {
      predefinedType.conformanceSubstitutor(t) match {
        case Some(substitutor) =>
          val valueType = fromParameters(designatorType, parameters)
          return Some(substitutor(valueType), undefinedTypes.map(substitutor).exists(_.isInstanceOf[UndefinedType]))
        case _ =>
      }
    }

    None
  }

  private[this] def fromParameters(designatorType: ScDesignatorType, parameters: Iterable[PsiTypeParameter]): ValueType =
    fromParametersTypes(designatorType, parameters.map(TypeParameterType(_)))

  // Parameterizes the designator, or returns it unchanged when there are no type args.
  private[this] def fromParametersTypes(designatorType: ScDesignatorType, types: Iterable[ScType]): ValueType =
    if (types.isEmpty) designatorType else ScParameterizedType(designatorType, types.toSeq)
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/completion/ScalaAfterNewCompletionContributor.scala | Scala | apache-2.0 | 10,407 |
package org.jetbrains.plugins.scala
package lang
package psi
package impl
package base
package patterns
import com.intellij.lang.ASTNode
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns._
/**
* @author Alexander Podkhalyuzin
* Date: 28.02.2008
*/
// PSI implementation of a pattern list (named "ArgumentPatterns" per toString,
// e.g. the patterns inside an extractor's parentheses).
class ScPatternsImpl(node: ASTNode) extends ScalaPsiElementImpl (node) with ScPatterns{
  override def toString: String = "ArgumentPatterns"

  // The ScPattern children found under this node.
  override def patterns: Seq[ScPattern] = findChildrenByClass(classOf[ScPattern]).toSeq
}
package scala.build
/** Code generation of the AnyVal types and their companions. */
trait GenerateAnyValReps {
  self: GenerateAnyVals =>

  // One numeric AnyVal (Byte/Short/Char/Int/Long/Float/Double). Knows how to render
  // the source text of its operators, coercions, and companion-object members.
  sealed abstract class AnyValNum(name: String, repr: Option[String], javaEquiv: String)
      extends AnyValRep(name,repr,javaEquiv) {

    // An operator symbol paired with the scaladoc comment emitted above its declaration.
    case class Op(op : String, doc : String)

    // One implicit widening conversion from this type to each of `tos`.
    private def companionCoercions(tos: AnyValRep*) = {
      tos.toList map (to =>
        s"implicit def @javaequiv@2${to.javaEquiv}(x: @name@): ${to.name} = x.to${to.name}"
      )
    }

    def coercionComment =
      """/** Language mandated coercions from @name@ to "wider" types. */
import scala.language.implicitConversions"""

    // The language-mandated widening conversions for this type (none for Double).
    def implicitCoercions: List[String] = {
      val coercions = this match {
        case B => companionCoercions(S, I, L, F, D)
        case S | C => companionCoercions(I, L, F, D)
        case I => companionCoercions(L, F, D)
        case L => companionCoercions(F, D)
        case F => companionCoercions(D)
        case _ => Nil
      }
      if (coercions.isEmpty) Nil
      else coercionComment.lines.toList ++ coercions
    }

    def isCardinal: Boolean = isIntegerType(this)

    // Unary ops: + and - for all numeric types, bitwise ~ only for integer types.
    def unaryOps = {
      val ops = List(
        Op("+", "/** Returns this value, unmodified. */"),
        Op("-", "/** Returns the negation of this value. */"))

      if(isCardinal)
        Op("~", "/**\\n" +
          " * Returns the bitwise negation of this value.\\n" +
          " * @example {{{\\n" +
          " * ~5 == -6\\n" +
          " * // in binary: ~00000101 ==\\n" +
          " * // 11111010\\n" +
          " * }}}\\n" +
          " */") :: ops
      else ops
    }

    // |, & and ^ — emitted for integer (cardinal) types only.
    def bitwiseOps =
      if (isCardinal)
        List(
          Op("|", "/**\\n" +
            " * Returns the bitwise OR of this value and `x`.\\n" +
            " * @example {{{\\n" +
            " * (0xf0 | 0xaa) == 0xfa\\n" +
            " * // in binary: 11110000\\n" +
            " * // | 10101010\\n" +
            " * // --------\\n" +
            " * // 11111010\\n" +
            " * }}}\\n" +
            " */"),
          Op("&", "/**\\n" +
            " * Returns the bitwise AND of this value and `x`.\\n" +
            " * @example {{{\\n" +
            " * (0xf0 & 0xaa) == 0xa0\\n" +
            " * // in binary: 11110000\\n" +
            " * // & 10101010\\n" +
            " * // --------\\n" +
            " * // 10100000\\n" +
            " * }}}\\n" +
            " */"),
          Op("^", "/**\\n" +
            " * Returns the bitwise XOR of this value and `x`.\\n" +
            " * @example {{{\\n" +
            " * (0xf0 ^ 0xaa) == 0x5a\\n" +
            " * // in binary: 11110000\\n" +
            " * // ^ 10101010\\n" +
            " * // --------\\n" +
            " * // 01011010\\n" +
            " * }}}\\n" +
            " */"))
      else Nil

    // <<, >>> and >> — emitted for integer (cardinal) types only.
    def shiftOps =
      if (isCardinal)
        List(
          Op("<<", "/**\\n" +
            " * Returns this value bit-shifted left by the specified number of bits,\\n" +
            " * filling in the new right bits with zeroes.\\n" +
            " * @example {{{ 6 << 3 == 48 // in binary: 0110 << 3 == 0110000 }}}\\n" +
            " */"),
          Op(">>>", "/**\\n" +
            " * Returns this value bit-shifted right by the specified number of bits,\\n" +
            " * filling the new left bits with zeroes.\\n" +
            " * @example {{{ 21 >>> 3 == 2 // in binary: 010101 >>> 3 == 010 }}}\\n" +
            " * @example {{{\\n" +
            " * -21 >>> 3 == 536870909\\n" +
            " * // in binary: 11111111 11111111 11111111 11101011 >>> 3 ==\\n" +
            " * // 00011111 11111111 11111111 11111101\\n" +
            " * }}}\\n" +
            " */"),
          Op(">>", "/**\\n" +
            " * Returns this value bit-shifted right by the specified number of bits,\\n" +
            " * filling in the left bits with the same value as the left-most bit of this.\\n" +
            " * The effect of this is to retain the sign of the value.\\n" +
            " * @example {{{\\n" +
            " * -21 >> 3 == -3\\n" +
            " * // in binary: 11111111 11111111 11111111 11101011 >> 3 ==\\n" +
            " * // 11111111 11111111 11111111 11111101\\n" +
            " * }}}\\n" +
            " */"))
      else Nil

    def comparisonOps = List(
      Op("==", "/** Returns `true` if this value is equal to x, `false` otherwise. */"),
      Op("!=", "/** Returns `true` if this value is not equal to x, `false` otherwise. */"),
      Op("<", "/** Returns `true` if this value is less than x, `false` otherwise. */"),
      Op("<=", "/** Returns `true` if this value is less than or equal to x, `false` otherwise. */"),
      Op(">", "/** Returns `true` if this value is greater than x, `false` otherwise. */"),
      Op(">=", "/** Returns `true` if this value is greater than or equal to x, `false` otherwise. */"))

    def otherOps = List(
      Op("+", "/** Returns the sum of this value and `x`. */"),
      Op("-", "/** Returns the difference of this value and `x`. */"),
      Op("*", "/** Returns the product of this value and `x`. */"),
      Op("/", "/** Returns the quotient of this value and `x`. */"),
      Op("%", "/** Returns the remainder of the division of this value by `x`. */"))

    // Given two numeric value types S and T , the operation type of S and T is defined as follows:
    // If both S and T are subrange types then the operation type of S and T is Int.
    // Otherwise the operation type of S and T is the larger of the two types wrt ranking.
    // Given two numeric values v and w the operation type of v and w is the operation type
    // of their run-time types.
    def opType(that: AnyValNum): AnyValNum = {
      val rank = IndexedSeq(I, L, F, D)
      (rank indexOf this, rank indexOf that) match {
        case (-1, -1) => I
        case (r1, r2) => rank apply (r1 max r2)
      }
    }

    def mkCoercions = numeric map (x => "def to%s: %s".format(x, x))
    def mkUnaryOps = unaryOps map (x => "%s\\n def unary_%s : %s".format(x.doc, x.op, this opType I))
    def mkStringOps = List("def +(x: String): String")
    def mkShiftOps = (
      for (op <- shiftOps ; arg <- List(I, L)) yield
        "%s\\n def %s(x: %s): %s".format(op.doc, op.op, arg, this opType I)
    )

    // All member groups for the generated class body; each non-empty group is
    // terminated with an empty string (rendered as a blank line).
    def clumps: List[List[String]] = {
      val xs1 = List(mkCoercions, mkUnaryOps, mkStringOps, mkShiftOps) map (xs => if (xs.isEmpty) xs else xs :+ "")
      val xs2 = List(
        mkBinOpsGroup(comparisonOps, numeric, _ => Z),
        mkBinOpsGroup(bitwiseOps, cardinal, this opType _),
        mkBinOpsGroup(otherOps, numeric, this opType _)
      )
      xs1 ++ xs2
    }

    def classLines = (clumps :+ commonClassLines).foldLeft(List[String]()) {
      case (res, Nil) => res
      case (res, lines) =>
        val xs = lines map {
          case "" => ""
          case s => interpolate(s)
        }
        res ++ xs
    }

    def objectLines = {
      val comp = if (isCardinal) cardinalCompanion else floatingCompanion
      interpolate(comp + allCompanions + "\\n" + nonUnitCompanions).trim.lines.toList ++ (implicitCoercions map interpolate)
    }

    /** Makes a set of binary operations based on the given set of ops, args, and resultFn.
     *
     * @param ops list of function names e.g. List(">>", "%")
     * @param args list of types which should appear as arguments
     * @param resultFn function which calculates return type based on arg type
     * @return list of function definitions
     */
    def mkBinOpsGroup(ops: List[Op], args: List[AnyValNum], resultFn: AnyValNum => AnyValRep): List[String] = (
      ops flatMap (op =>
        args.map(arg =>
          "%s\\n def %s(x: %s): %s".format(op.doc, op.op, arg, resultFn(arg))) :+ ""
      )
    ).toList
  }

  // Common rendering machinery for every AnyVal, numeric or not: @placeholder@
  // interpolation, boxing documentation, and assembly of the final source text.
  sealed abstract class AnyValRep(val name: String, val repr: Option[String], val javaEquiv: String) {
    def classLines: List[String]
    def objectLines: List[String]
    def commonClassLines = List(
      "// Provide a more specific return type for Scaladoc",
      "override def getClass(): Class[@name@] = ???"
    )

    def lcname = name.toLowerCase
    // Java box class simple name (Char/Int differ from their Scala names).
    def boxedSimpleName = this match {
      case C => "Character"
      case I => "Integer"
      case _ => name
    }
    def boxedName = this match {
      case U => "scala.runtime.BoxedUnit"
      case _ => "java.lang." + boxedSimpleName
    }
    def zeroRep = this match {
      case L => "0L"
      case F => "0.0f"
      case D => "0.0d"
      case _ => "0"
    }

    def representation = repr.map(", a " + _).getOrElse("")

    def indent(s: String) = if (s == "") "" else " " + s
    def indentN(s: String) = s.lines map indent mkString "\\n"

    // Substitutions for the boxing-related placeholders; U overrides these.
    def boxUnboxInterpolations = Map(
      "@boxRunTimeDoc@" -> """
* Runtime implementation determined by `scala.runtime.BoxesRunTime.boxTo%s`. See [[https://github.com/scala/scala src/library/scala/runtime/BoxesRunTime.java]].
*""".format(boxedSimpleName),
      "@unboxRunTimeDoc@" -> """
* Runtime implementation determined by `scala.runtime.BoxesRunTime.unboxTo%s`. See [[https://github.com/scala/scala src/library/scala/runtime/BoxesRunTime.java]].
*""".format(name),
      "@unboxDoc@" -> "the %s resulting from calling %sValue() on `x`".format(name, lcname),
      "@boxImpl@" -> "???",
      "@unboxImpl@" -> "???"
    )
    def interpolations = Map(
      "@name@" -> name,
      "@representation@" -> representation,
      "@javaequiv@" -> javaEquiv,
      "@boxed@" -> boxedName,
      "@lcname@" -> lcname,
      "@zero@" -> zeroRep
    ) ++ boxUnboxInterpolations

    // Applies every @placeholder@ substitution to s.
    def interpolate(s: String): String = interpolations.foldLeft(s) {
      case (str, (key, value)) => str.replaceAll(key, value)
    }
    def classDoc = interpolate(classDocTemplate)
    def objectDoc = ""
    def mkImports = ""

    def mkClass = assemble("final abstract class " + name + " private extends AnyVal", classLines)
    def mkObject = assemble("object " + name + " extends AnyValCompanion", objectLines)
    // Full source text of the generated file for this type.
    def make() = List[String](
      headerTemplate,
      mkImports,
      classDoc,
      mkClass,
      objectDoc,
      mkObject
    ) mkString ""

    // Joins a declaration with its (indented) body lines.
    def assemble(decl: String, lines: List[String]): String = {
      val body = if (lines.isEmpty) " { }\\n\\n" else lines map indent mkString (" {\\n", "\\n", "\\n}\\n")
      decl + body + "\\n"
    }
    override def toString = name
  }
}
// Raw source-text templates shared by every generated AnyVal file (license header,
// class scaladoc, and companion-object bodies). The @placeholders@ are substituted
// by AnyValRep.interpolate. Kept verbatim: the string contents ARE the output.
trait GenerateAnyValTemplates {
def headerTemplate = """/* __ *\\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2002-2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\\___/_/ |_/____/_/ | | **
** |/ **
\\* */
// DO NOT EDIT, CHANGES WILL BE LOST
// This auto-generated code can be modified in "project/GenerateAnyVals.scala".
// Afterwards, running "sbt generateSources" regenerates this source file.
package scala
def classDocTemplate = ("""
/** `@name@`@representation@ (equivalent to Java's `@javaequiv@` primitive type) is a
* subtype of [[scala.AnyVal]]. Instances of `@name@` are not
* represented by an object in the underlying runtime system.
*
* There is an implicit conversion from [[scala.@name@]] => [[scala.runtime.Rich@name@]]
* which provides useful non-primitive operations.
*/
""".trim + "\\n")
def allCompanions = """
/** Transform a value type into a boxed reference type.
*@boxRunTimeDoc@
* @param x the @name@ to be boxed
* @return a @boxed@ offering `x` as its underlying value.
*/
def box(x: @name@): @boxed@ = @boxImpl@
/** Transform a boxed type into a value type. Note that this
* method is not typesafe: it accepts any Object, but will throw
* an exception if the argument is not a @boxed@.
*@unboxRunTimeDoc@
* @param x the @boxed@ to be unboxed.
* @throws ClassCastException if the argument is not a @boxed@
* @return @unboxDoc@
*/
def unbox(x: java.lang.Object): @name@ = @unboxImpl@
/** The String representation of the scala.@name@ companion object. */
override def toString = "object scala.@name@"
"""
def nonUnitCompanions = "" // todo
def cardinalCompanion = """
/** The smallest value representable as a @name@. */
final val MinValue = @boxed@.MIN_VALUE
/** The largest value representable as a @name@. */
final val MaxValue = @boxed@.MAX_VALUE
"""
def floatingCompanion = """
/** The smallest positive value greater than @zero@ which is
* representable as a @name@.
*/
final val MinPositiveValue = @boxed@.MIN_VALUE
final val NaN = @boxed@.NaN
final val PositiveInfinity = @boxed@.POSITIVE_INFINITY
final val NegativeInfinity = @boxed@.NEGATIVE_INFINITY
/** The negative number with the greatest (finite) absolute value which is representable
* by a @name@. Note that it differs from [[java.lang.@name@.MIN_VALUE]], which
* is the smallest positive value representable by a @name@. In Scala that number
* is called @name@.MinPositiveValue.
*/
final val MinValue = -@boxed@.MAX_VALUE
/** The largest finite positive number representable as a @name@. */
final val MaxValue = @boxed@.MAX_VALUE
"""
}
// Concrete descriptors for each generated AnyVal type, plus the type groupings
// (numeric, cardinal, ...) that AnyValNum consults when deciding which ops to emit.
class GenerateAnyVals extends GenerateAnyValReps with GenerateAnyValTemplates {
  object B extends AnyValNum("Byte", Some("8-bit signed integer"), "byte")
  object S extends AnyValNum("Short", Some("16-bit signed integer"), "short")
  object C extends AnyValNum("Char", Some("16-bit unsigned integer"), "char")
  object I extends AnyValNum("Int", Some("32-bit signed integer"), "int")
  object L extends AnyValNum("Long", Some("64-bit signed integer"), "long")
  object F extends AnyValNum("Float", Some("32-bit IEEE-754 floating point number"), "float")
  object D extends AnyValNum("Double", Some("64-bit IEEE-754 floating point number"), "double")

  // Boolean: hand-written member list (logical operators only, no numeric ops).
  object Z extends AnyValRep("Boolean", None, "boolean") {
    def classLines = """
/** Negates a Boolean expression.
*
* - `!a` results in `false` if and only if `a` evaluates to `true` and
* - `!a` results in `true` if and only if `a` evaluates to `false`.
*
* @return the negated expression
*/
def unary_! : Boolean
/** Compares two Boolean expressions and returns `true` if they evaluate to the same value.
*
* `a == b` returns `true` if and only if
* - `a` and `b` are `true` or
* - `a` and `b` are `false`.
*/
def ==(x: Boolean): Boolean
/**
* Compares two Boolean expressions and returns `true` if they evaluate to a different value.
*
* `a != b` returns `true` if and only if
* - `a` is `true` and `b` is `false` or
* - `a` is `false` and `b` is `true`.
*/
def !=(x: Boolean): Boolean
/** Compares two Boolean expressions and returns `true` if one or both of them evaluate to true.
*
* `a || b` returns `true` if and only if
* - `a` is `true` or
* - `b` is `true` or
* - `a` and `b` are `true`.
*
* @note This method uses 'short-circuit' evaluation and
* behaves as if it was declared as `def ||(x: => Boolean): Boolean`.
* If `a` evaluates to `true`, `true` is returned without evaluating `b`.
*/
def ||(x: Boolean): Boolean
/** Compares two Boolean expressions and returns `true` if both of them evaluate to true.
*
* `a && b` returns `true` if and only if
* - `a` and `b` are `true`.
*
* @note This method uses 'short-circuit' evaluation and
* behaves as if it was declared as `def &&(x: => Boolean): Boolean`.
* If `a` evaluates to `false`, `false` is returned without evaluating `b`.
*/
def &&(x: Boolean): Boolean
// Compiler won't build with these seemingly more accurate signatures
// def ||(x: => Boolean): Boolean
// def &&(x: => Boolean): Boolean
/** Compares two Boolean expressions and returns `true` if one or both of them evaluate to true.
*
* `a | b` returns `true` if and only if
* - `a` is `true` or
* - `b` is `true` or
* - `a` and `b` are `true`.
*
* @note This method evaluates both `a` and `b`, even if the result is already determined after evaluating `a`.
*/
def |(x: Boolean): Boolean
/** Compares two Boolean expressions and returns `true` if both of them evaluate to true.
*
* `a & b` returns `true` if and only if
* - `a` and `b` are `true`.
*
* @note This method evaluates both `a` and `b`, even if the result is already determined after evaluating `a`.
*/
def &(x: Boolean): Boolean
/** Compares two Boolean expressions and returns `true` if they evaluate to a different value.
*
* `a ^ b` returns `true` if and only if
* - `a` is `true` and `b` is `false` or
* - `a` is `false` and `b` is `true`.
*/
def ^(x: Boolean): Boolean
// Provide a more specific return type for Scaladoc
override def getClass(): Class[Boolean] = ???
""".trim.lines.toList
    def objectLines = interpolate(allCompanions + "\\n" + nonUnitCompanions).lines.toList
  }

  // Unit: only getClass in the class body; box/unbox goes through scala.runtime.BoxedUnit.
  object U extends AnyValRep("Unit", None, "void") {
    override def classDoc = """
/** `Unit` is a subtype of [[scala.AnyVal]]. There is only one value of type
* `Unit`, `()`, and it is not represented by any object in the underlying
* runtime system. A method with return type `Unit` is analogous to a Java
* method which is declared `void`.
*/
"""
    def classLines = List(
      "// Provide a more specific return type for Scaladoc",
      "override def getClass(): Class[Unit] = ???"
    )
    def objectLines = interpolate(allCompanions).lines.toList

    override def boxUnboxInterpolations = Map(
      "@boxRunTimeDoc@" -> "",
      "@unboxRunTimeDoc@" -> "",
      "@unboxDoc@" -> "the Unit value ()",
      "@boxImpl@" -> "scala.runtime.BoxedUnit.UNIT",
      "@unboxImpl@" -> "x.asInstanceOf[scala.runtime.BoxedUnit]"
    )
  }

  // Type groupings referenced by AnyValNum when deciding which operators to emit.
  def isSubrangeType = Set(B, S, C)
  def isIntegerType = Set(B, S, C, I, L)
  def isFloatingType = Set(F, D)
  def isWideType = Set(L, D)

  def cardinal = numeric filter isIntegerType
  def numeric = List(B, S, C, I, L, F, D)
  def values = List(U, Z) ++ numeric

  // Renders every type as a (type name, full generated source text) pair.
  def make() = values map (x => (x.name, x.make()))
}
/** Entry point used by the sbt build to (re)generate the AnyVal sources. */
object GenerateAnyVals {
  /**
   * Generates one `<Name>.scala` file per AnyVal type into `outDir`,
   * overwriting any existing file (UTF-8, append = false).
   */
  def run(outDir: java.io.File): Unit = {
    val av = new GenerateAnyVals
    av.make() foreach { case (name, code) =>
      val file = new java.io.File(outDir, name + ".scala")
      sbt.IO.write(file, code, java.nio.charset.Charset.forName("UTF-8"), false)
    }
  }
}
| shimib/scala | project/GenerateAnyVals.scala | Scala | bsd-3-clause | 19,496 |
/*
* The MIT License
*
* Copyright (c) 2018 Fulcrum Genomics
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*/
package com.fulcrumgenomics.commons.async
import com.fulcrumgenomics.commons.io.Writer
import scala.collection.mutable.ListBuffer
/** Writer useful for testing. It stores when close() is called, and all items written. */
/** Test double for `Writer[String]`: records every written item in order,
  * remembers whether close() was called, and can be made to stall writes
  * while `block` is true (so async machinery can be exercised).
  */
private[async] class StringWriter(var block: Boolean = false) extends Writer[String] {
  // Flipped to true by close(); never reset.
  var closed: Boolean = false
  // All items passed to write(), in arrival order.
  var items: ListBuffer[String] = new ListBuffer[String]()

  def write(item: String): Unit = {
    // Poll every 10 ms until the test flips `block` back to false.
    while (block) Thread.sleep(10)
    items.append(item)
  }

  def close(): Unit = {
    closed = true
  }
}
| fulcrumgenomics/commons | src/test/scala/com/fulcrumgenomics/commons/async/StringWriter.scala | Scala | mit | 1,703 |
// These are meant to be typed into the REPL. You can also run
// scala -Xnojline < repl-session.scala to run them all at once.

// Adding an element already in the set yields an equal set;
// adding a new element yields a new set that contains it.
Set(2, 0, 1) + 1
Set(2, 0, 1) + 4

// Iteration order of a plain (hash-based) Set is unspecified.
for (i <- Set(1, 2, 3, 4, 5, 6)) print(i + " ")

// A LinkedHashSet iterates in insertion order.
val weekdays = scala.collection.mutable.LinkedHashSet("Mo", "Tu", "We", "Th", "Fr")

// A SortedSet iterates in element order.
collection.immutable.SortedSet(1, 2, 3, 4, 5, 6)

// Membership and subset tests.
val digits = Set(1, 7, 2, 9)
digits contains 0 // false
Set(1, 2) subsetOf digits // true

// Set algebra: union, intersection (&), and difference (--).
val primes = Set(2, 3, 5, 7)
digits union primes
digits & primes
digits -- primes
| yeahnoob/scala-impatient-2e-code | src/ch13/sec05/repl-session.scala | Scala | gpl-3.0 | 524 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples
import scala.math.random
import org.apache.spark._
import org.apache.spark.SparkContext._
object LocalPi {
  /** Estimates Pi by Monte Carlo sampling: draws points uniformly in the
    * square [-1, 1] x [-1, 1] and counts the fraction landing inside the
    * unit circle, which approaches Pi/4.
    *
    * @param args optional; args(0), if present, is the number of samples
    *             (defaults to 100000, the original hard-coded count)
    */
  def main(args: Array[String]): Unit = {  // was deprecated procedure syntax
    // Generalized: sample count may be supplied as the first argument.
    val n = if (args.nonEmpty) args(0).toInt else 100000
    // Replaces the original `var count` accumulation loop with Range.count.
    val count = (1 to n).count { _ =>
      val x = random * 2 - 1
      val y = random * 2 - 1
      x * x + y * y < 1
    }
    println(s"Pi is roughly ${4 * count / n.toDouble}")
  }
}
| javachen/learning-spark | src/main/scala/org/apache/spark/examples/LocalPi.scala | Scala | apache-2.0 | 1,186 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.execution.atomic
/** Atomic references wrapping `Int` values.
*
* Note that the equality test in `compareAndSet` is value based,
* since `Int` is a primitive.
*/
final class AtomicInt private[atomic]
  (initialValue: Int) extends AtomicNumber[Int] {

  // Plain (unsynchronized) storage: this variant performs no real atomic
  // operations — compareAndSet is an ordinary read-compare-write.
  private[this] var ref = initialValue

  def get: Int = ref

  def set(update: Int): Unit = ref = update

  def getAndSet(update: Int): Int = {
    val previous = ref
    ref = update
    previous
  }

  def compareAndSet(expect: Int, update: Int): Boolean = {
    val matches = ref == expect
    if (matches) ref = update
    matches
  }

  // Core arithmetic primitives; every other numeric operation below
  // delegates to these three with an appropriately signed argument.
  def add(v: Int): Unit = ref += v

  def addAndGet(v: Int): Int = {
    ref += v
    ref
  }

  def getAndAdd(v: Int): Int = {
    val previous = ref
    ref += v
    previous
  }

  def subtract(v: Int): Unit = add(-v)

  def subtractAndGet(v: Int): Int = addAndGet(-v)

  def getAndSubtract(v: Int): Int = getAndAdd(-v)

  def increment(v: Int = 1): Unit = add(v)

  def incrementAndGet(v: Int = 1): Int = addAndGet(v)

  def getAndIncrement(v: Int = 1): Int = getAndAdd(v)

  def decrement(v: Int = 1): Unit = increment(-v)

  def decrementAndGet(v: Int = 1): Int = incrementAndGet(-v)

  def getAndDecrement(v: Int = 1): Int = getAndIncrement(-v)
}
/** @define createDesc Constructs an [[AtomicInt]] reference, allowing
* for fine-tuning of the created instance.
*
* A [[PaddingStrategy]] can be provided in order to counter
* the "false sharing" problem.
*
* Note that for ''Scala.js'' we aren't applying any padding,
* as it doesn't make much sense, since Javascript execution
* is single threaded, but this builder is provided for
* syntax compatibility anyway across the JVM and Javascript
* and we never know how Javascript engines will evolve.
*/
object AtomicInt {
  /** Builds an [[AtomicInt]] reference.
    *
    * @param initialValue is the initial value with which to initialize the atomic
    */
  def apply(initialValue: Int): AtomicInt =
    new AtomicInt(initialValue)

  /** $createDesc
    *
    * @param initialValue is the initial value with which to initialize the atomic
    * @param padding is the [[PaddingStrategy]] to apply
    */
  def withPadding(initialValue: Int, padding: PaddingStrategy): AtomicInt =
    new AtomicInt(initialValue)  // `padding` intentionally unused on this platform

  /** $createDesc
    *
    * Also this builder on top Java 8 also allows for turning off the
    * Java 8 intrinsics, thus forcing usage of CAS-loops for
    * `getAndSet` and for `getAndAdd`.
    *
    * @param initialValue is the initial value with which to initialize the atomic
    * @param padding is the [[PaddingStrategy]] to apply
    * @param allowPlatformIntrinsics is a boolean parameter that specifies whether
    *        the instance is allowed to use the Java 8 optimized operations
    *        for `getAndSet` and for `getAndAdd`
    */
  def create(initialValue: Int, padding: PaddingStrategy, allowPlatformIntrinsics: Boolean): AtomicInt =
    new AtomicInt(initialValue)  // both tuning params intentionally unused here

  /** $createDesc
    *
    * This builder guarantees to construct a safe atomic reference that
    * does not make use of `sun.misc.Unsafe`. On top of platforms that
    * don't support it, notably some versions of Android or on top of
    * the upcoming Java 9, this might be desirable.
    *
    * NOTE that explicit usage of this builder is not usually necessary
    * because [[create]] can auto-detect whether the underlying platform
    * supports `sun.misc.Unsafe` and if it does, then its usage is
    * recommended, because the "safe" atomic instances have overhead.
    *
    * @param initialValue is the initial value with which to initialize the atomic
    * @param padding is the [[PaddingStrategy]] to apply
    */
  def safe(initialValue: Int, padding: PaddingStrategy): AtomicInt =
    new AtomicInt(initialValue)  // `padding` intentionally unused on this platform
} | ddworak/monix | monix-execution/js/src/main/scala/monix/execution/atomic/AtomicInt.scala | Scala | apache-2.0 | 4,733 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and retrieves a sample of code snippets that match specific criteria, giving a basic overview of the dataset's contents without deeper analysis.