code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package rpgboss.player
import com.badlogic.gdx.Game
import java.io.File
import rpgboss.model._
import rpgboss.model.resource._
import com.badlogic.gdx.ApplicationListener
import com.badlogic.gdx.utils.Logger
import com.badlogic.gdx.graphics._
import com.badlogic.gdx.Gdx
import com.badlogic.gdx.graphics.g2d._
import rpgboss.lib._
import rpgboss.player.entity._
import com.badlogic.gdx.graphics.Texture.TextureFilter
import java.util.concurrent.Executors
import com.badlogic.gdx.assets.AssetManager
import com.badlogic.gdx.assets.loaders.FileHandleResolver
import rpgboss.model.resource.RpgAssetManager
import java.lang.Thread.UncaughtExceptionHandler
import scala.concurrent.Promise
import scala.concurrent.Await
import scala.concurrent.duration.Duration
import rpgboss.lib.GdxUtils
import com.badlogic.gdx.Screen
import rpgboss.save.SaveFile
import rpgboss.model.battle.Battle
import rpgboss.model.battle.RandomEnemyAI
/**
 * Mutable holder for a map location: a map name plus x/y coordinates.
 * Mirrors the immutable MapLoc and supports in-place updates.
 */
case class MutableMapLoc(
  var map: String = "",
  var x: Float = 0,
  var y: Float = 0) {

  // Auxiliary constructor: copy the fields of an immutable MapLoc.
  def this(other: MapLoc) = this(other.map, other.x, other.y)

  /** Overwrites this location with the values of an immutable MapLoc. */
  def set(other: MapLoc): Unit = {
    map = other.map
    x = other.x
    y = other.y
  }

  /** Overwrites this location with the values of another mutable location. */
  def set(other: MutableMapLoc): Unit = {
    map = other.map
    x = other.x
    y = other.y
  }
}
/**
 * Top-level libGDX Game for running an rpgboss project.
 *
 * Owns the three screens (start, map, battle), the script-visible
 * PersistentState and the shared asset manager, and coordinates
 * game-level flow: new game, save/load, battles and game over.
 *
 * Most methods must run on the bound (GDX render) thread; this is
 * enforced via ThreadChecked's assertOnBoundThread().
 */
class RpgGame(gamepath: File)
  extends Game
  with HasScriptConstants
  with ThreadChecked {

  // Project definition is read eagerly; .get fails fast on an invalid path.
  val project = Project.readFromDisk(gamepath).get

  val logger = new Logger("Game", Logger.INFO)
  val fps = new FPSLogger()

  // Screens are created in create(), once libGDX has initialized.
  var startScreen: StartScreen = null
  var mapScreen: MapScreen = null
  var battleScreen: BattleScreen = null

  // When true, sprite atlas generation is skipped (headless test mode).
  var renderingOffForTesting = false

  // Generate and pack sprites
  val spritesets = Map() ++ Spriteset.list(project).map(
    name => (name, Spriteset.readFromDisk(project, name)))

  var atlasSprites: TextureAtlas = null

  /*
   * SpriteBatch manages its own matrices. By default, it sets its modelview
   * matrix to the identity, and the projection matrix to an orthographic
   * projection with its lower left corner of the screen at (0, 0) and its
   * upper right corner at (Gdx.graphics.getWidth(), Gdx.graphics.getHeight())
   *
   * This makes the eye-coordinates the same as the screen-coordinates.
   *
   * If you'd like to specify your objects in some other space, simply
   * change the projection and modelview (transform) matrices.
   */

  // Script-visible game state; replaced wholesale when a save is loaded.
  var persistent: PersistentState = null

  val assets = new RpgAssetManager(project)

  // All screens in this game are RpgScreens, so narrow the return type.
  override def getScreen() = super.getScreen().asInstanceOf[RpgScreen]

  /** libGDX entry point: builds screens and shared state, then starts. */
  def create() = {
    rebindToCurrentThread()
    com.badlogic.gdx.utils.Timer.instance().start()

    if (!renderingOffForTesting)
      atlasSprites = GdxUtils.generateSpritesTextureAtlas(spritesets.values)

    persistent = new PersistentState()

    // TODO: Make configurable screen pixel dimensions
    startScreen = new StartScreen(this)
    battleScreen =
      new BattleScreen(Some(this), assets, atlasSprites, project,
        project.data.startup.screenW, project.data.startup.screenH,
        renderingOffForTesting)
    mapScreen = new MapScreen(this)

    beginGame()
  }

  /** Shows the start screen and runs the system start script. */
  def beginGame(): Unit = {
    assertOnBoundThread()
    setScreen(startScreen)

    // Standard setting for transitions
    persistent.setInt("useTransition",-1)
    // Standard setting for menu enabled/disabled
    persistent.setInt("menuEnabled",1)

    startScreen.scriptFactory.runFromFile(
      ResourceConstants.systemStartScript,
      ResourceConstants.systemStartCall)
  }

  /** Runs the user-provided main and timer scripts, if present on disk. */
  def loadUserMainScript = {
    val customScript = Script.readFromDisk(project, ResourceConstants.mainScript)
    if(customScript.newDataStream != null) {
      mapScreen.scriptFactory.runFromFile(
        ResourceConstants.mainScript,
        "main()")
    }
    val timerScript = Script.readFromDisk(project, ResourceConstants.timerScript)
    if(timerScript.newDataStream != null) {
      mapScreen.scriptFactory.runFromFile(
        ResourceConstants.timerScript)
    }
  }

  /**
   * Sets the members of the player's party. Controls the sprite for both
   * walking on the map, as well as the party members in a battle.
   *
   * TODO: Figure out if this requires @partyArray to be non-empty.
   */
  def setParty(partyArray: Array[Int]) = {
    assertOnBoundThread()
    persistent.setIntArray(PARTY, partyArray)
  }

  /** Resets state for a fresh game and switches to the map screen. */
  def startNewGame() = {
    assertOnBoundThread()

    persistent.setInt(EVENTS_ENABLED, 1)
    persistent.setInt(MENU_ENABLED, 1)

    setParty(project.data.startup.startingParty.toArray)
    initializeOrNormalizeArrays()
    setPlayerLoc(project.data.startup.startingLoc)

    mapScreen.windowManager.setTransition(0, 1.0f)
    setScreen(mapScreen)
    loadUserMainScript
  }

  /** Seeds character arrays that may be missing or stale in @persistent. */
  def initializeOrNormalizeArrays() = {
    // Never reassigned, so a val (was a var).
    val characters = project.data.enums.characters.toArray

    persistent.setStringArrayNoOverwrite(
      CHARACTER_NAMES, characters.map(_.name))
    persistent.setIntArrayNoOverwrite(
      CHARACTER_LEVELS, characters.map(_.initLevel))

    val characterStats = for (c <- characters)
      yield BattleStats(project.data, c.baseStats(project.data, c.initLevel),
        c.startingEquipment)

    // Fresh characters start at full HP/MP, zero experience, front row.
    persistent.setIntArrayNoOverwrite(CHARACTER_HPS, characterStats.map(_.mhp))
    persistent.setIntArrayNoOverwrite(CHARACTER_MPS, characterStats.map(_.mmp))
    persistent.setIntArrayNoOverwrite(CHARACTER_EXPS, characters.map(x => 0))
    persistent.setIntArrayNoOverwrite(CHARACTER_ROWS, characters.map(x => 0))
  }

  /** Persists the current game state to save slot @slot. */
  def saveGame(slot: Int) = {
    assertOnBoundThread()
    mapScreen.persistPlayerLocation()
    SaveFile.write(persistent.toSerializable, project, slot)
  }

  /** Loads the save in @slot; the slot must contain a valid save file. */
  def loadGame(slot: Int) = {
    assertOnBoundThread()

    val save = SaveFile.read(project, slot)
    assert(save.isDefined)
    persistent = new PersistentState(save.get)

    // Fix up menu and event enabled / disabled for legacy save games.
    // Only supply defaults when the keys are absent. (The previous guards
    // were inverted, overwriting values that were present in the save.)
    if (!persistent.hasInt(EVENTS_ENABLED))
      persistent.setInt(EVENTS_ENABLED, 1)
    if (!persistent.hasInt(MENU_ENABLED))
      persistent.setInt(MENU_ENABLED, 1)

    initializeOrNormalizeArrays()
    setParty(persistent.getIntArray(PARTY))

    // Restore player location.
    setPlayerLoc(persistent.getLoc(PLAYER_LOC))

    mapScreen.windowManager.setTransition(0, 1.0f)
    setScreen(mapScreen)
    loadUserMainScript
  }

  /** Records the player location and, if the map screen exists, moves him. */
  def setPlayerLoc(loc: MapLoc) = {
    persistent.setLoc(PLAYER_LOC, loc)
    if (mapScreen != null)
      mapScreen.setPlayerLoc(loc)
  }

  /**
   * Fades out the current screen, then switches to the battle screen and
   * starts the encounter @encounterId. No-op if already in battle.
   */
  def startBattle(encounterId: Int): Unit = {
    assertOnBoundThread()
    assert(encounterId >= 0)
    assert(encounterId < project.data.enums.encounters.length)

    val currentScreen = getScreen()
    if (currentScreen == battleScreen)
      return

    // Fade out map
    currentScreen.windowManager.setTransition(1, 0.6f)
    currentScreen.windowManager.runAfterTransition(() => {
      setScreen(battleScreen)
      battleScreen.windowManager.setTransition(0, 0.6f)

      val encounter = project.data.enums.encounters(encounterId)
      val battle = new Battle(
        project.data,
        persistent.getIntArray(PARTY),
        persistent.getPartyParameters(project.data.enums.characters),
        encounter,
        aiOpt = Some(new RandomEnemyAI))

      // Background and music come from the current map, when one is loaded.
      val (battleBackground, battleMusic) =
        mapScreen.mapAndAssetsOption.map { mapAndAssets =>
          (mapAndAssets.battleBackground, mapAndAssets.battleMusic)
        } getOrElse {
          ("", None)
        }

      battleScreen.startBattle(battle, battleBackground)

      if (!battleMusic.isEmpty) {
        battleScreen.playMusic(
          0,
          battleMusic,
          true,
          Transitions.fadeLength)
      }
    })
  }

  /** Applies the given windowskin to all three screens. */
  def setWindowskin(windowskinPath: String) = {
    battleScreen.windowManager.setWindowskin(windowskinPath)
    mapScreen.windowManager.setWindowskin(windowskinPath)
    startScreen.windowManager.setWindowskin(windowskinPath)
  }

  /** Exits the application. */
  def quit() {
    assertOnBoundThread()
    Gdx.app.exit()
  }

  /** Resets all screens and restarts from the start screen. */
  def gameOver() {
    battleScreen.reset()
    mapScreen.reset()
    startScreen.reset()

    // TODO: Ghetto but effective to just land on the start screen again.
    beginGame()
  }

  override def dispose() {
    assertOnBoundThread()
    battleScreen.dispose()
    mapScreen.dispose()

    if (atlasSprites != null)
      atlasSprites.dispose()
    assets.dispose()

    super.dispose()
  }
} | DrDub/rpgboss | core/src/main/scala/rpgboss/player/RpgGame.scala | Scala | agpl-3.0 | 8,387 |
/*
* Copyright © 2014 TU Berlin (emma@dima.tu-berlin.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.emmalanguage
package lib.ml.classification
import api._
import lib.linalg._
import lib.ml._
import lib.ml.optimization._
/**
 * Runs the shared LogRegSpec logistic-regression test suite on the
 * Flink backend, overriding the SGD hyperparameters and delegating
 * training to `logreg.train` inside an `emma.onFlink` quotation.
 */
class LogRegFlinkSpec extends LogRegSpec with FlinkAware {

  // hyperparameters for the SGD solver
  override val learningRate = 0.001
  override val iterations = 5
  override val miniBatchSize = 100
  override val lambda = 0.25

  // threshold for the mispredicted elements
  override val maxMispredicted = 5

  // initial weights for the test problems
  override val weights = dense(Array(0.0, 0.0))

  // initial seed for the test problems
  override val seed = -8827055269646172160L

  // Trains on Flink: drops the first component of each point's position
  // (presumably a bias column re-added downstream — TODO confirm), builds
  // an SGD solver with cross-entropy loss and L2 regularization, and runs
  // logreg.train inside the quoted emma.onFlink block.
  override def run(instances: Seq[LDPoint[Int, Double]])(weights: DVector, seed: Long): LinearModel =
    withDefaultFlinkEnv(implicit flink => emma.onFlink {
      val insts = DataBag(instances.map(x => x.copy(pos = dense(x.pos.values.drop(1)))))
      val solve = solver.sgd[Int](
        learningRate = learningRate,
        iterations = iterations,
        miniBatchSize = miniBatchSize,
        lambda = lambda
      )(
        error.crossentropy,
        regularization.l2
      )(
        weights
      )(_)
      logreg.train(insts, solve)
    })
}
| emmalanguage/emma | emma-lib-flink/src/test/scala/org/emmalanguage/lib/ml/classification/LogRegFlinkSpec.scala | Scala | apache-2.0 | 1,804 |
package 四法
// Peano-style encoding whose single operation consumes a P2Num2.
// NOTE(review): together with P2Num2 this pair appears to implement a
// mutually-recursive counter (truncated-subtraction-like) — confirm intent.
trait P2Num1 {
  def method2(num2: P2Num2): P2Num1
}
// Successor case: peel one layer off this number and hand the tail
// back to num2, continuing the mutual recursion.
case class P2Num1S(tail: P2Num1) extends P2Num1 {
  override def method2(num2: P2Num2): P2Num1 = num2.method1(tail)
}
// Terminal (zero) case: combining zero with anything yields zero.
case object P2Num1T extends P2Num1 {
  override def method2(num2: P2Num2): P2Num1 = P2Num1T
}
// Counterpart encoding whose single operation consumes a P2Num1.
trait P2Num2 {
  def method1(num1: P2Num1): P2Num1
}
// Successor case: bounce back to num1 with one layer removed,
// so both numbers are consumed in lock-step.
case class P2Num2S(tail: P2Num2) extends P2Num2 {
  override def method1(num1: P2Num1): P2Num1 = num1.method2(tail)
}
// Terminal (zero) case: combining with zero returns num1 unchanged.
case object P2Num2T extends P2Num2 {
  override def method1(num1: P2Num1): P2Num1 = num1
}
| djx314/ubw | a60-四/src/main/scala/四法/Counter2.scala | Scala | bsd-3-clause | 544 |
package graphique.backends
import graphique.UnitSpec
/** Exercises MIME-type sniffing on two known fixture images. */
class ContentTest extends UnitSpec {

  // detectMimeType should identify JPEG and PNG content from the bytes
  // of the bundled test resources.
  "detectMimeTypeE" should "detect mime types of images correctly" in {
    (Content detectMimeType readResource("like_a_sir.jpg")).get should be("image/jpeg")
    (Content detectMimeType readResource("ayam_soda.png")).get should be("image/png")
  }
} | amrhassan/graphique | src/test/scala/graphique/backends/ContentTest.scala | Scala | mit | 344 |
package slick.test.lifted
import org.junit.Test
import org.junit.Assert._
/** Test case for the SQL schema support in table definitions */
class SchemaSupportTest {

  /**
   * Verifies that every generated SQL statement (select, update, delete,
   * create, drop) qualifies the table name with its declared schema.
   */
  @Test def testSchemaSupport: Unit = {
    import slick.jdbc.H2Profile.api._

    // A table declared inside an explicit schema, "myschema".
    class T(tag: Tag) extends Table[Int](tag, Some("myschema"), "mytable") {
      def id = column[Int]("id")
      def * = id
    }
    val tables = TableQuery[T]

    val selectStatement = tables.filter(_.id < 5).result.statements.head
    println(selectStatement)
    assertTrue("select ... from uses schema name", selectStatement contains """from "myschema"."mytable"""")

    //val s2 = tables.insertStatement
    //println(s2)

    val updateStatement = tables.filter(_.id < 5).updateStatement
    println(updateStatement)
    assertTrue("update uses schema name", updateStatement contains """update "myschema"."mytable"""")

    val deleteStatement = tables.filter(_.id < 5).delete.statements.head
    println(deleteStatement)
    assertTrue("delete uses schema name", deleteStatement contains """delete from "myschema"."mytable"""")

    val createStatements = tables.schema.createStatements.toList
    createStatements.foreach(println)
    createStatements.foreach(s => assertTrue("DDL (create) uses schema name", s contains """ "myschema"."mytable""""))

    val dropStatements = tables.schema.dropStatements.toList
    dropStatements.foreach(println)
    dropStatements.foreach(s => assertTrue("DDL (drop) uses schema name", s contains """ "myschema"."mytable""""))
  }
}
| marko-asplund/slick | slick-testkit/src/test/scala/slick/test/lifted/SchemaSupportTest.scala | Scala | bsd-2-clause | 1,299 |
package org.jetbrains.plugins.scala
package lang.psi.controlFlow.impl
import com.intellij.psi.PsiNamedElement
import org.jetbrains.plugins.scala.lang.psi.controlFlow.ScControlFlowPolicy
/**
* Nikolay.Tropin
* 2014-04-14
*/
// A control-flow policy that accepts every named element, so the
// resulting control-flow analysis tracks all variables rather than
// a filtered subset.
object AllVariablesControlFlowPolicy extends ScControlFlowPolicy {
  override def isElementAccepted(named: PsiNamedElement): Boolean = true
}
| ilinum/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/controlFlow/impl/AllVariablesControlFlowPolicy.scala | Scala | apache-2.0 | 370 |
package org.genericConfig.admin.client.views
import org.genericConfig.admin.client.controllers.listner.Mouse
import org.genericConfig.admin.client.controllers.websocket.ActionsForClient
import org.genericConfig.admin.client.views.html.{HtmlElementIds, HtmlElementText}
import org.genericConfig.admin.shared.Actions
import org.genericConfig.admin.shared.common.ErrorDTO
import org.scalajs.jquery.{JQuery, jQuery}
import util.CommonFunction
class StartPage() extends CommonFunction {

  /**
   * Renders the initial login page: a headline, username and password
   * input fields, and buttons for login and registration. Click handlers
   * are wired to the corresponding client actions.
   *
   * @param errors currently unused here; reserved for displaying errors.
   */
  def drawStartPage(errors : Option[List[ErrorDTO]] = None) : Unit = {
    // Remove any previously rendered content first.
    cleanPage

    // Build all widgets up front.
    val page: JQuery = HtmlElementText.mainPage("Administrator für generischer Konfigurator")
    val usernameField: JQuery = HtmlElementText.drawInputField("username", "Benutzername")
    val passwordField: JQuery = HtmlElementText.drawInputField("password", "Password", typeofInput = "password")
    val loginButton: JQuery = HtmlElementText.drawButton("login", "Anmelden")
    val registerButton: JQuery = HtmlElementText.drawButton("register", "Neuer Benutzer hinzufügen")

    // Attach the page container to the document, then the widgets in order.
    page.appendTo(jQuery(HtmlElementIds.section))
    usernameField.appendTo(page)
    passwordField.appendTo(page)
    loginButton.appendTo(page)
    registerButton.appendTo(page)

    // Wire up the click listeners.
    new Mouse().mouseClick(loginButton, Actions.GET_USER)
    new Mouse().mouseClick(registerButton, ActionsForClient.REGISTER_PAGE)
  }
}
| gennadij/admin | client/src/main/scala/org/genericConfig/admin/client/views/StartPage.scala | Scala | apache-2.0 | 1,458 |
package com.twitter.finagle
import com.twitter.conversions.storage._
import com.twitter.finagle.client._
import com.twitter.finagle.dispatch.SerialServerDispatcher
import com.twitter.finagle.httpx.codec.{HttpClientDispatcher, HttpServerDispatcher}
import com.twitter.finagle.httpx.filter.DtabFilter
import com.twitter.finagle.httpx.{
HttpTransport, HttpServerTraceInitializer, HttpClientTraceInitializer,
Request, Response
}
import com.twitter.finagle.netty3._
import com.twitter.finagle.param.Stats
import com.twitter.finagle.server._
import com.twitter.finagle.ssl.Ssl
import com.twitter.finagle.transport.Transport
import com.twitter.finagle.tracing._
import com.twitter.util.{Future, StorageUnit}
import java.net.{InetSocketAddress, SocketAddress}
import org.jboss.netty.channel.Channel
/**
* A rich client with a *very* basic URL fetcher. (It does not handle
* redirects, does not have a cookie jar, etc.)
*/
trait HttpxRichClient { self: Client[Request, Response] =>
  def fetchUrl(url: String): Future[Response] =
    fetchUrl(new java.net.URL(url))

  def fetchUrl(url: java.net.URL): Future[Response] = {
    // Fall back to the scheme's default port when none is given in the URL.
    val port = if (url.getPort < 0) url.getDefaultPort else url.getPort
    val group = Group[SocketAddress](new InetSocketAddress(url.getHost, port))
    val request = httpx.RequestBuilder().url(url).buildGet()
    val service = newClient(group).toService
    // Release the one-shot service once the response future resolves.
    service(request).ensure(service.close())
  }
}
/**
* Http protocol support, including client and server.
*/
/**
 * Http protocol support, including client and server.
 *
 * The nested `Client` and `Server` case classes are finagle stack
 * client/server implementations wired up with the HTTP codec; the outer
 * object exposes default instances plus `newClient`/`serve` entry points.
 */
object Httpx extends Client[Request, Response] with HttpxRichClient
  with Server[Request, Response] {

  // Stack params specific to the HTTP codec.
  object param {
    // Maximum inbound request body size accepted by the server codec.
    case class MaxRequestSize(size: StorageUnit)
    implicit object MaxRequestSize extends Stack.Param[MaxRequestSize] {
      val default = MaxRequestSize(5.megabytes)
    }

    // Maximum response body size accepted by the client codec.
    case class MaxResponseSize(size: StorageUnit)
    implicit object MaxResponseSize extends Stack.Param[MaxResponseSize] {
      val default = MaxResponseSize(5.megabytes)
    }

    // Whether message bodies are streamed rather than fully buffered.
    case class Streaming(enabled: Boolean)
    implicit object Streaming extends Stack.Param[Streaming] {
      val default = Streaming(false)
    }

    // Transfers the params above onto a codec instance.
    private[Httpx] def applyToCodec(
      params: Stack.Params, codec: httpx.Http): httpx.Http =
      codec
        .maxRequestSize(params[MaxRequestSize].size)
        .maxResponseSize(params[MaxResponseSize].size)
        .streaming(params[Streaming].enabled)
  }

  object Client {
    // Default client stack; the Client below swaps in HTTP-aware tracing.
    val stack: Stack[ServiceFactory[Request, Response]] = StackClient.newStack
  }

  /** Stack-based HTTP client; immutable, configured via `configured`/`with*`. */
  case class Client(
    stack: Stack[ServiceFactory[Request, Response]] = Client.stack.replace(
      TraceInitializerFilter.role, new HttpClientTraceInitializer[Request, Response]),
    params: Stack.Params = StackClient.defaultParams
  ) extends StdStackClient[Request, Response, Client] {

    protected type In = Any
    protected type Out = Any

    // Builds a Netty3 transporter using the HTTP codec's pipeline and a
    // codec-provided client transport factory.
    protected def newTransporter(): Transporter[Any, Any] = {
      val com.twitter.finagle.param.Label(label) = params[com.twitter.finagle.param.Label]
      val codec = param.applyToCodec(params, httpx.Http())
        .client(ClientCodecConfig(label))
      val Stats(stats) = params[Stats]
      val newTransport = (ch: Channel) => codec.newClientTransport(ch, stats)
      Netty3Transporter(
        codec.pipelineFactory,
        params + Netty3Transporter.TransportFactory(newTransport))
    }

    protected def copy1(
      stack: Stack[ServiceFactory[Request, Response]] = this.stack,
      params: Stack.Params = this.params
    ): Client = copy(stack, params)

    protected def newDispatcher(transport: Transport[Any, Any]): Service[Request, Response] =
      new HttpClientDispatcher(transport)

    // Enables TLS with an explicit engine config; also installs the
    // TlsFilter module (e.g. to set the Host header appropriately).
    def withTls(cfg: Netty3TransporterTLSConfig): Client =
      configured((Transport.TLSClientEngine(Some(cfg.newEngine))))
        .configured(Transporter.TLSHostname(cfg.verifyHost))
        .transformed { stk => httpx.TlsFilter.module +: stk }

    // Enables TLS, verifying the peer against @hostname.
    def withTls(hostname: String): Client =
      withTls(new Netty3TransporterTLSConfig({
        case inet: InetSocketAddress => Ssl.client(hostname, inet.getPort)
        case _ => Ssl.client()
      }, Some(hostname)))

    // Enables TLS but skips certificate validation (testing only).
    def withTlsWithoutValidation(): Client =
      configured(Transport.TLSClientEngine(Some({
        case inet: InetSocketAddress => Ssl.clientWithoutCertificateValidation(inet.getHostName, inet.getPort)
        case _ => Ssl.clientWithoutCertificateValidation()
      })))

    def withMaxRequestSize(size: StorageUnit): Client =
      configured(param.MaxRequestSize(size))

    def withMaxResponseSize(size: StorageUnit): Client =
      configured(param.MaxResponseSize(size))
  }

  // Default client instance backing the object-level Client interface.
  val client = Client()

  def newClient(dest: Name, label: String): ServiceFactory[Request, Response] =
    client.newClient(dest, label)

  /** Stack-based HTTP server; immutable, configured via `configured`/`with*`. */
  case class Server(
    stack: Stack[ServiceFactory[Request, Response]] =
      StackServer.newStack.replace(
        TraceInitializerFilter.role,
        new HttpServerTraceInitializer[Request, Response]),
    params: Stack.Params = StackServer.defaultParams
  ) extends StdStackServer[Request, Response, Server] {

    protected type In = Any
    protected type Out = Any

    // Builds a Netty3 listener with the HTTP codec's server pipeline.
    protected def newListener(): Listener[Any, Any] = {
      val com.twitter.finagle.param.Label(label) = params[com.twitter.finagle.param.Label]
      val httpPipeline =
        param.applyToCodec(params, httpx.Http())
          .server(ServerCodecConfig(label, new SocketAddress{}))
          .pipelineFactory
      Netty3Listener(httpPipeline, params)
    }

    // Wraps the user service with Dtab header handling before dispatch.
    protected def newDispatcher(transport: Transport[In, Out],
        service: Service[Request, Response]) = {
      val dtab = new DtabFilter.Finagle[Request]
      val Stats(stats) = params[Stats]
      new HttpServerDispatcher(new HttpTransport(transport), dtab andThen service, stats.scope("dispatch"))
    }

    protected def copy1(
      stack: Stack[ServiceFactory[Request, Response]] = this.stack,
      params: Stack.Params = this.params
    ): Server = copy(stack, params)

    def withTls(cfg: Netty3ListenerTLSConfig): Server =
      configured(Transport.TLSServerEngine(Some(cfg.newEngine)))

    def withMaxRequestSize(size: StorageUnit): Server =
      configured(param.MaxRequestSize(size))

    def withMaxResponseSize(size: StorageUnit): Server =
      configured(param.MaxResponseSize(size))
  }

  // Default server instance backing the object-level Server interface.
  val server = Server()

  def serve(addr: SocketAddress, service: ServiceFactory[Request, Response]): ListeningServer =
    server.serve(addr, service)
}
| kristofa/finagle | finagle-httpx/src/main/scala/com/twitter/finagle/Httpx.scala | Scala | apache-2.0 | 6,516 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.thriftserver
import java.util.UUID
import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType
import org.apache.hadoop.hive.serde2.thrift.Type
import org.apache.hadoop.hive.serde2.thrift.Type._
import org.apache.hive.service.cli.OperationState
import org.apache.hive.service.cli.operation.GetTypeInfoOperation
import org.apache.hive.service.cli.session.HiveSession
import org.apache.spark.internal.Logging
import org.apache.spark.sql.SQLContext
/**
* Spark's own GetTypeInfoOperation
*
* @param sqlContext SQLContext to use
* @param parentSession a HiveSession from SessionManager
*/
private[hive] class SparkGetTypeInfoOperation(
    val sqlContext: SQLContext,
    parentSession: HiveSession)
  extends GetTypeInfoOperation(parentSession)
  with SparkOperation
  with Logging {

  // Populates the thrift result set with one row per supported type,
  // reporting start/finish to the HiveThriftServer2 event manager.
  override def runInternal(): Unit = {
    statementId = UUID.randomUUID().toString
    val logMsg = "Listing type info"
    logInfo(s"$logMsg with $statementId")
    setState(OperationState.RUNNING)

    // Always use the latest class loader provided by executionHive's state.
    val executionHiveClassLoader = sqlContext.sharedState.jarClassLoader
    Thread.currentThread().setContextClassLoader(executionHiveClassLoader)

    // Authorization check (only when Hive authorization V2 is enabled).
    if (isAuthV2Enabled) {
      authorizeMetaGets(HiveOperationType.GET_TYPEINFO, null)
    }

    HiveThriftServer2.eventManager.onStatementStart(
      statementId,
      parentSession.getSessionHandle.getSessionId.toString,
      logMsg,
      statementId,
      parentSession.getUsername)

    try {
      // Each row follows the JDBC DatabaseMetaData.getTypeInfo column layout.
      SparkGetTypeInfoUtil.supportedType.foreach(typeInfo => {
        val rowData = Array[AnyRef](
          typeInfo.getName, // TYPE_NAME
          typeInfo.toJavaSQLType.asInstanceOf[AnyRef], // DATA_TYPE
          typeInfo.getMaxPrecision.asInstanceOf[AnyRef], // PRECISION
          typeInfo.getLiteralPrefix, // LITERAL_PREFIX
          typeInfo.getLiteralSuffix, // LITERAL_SUFFIX
          typeInfo.getCreateParams, // CREATE_PARAMS
          typeInfo.getNullable.asInstanceOf[AnyRef], // NULLABLE
          typeInfo.isCaseSensitive.asInstanceOf[AnyRef], // CASE_SENSITIVE
          typeInfo.getSearchable.asInstanceOf[AnyRef], // SEARCHABLE
          typeInfo.isUnsignedAttribute.asInstanceOf[AnyRef], // UNSIGNED_ATTRIBUTE
          typeInfo.isFixedPrecScale.asInstanceOf[AnyRef], // FIXED_PREC_SCALE
          typeInfo.isAutoIncrement.asInstanceOf[AnyRef], // AUTO_INCREMENT
          typeInfo.getLocalizedName, // LOCAL_TYPE_NAME
          typeInfo.getMinimumScale.asInstanceOf[AnyRef], // MINIMUM_SCALE
          typeInfo.getMaximumScale.asInstanceOf[AnyRef], // MAXIMUM_SCALE
          null, // SQL_DATA_TYPE, unused
          null, // SQL_DATETIME_SUB, unused
          typeInfo.getNumPrecRadix // NUM_PREC_RADIX
        )
        rowSet.addRow(rowData)
      })
      setState(OperationState.FINISHED)
    } catch onError()

    HiveThriftServer2.eventManager.onStatementFinish(statementId)
  }
}
private[hive] object SparkGetTypeInfoUtil {
  // The Hive thrift types Spark exposes through GetTypeInfo.
  val supportedType: Seq[Type] = {
    Seq(NULL_TYPE, BOOLEAN_TYPE, STRING_TYPE, BINARY_TYPE,
      TINYINT_TYPE, SMALLINT_TYPE, INT_TYPE, BIGINT_TYPE,
      FLOAT_TYPE, DOUBLE_TYPE, DECIMAL_TYPE,
      DATE_TYPE, TIMESTAMP_TYPE,
      ARRAY_TYPE, MAP_TYPE, STRUCT_TYPE, CHAR_TYPE, VARCHAR_TYPE,
      INTERVAL_YEAR_MONTH_TYPE, INTERVAL_DAY_TIME_TYPE)
  }
}
| ueshin/apache-spark | sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkGetTypeInfoOperation.scala | Scala | apache-2.0 | 4,199 |
package com.sksamuel.elastic4s.reindex
import com.sksamuel.elastic4s.script.ScriptDefinition
import com.sksamuel.elastic4s.searches.queries.QueryDefinition
import com.sksamuel.elastic4s.{Indexes, URLParameters}
import com.sksamuel.exts.OptionImplicits._
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy
import scala.concurrent.duration.FiniteDuration
/**
 * Immutable builder describing an Elasticsearch reindex operation from
 * `sourceIndexes` into `targetIndex`. Each fluent method returns an
 * updated copy; URL-level options accumulate in `urlParams`.
 */
case class ReindexDefinition(sourceIndexes: Indexes,
                             targetIndex: String,
                             targetType: Option[String] = None,
                             filter: Option[QueryDefinition] = None,
                             maxRetries: Option[Int] = None,
                             retryBackoffInitialTime: Option[FiniteDuration] = None,
                             shouldStoreResult: Option[Boolean] = None,
                             size: Option[Int] = None,
                             script: Option[ScriptDefinition] = None,
                             urlParams: URLParameters = URLParameters()
                            ) {

  // --- URL-level options, delegated to URLParameters ---

  def timeout(timeout: FiniteDuration): ReindexDefinition =
    copy(urlParams = urlParams.timeout(timeout))

  def refresh(refresh: RefreshPolicy): ReindexDefinition =
    copy(urlParams = urlParams.refresh(refresh))

  def requestsPerSecond(requestsPerSecond: Float): ReindexDefinition =
    copy(urlParams = urlParams.requestsPerSecond(requestsPerSecond))

  def waitForActiveShards(waitForActiveShards: Int): ReindexDefinition =
    copy(urlParams = urlParams.waitForActiveShards(waitForActiveShards))

  def waitForCompletion(waitForCompletion: Boolean): ReindexDefinition =
    copy(urlParams = urlParams.waitForCompletion(waitForCompletion))

  // Replaces all URL parameters wholesale.
  def urlParams(urlParams: URLParameters): ReindexDefinition = copy(urlParams = urlParams)

  // --- Body-level options of the reindex request ---

  def filter(filter: QueryDefinition): ReindexDefinition = copy(filter = filter.some)
  def maxRetries(maxRetries: Int): ReindexDefinition = copy(maxRetries = maxRetries.some)

  def retryBackoffInitialTime(retryBackoffInitialTime: FiniteDuration): ReindexDefinition =
    copy(retryBackoffInitialTime = retryBackoffInitialTime.some)

  def size(size: Int): ReindexDefinition = copy(size = size.some)

  def shouldStoreResult(shouldStoreResult: Boolean): ReindexDefinition =
    copy(shouldStoreResult = shouldStoreResult.some)

  def script(script: ScriptDefinition): ReindexDefinition = copy(script = script.some)
}
| aroundus-inc/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/reindex/ReindexDefinition.scala | Scala | apache-2.0 | 2,417 |
package creg.compiler
import org.scalatest._
import creg._
/**
 * Verifies that the @data macro generates usable type synonyms for the
 * declared datatypes: flat ADTs, recursive and generic-recursive
 * fixpoints, and nested synonym definitions. The value assignments and
 * implicitly[...] checks below are compile-time assertions.
 */
class SynonymGeneratorSpec extends FlatSpec {

  // Flat (non-recursive) datatype: the constructors should typecheck
  // directly against the generated `Person` synonym.
  "SynonymGenerator" should "generate a synonym for flat datatypes" in {
    @data def Person = PersonT {
      Boss
      Manager(dept = Int)
      Employee(name = String, dept = Int)
    }

    val boss : Person = Boss
    val manager : Person = Manager(51)
    val employee : Person = Employee("Julia O'Brien", 1984)
  }

  // Recursive datatype: values are built by rolling the generated
  // pattern functor `IntListF` into the fixpoint.
  it should "generate synonyms for recursive datatypes" in {
    @data def IntList = Fix(intList => IntListT {
      Nil
      Cons(head = Int, tail = intList)
    })

    val nil: IntList = Roll[IntListF](Nil)
    def cons(x: Int, xs: IntList): IntList = Roll[IntListF](Cons(x, xs))
    val xs: IntList = cons(1, cons(2, cons(3, cons(4, nil))))
    info(s"xs = $xs")
  }

  // Generic recursive datatype: a partially-applied pattern functor is
  // needed to roll values; see the inline comments for why it is local.
  it should "generate synonyms for generic recursive datatypes" in {
    @data def GList[A] = Fix(gList => GListT {
      Nil
      Cons(head = A, tail = gList)
    })

    object InnerModuleForTechnicalReasons {
      private[this] type GF[+A] = {
        // covariance in inner type is possible because the synonym GF is local to this file
        // technical detail: only private[this] works. private[SynonymSpec] does not work.
        // this is the technical reason to have InnerModuleForTechnicalReasons.
        type λ[+R] = GListF[A, R]
      }

      def nil[A]: GList[A] = Roll[GF[A]#λ](Nil)
      def cons[A](x: A, xs: GList[A]): GList[A] = Roll[GF[A]#λ](Cons(x, xs))
    }
    import InnerModuleForTechnicalReasons._

    val xs: GList[Int] = cons(1, cons(2, cons(3, cons(4, nil))))
    info(s"xs = $xs")
  }

  // Nested synonym definitions (mutually defined Even/Odd under Nat).
  it should "generate nested synonyms" in {
    @data def Nat = NatT {
      def Even = EvenT { Zero ; ESuc(pred = Odd) }
      def Odd = OSuc(pred = Fix(even => EvenT { Zero ; ESuc(pred = OSuc(pred = even)) }))
    }

    // test that the constructors are properly tagged `Record` or `Variant`
    import Fix.{Record, Variant}
    implicitly[EvenT[Any, Any] <:< Variant]
    implicitly[ESuc[Any] <:< Record]
    implicitly[OSuc[Any] <:< Record]

    // test that synonyms `Odd`, `Even` and `Nat` are generated correctly
    type MuEven = Fix[({ type λ[+even] = EvenT[Zero, ESuc[OSuc[even]]] })#λ]
    implicitly[Odd =:= OSuc[MuEven]]
    implicitly[Even =:= EvenT[Zero, ESuc[Odd]]]
    implicitly[Nat =:= NatT[Even, Odd]]
  }

  // The name bound by Fix must not leak as an unbound type in the
  // generated nested synonym.
  it should "not generate unbound type names in nested synonyms" in {
    @data def Rolled = Fix(rolled => { def Unrolled = Loop(get = rolled) })
    implicitly[Rolled =:= Fix[RolledF]]
    implicitly[Unrolled =:= Loop[Rolled]]
  }
}
| yfcai/CREG | test/SynonymGeneratorSpec.scala | Scala | mit | 2,604 |
package uk.co.bbc.redux
import scala.xml._
import scala.io.Source
import org.apache.commons.httpclient._
import org.apache.commons.httpclient.methods._
import org.apache.commons.httpclient.params.HttpMethodParams
import org.apache.commons.httpclient.cookie.CookiePolicy
/**
 * Thin wrapper around Apache Commons HttpClient (3.x) providing GET
 * requests with XML or String responses, plus domain-specific helpers
 * that map HTTP status codes onto redux client exceptions.
 */
trait Http {

  // Shared client instance; a var so tests can inject a stub.
  var httpClient:HttpClient = new HttpClient

  /****************************************
   * DOMAIN SPECIFIC GET REQUEST METHODS
   ****************************************/

  /**
   * Fetches content XML from @url and applies @block to the parsed result.
   * 403 signals an invalid session; 404 signals missing content.
   */
  protected def contentRequest[T] (url:String, block: NodeSeq => T) : T = {
    val response: NodeSeq = getRequestWithXmlResponse(url, status => status match {
      case 403 => throw new SessionInvalidException
      case 404 => throw new ContentNotFoundException
      case _ => otherHttpException(status)
    })
    block(response)
  }

  /**
   * Fetches user XML from @url and applies @block to the parsed result.
   * 403 signals a bad password; 404 signals an unknown user.
   */
  protected def userRequest[T] (url:String, block: NodeSeq => T) : T = {
    val response: NodeSeq = getRequestWithXmlResponse(url, status => status match {
      case 403 => throw new UserPasswordException
      case 404 => throw new UserNotFoundException
      case _ => otherHttpException(status)
    })
    block(response)
  }

  // Fallback for unexpected HTTP statuses: always throws.
  protected def otherHttpException(status:Int) = {
    throw new ClientHttpException(status.toString)
  }

  /****************************************
   * GENERIC GET REQUEST METHODS
   ****************************************/

  /** GETs @url and parses the body as XML; @error maps non-200 statuses. */
  protected def getRequestWithXmlResponse (url:String, error: Int => NodeSeq) : NodeSeq = {
    getRequest(url, method => {
      XML.load(method.getResponseBodyAsStream())
    }, error)
  }

  /** GETs @url and returns the body as a string; @error maps non-200 statuses. */
  protected def getRequestWithStringResponse (url:String, error: Int => String) : String = {
    getRequest(url, method => {
      val source: Source = Source.fromInputStream(method.getResponseBodyAsStream(), method.getResponseCharSet())
      // Join with a real newline; the previous "\\n" separator inserted a
      // literal backslash-n between lines instead of a line break.
      source.getLines().mkString("\n")
    }, error)
  }

  /** GET without a cookie header value. */
  protected def getRequest[T] (url: String, success: GetMethod => T, error: Int => T) : T = {
    getRequest(url, "", success, error)
  }

  /**
   * Executes a GET against @url sending the raw @cookie header, dispatching
   * to @success on HTTP 200 and to @error otherwise. The connection is
   * always released, even if a handler throws.
   */
  protected def getRequest[T] (url: String, cookie:String, success: GetMethod => T, error: Int => T) : T = {
    val method:GetMethod = new GetMethod(url)
    // Cookies are managed manually via the explicit header below.
    method.getParams().setCookiePolicy(CookiePolicy.IGNORE_COOKIES)
    method.setRequestHeader("Cookie", cookie)
    val status:Int = httpClient.executeMethod(method)
    try {
      status match {
        case 200 => success(method)
        case _ => error(status)
      }
    } finally {
      method.releaseConnection()
    }
  }
} | bbcsnippets/redux-client-scala | src/main/scala/uk/co/bbc/redux/Http.scala | Scala | apache-2.0 | 2,520 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.adam.rdd
import htsjdk.variant.variantcontext.writer.{
Options,
VariantContextWriterBuilder
}
import htsjdk.variant.vcf.VCFHeader
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{ FileSystem, Path }
/**
* Utility for writing VCF headers to a file.
*/
/**
 * Utility for writing VCF headers to a file.
 */
private[rdd] object VCFHeaderUtils {

  /**
   * Writes a vcf header to a file.
   *
   * @param header The header to write.
   * @param path The path to write it to.
   * @param conf The configuration used to resolve the file system.
   */
  def write(header: VCFHeader,
            path: Path,
            conf: Configuration): Unit = {
    write(header, path, path.getFileSystem(conf))
  }

  /**
   * Writes a vcf header to a file.
   *
   * @param header The header to write.
   * @param path The path to write it to.
   * @param fs The file system to write to.
   */
  def write(header: VCFHeader,
            path: Path,
            fs: FileSystem): Unit = {
    // Open the destination stream and build a writer that emits directly
    // to it without creating an index.
    val stream = fs.create(path)
    val writer = new VariantContextWriterBuilder()
      .setOutputVCFStream(stream)
      .clearIndexCreator()
      .unsetOption(Options.INDEX_ON_THE_FLY)
      .build()

    writer.writeHeader(header)

    // Closing the writer also closes the underlying stream (see ADAM-1337),
    // so no separate stream.close() is needed.
    writer.close()
  }
}
| massie/adam | adam-core/src/main/scala/org/bdgenomics/adam/rdd/VCFHeaderUtils.scala | Scala | apache-2.0 | 2,171 |
package passera.test
import org.scalacheck._
import org.scalacheck.Prop._
import passera.unsigned._
/** Property-based laws for the unsigned integer type [[passera.unsigned.UInt]]. */
object UnsignedCheck extends Properties("Unsigned") {
import Gen._
import Arbitrary.arbitrary
// Frequently-used constants under test.
val zero = 0.toUInt
val one = 1.toUInt
// Generator: reinterpret an arbitrary signed Int's bits as a UInt.
def genUInt: Gen[UInt] = for (n <- arbitrary[Int]) yield UInt(n)
implicit def arbUInt: Arbitrary[UInt] = Arbitrary(genUInt)
// --- conversion round-trips and rendering ---
property("int-toString") =
forAll { n: Int => n >= 0 ==> (n.toUInt.toString == n.toString) }
// Longs covering the full unsigned 32-bit range.
val nonNegLong = Gen.choose(0L, 0x00000000ffffffffL)
property("long-toString") =
forAll(nonNegLong){ n => n.toUInt.toString == n.toString }
property("toUInt->toInt inverses") =
forAll { (a: Int) => a.toUInt.toInt == a }
property("toInt->toUInt inverses") =
forAll { (a: UInt) => a.toInt.toUInt == a }
property("toUInt-toDouble") =
forAll { (a: Int) => (a >= 0) ==> (a.toUInt.toDouble == a.toDouble) }
// --- algebraic laws: commutativity, identities, associativity, distributivity ---
property(">= 0") =
forAll { (a: UInt) => a >= zero }
property("+ commutes") =
forAll { (a: UInt, b: UInt) => a + b == b + a }
property("* commutes") =
forAll { (a: UInt, b: UInt) => a * b == b * a }
property("zero identity for +") =
forAll { (a: UInt, b: UInt) => a + zero == a }
property("one identity for *") =
forAll { (a: UInt, b: UInt) => a * one == a }
property("zero is zero *") =
forAll { (a: UInt, b: UInt) => a * zero == zero }
property("+ associates") =
forAll { (a: UInt, b: UInt, c: UInt) => a + (b + c) == (a + b) + c }
property("* distributes left") =
forAll { (a: UInt, b: UInt, c: UInt) => a * (b + c) == (a*b) + (a*c) }
property("* distributes right") =
forAll { (a: UInt, b: UInt, c: UInt) => (a + b) * c == (a*c) + (b*c) }
// Addition and subtraction invert each other (modular arithmetic).
property("+ and -") =
forAll { (a: UInt, b: UInt) => a + (b - a) == b }
property("+ and - (2)") =
forAll { (a: UInt, b: UInt) => (b - a) + a == b }
// --- division / remainder and their relation to shifts ---
property("/ and shift") =
forAll { (a: UInt) => a / 2.toUInt == a >>> 1 }
// For unsigned values, arithmetic and logical right shift agree.
property("shift") =
forAll { (a: UInt) => a >> 1 == a >>> 1 }
property("zero frac") =
forAll { (a: UInt, b: UInt) => (a < b && b != 0) ==> ((a / b) == 0) }
property("nonzero frac") =
forAll { (a: UInt, b: UInt) => (a > b && b != 0) ==> ((a / b) > zero) }
// Quotient-remainder identity: a == q*b + r.
property("qr") =
forAll { (a: UInt, b: UInt) => (b != 0) ==> {
val q = a / b
val r = a % b
q * b + r == a
} }
// --- ordering laws (note: == binds looser than </>, so these parse as (a < b) == (b > a)) ---
property("< and >") =
forAll { (a: UInt, b: UInt) => a < b == b > a }
property("<= and >=") =
forAll { (a: UInt, b: UInt) => a <= b == b >= a }
property("<= and < and ==") =
forAll { (a: UInt, b: UInt) => a <= b == (a < b || a == b) }
property(">= and > and ==") =
forAll { (a: UInt, b: UInt) => a >= b == (a > b || a == b) }
property("< and >= and !=") =
forAll { (a: UInt, b: UInt) => a < b == (a <= b && a != b) }
property("> and >= and !=") =
forAll { (a: UInt, b: UInt) => a > b == (a >= b && a != b) }
property("<= and ! >") =
forAll { (a: UInt, b: UInt) => a <= b == ! (a > b) }
property(">= and ! <") =
forAll { (a: UInt, b: UInt) => a >= b == ! (a < b) }
// --- shift operators: UInt shifts must match Int shifts for every count type.
// Counts are masked with 0x1f to stay within the 32-bit shift range. ---
property("<< by Int") =
forAll { (a: Int, b: Int) => a.toUInt << (b & 0x1f) == (a << (b & 0x1f)).toUInt }
property("<< by Long") =
forAll { (a: Int, b: Long) => a.toUInt << (b & 0x1f) == (a << (b & 0x1f)).toUInt }
property("<< by UInt") =
forAll { (a: Int, b: Int) => a.toUInt << (b & 0x1f).toUInt == (a << (b & 0x1f)).toUInt }
property("<< by ULong") =
forAll { (a: Int, b: Long) => a.toUInt << (b & 0x1f).toULong == (a << (b & 0x1f)).toUInt }
property(">> by Int") =
forAll { (a: Int, b: Int) => a.toUInt >> (b & 0x1f) == (a >>> (b & 0x1f)).toUInt }
property(">> by Long") =
forAll { (a: Int, b: Long) => a.toUInt >> (b & 0x1f) == (a >>> (b & 0x1f)).toUInt }
property(">> by UInt") =
forAll { (a: Int, b: Int) => a.toUInt >> (b & 0x1f).toUInt == (a >>> (b & 0x1f)).toUInt }
property(">> by ULong") =
forAll { (a: Int, b: Long) => a.toUInt >> (b & 0x1f).toULong == (a >>> (b & 0x1f)).toUInt }
property(">>> by Int") =
forAll { (a: Int, b: Int) => a.toUInt >>> (b & 0x1f) == (a >>> (b & 0x1f)).toUInt }
property(">>> by Long") =
forAll { (a: Int, b: Long) => a.toUInt >>> (b & 0x1f) == (a >>> (b & 0x1f)).toUInt }
property(">>> by UInt") =
forAll { (a: Int, b: Int) => a.toUInt >>> (b & 0x1f).toUInt == (a >>> (b & 0x1f)).toUInt }
property(">>> by ULong") =
forAll { (a: Int, b: Long) => a.toUInt >>> (b & 0x1f).toULong == (a >>> (b & 0x1f)).toUInt }
property(">> and >>> equivalent") =
forAll { (a: Int, b: Int) => a.toUInt >> (b & 0x1f) == a.toUInt >>> (b & 0x1f) }
}
| nystrom/scala-unsigned | src/test/scala/passera/test/UnsignedCheck.scala | Scala | bsd-2-clause | 4,618 |
package com.gilt.aws.lambda
import com.amazonaws.{AmazonServiceException, AmazonClientException}
import com.amazonaws.services.identitymanagement.AmazonIdentityManagementClient
import com.amazonaws.services.identitymanagement.model.{CreateRoleRequest, Role}
/** Helpers for locating/creating the IAM role used to execute Lambda functions. */
private[lambda] object AwsIAM {

  /** Name of the IAM role granting basic Lambda execution permissions. */
  val BasicLambdaRoleName = "lambda_basic_execution"

  /** Lazily constructed IAM client; credentials come from the shared provider chain. */
  lazy val iamClient = new AmazonIdentityManagementClient(AwsCredentials.provider)

  /**
   * Finds the basic Lambda execution role if it already exists.
   *
   * ListRoles is paginated (at most 100 roles per page by default), so we follow
   * the pagination marker until the role is found or the pages are exhausted.
   * The previous implementation inspected only the first page and could miss the
   * role in accounts with many roles.
   *
   * @return Some(role) when the role exists, None otherwise
   */
  def basicLambdaRole(): Option[Role] = {
    import scala.annotation.tailrec
    import scala.collection.JavaConverters._
    import com.amazonaws.services.identitymanagement.model.{ListRolesRequest, ListRolesResult}

    @tailrec
    def findIn(result: ListRolesResult): Option[Role] = {
      val found = result.getRoles.asScala.find(_.getRoleName == BasicLambdaRoleName)
      if (found.isDefined || !result.isTruncated) found
      else findIn(iamClient.listRoles(new ListRolesRequest().withMarker(result.getMarker)))
    }

    findIn(iamClient.listRoles())
  }

  /**
   * Creates the basic Lambda execution role with a trust policy that allows
   * the Lambda service to assume it.
   *
   * @return Success with the new role's ARN, or Failure wrapping any AWS
   *         client/service exception
   */
  def createBasicLambdaRole(): Result[RoleARN] = {
    val createRoleRequest = {
      // Trust policy: only lambda.amazonaws.com may assume this role.
      val policyDocument = """{"Version":"2012-10-17","Statement":[{"Sid":"","Effect":"Allow","Principal":{"Service":"lambda.amazonaws.com"},"Action":"sts:AssumeRole"}]}"""
      val c = new CreateRoleRequest
      c.setRoleName(BasicLambdaRoleName)
      c.setAssumeRolePolicyDocument(policyDocument)
      c
    }
    try {
      val result = iamClient.createRole(createRoleRequest)
      Success(RoleARN(result.getRole.getArn))
    } catch {
      case ex @ (_ : AmazonClientException |
                 _ : AmazonServiceException) =>
        Failure(ex)
    }
  }
}
| Jimdo/sbt-aws-lambda | src/main/scala/com/gilt/aws/lambda/AwsIAM.scala | Scala | apache-2.0 | 1,300 |
package com.toscaruntime.cli
import java.io.File
import java.nio.file.{Files, Path, Paths}
import java.util.function.{Consumer, Predicate}
import com.toscaruntime.cli.command._
import com.toscaruntime.cli.util.CompilationUtil
import com.toscaruntime.compiler.Compiler
import com.toscaruntime.exception.UnexpectedException
import com.toscaruntime.rest.client.ToscaRuntimeClient
import com.toscaruntime.util.DockerUtil
import sbt._
/**
* Entry point for the cli
*
* @author Minh Khang VU
*/
class ToscaRuntimeCLI extends xsbti.AppMain {

/** Defines the entry point for the application.
* The call to `initialState` sets up the application.
* The call to runLogged starts command processing. */
def run(configuration: xsbti.AppConfiguration): xsbti.MainResult =
MainLoop.runLogged(initialState(configuration))

/**
* Builds the docker-backed ToscaRuntimeClient. Uses the configuration already
* saved under `basedir` when present; otherwise falls back to the default
* docker daemon config (optionally persisting it when in client mode).
*/
def buildDockerClient(basedir: Path) = {
val existingConfiguration = UseCommand.getConfiguration(basedir)
if (existingConfiguration.nonEmpty) {
// NOTE(review): .get on the Option assumes a saved configuration always
// contains the docker URL key — confirm UseCommand guarantees this.
val url = existingConfiguration.get(DockerUtil.DOCKER_URL_KEY).get
val cert = existingConfiguration.getOrElse(DockerUtil.DOCKER_CERT_PATH_KEY, null)
val existingClient = new ToscaRuntimeClient(url, cert)
println(s"Begin to use docker daemon at [$url] with api version [${existingClient.dockerVersion}]")
existingClient
} else {
val defaultConfig = DockerUtil.getDefaultDockerDaemonConfig
if ("true".equals(System.getProperty("toscaruntime.clientMode"))) {
// Auto configure by copying default machine's certificates to provider default conf only when the flag toscaruntime.clientMode is set
// This will ensure that when we perform sbt build, the cert of the machine will not be copied to the build
UseCommand.switchConfiguration(defaultConfig.getUrl, defaultConfig.getCertPath, basedir)
}
new ToscaRuntimeClient(defaultConfig.getUrl, defaultConfig.getCertPath)
}
}

// Compiles and installs a single csar into the local repository; throws on
// compilation failure so startup aborts rather than continuing half-installed.
private def installCsar(path: Path, repositoryDir: Path) = {
println(s"Installing csar ${path.getFileName} to repository $repositoryDir")
val compilationResult = Compiler.install(path, repositoryDir)
CompilationUtil.showErrors(compilationResult)
if (!compilationResult.isSuccessful) {
throw new UnexpectedException(s"Csar compilation failed for $path")
} else {
println(s"Installed csar ${path.getFileName} to repository $repositoryDir")
}
}

/** Sets up the application by constructing an initial State instance with the supported commands
* and initial commands to run. See the State API documentation for details. */
def initialState(configuration: xsbti.AppConfiguration): State = {
// All commands available in the interactive shell.
val commandDefinitions = Seq(
CsarsCommand.instance,
DeploymentsCommand.instance,
UseCommand.instance,
UseCommand.useDefaultInstance,
BootStrapCommand.instance,
TeardownCommand.instance,
AgentsCommand.instance,
BasicCommands.shell,
BasicCommands.history,
BasicCommands.nop,
BasicCommands.help,
BasicCommands.exit)
val basedir = Paths.get(System.getProperty("toscaruntime.basedir", System.getProperty("user.dir") + "/..")).toAbsolutePath
val osName = System.getProperty("os.name")
println(s"Starting tosca runtime cli on [$osName] operating system from [$basedir]")
val attributes = AttributeMap(
AttributeEntry(Attributes.clientAttribute, buildDockerClient(basedir)),
AttributeEntry(Attributes.basedirAttribute, basedir)
)
// FIXME Installing normative types and bootstrap types in repository should be done in the build and not in the code
val repositoryDir = basedir.resolve("repository")
if (!"true".equals(System.getProperty("toscaruntime.clientMode"))) {
val csarsDir = basedir.resolve("csars")
// Normative types must be installed first: other csars may depend on them.
installCsar(csarsDir.resolve("tosca-normative-types-master"), repositoryDir)
Files.list(csarsDir).filter(new Predicate[Path] {
override def test(path: Path) = !path.getFileName.toString.startsWith("tosca-normative-types")
}).forEach(new Consumer[Path] {
override def accept(path: Path) = {
installCsar(path, repositoryDir)
}
})
}
val workDir = basedir.resolve("work")
if (!Files.exists(workDir)) {
Files.createDirectories(workDir)
}
// With no arguments, drop into the interactive shell.
var commands = configuration.arguments().toSeq
if(commands.isEmpty) commands = Seq("shell")
State(configuration, commandDefinitions, Set.empty, None, commands, State.newHistory, attributes, initialGlobalLogging, State.Continue)
}

/** Configures logging to log to a temporary backing file as well as to the console.
* An application would need to do more here to customize the logging level and
* provide access to the backing file (like sbt's last command and logLevel setting). */
/** The common interface to standard output, used for all built-in ConsoleLoggers. */
def initialGlobalLogging: GlobalLogging = GlobalLogging.initial(MainLogging.globalDefault(ConsoleOut.systemOut), File.createTempFile("toscaruntime", ".log"), ConsoleOut.systemOut)
}
| vuminhkh/tosca-runtime | cli/src/main/scala/com/toscaruntime/cli/ToscaRuntimeCLI.scala | Scala | mit | 5,099 |
package shield.actors
import java.util.concurrent.TimeoutException
import akka.actor.{Actor, ActorLogging, PoisonPill, Props}
import akka.pattern.{AskTimeoutException, CircuitBreaker}
import shield.actors.config.WeightWatcherMsgs.{SetTargetWeights, SetWeights}
import shield.actors.config.{ProxyState, UpstreamAggregatorMsgs, WeightWatcher}
import shield.config._
import shield.implicits.FutureUtil
import shield.metrics.Instrumented
import shield.proxying.{HttpProxyLogic, ProxiedResponse, ProxyRequest}
import shield.routing._
import shield.swagger.{SwaggerDetails, SwaggerFetcher}
import spray.client.pipelining.SendReceive
import spray.http.{HttpResponse, StatusCodes}
import spray.http.StatusCodes.ServerError
import scala.concurrent.{Future, Promise}
import scala.concurrent.duration._
import scala.util.{Failure, Success}
/** Messages handled by or sent to [[HostProxy]]. */
object HostProxyMsgs {
// Ask the proxy to drain in-flight requests and stop advertising endpoints.
case object ShutdownProxy
// Outcome of the async swagger fetch, delivered back to the actor.
case class FetchSucceeded(details: SwaggerDetails)
case class FetchFailed(error: Throwable)
// Re-attempt the swagger fetch after a failed attempt's backoff.
case object RetryFetch
// Per-endpoint circuit breaker state transitions.
case class EndpointBreakerOpen(endpoint: EndpointTemplate)
case class EndpointBreakerHalfOpen(endpoint: EndpointTemplate)
case class EndpointBreakerClosed(endpoint: EndpointTemplate)
// Whole-host circuit breaker state transitions.
case object HostBreakerOpen
case object HostBreakerHalfOpen
case object HostBreakerClosed
}
object HostProxy {
// Wraps a 5XX upstream response in a Throwable so it registers as a failure
// with the circuit breakers (see the flatMap in HostProxy.active).
case class UpstreamFailedResponse(response: HttpResponse) extends RuntimeException
// Props factory for the HostProxy actor.
def props(serviceLocation: ServiceLocation, proxyType: ServiceType, localServiceLocation: ServiceLocation, pipeline: SendReceive) : Props = Props(new HostProxy(serviceLocation, proxyType, localServiceLocation, pipeline))
}
// todo: pull out the common bits of logic that will be reused by future host proxy types
class HostProxy(val serviceLocation: ServiceLocation, proxyType: ServiceType, localServiceLocation: ServiceLocation, pipeline: SendReceive) extends Actor with ActorLogging with RestartLogging with Instrumented {
import context.dispatcher
import context.become
import HostProxy._
import HostProxyMsgs._
val settings = Settings(context.system)
// Builds the swagger fetcher for this upstream's service type.
def buildFetcher() = proxyType.fetcher(pipeline, settings)
val fetcher = buildFetcher
// Populated from swagger docs once the fetch succeeds; "(unknown)" until then.
var serviceName = "(unknown)"
var serviceVersion = "(unknown)"
// Publishes this host's current proxy state to the parent aggregator.
def broadcastState(endpoints: Map[EndpointTemplate, EndpointDetails], healthy: Boolean, status: String, unhealthyEndpoints: Set[EndpointTemplate]) =
context.parent ! UpstreamAggregatorMsgs.StateUpdated(serviceLocation, ProxyState(serviceName, serviceVersion, proxyType, self, endpoints, healthy, status, unhealthyEndpoints))
// Draining behavior: still serves known endpoints (without circuit breakers)
// for in-flight traffic until the scheduled PoisonPill arrives.
def shutdown(endpoints: Map[EndpointTemplate, EndpointDetails]) : Actor.Receive = {
{
// we should have broadcasted that this host doesn't support any endpoints, so we should only get a few of these trickling in until the router gets updated.
case ProxyRequest(template, request) =>
if (!endpoints.contains(template)) {
log.error(s"HostProxy received request for an unsupported endpoint $serviceLocation $template")
} else {
val _sender = sender()
pipeline(HttpProxyLogic.scrubRequest(request))
.map(r => _sender ! ProxiedResponse(serviceLocation, serviceName, template, endpoints(template).params, r))
.andThen(FutureUtil.logFailure("HostProxy::drainingUpstream"))
}
case e => log.warning(s"Unexpected message during HostProxy.shutdown: $e")
}
}
/**
* Normal serving behavior. Requests flow through two nested circuit breakers
* (per-endpoint outside, per-host inside); breaker transitions are messaged
* back to this actor so health can be re-broadcast to the aggregator.
*/
def active(endpoints: Map[EndpointTemplate, EndpointDetails]) : Actor.Receive = {
// todo: make the breakers adaptive. maxfailures = constrain(lowconstant, .05*throughput_rate, highconstant), callTimeout=constrain(lowconstant, latencyPercentile[95], highconstant) nb- stats are per endpoint
// todo: config drive the constants
// todo: gauges per CircuitBreaker
val hostBreaker = new CircuitBreaker(context.system.scheduler, maxFailures=50, callTimeout=10.seconds, resetTimeout=10.seconds)
.onOpen(self ! HostBreakerOpen)
.onHalfOpen(self ! HostBreakerHalfOpen)
.onClose(self ! HostBreakerClosed)
// One breaker per supported endpoint, each reporting transitions to self.
val endpointBreakers = endpoints.keys.map(endpointTemplate => endpointTemplate -> new CircuitBreaker(context.system.scheduler, maxFailures=25, callTimeout=10.seconds, resetTimeout=10.seconds)
.onOpen(self ! EndpointBreakerOpen(endpointTemplate))
.onHalfOpen(self ! EndpointBreakerHalfOpen(endpointTemplate))
.onClose(self ! EndpointBreakerClosed(endpointTemplate))
).toMap
// Local breaker state, mutated only from this actor's message handling.
var hostOpen = false
var openEndpoints = Set[EndpointTemplate]()
// Re-broadcasts proxy state derived from the current breaker states.
def broadcastActiveState() = {
val status = if (hostOpen) {
"serving - host breaker tripped open"
} else if (openEndpoints.nonEmpty) {
s"serving - ${openEndpoints.size} endpoint breakers tripped open"
} else {
"serving"
}
// we're healthy if at least one endpoint on this host is still serving
val healthy = !hostOpen && !(openEndpoints.size == endpoints.size)
broadcastState(endpoints, healthy, status, if (hostOpen) endpoints.keySet else openEndpoints)
}
{
case ProxyRequest(template, request) =>
if (!endpoints.contains(template)) {
log.error(s"HostProxy received request for an unsupported endpoint $serviceLocation $template")
} else {
val _sender = sender()
val scrubbed = HttpProxyLogic.scrubRequest(request)
// endpoint breaker on the outside, we don't want an open endpoint to open the entire host
val responseFuture = endpointBreakers(template).withCircuitBreaker {
hostBreaker.withCircuitBreaker {
pipeline(scrubbed).flatMap { res =>
// convert 5XX to failed future here so it counts as a strike against the circuit breakers
if (res.status.isInstanceOf[ServerError])
Future.failed(UpstreamFailedResponse(res))
else
Future.successful(res)
}
}
}
// Log unexpected failures; 5XX wrappers and breaker timeouts are expected.
val logged = responseFuture.andThen(FutureUtil.logFailure("HostProxy::rawUpstream", {
case _ : UpstreamFailedResponse =>
case _ : AskTimeoutException =>
}))
// Map failures back to responses: 5XX passes through as-is, breaker
// timeout becomes 504, open breaker / anything else becomes 503.
val recovered : Future[ProxiedResponse] = logged.map(r => ProxiedResponse(serviceLocation, serviceName, template, endpoints(template).params, r))
.recover {
case UpstreamFailedResponse(resp) => ProxiedResponse(serviceLocation, serviceName, template, endpoints(template).params, resp)
case _ : AskTimeoutException => ProxiedResponse(localServiceLocation, settings.LocalServiceName, template, endpoints(template).params, HttpResponse(StatusCodes.GatewayTimeout))
case _ => ProxiedResponse(localServiceLocation, settings.LocalServiceName, template, endpoints(template).params, HttpResponse(StatusCodes.ServiceUnavailable))
}
recovered
.map(pr => _sender ! pr)
.andThen(FutureUtil.logFailure("HostProxy::finalUpstream"))
}
case EndpointBreakerClosed(endpoint) =>
log.warning(s"Breaker closed for endpoint $endpoint on host $serviceLocation")
openEndpoints -= endpoint
broadcastActiveState()
case EndpointBreakerHalfOpen(endpoint) =>
log.warning(s"Breaker half opened for endpoint $endpoint on host $serviceLocation")
openEndpoints -= endpoint
broadcastActiveState()
case EndpointBreakerOpen(endpoint) =>
log.warning(s"Breaker opened for endpoint $endpoint on host $serviceLocation")
openEndpoints += endpoint
broadcastActiveState()
case HostBreakerClosed =>
log.warning(s"Breaker closed for host $serviceLocation")
hostOpen = false
broadcastActiveState()
case HostBreakerHalfOpen =>
log.warning(s"Breaker half opened for host $serviceLocation")
hostOpen = false
broadcastActiveState()
case HostBreakerOpen =>
log.warning(s"Breaker opened for host $serviceLocation")
hostOpen = true
broadcastActiveState()
case ShutdownProxy =>
// stop advertising support for all endpoints so they re-route or shed
broadcastState(Map.empty, healthy=true, "draining", Set.empty)
// todo: use max request config time
context.system.scheduler.scheduleOnce(60.seconds, self, PoisonPill)
// keep internal support for the endpoints for any in-flight messages to this actor
become(shutdown(endpoints))
case e => log.warning(s"Unexpected message during HostProxy.active: $e")
}
}
// Kicks off an async swagger fetch with a 30s deadline; the outcome is
// delivered to self as FetchSucceeded/FetchFailed.
def fetchDocs() = {
val fetchPromise = Promise[SwaggerDetails]()
fetchPromise.completeWith(fetcher.fetch(serviceLocation))
// Race the fetch against a timeout; tryFailure is a no-op if already completed.
context.system.scheduler.scheduleOnce(30.seconds) {
fetchPromise.tryFailure(new TimeoutException("Failed to retrieve docs in time"))
}
fetchPromise.future.onComplete {
case Success(details) => self ! FetchSucceeded(details)
case Failure(e) => self ! FetchFailed(e)
}
broadcastState(Map.empty, healthy=false, "fetching swagger docs", Set.empty)
}
// Start the first fetch as part of actor construction.
fetchDocs()
// Initial behavior: wait for swagger docs, retrying every 30s on failure,
// then switch to active() with the discovered endpoints.
def initializing : Actor.Receive = {
case FetchSucceeded(details) =>
serviceName = details.service
serviceVersion = details.version
become(active(details.endpoints))
broadcastState(details.endpoints, healthy=true, "serving", Set.empty)
case FetchFailed(err) =>
log.error(err, s"Failed to retrieve the swagger documentation from $serviceLocation")
broadcastState(Map.empty, healthy=false, "failed to fetch swagger docs, retrying in 30 seconds", Set.empty)
context.system.scheduler.scheduleOnce(30.seconds, self, RetryFetch)
case RetryFetch =>
fetchDocs()
case e => log.warning(s"Unexpected message during HostProxy.initializing: $e")
}
def receive = initializing
} | RetailMeNot/shield | src/main/scala/shield/actors/HostProxy.scala | Scala | mit | 9,867 |
package filodb.core.downsample
import java.util
import java.util.concurrent.atomic.{AtomicInteger, AtomicLong}
import scala.collection.mutable
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration._
import com.typesafe.config.Config
import com.typesafe.scalalogging.StrictLogging
import kamon.Kamon
import kamon.metric.MeasurementUnit
import kamon.tag.TagSet
import monix.eval.Task
import monix.execution.{CancelableFuture, Scheduler, UncaughtExceptionReporter}
import monix.reactive.Observable
import filodb.core.{DatasetRef, Types}
import filodb.core.binaryrecord2.RecordSchema
import filodb.core.memstore._
import filodb.core.metadata.Schemas
import filodb.core.query.{ColumnFilter, Filter, QuerySession}
import filodb.core.store._
import filodb.memory.format.{UnsafeUtils, ZeroCopyUTF8String}
import filodb.memory.format.ZeroCopyUTF8String._
/** Kamon metrics for one downsample shard; every metric is tagged with the shard number and dataset. */
class DownsampledTimeSeriesShardStats(dataset: DatasetRef, shardNum: Int) {
val tags = Map("shard" -> shardNum.toString, "dataset" -> dataset.toString)
// Time taken to bootstrap this shard's index from the store, in milliseconds.
val shardTotalRecoveryTime = Kamon.gauge("downsample-total-shard-recovery-time",
MeasurementUnit.time.milliseconds).withTags(TagSet.from(tags))
// Query-path counters/histograms.
val partitionsQueried = Kamon.counter("downsample-partitions-queried").withTags(TagSet.from(tags))
val queryTimeRangeMins = Kamon.histogram("query-time-range-minutes").withTags(TagSet.from(tags))
// Index maintenance counters: periodic refresh and TTL purge, with failure counts.
val indexEntriesRefreshed = Kamon.counter("index-entries-refreshed").withTags(TagSet.from(tags))
val indexEntriesPurged = Kamon.counter("index-entries-purged").withTags(TagSet.from(tags))
val indexRefreshFailed = Kamon.counter("index-refresh-failed").withTags(TagSet.from(tags))
val indexPurgeFailed = Kamon.counter("index-purge-failed").withTags(TagSet.from(tags))
// Index size gauges, updated periodically by the stats task.
val indexEntries = Kamon.gauge("downsample-store-index-entries").withTags(TagSet.from(tags))
val indexRamBytes = Kamon.gauge("downsample-store-index-ram-bytes").withTags(TagSet.from(tags))
val singlePartCassFetchLatency = Kamon.histogram("single-partition-cassandra-latency",
MeasurementUnit.time.milliseconds).withTags(TagSet.from(tags))
val purgeIndexEntriesLatency = Kamon.histogram("downsample-store-purge-index-entries-latency",
MeasurementUnit.time.milliseconds).withTags(TagSet.from(tags))
}
class DownsampledTimeSeriesShard(rawDatasetRef: DatasetRef,
val rawStoreConfig: StoreConfig,
val schemas: Schemas,
store: ColumnStore, // downsample colStore
rawColStore: ColumnStore,
shardNum: Int,
filodbConfig: Config,
downsampleConfig: DownsampleConfig)
(implicit val ioPool: ExecutionContext) extends StrictLogging {
val creationTime = System.currentTimeMillis()
// Flipped to true once index recovery completes (see recoverIndex).
@volatile var isReadyForQuery = false
private val downsampleTtls = downsampleConfig.ttls
private val downsampledDatasetRefs = downsampleConfig.downsampleDatasetRefs(rawDatasetRef.dataset)
// The coarsest downsample dataset/TTL backs the part key index.
private val indexDataset = downsampledDatasetRefs.last
private val indexTtlMs = downsampleTtls.last.toMillis
private val clusterType = filodbConfig.getString("cluster-type")
private val deploymentPartitionName = filodbConfig.getString("deployment-partition-name")
private val downsampleStoreConfig = StoreConfig(filodbConfig.getConfig("downsampler.downsample-store-config"))
// Monotonic source of partition IDs handed out by createPartitionID().
private val nextPartitionID = new AtomicInteger(0)
private val stats = new DownsampledTimeSeriesShardStats(rawDatasetRef, shardNum)
private val partKeyIndex = new PartKeyLuceneIndex(indexDataset, schemas.part, false,
false, shardNum, indexTtlMs)
// Highest hour (epoch hours) for which the index has been refreshed.
private val indexUpdatedHour = new AtomicLong(0)
private val indexBootstrapper = new IndexBootstrapper(store) // used for initial index loading
private val housekeepingSched = Scheduler.computation(
name = "housekeeping",
reporter = UncaughtExceptionReporter(logger.error("Uncaught Exception in Housekeeping Scheduler", _)))
// used for periodic refresh of index, happens from raw tables
private val indexRefresher = new IndexBootstrapper(rawColStore)
// Background task handles; null until recoverIndex() starts them.
private var houseKeepingFuture: CancelableFuture[Unit] = _
private var gaugeUpdateFuture: CancelableFuture[Unit] = _
// Index-name listing is not supported on downsample shards.
def indexNames(limit: Int): Seq[String] = Seq.empty
// Top-K values for one label, straight from the Lucene index.
def labelValues(labelName: String, topK: Int): Seq[TermInfo] = partKeyIndex.indexValues(labelName, topK)
// Values for several labels across partitions matching the filters/time range.
def labelValuesWithFilters(filters: Seq[ColumnFilter],
labelNames: Seq[String],
endTime: Long,
startTime: Long,
querySession: QuerySession,
limit: Int): Iterator[Map[ZeroCopyUTF8String, ZeroCopyUTF8String]] = {
val metricShardKeys = schemas.part.options.shardKeyColumns
// Group-by key used for per-tenant query stats attribution.
val metricGroupBy = deploymentPartitionName +: clusterType +: shardKeyValuesFromFilter(metricShardKeys, filters)
LabelValueResultIterator(partKeyIndex.partIdsFromFilters(filters, startTime, endTime), labelNames,
querySession, metricGroupBy, limit)
}
// Like labelValuesWithFilters but optimized for a single label.
def singleLabelValuesWithFilters(filters: Seq[ColumnFilter],
label: String,
endTime: Long,
startTime: Long,
querySession: QuerySession,
limit: Int): Iterator[ZeroCopyUTF8String] = {
val metricShardKeys = schemas.part.options.shardKeyColumns
val metricGroupBy = deploymentPartitionName +: clusterType +: shardKeyValuesFromFilter(metricShardKeys, filters)
SingleLabelValuesResultIterator(partKeyIndex.partIdsFromFilters(filters, startTime, endTime),
label, querySession, metricGroupBy, limit)
}
// Distinct label names present on partitions matching the filters/time range.
def labelNames(filter: Seq[ColumnFilter],
endTime: Long,
startTime: Long): Seq[String] =
labelNamesFromPartKeys(partKeyIndex.labelNamesFromFilters(filter, startTime, endTime))
// Materializes matching part keys as label->value maps, including the synthetic
// _type_ label derived from the record schema.
def partKeysWithFilters(filter: Seq[ColumnFilter],
fetchFirstLastSampleTimes: Boolean,
endTime: Long,
startTime: Long,
limit: Int): Iterator[Map[ZeroCopyUTF8String, ZeroCopyUTF8String]] = {
partKeyIndex.partKeyRecordsFromFilters(filter, startTime, endTime).iterator.take(limit).map { pk =>
val partKey = PartKeyWithTimes(pk.partKey, UnsafeUtils.arayOffset, pk.startTime, pk.endTime)
schemas.part.binSchema.toStringPairs(partKey.base, partKey.offset).map(pair => {
pair._1.utf8 -> pair._2.utf8
}).toMap ++
Map("_type_".utf8 -> Schemas.global.schemaName(RecordSchema.schemaID(partKey.base, partKey.offset)).utf8)
}
}
private def hour(millis: Long = System.currentTimeMillis()) = millis / 1000 / 60 / 60
// Bootstraps the part key index from the downsample store, then starts the
// periodic housekeeping and stats tasks and marks the shard query-ready.
def recoverIndex(): Future[Unit] = {
indexBootstrapper
.bootstrapIndexDownsample(partKeyIndex, shardNum, indexDataset, indexTtlMs){ _ => createPartitionID() }
.map { count =>
logger.info(s"Bootstrapped index for dataset=$indexDataset shard=$shardNum with $count records")
}.map { _ =>
// need to start recovering 6 hours prior to now since last index migration could have run 6 hours ago
// and we'd be missing entries that would be migrated in the last 6 hours.
// Hence indexUpdatedHour should be: currentHour - 6
val indexJobIntervalInHours = (downsampleStoreConfig.maxChunkTime.toMinutes + 59) / 60 // for ceil division
indexUpdatedHour.set(hour() - indexJobIntervalInHours - 1)
stats.shardTotalRecoveryTime.update(System.currentTimeMillis() - creationTime)
startHousekeepingTask()
startStatsUpdateTask()
logger.info(s"Shard now ready for query dataset=$indexDataset shard=$shardNum")
isReadyForQuery = true
}.runToFuture(housekeepingSched)
}
private def startHousekeepingTask(): Unit = {
// Run index refresh at same frequency of raw dataset's flush interval.
// This is important because each partition's start/end time can be updated only once
// in cassandra per flush interval. Less frequent update can result in multiple events
// per partKey, and order (which we have not persisted) would become important.
// Also, addition of keys to index can be parallelized using mapAsync below only if
// we are sure that in one raw dataset flush period, we wont get two updated part key
// records with same part key. This is true since we update part keys only once per flush interval in raw dataset.
logger.info(s"Starting housekeeping for downsample cluster of dataset=$rawDatasetRef shard=$shardNum " +
s"every ${rawStoreConfig.flushInterval}")
houseKeepingFuture = Observable.intervalWithFixedDelay(rawStoreConfig.flushInterval,
rawStoreConfig.flushInterval).mapEval { _ =>
purgeExpiredIndexEntries()
indexRefresh()
}.map { _ =>
// Make new/removed entries visible to searchers after each cycle.
partKeyIndex.refreshReadersBlocking()
}.onErrorRestartUnlimited.completedL.runToFuture(housekeepingSched)
}
// Drops index entries whose series ended before the retention (TTL) horizon.
// Failures are logged and counted but never abort the housekeeping loop.
private def purgeExpiredIndexEntries(): Unit = {
val start = System.currentTimeMillis()
try {
val partsToPurge = partKeyIndex.partIdsEndedBefore(System.currentTimeMillis() - downsampleTtls.last.toMillis)
partKeyIndex.removePartKeys(partsToPurge)
logger.info(s"Purged ${partsToPurge.length} entries from downsample " +
s"index of dataset=$rawDatasetRef shard=$shardNum")
stats.indexEntriesPurged.increment(partsToPurge.length)
} catch { case e: Exception =>
logger.error(s"Error occurred when purging index entries dataset=$rawDatasetRef shard=$shardNum", e)
stats.indexPurgeFailed.increment()
} finally {
stats.purgeIndexEntriesLatency.record(System.currentTimeMillis() - start)
}
}
// Pulls part key updates from the raw tables for the hour window
// (indexUpdatedHour+1 .. now-2h) and folds them into the local index.
private def indexRefresh(): Task[Unit] = {
// Update keys until hour()-2 hours ago. hour()-1 hours ago can cause missed records if
// refresh was triggered exactly at end of the hour. All partKeys for the hour would need to be flushed
// before refresh happens because we will not revist the hour again.
val toHour = hour() - 2
val fromHour = indexUpdatedHour.get() + 1
indexRefresher.refreshWithDownsamplePartKeys(partKeyIndex, shardNum, rawDatasetRef,
fromHour, toHour, schemas)(lookupOrCreatePartId)
.map { count =>
indexUpdatedHour.set(toHour)
stats.indexEntriesRefreshed.increment(count)
logger.info(s"Refreshed downsample index with new records numRecords=$count " +
s"dataset=$rawDatasetRef shard=$shardNum fromHour=$fromHour toHour=$toHour")
}
.onErrorHandle { e =>
// Swallow errors so the housekeeping loop keeps running; the window will
// be retried next cycle since indexUpdatedHour was not advanced.
stats.indexRefreshFailed.increment()
logger.error(s"Error occurred when refreshing downsample index " +
s"dataset=$rawDatasetRef shard=$shardNum fromHour=$fromHour toHour=$toHour", e)
}
}
// Periodically publishes index size gauges.
private def startStatsUpdateTask(): Unit = {
logger.info(s"Starting Stats Update task from raw dataset=$rawDatasetRef shard=$shardNum every 1 minute")
gaugeUpdateFuture = Observable.intervalWithFixedDelay(1.minute).map { _ =>
updateGauges()
}.onErrorRestartUnlimited.completedL.runToFuture(housekeepingSched)
}
private def updateGauges(): Unit = {
stats.indexEntries.update(partKeyIndex.indexNumEntries)
stats.indexRamBytes.update(partKeyIndex.indexRamBytes)
}
// Resolves an existing partId for the part key, or allocates a fresh one.
private def lookupOrCreatePartId(pk: Array[Byte]): Int = {
partKeyIndex.partIdFromPartKeySlow(pk, UnsafeUtils.arayOffset).getOrElse(createPartitionID())
}
/**
 * Returns a new positive partition ID which isn't used by any existing partition. A negative
 * partition ID wouldn't work with bitmaps.
 *
 * @throws IllegalStateException once the counter exhausts the positive Int range
 */
private def createPartitionID(): Int = {
  val next = nextPartitionID.incrementAndGet()
  // incrementAndGet wraps to Int.MinValue after Int.MaxValue. The previous guard
  // (next == 0) would have handed out ~2^31 negative IDs before tripping, and
  // negative IDs break the bitmap usage documented above. Reject anything <= 0.
  if (next <= 0) {
    throw new IllegalStateException("Too many partitions. Reached int capacity")
  }
  next
}
def refreshPartKeyIndexBlocking(): Unit = {}
// Resolves which partitions a scan touches. Only filtered scans are supported
// on downsample shards; single/multi part key scans are rejected.
def lookupPartitions(partMethod: PartitionScanMethod,
chunkMethod: ChunkScanMethod,
querySession: QuerySession): PartLookupResult = {
partMethod match {
case SinglePartitionScan(partition, _) => throw new UnsupportedOperationException
case MultiPartitionScan(partKeys, _) => throw new UnsupportedOperationException
case FilteredPartitionScan(split, filters) =>
if (filters.nonEmpty) {
// This API loads all part keys into heap and can potentially be large size for
// high cardinality queries, but it is needed to do multiple
// iterations over the part keys. First iteration is for data size estimation.
// Second iteration is for query result evaluation. Loading everything to heap
// is expensive, but we do it to handle data sizing for metrics that have
// continuous churn. See capDataScannedPerShardCheck method.
val recs = partKeyIndex.partKeyRecordsFromFilters(filters, chunkMethod.startTime, chunkMethod.endTime)
// Schema is taken from the first matching record; None when nothing matched.
val _schema = recs.headOption.map { pkRec =>
RecordSchema.schemaID(pkRec.partKey, UnsafeUtils.arayOffset)
}
stats.queryTimeRangeMins.record((chunkMethod.endTime - chunkMethod.startTime) / 60000 )
// Attribute query stats to the (partition, cluster, shard-key) group.
val metricShardKeys = schemas.part.options.shardKeyColumns
val metricGroupBy = deploymentPartitionName +: clusterType +: metricShardKeys.map { col =>
filters.collectFirst {
case ColumnFilter(c, Filter.Equals(filtVal: String)) if c == col => filtVal
}.getOrElse("unknown")
}.toList
querySession.queryStats.getTimeSeriesScannedCounter(metricGroupBy).addAndGet(recs.length)
val chunksReadCounter = querySession.queryStats.getDataBytesScannedCounter(metricGroupBy)
PartLookupResult(shardNum, chunkMethod, debox.Buffer.empty,
_schema, debox.Map.empty, debox.Buffer.empty, recs, chunksReadCounter)
} else {
throw new UnsupportedOperationException("Cannot have empty filters")
}
}
}
def shutdown(): Unit = {
try {
partKeyIndex.closeIndex();
houseKeepingFuture.cancel();
gaugeUpdateFuture.cancel();
} catch { case e: Exception =>
logger.error("Exception when shutting down downsample shard", e)
}
}
  /**
   * Streams ReadablePartition objects for the looked-up partitions by paging
   * downsampled chunks in from the column store at the resolution chosen for
   * the query's time range. Runs reads in parallel up to demandPagingParallelism.
   */
  def scanPartitions(lookup: PartLookupResult,
                     colIds: Seq[Types.ColumnId],
                     querySession: QuerySession): Observable[ReadablePartition] = {
    // Step 1: Choose the downsample level depending on the range requested
    val (resolutionMs, downsampledDataset) = chooseDownsampleResolution(lookup.chunkMethod)
    logger.debug(s"Chose resolution $downsampledDataset for chunk method ${lookup.chunkMethod}")
    capDataScannedPerShardCheck(lookup, resolutionMs)
    // Step 2: Query Cassandra table for that downsample level using downsampleColStore
    // Create a ReadablePartition objects that contain the time series data. This can be either a
    // PagedReadablePartitionOnHeap or PagedReadablePartitionOffHeap. This will be garbage collected/freed
    // when query is complete.
    Observable.fromIterable(lookup.pkRecords)
      .mapParallelUnordered(downsampleStoreConfig.demandPagingParallelism) { partRec =>
        val startExecute = System.currentTimeMillis()
        // TODO test multi-partition scan if latencies are high
        // IMPORTANT: The Raw partition reads need to honor the start time in the index. Suppose, the shards for the
        // time series is migrated, the time series will show up in two shards but not in both at any given point in
        // time. However if the start and end date range cover the point in time when the shard migration occurred, and
        // if both shards query for the same user provided time range, we will have the same data returned twice,
        // instead if the shards return the data for time duration they owned the data, we will not have duplicates
        // Read raw partition adjusts the start time and takes it back by downsampleStoreConfig.maxChunkTime.toMillis.
        // Consider the following scenario for downsample chunks
        // T..........T + 6..........T + 12.........T + 18.........T + 24
        //      ^-------------------^
        //    start                end
        // We notice (ds freq is 6 hrs), the start time is taken back by 6 hrs to ensure that the chunk at T + 6
        // is included in the result as the CQL in for chunk filter in cassandra will be chunkId => ? and chunkId <= ?
        // This query will at the maximum get 12 hrs of additional data and thus the duplicate results will still occur
        // but the impact is now reduced to a maximum of 12 (2*6) hours (whatever the downsampling frequency is) of data
        // We believe this is a good enough fix and aiming for 0 duplicate results will require more changes possibly
        // introducing regression to the stable codebase. However, if at later point of time no duplicates are tolerated
        // we will have to revisit the logic and fix accordingly
        store.readRawPartitions(downsampledDataset,
          downsampleStoreConfig.maxChunkTime.toMillis,
          SinglePartitionScan(partRec.partKey, shardNum),
          TimeRangeChunkScan(
            partRec.startTime.max(lookup.chunkMethod.startTime),
            partRec.endTime.min(lookup.chunkMethod.endTime)))
          .map { pd =>
            val part = makePagedPartition(pd, lookup.firstSchemaId.get, resolutionMs, colIds)
            stats.partitionsQueried.increment()
            stats.singlePartCassFetchLatency.record(Math.max(0, System.currentTimeMillis - startExecute))
            part
          }
          // Emit an empty partition when the store returns no chunks, so downstream still sees the series.
          .defaultIfEmpty(makePagedPartition(RawPartData(partRec.partKey, Seq.empty),
            lookup.firstSchemaId.get, resolutionMs, colIds))
          .headL
      }
  }
  // Enforces the per-shard data-scan cap: delegates to the schema to estimate how much
  // data the matched part keys would touch (given flush interval, resolution and scan
  // range) and fail the query if it exceeds maxDataPerShardQuery.
  private def capDataScannedPerShardCheck(lookup: PartLookupResult, resolution: Long) = {
    lookup.firstSchemaId.foreach { schId =>
      schemas.ensureQueriedDataSizeWithinLimit(schId, lookup.pkRecords,
        downsampleStoreConfig.flushInterval.toMillis,
        resolution, lookup.chunkMethod, downsampleStoreConfig.maxDataPerShardQuery)
    }
  }
  // Picks the (resolution millis, dataset ref) pair used to serve a query: the coarsest
  // (last) resolution for full scans, otherwise the finest resolution whose retention
  // TTL still covers the query's start time. Other scan methods are unimplemented (???).
  private def chooseDownsampleResolution(chunkScanMethod: ChunkScanMethod): (Int, DatasetRef) = {
    chunkScanMethod match {
      case AllChunkScan =>
        // pick last since it is the highest resolution
        downsampleConfig.resolutions.last.toMillis.toInt -> downsampledDatasetRefs.last
      case TimeRangeChunkScan(startTime, _) =>
        var ttlIndex = downsampleTtls.indexWhere(t => startTime > System.currentTimeMillis() - t.toMillis)
        // -1 return value means query startTime is before the earliest retention. Just pick the highest resolution
        if (ttlIndex == -1) ttlIndex = downsampleTtls.size - 1
        downsampleConfig.resolutions(ttlIndex).toMillis.toInt -> downsampledDatasetRefs(ttlIndex)
      case _ => ???
    }
  }
  // Wraps raw chunk data into a PagedReadablePartition, first validating that the
  // partition's schema id matches the first schema seen for this query.
  private def makePagedPartition(part: RawPartData, firstSchemaId: Int,
                                 minResolutionMs: Int,
                                 colIds: Seq[Types.ColumnId]): ReadablePartition = {
    val schemaId = RecordSchema.schemaID(part.partitionKey, UnsafeUtils.arayOffset)
    if (schemaId != firstSchemaId) {
      throw SchemaMismatch(schemas.schemaName(firstSchemaId), schemas.schemaName(schemaId))
    }
    // FIXME It'd be nice to pass in the correct partId here instead of -1
    new PagedReadablePartition(schemas(schemaId), shardNum, -1, part, minResolutionMs, colIds)
  }
private def labelNamesFromPartKeys(partId: Int): Seq[String] = {
val results = new mutable.HashSet[String]
if (PartKeyLuceneIndex.NOT_FOUND == partId) Seq.empty
else {
val partKey = partKeyFromPartId(partId)
results ++ schemas.part.binSchema.colNames(partKey, UnsafeUtils.arayOffset)
results.toSeq
}
}
private def shardKeyValuesFromFilter(shardKeyColumns: Seq[String], filters: Seq[ColumnFilter]): Seq[String] = {
shardKeyColumns.map { col =>
filters.collectFirst {
case ColumnFilter(c, Filter.Equals(filtVal: String)) if c == col => filtVal
}.getOrElse("unknown")
}.toList
}
/**
* Iterator for traversal of partIds, value for the given label will be extracted from the ParitionKey.
* this implementation maps partIds to label/values eagerly, this is done inorder to dedup the results.
*/
case class LabelValueResultIterator(partIds: debox.Buffer[Int], labelNames: Seq[String],
querySession: QuerySession, statsGroup: Seq[String], limit: Int)
extends Iterator[Map[ZeroCopyUTF8String, ZeroCopyUTF8String]] {
private lazy val rows = labelValues
override def size: Int = rows.size
def labelValues: Iterator[Map[ZeroCopyUTF8String, ZeroCopyUTF8String]] = {
var partLoopIndx = 0
val rows = new mutable.HashSet[Map[ZeroCopyUTF8String, ZeroCopyUTF8String]]()
while(partLoopIndx < partIds.length && rows.size < limit) {
val partId = partIds(partLoopIndx)
//retrieve PartKey either from In-memory map or from PartKeyIndex
val nextPart = partKeyFromPartId(partId)
// FIXME This is non-performant and temporary fix for fetching label values based on filter criteria.
// Other strategies needs to be evaluated for making this performant - create facets for predefined fields or
// have a centralized service/store for serving metadata
val currVal = schemas.part.binSchema.colValues(nextPart, UnsafeUtils.arayOffset, labelNames).
zipWithIndex.filter(_._1 != null).map{case(value, ind) => labelNames(ind).utf8 -> value.utf8}.toMap
if (currVal.nonEmpty) rows.add(currVal)
partLoopIndx += 1
}
querySession.queryStats.getTimeSeriesScannedCounter(statsGroup).addAndGet(partLoopIndx)
rows.toIterator
}
override def hasNext: Boolean = rows.hasNext
override def next(): Map[ZeroCopyUTF8String, ZeroCopyUTF8String] = rows.next
}
  // Iterator over the distinct values of a single label across the given partIds.
  // Values are collected eagerly (bounded by limit) into a HashSet to dedup them.
  case class SingleLabelValuesResultIterator(partIds: debox.Buffer[Int], label: String,
                                             querySession: QuerySession, statsGroup: Seq[String], limit: Int)
    extends Iterator[ZeroCopyUTF8String] {
    private val rows = labels
    def labels: Iterator[ZeroCopyUTF8String] = {
      var partLoopIndx = 0
      val rows = new mutable.HashSet[ZeroCopyUTF8String]()
      val colIndex = schemas.part.binSchema.colNames.indexOf(label)
      while(partLoopIndx < partIds.length && rows.size < limit) {
        val partId = partIds(partLoopIndx)
        //retrieve PartKey either from In-memory map or from PartKeyIndex
        val nextPart = partKeyFromPartId(partId)
        // Fast path when the label is a fixed schema column; otherwise scan the
        // flexible key/value pairs of the part key for matching values.
        if (colIndex > -1)
          rows.add(schemas.part.binSchema.asZCUTF8Str(nextPart, UnsafeUtils.arayOffset, colIndex))
        else
          schemas.part.binSchema.singleColValues(nextPart, UnsafeUtils.arayOffset, label, rows)
        partLoopIndx += 1
      }
      querySession.queryStats.getTimeSeriesScannedCounter(statsGroup).addAndGet(partLoopIndx)
      rows.toIterator
    }
    def hasNext: Boolean = rows.hasNext
    def next(): ZeroCopyUTF8String = rows.next
  }
/**
* Retrieve partKey for a given PartId by looking up index
*/
private def partKeyFromPartId(partId: Int): Array[Byte] = {
val partKeyBytes = partKeyIndex.partKeyFromPartId(partId)
if (partKeyBytes.isDefined)
// make a copy because BytesRef from lucene can have additional length bytes in its array
// TODO small optimization for some other day
util.Arrays.copyOfRange(partKeyBytes.get.bytes, partKeyBytes.get.offset,
partKeyBytes.get.offset + partKeyBytes.get.length)
else throw new IllegalStateException(s"Could not find partKey or partId $partId. This is not a expected behavior.")
}
def cleanup(): Unit = {
Option(houseKeepingFuture).foreach(_.cancel())
Option(gaugeUpdateFuture).foreach(_.cancel())
}
  // Safety net: cancel background tasks if the shard is garbage-collected without shutdown().
  // NOTE(review): finalize() is deprecated since JDK 9 and not guaranteed to run —
  // presumably shutdown()/cleanup() is the intended lifecycle path; confirm before relying on this.
  override protected def finalize(): Unit = {
    cleanup()
  }
}
| filodb/FiloDB | core/src/main/scala/filodb.core/downsample/DownsampledTimeSeriesShard.scala | Scala | apache-2.0 | 24,704 |
/*
* Licensed to Intel Corporation under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* Intel Corporation licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn
import org.scalatest.{FlatSpec, Matchers}
import com.intel.analytics.bigdl.tensor.Tensor
import scala.math._
import com.intel.analytics.bigdl._
@com.intel.analytics.bigdl.tags.Parallel
/**
 * Tests for the Linear (fully-connected) layer: trains against a known affine
 * target function with SGD + MSE and checks that the learned weight and bias
 * converge to the generating parameters; also gradient-checks the layer.
 */
class LinearSpec extends FlatSpec with Matchers {
  "Linear module" should "converge to correct weight and bias" in {
    val inputN = 5
    val outputN = 2
    val linear = new Linear[Double](inputN, outputN)
    val mse = new MSECriterion[Double]
    val input = Tensor[Double](inputN)
    val res = Tensor[Double](outputN)
    var err = 0.0
    for (i <- 1 to 10000) {
      input.rand()
      // Target: res(y) = y + sum_x 0.1*y*x*input(x), i.e. bias = y, weight(y,x) = 0.1*y*x
      for (y <- 1 to outputN) {
        res(Array(y)) = 1.0 * y
        for (x <- 1 to inputN) {
          res(Array(y)) += 0.1 * y * x * input(Array(x))
        }
      }
      val output = linear.forward(input)
      err = mse.forward(output, res)
      val grad = mse.backward(output, res)
      linear.zeroGradParameters()
      linear.backward(input, grad)
      // Decaying learning rate for stable convergence
      linear.updateParameters(0.5 / log(i + 3))
    }
    val params = linear.parameters()
    val weight = params._1(0)
    val bias = params._1(1)
    val expectedWeight = Tensor[Double](outputN, inputN)
    val expectedBias = Tensor[Double](outputN)
    for (y <- 1 to outputN) {
      expectedBias(Array(y)) = 1.0 * y
      for (x <- 1 to inputN) {
        expectedWeight(Array(y, x)) = 0.1 * y * x
      }
    }
    // Elementwise comparison: map visits expected/actual pairs and asserts closeness
    expectedBias.map(bias, (v1, v2) => {
      assert(abs(v1 - v2) < 1e-6);
      v1
    })
    expectedWeight.map(weight, (v1, v2) => {
      assert(abs(v1 - v2) < 1e-6);
      v1
    })
    assert(err < 1e-6)
  }
  "Linear module in batch mode" should "converge to correct weight and bias" in {
    val inputN = 5
    val outputN = 2
    val batchN = 3
    val linear = new Linear[Double](inputN, outputN)
    val mse = new MSECriterion[Double]
    val input = Tensor[Double](batchN, inputN)
    val res = Tensor[Double](batchN, outputN)
    var err = 0.0
    for (i <- 1 to 10000) {
      input.rand()
      // Same affine target as above, replicated for each sample in the batch
      for (k <- 1 to batchN) {
        for (y <- 1 to outputN) {
          res(Array(k, y)) = 1.0 * y
          for (x <- 1 to inputN) {
            res(Array(k, y)) += 0.1 * y * x * input(Array(k, x))
          }
        }
      }
      val output = linear.forward(input)
      err = mse.forward(output, res)
      val grad = mse.backward(output, res)
      linear.zeroGradParameters()
      linear.backward(input, grad)
      linear.updateParameters(0.5 / log(i + 3))
    }
    val params = linear.parameters()
    val weight = params._1(0)
    val bias = params._1(1)
    val expectedWeight = Tensor[Double](outputN, inputN)
    val expectedBias = Tensor[Double](outputN)
    for (y <- 1 to outputN) {
      expectedBias(Array(y)) = 1.0 * y
      for (x <- 1 to inputN) {
        expectedWeight(Array(y, x)) = 0.1 * y * x
      }
    }
    expectedBias.map(bias, (v1, v2) => {
      assert(abs(v1 - v2) < 1e-6);
      v1
    })
    expectedWeight.map(weight, (v1, v2) => {
      assert(abs(v1 - v2) < 1e-6);
      v1
    })
    assert(err < 1e-6)
  }
  "Linear module in batch mode" should "be good in gradient check" in {
    val linear = new Linear[Double](5, 2)
    linear.reset()
    val input = Tensor[Double](3, 5).rand()
    // Numeric gradient check with perturbation 1e-4 and tolerance 1e-2
    val checker = new GradientChecker(1e-4, 1e-2)
    checker.checkLayer[Double](linear, input) should be(true)
  }
}
| SeaOfOcean/BigDL | dl/src/test/scala/com/intel/analytics/bigdl/nn/LinearSpec.scala | Scala | apache-2.0 | 4,153 |
/*
* Copyright 2011 TomTom International BV
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package tomtom.splitter.layer7
import org.jboss.netty.handler.codec.http.{DefaultHttpRequest, HttpRequest}
import util.matching.Regex
import util.matching.Regex.Match
import util.parsing.combinator.RegexParsers
import io.Source
import java.io.File
/**
 * Rewrites URIs matching `matcher` into `target`, substituting $1..$n with the
 * corresponding capture groups of the first match.
 *
 * @param matcher regex applied to the incoming URI (first match wins)
 * @param target  replacement template; may reference capture groups as $1, $2, ...
 */
case class Rewriter(matcher: Regex, target: String) {

  /** Returns the rewritten URI, or None when `matcher` does not match `uri`. */
  def rewrite(uri: String): Option[String] = {
    matcher.findFirstMatchIn(uri).map {
      case m: Match =>
        var tmp = target
        for (group <- 1 to m.groupCount) {
          tmp = tmp.replace("$" + group, m.group(group))
        }
        tmp
    }
  }

  // Equality is by pattern text and target, since Regex itself only has reference equality.
  override def equals(rewriter: Any): Boolean = {
    rewriter match {
      case r: Rewriter =>
        target == r.target && matcher.toString == r.matcher.toString
      case _ => false
    }
  }

  // Bug fix: equals was overridden without hashCode, so Rewriters that compared equal
  // could produce different hash codes (the case-class hashCode mixes the Regex's
  // identity hash), breaking use in hash-based collections. Keep hashCode consistent
  // with equals by deriving it from the same fields.
  override def hashCode: Int = (matcher.toString, target).##
}
/**
 * Combinator parser for the rewrite-rules config file. Each non-blank,
 * non-'#' line has the form: METHODS <tab> PATTERN <tab> REWRITE, where
 * METHODS is a comma-separated list of HTTP methods or "*" for all methods.
 */
trait RewriteParser extends RegexParsers {
  // Whitespace is significant: fields are tab-separated and rules are newline-separated.
  override def skipWhitespace = false
  protected val allMethods = Set("GET", "POST", "PUT", "DELETE", "HEAD")
  protected val method = "GET" | "POST" | "PUT" | "DELETE" | "HEAD"
  protected val wildcard = "*"
  protected val methods = rep1sep(method, ",") | wildcard
  protected val token = """[^\\t\\n]+""".r
  protected val pattern = token ^^ {
    new Regex(_)
  }
  protected val rewrite = token
  // PATTERN <tab> REWRITE -> Rewriter
  protected val rewriter: Parser[Rewriter] = pattern ~ "\\t" ~ rewrite ^^ {
    case ptrn ~ _ ~ rewrte => Rewriter(ptrn, rewrte)
  }
  // METHODS <tab> rewriter; "*" expands to the full method set
  protected val rule: Parser[(Set[String], Rewriter)] = methods ~ "\\t" ~ rewriter ^^ {
    case mthods ~ _ ~ rewrter =>
      mthods match {
        case "*" => (allMethods, rewrter)
        case list: List[_] => (list.map((x: Any) => x.toString).toSet, rewrter)
      }
  }
  protected val rules: Parser[List[(Set[String], Rewriter)]] = repsep(rule, "\\n")
  // Reads the config file, drops blank lines and '#' comments, and parses the rest.
  def parse(config: File) = {
    val toParse = Source.fromFile(config).getLines().filter(
      line => line.trim.length > 0 && !line.startsWith("#")).mkString("\\n")
    parseAll(rules, toParse)
  }
}
/**
 * Mixin providing URI rewriting for incoming HTTP requests based on an optional
 * rewrite-rules file. Rewritten requests may also get their Host header replaced
 * with the configured shadow hostname.
 */
trait RequestMapperModule {
  val shadowHostname: Option[String]
  val rewriteConfig: Option[File]
  // Useful if you don't feel like writing a rewrite method
  def identity[T](x: HttpRequest): Option[HttpRequest] = Some(x)
  object RequestMapper extends RewriteParser {
    // Parsed rules keyed by HTTP method name; None when no config file was supplied.
    // Fails fast (sys.error) on an unparseable config.
    val rewriteRules: Option[Map[String, List[Rewriter]]] = rewriteConfig.map {
      reader =>
        parse(reader) match {
          case f@NoSuccess(_, _) =>
            sys.error("Could not parse rewriteConfig: " + f)
          case Success(rools, _) =>
            // Expand (methods -> rule) pairs so each method maps to its own rule list.
            val flatter: List[(String, Rewriter)] = for {
              (methods, rule) <- rools
              method <- methods
            } yield method -> rule
            flatter.groupBy(_._1).map(x => x._1 -> x._2.map(_._2))
        }
    }
    // Returns a rewritten copy of the request when some rule for its method matches
    // the URI; None when there are no rules or nothing matches.
    def rewrite(request: HttpRequest): Option[HttpRequest] = {
      for {
        ruleMap <- rewriteRules
        rewriters <- ruleMap.get(request.getMethod.getName)
        rewritten <- rewriters.view.map(_.rewrite(request.getUri)).find(_.isDefined).flatten.headOption
      } yield {
        val copied = copy(request)
        copied.setUri(rewritten)
        shadowHostname.foreach(copied.headers.set("Host", _))
        copied
      }
    }
    // Copies a netty HttpRequest including headers and content / chunked flag.
    def copy(request: HttpRequest): HttpRequest = {
      val copy = new DefaultHttpRequest(request.getProtocolVersion, request.getMethod, request.getUri)
      if (request.isChunked) {
        copy.setChunked(true)
      } else {
        copy.setContent(request.getContent)
      }
      import collection.JavaConverters._
      for (name <- request.headers.names.asScala) {
        copy.headers.set(name, request.headers.getAll(name))
      }
      copy
    }
  }
}
| ebowman/splitter | src/main/scala/tomtom/splitter/layer7/Requests.scala | Scala | apache-2.0 | 4,221 |
package pt1.week4
import org.scalatest.{FunSuite, Matchers}
// Prices an American call option on futures in a binomial-lattice model and checks
// both the option value and the earliest optimal exercise period against known results.
class AmericanOptionOnFuturesPricingSpec extends FunSuite with Matchers {
  // Shared underlying share-price lattice (15 periods over a 3-month term)
  private val sharePriceLattice = SharePriceLattice.generate(initialPrice = 100.0, termInYears = 0.25, volatility = 0.3,
    numberOfPeriods = 15, interestRate = 0.02, dividendYield = 0.01)
  test("Price should be as expected for call option") {
    val futuresLattice = FuturesPricing.calculatePricingMatrix(sharePriceLattice, termInYears = 0.25, volatility = 0.3,
      numberOfPeriods = 15, interestRate = 0.02, dividendYield = 0.01)
    val callOptionPrice = AmericanOptionPricing.calculateForShares(futuresLattice, termInYears = 0.25, volatility = 0.3,
      numberOfPeriods = 15, interestRate = 0.02, dividendYield = 0.01, strikePrice = 110.0, isPut = false)(maturity = 10)
    callOptionPrice shouldBe (1.66 +- 0.01)
  }
  test("earliest optimal for call option") {
    val futuresLattice = FuturesPricing.calculatePricingMatrix(sharePriceLattice, termInYears = 0.25, volatility = 0.3,
      numberOfPeriods = 15, interestRate = 0.02, dividendYield = 0.01)
    val earliest = AmericanOptionPricing.earliestExercise(futuresLattice, termInYears = 0.25, volatility = 0.3,
      numberOfPeriods = 15, interestRate = 0.02, dividendYield = 0.01, strikePrice = 110.0, isPut = false)(maturity = 10)
    earliest shouldBe 7
  }
}
| ligasgr/fe-and-rm | src/test/scala/pt1/week4/AmericanOptionOnFuturesPricingSpec.scala | Scala | apache-2.0 | 1,369 |
package models.gitbucket
import org.joda.time.DateTime
import org.specs2.mutable._
import scalikejdbc._
import scalikejdbc.specs2.mutable.AutoRollback
// CRUD round-trip tests for the Issue model; each example runs inside a rolled-back
// transaction (AutoRollback) against fixture data keyed by (123, "MyString", "MyString").
class IssueSpec extends Specification {
  "Issue" should {
    val i = Issue.syntax("i")
    "find by primary keys" in new AutoRollback {
      val maybeFound = Issue.find(123, "MyString", "MyString")
      maybeFound.isDefined should beTrue
    }
    "find by where clauses" in new AutoRollback {
      val maybeFound = Issue.findBy(sqls.eq(i.issueId, 123))
      maybeFound.isDefined should beTrue
    }
    "find all records" in new AutoRollback {
      val allResults = Issue.findAll()
      allResults.size should be_>(0)
    }
    "count all records" in new AutoRollback {
      val count = Issue.countAll()
      count should be_>(0L)
    }
    "find all by where clauses" in new AutoRollback {
      val results = Issue.findAllBy(sqls.eq(i.issueId, 123))
      results.size should be_>(0)
    }
    "count by where clauses" in new AutoRollback {
      val count = Issue.countBy(sqls.eq(i.issueId, 123))
      count should be_>(0L)
    }
    "create new record" in new AutoRollback {
      val created = Issue.create(userName = "MyString", repositoryName = "MyString", issueId = 123, openedUserName = "MyString", title = null, closed = false, registeredDate = DateTime.now, updatedDate = DateTime.now, pullRequest = false)
      created should not beNull
    }
    "save a record" in new AutoRollback {
      val entity = Issue.findAll().head
      // TODO modify something
      val modified = entity
      val updated = Issue.save(modified)
      updated should not equalTo(entity)
    }
    "destroy a record" in new AutoRollback {
      val entity = Issue.findAll().head
      Issue.destroy(entity)
      val shouldBeNone = Issue.find(123, "MyString", "MyString")
      shouldBeNone.isDefined should beFalse
    }
  }
}
| thomaschoo/gitolite-to-gitbucket | src/test/scala/models/gitbucket/IssueSpec.scala | Scala | mit | 1,888 |
package cn.changhong.main.util
/**
* Created by yangguo on 14-11-4.
*/
object LoggingManager {
  /** Placeholder logging hook: currently just prints an empty line to stdout. */
  def log(): Unit = {
    Console.println("")
  }
}
| guoyang2011/myfinagle | BaseServer/src/main/scala/cn/changhong/main/util/LoggingManager.scala | Scala | apache-2.0 | 141 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package common.rest
import java.io.{File, FileInputStream}
import java.nio.charset.StandardCharsets
import java.security.KeyStore
import java.security.cert.X509Certificate
import java.time.Instant
import java.util.Base64
import akka.actor.ActorSystem
import akka.http.scaladsl.{Http, HttpsConnectionContext}
import akka.http.scaladsl.model.HttpMethods.{DELETE, GET, POST, PUT}
import akka.http.scaladsl.model.StatusCodes.{Accepted, NotFound, OK}
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.Uri.{Path, Query}
import akka.http.scaladsl.model.headers.{Authorization, BasicHttpCredentials, OAuth2BearerToken}
import akka.stream.ActorMaterializer
import akka.util.ByteString
import com.typesafe.sslconfig.akka.AkkaSSLConfig
import common.TestUtils.{ANY_ERROR_EXIT, DONTCARE_EXIT, RunResult, SUCCESS_EXIT}
import common.{
DeleteFromCollectionOperations,
HasActivation,
ListOrGetFromCollectionOperations,
WaitFor,
WhiskProperties,
WskProps,
_
}
import javax.net.ssl._
import org.apache.commons.io.{FileUtils, FilenameUtils}
import org.apache.openwhisk.common.Https.HttpsConfig
import org.apache.openwhisk.common.{AkkaLogging, TransactionId}
import org.apache.openwhisk.core.entity.ByteSize
import org.apache.openwhisk.utils.retry
import org.scalatest.Matchers
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.Span.convertDurationToSpan
import pureconfig._
import pureconfig.generic.auto._
import spray.json.DefaultJsonProtocol._
import spray.json._
import scala.collection.immutable.Seq
import scala.concurrent.ExecutionContext
import scala.concurrent.duration.{Duration, DurationInt, FiniteDuration}
import scala.language.postfixOps
import scala.util.{Failure, Success, Try}
// Hostname verifier that accepts every host, so TLS connections to controllers with
// self-signed certificates succeed regardless of the certificate's CN/SAN.
// NOTE: trust-all verification is only acceptable in test code like this.
class AcceptAllHostNameVerifier extends HostnameVerifier {
  override def verify(s: String, sslSession: SSLSession): Boolean = true
}
/**
 * TLS helpers for talking to an OpenWhisk controller that may present a self-signed
 * certificate: key managers for optional client auth, a non-validating SSLContext,
 * and an akka-http HttpsConnectionContext combining both (test use only).
 */
object SSL {
  // HTTPS settings (keystore path/flavor/password, clientAuth flag) from whisk.controller.https
  lazy val httpsConfig: HttpsConfig = loadConfigOrThrow[HttpsConfig]("whisk.controller.https")
  // Key managers are only needed when mutual TLS (client auth) is requested.
  def keyManagers(clientAuth: Boolean): Array[KeyManager] = {
    if (clientAuth)
      keyManagersForClientAuth
    else
      Array.empty
  }
  // Loads the configured keystore and derives SunX509 key managers for client auth.
  def keyManagersForClientAuth: Array[KeyManager] = {
    val keyFactoryType = "SunX509"
    val keystorePassword = httpsConfig.keystorePassword.toCharArray
    val ks: KeyStore = KeyStore.getInstance(httpsConfig.keystoreFlavor)
    ks.load(new FileInputStream(httpsConfig.keystorePath), httpsConfig.keystorePassword.toCharArray)
    val keyManagerFactory: KeyManagerFactory = KeyManagerFactory.getInstance(keyFactoryType)
    keyManagerFactory.init(ks, keystorePassword)
    keyManagerFactory.getKeyManagers
  }
  // SSLContext whose trust manager accepts any certificate chain (no validation at all).
  def nonValidatingContext(clientAuth: Boolean = false): SSLContext = {
    class IgnoreX509TrustManager extends X509TrustManager {
      def checkClientTrusted(chain: Array[X509Certificate], authType: String): Unit = ()
      def checkServerTrusted(chain: Array[X509Certificate], authType: String): Unit = ()
      def getAcceptedIssuers: Array[X509Certificate] = Array.empty
    }
    val context = SSLContext.getInstance("TLS")
    context.init(keyManagers(clientAuth), Array(new IgnoreX509TrustManager), null)
    context
  }
  // Akka HTTPS context pairing the non-validating SSLContext with the accept-all hostname verifier.
  def httpsConnectionContext(implicit system: ActorSystem): HttpsConnectionContext = {
    val sslConfig = AkkaSSLConfig().mapSettings { s =>
      s.withHostnameVerifierClass(classOf[AcceptAllHostNameVerifier].asInstanceOf[Class[HostnameVerifier]])
    }
    new HttpsConnectionContext(SSL.nonValidatingContext(httpsConfig.clientAuth.toBoolean), Some(sslConfig))
  }
}
object HttpConnection {
  /**
   * Returns either the https context that is tailored for self-signed certificates on the controller, or
   * a default connection context used in Http.SingleRequest
   * @param protocol protocol used to communicate with controller API ("https" or anything else for plain http)
   * @param system actor system
   * @return https connection context
   */
  def getContext(protocol: String)(implicit system: ActorSystem): HttpsConnectionContext = {
    if (protocol == "https") {
      SSL.httpsConnectionContext
    } else {
      // supports http
      Http().defaultClientHttpsContext
    }
  }
}
// REST-backed implementation of WskOperations: provides one REST operations object
// per OpenWhisk entity collection (actions, triggers, rules, activations, packages,
// namespaces, API gateway).
class WskRestOperations(implicit actorSytem: ActorSystem) extends WskOperations {
  override implicit val action: RestActionOperations = new RestActionOperations
  override implicit val trigger: RestTriggerOperations = new RestTriggerOperations
  override implicit val rule: RestRuleOperations = new RestRuleOperations
  override implicit val activation: RestActivationOperations = new RestActivationOperations
  override implicit val pkg: RestPackageOperations = new RestPackageOperations
  override implicit val namespace: RestNamespaceOperations = new RestNamespaceOperations
  override implicit val api: RestGatewayOperations = new RestGatewayOperations
}
// REST implementation of list/get for a collection (`noun` names the collection path segment).
trait RestListOrGetFromCollectionOperations extends ListOrGetFromCollectionOperations with RunRestCmd {
  import FullyQualifiedNames.resolve
  /**
   * List entities in collection.
   *
   * @param namespace (optional) if specified must be fully qualified namespace
   * @param expectedExitCode (optional) the expected exit code for the command
   * if the code is anything but DONTCARE_EXIT, assert the code is as expected
   */
  override def list(namespace: Option[String] = None,
                    limit: Option[Int] = None,
                    nameSort: Option[Boolean] = None,
                    expectedExitCode: Int = OK.intValue)(implicit wp: WskProps): RestResult = {
    // Resolve the target path: an explicit namespace (optionally with an entity prefix),
    // otherwise the namespace from WskProps.
    val entPath = namespace map { ns =>
      val (nspace, name) = getNamespaceEntityName(resolve(namespace))
      if (name.isEmpty) Path(s"$basePath/namespaces/$nspace/$noun")
      else Path(s"$basePath/namespaces/$nspace/$noun/$name/")
    } getOrElse Path(s"$basePath/namespaces/${wp.namespace}/$noun")
    val paramMap: Map[String, String] = Map("skip" -> "0", "docs" -> true.toString) ++
      limit.map(l => Map("limit" -> l.toString)).getOrElse(Map.empty)
    val resp = requestEntity(GET, entPath, paramMap)
    val r = new RestResult(resp.status, getTransactionId(resp), getRespData(resp))
    validateStatusCode(expectedExitCode, r.statusCode.intValue)
    r
  }
  /**
   * Gets entity from collection.
   *
   * @param name either a fully qualified name or a simple entity name
   * @param expectedExitCode (optional) the expected exit code for the command
   * if the code is anything but DONTCARE_EXIT, assert the code is as expected
   */
  override def get(name: String,
                   expectedExitCode: Int = OK.intValue,
                   summary: Boolean = false,
                   fieldFilter: Option[String] = None,
                   url: Option[Boolean] = None,
                   save: Option[Boolean] = None,
                   saveAs: Option[String] = None)(implicit wp: WskProps): RestResult = {
    val (ns, entity) = getNamespaceEntityName(name)
    val entPath = Path(s"$basePath/namespaces/$ns/$noun/$entity")
    val resp = requestEntity(GET, entPath)(wp)
    val rr = new RestResult(resp.status, getTransactionId(resp), getRespData(resp))
    validateStatusCode(expectedExitCode, rr.statusCode.intValue)
    rr
  }
}
// REST implementation of delete for a collection (`noun` names the collection path segment).
trait RestDeleteFromCollectionOperations extends DeleteFromCollectionOperations with RunRestCmd {
  /**
   * Deletes entity from collection.
   *
   * @param name either a fully qualified name or a simple entity name
   * @param expectedExitCode (optional) the expected exit code for the command
   * if the code is anything but DONTCARE_EXIT, assert the code is as expected
   */
  override def delete(name: String, expectedExitCode: Int = OK.intValue)(implicit wp: WskProps): RestResult = {
    val (ns, entityName) = getNamespaceEntityName(name)
    val path = Path(s"$basePath/namespaces/$ns/$noun/$entityName")
    val resp = requestEntity(DELETE, path)(wp)
    val rr = new RestResult(resp.status, getTransactionId(resp), getRespData(resp))
    validateStatusCode(expectedExitCode, rr.statusCode.intValue)
    rr
  }
  /**
   * Deletes entity from collection but does not assert that the command succeeds.
   * Use this if deleting an entity that may not exist and it is OK if it does not.
   *
   * @param name either a fully qualified name or a simple entity name
   */
  override def sanitize(name: String)(implicit wp: WskProps): RestResult = {
    delete(name, DONTCARE_EXIT)
  }
}
// Activation-id extraction for REST results of invoke commands.
trait RestActivation extends HasActivation {
  /**
   * Extracts activation id from invoke (action or trigger) or activation get
   */
  override def extractActivationId(result: RunResult): Option[String] = {
    extractActivationIdFromInvoke(result.asInstanceOf[RestResult])
  }
  /**
   * Extracts activation id from 'wsk action invoke' or 'wsk trigger invoke'
   */
  private def extractActivationIdFromInvoke(result: RestResult): Option[String] = {
    // OK = blocking invoke completed; Accepted = async invoke was queued. Both carry an id.
    if ((result.statusCode == OK) || (result.statusCode == Accepted))
      Some(result.getField("activationId"))
    else
      None
  }
}
class RestActionOperations(implicit val actorSystem: ActorSystem)
extends RestListOrGetFromCollectionOperations
with RestDeleteFromCollectionOperations
with RestActivation
with ActionOperations {
override protected val noun = "actions"
/**
* Creates an action (or updates it when update=true) by PUT-ing to the actions
* collection. Parameters mirror those available in the REST.
*
* @param name either a fully qualified name or a simple entity name
* @param artifact (optional) code file, copy-source action, or sequence component list, depending on kind
* @param kind one of docker, copy, sequence or none for autoselect else an explicit type
* @param expectedExitCode (optional) the expected exit code for the command
* if the code is anything but DONTCARE_EXIT, assert the code is as expected
*/
override def create(
name: String,
artifact: Option[String],
kind: Option[String] = None, // one of docker, copy, sequence or none for autoselect else an explicit type
main: Option[String] = None,
docker: Option[String] = None,
parameters: Map[String, JsValue] = Map.empty,
annotations: Map[String, JsValue] = Map.empty,
parameterFile: Option[String] = None,
annotationFile: Option[String] = None,
timeout: Option[Duration] = None,
memory: Option[ByteSize] = None,
logsize: Option[ByteSize] = None,
concurrency: Option[Int] = None,
shared: Option[Boolean] = None,
update: Boolean = false,
web: Option[String] = None,
websecure: Option[String] = None,
expectedExitCode: Int = OK.intValue)(implicit wp: WskProps): RestResult = {
// NOTE(review): `shared` and `websecure` are accepted but never folded into the
// request body below -- confirm whether that is intentional.
val (namespace, actionName) = getNamespaceEntityName(name)
val (paramsInput, annosInput) = getParamsAnnos(parameters, annotations, parameterFile, annotationFile, web = web)
// Resolve the (parameters, annotations, exec) triple according to the action kind.
val (params: Array[JsValue], annos: Array[JsValue], exec: Map[String, JsValue]) = kind match {
case Some(k) =>
k match {
// "copy": fetch the source action and reuse its parameters, annotations and
// exec, merged with any explicitly supplied parameters/annotations.
case "copy" =>
require(artifact.isDefined, "copy requires an artifact name")
val actionName = entityName(artifact.get)
val actionPath = Path(s"$basePath/namespaces/$namespace/$noun/$actionName")
val resp = requestEntity(GET, actionPath)
val result = new RestResult(resp.status, getTransactionId(resp), getRespData(resp))
val params = result.getFieldListJsObject("parameters").toArray[JsValue]
val annos = result.getFieldListJsObject("annotations").toArray[JsValue]
val exec = result.getFieldJsObject("exec").fields
(paramsInput ++ params, annosInput ++ annos, exec)
// "sequence": artifact holds the component list.
case "sequence" =>
require(artifact.isDefined, "sequence requires a component list")
val comps = convertIntoComponents(artifact.get)
val exec =
if (comps.nonEmpty) Map("components" -> comps.toJson, "kind" -> k.toJson)
else Map("kind" -> k.toJson)
(paramsInput, annosInput, exec)
// Any other explicit kind: read the code (if any) and build the exec stanza.
case _ =>
val code = readCodeFromFile(artifact).map(c => Map("code" -> c.toJson)).getOrElse(Map.empty)
val exec: Map[String, JsValue] = if (k == "native" || k == "docker") {
// "native" must not carry an image; "docker" must -- both map to a blackbox exec.
require(k == "native" && docker.isEmpty || k == "docker" && docker.isDefined)
Map("kind" -> "blackbox".toJson, "image" -> docker.getOrElse("openwhisk/dockerskeleton").toJson) ++ code
} else {
require(artifact.isDefined, "file name required as an artifact")
Map("kind" -> k.toJson) ++ code
}
(paramsInput, annosInput, exec)
}
// No explicit kind: infer "blackbox" from a docker image, otherwise infer the
// runtime kind from the artifact's file extension.
case None =>
docker
.map(_ => "blackbox")
.orElse {
artifact.map { file =>
getExt(file) match {
case "js" => "nodejs:default"
case "py" => "python:default"
case "swift" => "swift:default"
case "jar" => "java:default"
case _ =>
throw new IllegalStateException(s"Extension for $file not recognized and kind cannot be inferred.")
}
}
}
.map { k =>
val code = readCodeFromFile(artifact).map(c => Map("code" -> c.toJson)).getOrElse(Map.empty)
val image = docker.map(i => Map("image" -> i.toJson)).getOrElse(Map.empty)
(paramsInput, annosInput, Map("kind" -> k.toJson) ++ code ++ image)
}
.getOrElse {
// Neither docker nor a recognizable artifact: only tolerable on update with no new code.
if (!update && artifact.isDefined)
throw new IllegalStateException(
s"Extension for ${artifact.get} not recognized and kind cannot be inferred.")
else (paramsInput, annosInput, Map.empty)
}
}
// Only the limits explicitly provided by the caller are sent.
val limits: Map[String, JsValue] = {
timeout.map(t => Map("timeout" -> t.toMillis.toJson)).getOrElse(Map.empty) ++
logsize.map(log => Map("logs" -> log.toMB.toJson)).getOrElse(Map.empty) ++
memory.map(m => Map("memory" -> m.toMB.toJson)).getOrElse(Map.empty) ++
concurrency.map(c => Map("concurrency" -> c.toJson)).getOrElse(Map.empty)
}
// On create the full document is sent; on update only the pieces that were given.
val body: Map[String, JsValue] = if (!update) {
require(exec.nonEmpty, "exec cannot be empty on create")
Map(
"exec" -> main.map(m => exec ++ Map("main" -> m.toJson)).getOrElse(exec).toJson,
"parameters" -> params.toJson,
"annotations" -> annos.toJson) ++ Map("limits" -> limits.toJson)
} else {
var content: Map[String, JsValue] = Map.empty
if (exec.nonEmpty)
content = Map("exec" -> main.map(m => exec ++ Map("main" -> m.toJson)).getOrElse(exec).toJson)
if (params.nonEmpty)
content = content + ("parameters" -> params.toJson)
if (annos.nonEmpty)
content = content + ("annotations" -> annos.toJson)
if (limits.nonEmpty)
content = content + ("limits" -> limits.toJson)
content
}
val path = Path(s"$basePath/namespaces/$namespace/$noun/$actionName")
// Updates use overwrite=true; creates fail if the action already exists.
val resp =
if (update) requestEntity(PUT, path, Map("overwrite" -> "true"), Some(JsObject(body).toString))
else requestEntity(PUT, path, body = Some(JsObject(body).toString))
val rr = new RestResult(resp.status, getTransactionId(resp), getRespData(resp))
validateStatusCode(expectedExitCode, rr.statusCode.intValue)
rr
}
/**
* Invokes the action; delegates to invokeAction on the base trait.
*
* @param name either a fully qualified name or a simple entity name
* @param blocking when true, wait for the activation to complete
* @param result when true, return only the activation result
* @param expectedExitCode (optional) the expected exit code for the command
*/
override def invoke(name: String,
parameters: Map[String, JsValue] = Map.empty,
parameterFile: Option[String] = None,
blocking: Boolean = false,
result: Boolean = false,
expectedExitCode: Int = Accepted.intValue)(implicit wp: WskProps): RestResult = {
super.invokeAction(name, parameters, parameterFile, blocking, result, expectedExitCode = expectedExitCode)
}
// Reads the artifact from disk: text files are returned verbatim, while binary
// artifacts (zip/jar/bin, determined by extension) are base64-encoded.
private def readCodeFromFile(artifact: Option[String]): Option[String] = {
artifact.map { file =>
val ext = getExt(file)
val isBinary = ext == "zip" || ext == "jar" || ext == "bin"
if (!isBinary) {
FileUtils.readFileToString(new File(file), StandardCharsets.UTF_8)
} else {
val zip = FileUtils.readFileToByteArray(new File(file))
Base64.getEncoder.encodeToString(zip)
}
}
}
}
class RestTriggerOperations(implicit val actorSystem: ActorSystem)
extends RestListOrGetFromCollectionOperations
with RestDeleteFromCollectionOperations
with RestActivation
with TriggerOperations {
override protected val noun = "triggers"
/**
* Creates trigger. Parameters mirror those available in the REST.
* When a feed is given, the trigger is created first and the feed action is then
* invoked with a CREATE lifecycle event; if that invocation fails the trigger is
* deleted again.
*
* @param name either a fully qualified name or a simple entity name
* @param expectedExitCode (optional) the expected exit code for the command
* if the code is anything but DONTCARE_EXIT, assert the code is as expected
*/
override def create(name: String,
parameters: Map[String, JsValue] = Map.empty,
annotations: Map[String, JsValue] = Map.empty,
parameterFile: Option[String] = None,
annotationFile: Option[String] = None,
feed: Option[String] = None,
shared: Option[Boolean] = None,
update: Boolean = false,
expectedExitCode: Int = OK.intValue)(implicit wp: WskProps): RestResult = {
val (ns, triggerName) = getNamespaceEntityName(name)
val path = Path(s"$basePath/namespaces/$ns/$noun/$triggerName")
val (params, annos) = getParamsAnnos(parameters, annotations, parameterFile, annotationFile, feed)
// On create the full document is sent; on update only the pieces that were given.
var bodyContent: Map[String, JsValue] = Map.empty
if (!update) {
bodyContent =
Map("publish" -> shared.getOrElse(false).toJson, "parameters" -> params.toJson, "annotations" -> annos.toJson)
} else {
shared.foreach { p =>
bodyContent = Map("publish" -> p.toJson)
}
// NOTE(review): presence is tested on the raw in-memory maps, so parameters or
// annotations supplied only via file would be dropped on update -- confirm intended.
val inputParams = convertMapIntoKeyValue(parameters)
if (inputParams.nonEmpty) {
bodyContent = bodyContent + ("parameters" -> params.toJson)
}
val inputAnnos = convertMapIntoKeyValue(annotations)
if (inputAnnos.nonEmpty) {
bodyContent = bodyContent + ("annotations" -> annos.toJson)
}
}
val resp =
if (update) requestEntity(PUT, path, Map("overwrite" -> "true"), Some(JsObject(bodyContent).toString))
else requestEntity(PUT, path, body = Some(JsObject(bodyContent).toString))
val result = new RestResult(resp.status, getTransactionId(resp), getRespData(resp))
// Validate eagerly on failure so the caller's expectation is checked even before
// any feed invocation is attempted.
if (result.statusCode != OK) {
validateStatusCode(expectedExitCode, result.statusCode.intValue)
}
val rr = feed map { f =>
// Invoke the feed
val (nsFeed, feedName) = getNamespaceEntityName(f)
val path = Path(s"$basePath/namespaces/$nsFeed/actions/$feedName")
val paramMap = Map("blocking" -> "true", "result" -> "false")
var body: Map[String, JsValue] = Map(
"lifecycleEvent" -> "CREATE".toJson,
"triggerName" -> s"/$ns/$triggerName".toJson,
"authKey" -> s"${wp.authKey}".toJson)
body = body ++ parameters
val resp = requestEntity(POST, path, paramMap, Some(body.toJson.toString))
val resultInvoke = new RestResult(resp.status, getTransactionId(resp), getRespData(resp))
if ((expectedExitCode != DONTCARE_EXIT) && (expectedExitCode != ANY_ERROR_EXIT))
expectedExitCode shouldBe resultInvoke.statusCode.intValue
if (resultInvoke.statusCode != OK) {
// Remove the trigger, because the feed failed to invoke.
delete(triggerName)
new RestResult(NotFound, getTransactionId(resp))
} else {
result
}
} getOrElse {
// No feed: just validate and return the trigger-creation result.
validateStatusCode(expectedExitCode, result.statusCode.intValue)
result
}
rr
}
/**
* Fires trigger. Parameters mirror those available in the REST.
* Parameters from the file take precedence over the in-memory map.
*
* @param name either a fully qualified name or a simple entity name
* @param expectedExitCode (optional) the expected exit code for the command
* if the code is anything but DONTCARE_EXIT, assert the code is as expected
*/
override def fire(name: String,
parameters: Map[String, JsValue] = Map.empty,
parameterFile: Option[String] = None,
expectedExitCode: Int = Accepted.intValue)(implicit wp: WskProps): RestResult = {
val path = getNamePath(wp.namespace, noun, name)
val params = parameterFile map { l =>
val input = FileUtils.readFileToString(new File(l), StandardCharsets.UTF_8)
input.parseJson.convertTo[Map[String, JsValue]]
} getOrElse parameters
val resp =
if (params.isEmpty) requestEntity(POST, path)
else requestEntity(POST, path, body = Some(params.toJson.toString))
new RestResult(resp.status.intValue, getTransactionId(resp), getRespData(resp))
}
}
class RestRuleOperations(implicit val actorSystem: ActorSystem)
    extends RestListOrGetFromCollectionOperations
    with RestDeleteFromCollectionOperations
    with WaitFor
    with RuleOperations {

  override protected val noun = "rules"

  /**
   * Creates rule. Parameters mirror those available in the REST.
   * Note: the response status is not asserted here; the raw result is returned.
   *
   * @param name either a fully qualified name or a simple entity name
   * @param trigger must be a simple name
   * @param action must be a simple name
   * @param expectedExitCode (optional) the expected exit code for the command
   * if the code is anything but DONTCARE_EXIT, assert the code is as expected
   */
  override def create(name: String,
                      trigger: String,
                      action: String,
                      annotations: Map[String, JsValue] = Map.empty,
                      shared: Option[Boolean] = None,
                      update: Boolean = false,
                      expectedExitCode: Int = SUCCESS_EXIT)(implicit wp: WskProps): RestResult = {
    val rulePath = getNamePath(wp.namespace, noun, name)
    // Rules reference their trigger and action by fully qualified name.
    val body = JsObject(
      "trigger" -> fullEntityName(trigger).toJson,
      "action" -> fullEntityName(action).toJson,
      "publish" -> shared.getOrElse(false).toJson,
      "status" -> "".toJson,
      "annotations" -> convertMapIntoKeyValue(annotations).toJson)
    val response =
      if (update) requestEntity(PUT, rulePath, Map("overwrite" -> "true"), Some(body.toString))
      else requestEntity(PUT, rulePath, body = Some(body.toString))
    new RestResult(response.status, getTransactionId(response), getRespData(response))
  }

  /**
   * Enables rule.
   *
   * @param name either a fully qualified name or a simple entity name
   * @param expectedExitCode (optional) the expected exit code for the command
   * if the code is anything but DONTCARE_EXIT, assert the code is as expected
   */
  override def enable(name: String, expectedExitCode: Int = SUCCESS_EXIT)(implicit wp: WskProps): RestResult =
    changeRuleState(name)

  /**
   * Disables rule.
   *
   * @param name either a fully qualified name or a simple entity name
   * @param expectedExitCode (optional) the expected exit code for the command
   * if the code is anything but DONTCARE_EXIT, assert the code is as expected
   */
  override def disable(name: String, expectedExitCode: Int = SUCCESS_EXIT)(implicit wp: WskProps): RestResult =
    changeRuleState(name, "inactive")

  /**
   * Checks state of rule by fetching the rule document.
   *
   * @param name either a fully qualified name or a simple entity name
   * @param expectedExitCode (optional) the expected exit code for the command
   * if the code is anything but DONTCARE_EXIT, assert the code is as expected
   */
  override def state(name: String, expectedExitCode: Int = OK.intValue)(implicit wp: WskProps): RestResult =
    get(name, expectedExitCode = expectedExitCode)

  /** Sets the rule's status field via POST; "active" enables, "inactive" disables. */
  def changeRuleState(ruleName: String, state: String = "active")(implicit wp: WskProps): RestResult = {
    val statePath = getNamePath(wp.namespace, noun, entityName(ruleName))
    val response = requestEntity(POST, statePath, body = Some(JsObject("status" -> state.toJson).toString))
    new RestResult(response.status, getTransactionId(response), getRespData(response))
  }
}
class RestActivationOperations(implicit val actorSystem: ActorSystem)
extends ActivationOperations
with RunRestCmd
with RestActivation
with WaitFor {
protected val noun = "activations"
/**
* Activation polling console: repeatedly lists activations newer than the given
* point in time until at least one appears or the duration budget is spent.
*
* @param duration exits console after duration
* @param since (optional) time travels back to activation since given duration
*/
override def console(duration: Duration,
since: Option[Duration] = None,
expectedExitCode: Int = SUCCESS_EXIT,
actionName: Option[String] = None)(implicit wp: WskProps): RestResult = {
require(duration > 1.second, "duration must be at least 1 second")
val sinceTime = {
val now = System.currentTimeMillis
since.map(s => now - s.toMillis).getOrElse(now)
}
retry({
val result = listActivation(since = Some(Instant.ofEpochMilli(sinceTime)))(wp)
// An empty list signals "not yet"; the throw drives retry's loop.
if (result.stdout != "[]") result else throw new Throwable()
}, (duration / 1.second).toInt, Some(1.second))
}
/**
* Lists activations.
*
* @param filter (optional) if define, must be a simple entity name
* @param limit (optional) the maximum number of activation to return
* @param since (optional) only the activations since this timestamp are included
* @param expectedExitCode (optional) the expected exit code for the command
* if the code is anything but DONTCARE_EXIT, assert the code is as expected
*/
def listActivation(filter: Option[String] = None,
limit: Option[Int] = None,
since: Option[Instant] = None,
skip: Option[Int] = None,
docs: Boolean = true,
expectedExitCode: Int = SUCCESS_EXIT)(implicit wp: WskProps): RestResult = {
val entityPath = Path(s"$basePath/namespaces/${wp.namespace}/$noun")
// Only explicitly supplied filters become query parameters.
val paramMap = Map("docs" -> docs.toString) ++
skip.map(s => Map("skip" -> s.toString)).getOrElse(Map.empty) ++
limit.map(l => Map("limit" -> l.toString)).getOrElse(Map.empty) ++
filter.map(f => Map("name" -> f.toString)).getOrElse(Map.empty) ++
since.map(s => Map("since" -> s.toEpochMilli.toString)).getOrElse(Map.empty)
val resp = requestEntity(GET, entityPath, paramMap)
new RestResult(resp.status, getTransactionId(resp), getRespData(resp))
}
/**
* Parses result of WskActivation.list to extract sequence of activation ids.
*
* @param rr run result, should be from WhiskActivation.list otherwise behavior is undefined
* @return sequence of activations
*/
def idsActivation(rr: RestResult): Seq[String] = {
rr.getBodyListJsObject.map(r => RestResult.getField(r, "activationId").toString)
}
/**
* Gets activation logs by id.
*
* @param activationId the activation id
* @param expectedExitCode (optional) the expected exit code for the command
* if the code is anything but DONTCARE_EXIT, assert the code is as expected
*/
def activationLogs(activationId: String, expectedExitCode: Int = OK.intValue)(implicit wp: WskProps): RestResult = {
val path = Path(s"$basePath/namespaces/${wp.namespace}/$noun/$activationId/logs")
val resp = requestEntity(GET, path)
val rr = new RestResult(resp.status, getTransactionId(resp), getRespData(resp))
validateStatusCode(expectedExitCode, rr.statusCode.intValue)
rr
}
/**
* Gets activation result by id.
*
* @param activationId the activation id
* @param expectedExitCode (optional) the expected exit code for the command
* if the code is anything but DONTCARE_EXIT, assert the code is as expected
*/
def activationResult(activationId: String, expectedExitCode: Int = OK.intValue)(implicit wp: WskProps): RestResult = {
val path = Path(s"$basePath/namespaces/${wp.namespace}/$noun/$activationId/result")
val resp = requestEntity(GET, path)
val rr = new RestResult(resp.status, getTransactionId(resp), getRespData(resp))
validateStatusCode(expectedExitCode, rr.statusCode.intValue)
rr
}
/**
* Polls activations list for at least N activations. The activations
* are optionally filtered for the given entity. Will return as soon as
* N activations are found. If after retry budget is exhausted, N activations
* are still not present, will return a partial result. Hence caller must
* check length of the result and not assume it is >= N.
*
* @param N the number of activations desired
* @param entity the name of the entity to filter from activation list
* @param limit the maximum number of entities to list (if entity name is not unique use Some(0))
* @param since (optional) only the activations since this timestamp are included
* @param skip (optional) the number of activations to skip
* @param retries the maximum retries (total timeout is retries + 1 seconds)
* @return activation ids found, caller must check length of sequence
*/
override def pollFor(N: Int,
entity: Option[String],
limit: Option[Int] = Some(30),
since: Option[Instant] = None,
skip: Option[Int] = Some(0),
retries: Int = 10,
pollPeriod: Duration = 1.second)(implicit wp: WskProps): Seq[String] = {
Try {
retry({
val result =
idsActivation(listActivation(filter = entity, limit = limit, since = since, skip = skip, docs = false))
// PartialResult carries the ids found so far so they survive retry exhaustion.
if (result.length >= N) result else throw PartialResult(result)
}, retries, waitBeforeRetry = Some(pollPeriod))
} match {
case Success(ids) => ids
case Failure(PartialResult(ids)) => ids
case _ => Seq.empty
}
}
/**
* Gets an activation by id, or -- when last=Some(true) and no id is given --
* the most recent activation. Returns a NotFound result when neither resolves.
*/
override def get(activationId: Option[String],
expectedExitCode: Int = OK.intValue,
fieldFilter: Option[String] = None,
last: Option[Boolean] = None,
summary: Option[Boolean] = None)(implicit wp: WskProps): RestResult = {
val actId = activationId match {
case Some(_) => activationId
case None =>
last match {
case Some(true) =>
val activations = pollFor(N = 1, entity = None, limit = Some(1))
require(activations.size <= 1)
activations.headOption
case _ => None
}
}
val rr = actId match {
case Some(id) =>
val resp = requestEntity(GET, getNamePath(wp.namespace, noun, id))
new RestResult(resp.status, getTransactionId(resp), getRespData(resp))
case None => new RestResult(NotFound, "")
}
validateStatusCode(expectedExitCode, rr.statusCode.intValue)
rr
}
/**
* Polls for an activation matching the given id. If found
* return Right(activation) else Left(result of calling REST API).
*
* @return either Left(error message) or Right(activation as JsObject)
*/
override def waitForActivation(activationId: String,
initialWait: Duration = 1 second,
pollPeriod: Duration = 1 second,
totalWait: Duration = 30 seconds)(implicit wp: WskProps): Either[String, JsObject] = {
val activation = waitfor(() => {
val result = get(Some(activationId), expectedExitCode = DONTCARE_EXIT)(wp)
// null tells waitfor to keep polling.
if (result.statusCode == NotFound) {
null
} else result
}, initialWait, pollPeriod, totalWait)
Try {
assert(activation.statusCode == OK)
assert(activation.getField("activationId") != "")
activation.respBody
} map {
Right(_)
// FIX: the message previously read "for'<id>'" with a missing space.
} getOrElse Left(s"No activation record for '$activationId'")
}
/** Gets the logs of an activation; NotFound when no id is resolvable. */
override def logs(activationId: Option[String] = None,
expectedExitCode: Int = OK.intValue,
last: Option[Boolean] = None)(implicit wp: WskProps): RestResult = {
val rr = activationId match {
case Some(id) =>
val resp = requestEntity(GET, getNamePath(wp.namespace, noun, s"$id/logs"))
new RestResult(resp.status, getTransactionId(resp), getRespData(resp))
case None =>
new RestResult(NotFound, "")
}
validateStatusCode(expectedExitCode, rr.statusCode.intValue)
rr
}
/** Gets the result of an activation; NotFound when no id is resolvable. */
override def result(activationId: Option[String] = None,
expectedExitCode: Int = OK.intValue,
last: Option[Boolean] = None)(implicit wp: WskProps): RestResult = {
val rr = activationId match {
case Some(id) =>
val resp = requestEntity(GET, getNamePath(wp.namespace, noun, s"$id/result"))
new RestResult(resp.status, getTransactionId(resp), getRespData(resp))
case None =>
new RestResult(NotFound, "")
}
validateStatusCode(expectedExitCode, rr.statusCode.intValue)
rr
}
/** Used in polling for activations to record partial results from retry poll. */
private case class PartialResult(ids: Seq[String]) extends Throwable
}
class RestNamespaceOperations(implicit val actorSystem: ActorSystem) extends NamespaceOperations with RunRestCmd {

  protected val noun = "namespaces"

  /**
   * Lists available namespaces for whisk key.
   *
   * @param expectedExitCode (optional) the expected exit code for the command
   * if the code is anything but DONTCARE_EXIT, assert the code is as expected
   */
  override def list(expectedExitCode: Int = OK.intValue, nameSort: Option[Boolean] = None)(
    implicit wp: WskProps): RestResult = {
    val response = requestEntity(GET, Path(s"$basePath/namespaces"))
    val rr = new RestResult(response.status, getTransactionId(response), getRespData(response))
    validateStatusCode(expectedExitCode, rr.statusCode.intValue)
    rr
  }

  /**
   * Looks up namespace for whisk props.
   *
   * @param wskprops instance of WskProps with an auth key to lookup
   * @return namespace as string, or "" when the list is empty
   */
  override def whois()(implicit wskprops: WskProps): String =
    list().getBodyListString.headOption.map(_.toString).getOrElse("")
}
class RestPackageOperations(implicit val actorSystem: ActorSystem)
extends RestListOrGetFromCollectionOperations
with RestDeleteFromCollectionOperations
with PackageOperations {
override protected val noun = "packages"
/**
* Creates package (or updates it when update=true). Parameters mirror those
* available in the REST.
*
* @param name either a fully qualified name or a simple entity name
* @param expectedExitCode (optional) the expected exit code for the command
* if the code is anything but DONTCARE_EXIT, assert the code is as expected
*/
override def create(name: String,
parameters: Map[String, JsValue] = Map.empty,
annotations: Map[String, JsValue] = Map.empty,
parameterFile: Option[String] = None,
annotationFile: Option[String] = None,
shared: Option[Boolean] = None,
update: Boolean = false,
expectedExitCode: Int = OK.intValue)(implicit wp: WskProps): RestResult = {
val path = getNamePath(wp.namespace, noun, name)
var bodyContent: Map[String, JsValue] = Map.empty
val (params, annos) = getParamsAnnos(parameters, annotations, parameterFile, annotationFile)
// On create the full document is sent; on update only the pieces that were given.
if (!update) {
val published = shared.getOrElse(false)
bodyContent = Map("publish" -> published.toJson, "parameters" -> params.toJson, "annotations" -> annos.toJson)
} else {
shared.foreach { s =>
bodyContent = bodyContent + ("publish" -> s.toJson)
}
// NOTE(review): presence is tested on the raw in-memory maps, so parameters or
// annotations supplied only via file would be dropped on update -- confirm intended.
val inputParams = convertMapIntoKeyValue(parameters)
if (inputParams.nonEmpty) {
bodyContent = bodyContent + ("parameters" -> params.toJson)
}
val inputAnnos = convertMapIntoKeyValue(annotations)
if (inputAnnos.nonEmpty) {
bodyContent = bodyContent + ("annotations" -> annos.toJson)
}
}
val resp =
if (update) requestEntity(PUT, path, Map("overwrite" -> "true"), Some(JsObject(bodyContent).toString))
else requestEntity(PUT, path, body = Some(JsObject(bodyContent).toString))
val r = new RestResult(resp.status, getTransactionId(resp), getRespData(resp))
validateStatusCode(expectedExitCode, r.statusCode.intValue)
r
}
/**
* Binds package: creates `name` in the caller's namespace as a binding that
* points at the `provider` package. Parameters mirror those available in the REST.
*
* @param provider the package to bind to (fully qualified or simple name)
* @param name either a fully qualified name or a simple entity name
* @param expectedExitCode (optional) the expected exit code for the command
* if the code is anything but DONTCARE_EXIT, assert the code is as expected
*/
override def bind(provider: String,
name: String,
parameters: Map[String, JsValue] = Map.empty,
annotations: Map[String, JsValue] = Map.empty,
expectedExitCode: Int = OK.intValue)(implicit wp: WskProps): RestResult = {
val params = convertMapIntoKeyValue(parameters)
val annos = convertMapIntoKeyValue(annotations)
val (ns, packageName) = getNamespaceEntityName(provider)
val path = getNamePath(wp.namespace, noun, name)
val binding = JsObject("namespace" -> ns.toJson, "name" -> packageName.toJson)
val bodyContent =
JsObject("binding" -> binding.toJson, "parameters" -> params.toJson, "annotations" -> annos.toJson)
// overwrite=false: a binding never replaces an existing package.
val resp = requestEntity(PUT, path, Map("overwrite" -> "false"), Some(bodyContent.toString))
val rr = new RestResult(resp.status, getTransactionId(resp), getRespData(resp))
validateStatusCode(expectedExitCode, rr.statusCode.intValue)
rr
}
}
class RestGatewayOperations(implicit val actorSystem: ActorSystem) extends GatewayOperations with RunRestCmd {
protected val noun = "apis"
/**
* Creates an API endpoint by invoking the apimgmt/createApi system action,
* either from an action-backed route definition or from a swagger document.
* Parameters mirror those available in the REST.
*
* @param expectedExitCode (optional) the expected exit code for the command
* if the code is anything but DONTCARE_EXIT, assert the code is as expected
*/
override def create(basepath: Option[String] = None,
relpath: Option[String] = None,
operation: Option[String] = None,
action: Option[String] = None,
apiname: Option[String] = None,
swagger: Option[String] = None,
responsetype: Option[String] = None,
expectedExitCode: Int = SUCCESS_EXIT,
cliCfgFile: Option[String] = None)(implicit wp: WskProps): RestResult = {
val r = action match {
// An action is named: build the backend URL for its web endpoint and assemble
// the apidoc from the individual route pieces.
case Some(action) => {
val (ns, actionName) = getNamespaceEntityName(action)
val actionUrl = s"${WhiskProperties.getApiHostForAction}$basePath/web/$ns/default/$actionName.http"
val actionAuthKey = wp.authKey
val testaction = Some(
new ApiAction(name = actionName, namespace = ns, backendUrl = actionUrl, authkey = actionAuthKey))
// Only the route pieces actually supplied are folded into the apidoc.
val parms = Map("namespace" -> ns.toJson) ++ {
basepath map { b =>
Map("gatewayBasePath" -> b.toJson)
} getOrElse Map.empty
} ++ {
relpath map { r =>
Map("gatewayPath" -> r.toJson)
} getOrElse Map.empty
} ++ {
operation map { o =>
Map("gatewayMethod" -> o.toJson)
} getOrElse Map.empty
} ++ {
apiname map { an =>
Map("apiName" -> an.toJson)
} getOrElse Map.empty
} ++ {
testaction map { a =>
Map("action" -> a.toJson)
} getOrElse Map.empty
} ++ {
swagger map { s =>
val swaggerFile = FileUtils.readFileToString(new File(s), StandardCharsets.UTF_8)
Map("swagger" -> swaggerFile.toJson)
} getOrElse Map.empty
}
// NOTE(review): assumes "uuid:key"-shaped auth keys where the first segment is
// the space guid -- confirm for bearer-token setups.
val spaceguid = if (wp.authKey.contains(":")) wp.authKey.split(":")(0) else wp.authKey
val parm = Map[String, JsValue]("apidoc" -> JsObject(parms)) ++ {
responsetype.map(r => Map("responsetype" -> r.toJson)).getOrElse(Map.empty)
} ++ {
Map("accesstoken" -> wp.authKey.toJson)
} ++ {
Map("spaceguid" -> spaceguid.toJson)
}
invokeAction(
name = "apimgmt/createApi",
parameters = parm,
blocking = true,
result = true,
web = true,
expectedExitCode = expectedExitCode)(wp)
}
// No action: a swagger document (file) is required instead.
case None =>
swagger match {
case Some(swaggerFile) =>
var file = ""
val fileName = swaggerFile.toString
try {
file = FileUtils.readFileToString(new File(fileName), StandardCharsets.UTF_8)
} catch {
// Unreadable swagger file: surface a NotFound error result instead of throwing.
case _: Throwable =>
return new RestResult(
NotFound,
"",
JsObject("error" -> s"Error reading swagger file '$fileName'".toJson).toString)
}
val parms = Map("namespace" -> s"${wp.namespace}".toJson, "swagger" -> file.toJson)
val parm = Map[String, JsValue]("apidoc" -> JsObject(parms)) ++ {
responsetype.map(r => Map("responsetype" -> r.toJson)).getOrElse(Map.empty)
} ++ {
Map("accesstoken" -> wp.authKey.toJson)
} ++ {
Map("spaceguid" -> wp.authKey.split(":")(0).toJson)
}
invokeAction(
name = "apimgmt/createApi",
parameters = parm,
blocking = true,
result = true,
web = true,
expectedExitCode = expectedExitCode)(wp)
case None => new RestResult(NotFound, "")
}
}
r
}
/**
* Retrieve a list of API endpoints via the apimgmt/getApi system action.
* Parameters mirror those available in the REST.
*
* @param expectedExitCode (optional) the expected exit code for the command
* if the code is anything but DONTCARE_EXIT, assert the code is as expected
*/
override def list(basepathOrApiName: Option[String] = None,
relpath: Option[String] = None,
operation: Option[String] = None,
limit: Option[Int] = None,
since: Option[Instant] = None,
full: Option[Boolean] = None,
nameSort: Option[Boolean] = None,
expectedExitCode: Int = SUCCESS_EXIT,
cliCfgFile: Option[String] = None)(implicit wp: WskProps): RestResult = {
val parms = {
basepathOrApiName map { b =>
Map("basepath" -> b.toJson)
} getOrElse Map.empty
} ++ {
relpath map { r =>
Map("relpath" -> r.toJson)
} getOrElse Map.empty
} ++ {
operation map { o =>
Map("operation" -> o.toJson)
} getOrElse Map.empty
} ++ {
Map("accesstoken" -> wp.authKey.toJson)
} ++ {
Map("spaceguid" -> wp.authKey.split(":")(0).toJson)
}
// NOTE(review): validates against OK.intValue rather than the supplied
// expectedExitCode -- confirm whether that is intentional.
invokeAction(
name = "apimgmt/getApi",
parameters = parms,
blocking = true,
result = true,
web = true,
expectedExitCode = OK.intValue)(wp)
}
/**
* Retieves an API's configuration via the apimgmt/getApi system action.
* Runs a command wsk [params] where the arguments come in as a sequence.
*
* @param expectedExitCode (optional) the expected exit code for the command
* if the code is anything but DONTCARE_EXIT, assert the code is as expected
*/
override def get(basepathOrApiName: Option[String] = None,
full: Option[Boolean] = None,
expectedExitCode: Int = SUCCESS_EXIT,
cliCfgFile: Option[String] = None,
format: Option[String] = None)(implicit wp: WskProps): RestResult = {
val parms = {
basepathOrApiName map { b =>
Map("basepath" -> b.toJson)
} getOrElse Map.empty
} ++ {
Map("accesstoken" -> wp.authKey.toJson)
} ++ {
Map("spaceguid" -> wp.authKey.split(":")(0).toJson)
}
// NOTE(review): validates against OK.intValue rather than the supplied
// expectedExitCode -- confirm whether that is intentional.
invokeAction(
name = "apimgmt/getApi",
parameters = parms,
blocking = true,
result = true,
web = true,
expectedExitCode = OK.intValue)(wp)
}
/**
* Delete an entire API or a subset of API endpoints via the apimgmt/deleteApi
* system action. Parameters mirror those available in the REST.
*
* @param expectedExitCode (optional) the expected exit code for the command
* if the code is anything but DONTCARE_EXIT, assert the code is as expected
*/
override def delete(basepathOrApiName: String,
relpath: Option[String] = None,
operation: Option[String] = None,
expectedExitCode: Int = SUCCESS_EXIT,
cliCfgFile: Option[String] = None)(implicit wp: WskProps): RestResult = {
val parms = Map("basepath" -> basepathOrApiName.toJson) ++ {
relpath map { r =>
Map("relpath" -> r.toJson)
} getOrElse Map.empty
} ++ {
operation map { o =>
Map("operation" -> o.toJson)
} getOrElse Map.empty
} ++ {
Map("accesstoken" -> wp.authKey.toJson)
} ++ {
Map("spaceguid" -> wp.authKey.split(":")(0).toJson)
}
invokeAction(
name = "apimgmt/deleteApi",
parameters = parms,
blocking = true,
result = true,
web = true,
expectedExitCode = expectedExitCode)(wp)
}
}
trait RunRestCmd extends Matchers with ScalaFutures with SwaggerValidator {
// Controller scheme (http/https) read from the whisk configuration.
val protocol: String = loadConfigOrThrow[String]("whisk.controller.protocol")
// Idle timeout for outgoing connections.
val idleTimeout: FiniteDuration = 90 seconds
// Budget for draining a response entity into a strict (in-memory) entity.
val toStrictTimeout: FiniteDuration = 30 seconds
// NOTE(review): queueSize and maxOpenRequest appear unused in this part of the
// file -- confirm they are still referenced elsewhere.
val queueSize = 10
val maxOpenRequest = 1024
// All REST paths are rooted at the v1 API.
val basePath = Path("/api/v1")
val systemNamespace = "whisk.system"
val logger = new AkkaLogging(actorSystem.log)
// ScalaTest futureValue patience: up to 100s, polling every 15ms.
implicit val config: PatienceConfig = PatienceConfig(100 seconds, 15 milliseconds)
// Supplied by the concrete operations class mixing in this trait.
implicit val actorSystem: ActorSystem
lazy implicit val executionContext: ExecutionContext = actorSystem.dispatcher
lazy implicit val materializer: ActorMaterializer = ActorMaterializer()
// Accept any hostname: these clients target test deployments with self-signed certs.
lazy val sslConfig: AkkaSSLConfig = AkkaSSLConfig().mapSettings {
_.withHostnameVerifierClass(classOf[AcceptAllHostNameVerifier].asInstanceOf[Class[HostnameVerifier]])
}
// TLS context that skips certificate validation, paired with the lenient sslConfig.
lazy val connectionContext = new HttpsConnectionContext(SSL.nonValidatingContext(), Some(sslConfig))
/**
 * True when the actual status matches the expected exit code, or when the
 * caller opted out of checking via DONTCARE_EXIT or ANY_ERROR_EXIT.
 */
def isStatusCodeExpected(expectedExitCode: Int, statusCode: Int): Boolean =
  (expectedExitCode == DONTCARE_EXIT) || (expectedExitCode == ANY_ERROR_EXIT) || (statusCode == expectedExitCode)
/**
 * Asserts that the actual HTTP status code matches the expected exit code.
 * No assertion is made for DONTCARE_EXIT or ANY_ERROR_EXIT -- those cases make
 * isStatusCodeExpected return true, so the shouldBe is never reached.
 */
def validateStatusCode(expectedExitCode: Int, statusCode: Int): Unit = {
  // The previous outer guard re-checked DONTCARE_EXIT/ANY_ERROR_EXIT, which
  // isStatusCodeExpected already handles; the duplicate condition is removed.
  if (!isStatusCodeExpected(expectedExitCode, statusCode)) {
    statusCode shouldBe expectedExitCode
  }
}
// Builds the REST path for a named entity: /api/v1/namespaces/<ns>/<noun>/<name>.
def getNamePath(ns: String, noun: String, name: String) = Path(s"$basePath/namespaces/$ns/$noun/$name")
// Returns the file's extension (without the dot), or "" when there is none
// (FilenameUtils.getExtension may return null).
def getExt(filePath: String): String = Option(FilenameUtils.getExtension(filePath)).getOrElse("")
def requestEntity(method: HttpMethod,
path: Path,
params: Map[String, String] = Map.empty,
body: Option[String] = None)(implicit wp: WskProps): HttpResponse = {
val creds = getHttpCredentials(wp)
// startsWith(http) includes https
val hostWithScheme = if (wp.apihost.startsWith("http")) {
Uri(wp.apihost)
} else {
Uri().withScheme("https").withHost(wp.apihost)
}
val request = HttpRequest(
method,
hostWithScheme.withPath(path).withQuery(Query(params)),
List(Authorization(creds)),
entity =
body.map(b => HttpEntity.Strict(ContentTypes.`application/json`, ByteString(b))).getOrElse(HttpEntity.Empty))
val response = Http().singleRequest(request, connectionContext).flatMap { _.toStrict(toStrictTimeout) }.futureValue
logger.debug(this, s"Request: $request")
logger.debug(this, s"Response: $response")
val validationErrors = validateRequestAndResponse(request, response)
if (validationErrors.nonEmpty) {
fail(
s"HTTP request or response did not match the Swagger spec.\\nRequest: $request\\n" +
s"Response: $response\\nValidation Error: $validationErrors")
}
response
}
private def getHttpCredentials(wp: WskProps) = {
if (wp.authKey.contains(":")) {
val authKey = wp.authKey.split(":")
new BasicHttpCredentials(authKey(0), authKey(1))
} else {
if (wp.basicAuth) {
new BasicHttpCredentials(wp.authKey, wp.authKey)
} else {
OAuth2BearerToken(wp.authKey)
}
}
}
def getParamsAnnos(parameters: Map[String, JsValue] = Map.empty,
annotations: Map[String, JsValue] = Map.empty,
parameterFile: Option[String] = None,
annotationFile: Option[String] = None,
feed: Option[String] = None,
web: Option[String] = None): (Array[JsValue], Array[JsValue]) = {
val params = parameterFile.map(convertStringIntoKeyValue(_)).getOrElse(convertMapIntoKeyValue(parameters))
val annos = annotationFile
.map(convertStringIntoKeyValue(_, feed, web))
.getOrElse(convertMapIntoKeyValue(annotations, feed, web))
(params, annos)
}
def convertStringIntoKeyValue(file: String,
feed: Option[String] = None,
web: Option[String] = None): Array[JsValue] = {
val input = FileUtils.readFileToString(new File(file), StandardCharsets.UTF_8)
val in = input.parseJson.convertTo[Map[String, JsValue]]
convertMapIntoKeyValue(in, feed, web)
}
def convertMapIntoKeyValue(params: Map[String, JsValue],
feed: Option[String] = None,
web: Option[String] = None,
oldParams: List[JsObject] = List.empty): Array[JsValue] = {
val newParams =
params
.map { case (key, value) => JsObject("key" -> key.toJson, "value" -> value) } ++ feed.map(f =>
JsObject("key" -> "feed".toJson, "value" -> f.toJson))
val paramsList = {
if (newParams.nonEmpty) newParams
else oldParams
}
val webOpt = web.map {
case "true" | "yes" =>
Seq(
JsObject("key" -> "web-export".toJson, "value" -> true.toJson),
JsObject("key" -> "raw-http".toJson, "value" -> false.toJson),
JsObject("key" -> "final".toJson, "value" -> true.toJson))
case "false" | "no" =>
Seq(
JsObject("key" -> "web-export".toJson, "value" -> false.toJson),
JsObject("key" -> "raw-http".toJson, "value" -> false.toJson),
JsObject("key" -> "final".toJson, "value" -> false.toJson))
case "raw" =>
Seq(
JsObject("key" -> "web-export".toJson, "value" -> true.toJson),
JsObject("key" -> "raw-http".toJson, "value" -> true.toJson),
JsObject("key" -> "final".toJson, "value" -> true.toJson))
case _ =>
Seq.empty
}
webOpt
.map(paramsList ++ _)
.getOrElse(paramsList)
.toArray
}
def entityName(name: String)(implicit wp: WskProps): String = {
val sep = "/"
if (name.startsWith(sep)) name.substring(name.indexOf(sep, name.indexOf(sep) + 1) + 1, name.length)
else name
}
def fullEntityName(name: String)(implicit wp: WskProps): String = {
val (ns, rest) = getNamespaceEntityName(name)
if (rest.nonEmpty) s"/$ns/$rest"
else s"/$ns"
}
def convertIntoComponents(comps: String)(implicit wp: WskProps): Array[JsValue] = {
comps.split(",").filter(_.nonEmpty).map(comp => fullEntityName(comp).toJson)
}
def getRespData(resp: HttpResponse): String = {
val timeout = toStrictTimeout
Try(resp.entity.toStrict(timeout).map { _.data }.map(_.utf8String).futureValue).getOrElse("")
}
def getTransactionId(resp: HttpResponse): String = {
val tidHeader = resp.headers.find(_.is(TransactionId.generatorConfig.lowerCaseHeader))
withClue(
s"The header ${TransactionId.generatorConfig} is not set. This means that the request did not reach nginx (or the controller if nginx is skipped in that test).") {
tidHeader shouldBe defined
}
tidHeader.get.value
}
def getNamespaceEntityName(name: String)(implicit wp: WskProps): (String, String) = {
name.split("/") match {
// Example: /namespace/package_name/entity_name
case Array(empty, namespace, packageName, entityName) if empty.isEmpty => (namespace, s"$packageName/$entityName")
// Example: /namespace/entity_name
case Array(empty, namespace, entityName) if empty.isEmpty => (namespace, entityName)
// Example: namespace/package_name/entity_name
case Array(namespace, packageName, entityName) => (namespace, s"$packageName/$entityName")
// Example: /namespace
case Array(empty, namespace) if empty.isEmpty => (namespace, "")
// Example: package_name/entity_name
case Array(packageName, entityName) if !packageName.isEmpty => (wp.namespace, s"$packageName/$entityName")
// Example: entity_name
case Array(entityName) => (wp.namespace, entityName)
case _ => (wp.namespace, name)
}
}
def invokeAction(name: String,
parameters: Map[String, JsValue] = Map.empty,
parameterFile: Option[String] = None,
blocking: Boolean = false,
result: Boolean = false,
web: Boolean = false,
expectedExitCode: Int = Accepted.intValue)(implicit wp: WskProps): RestResult = {
val (ns, actName) = getNamespaceEntityName(name)
val path =
if (web) Path(s"$basePath/web/$systemNamespace/$actName.http")
else Path(s"$basePath/namespaces/$ns/actions/$actName")
val paramMap = Map("blocking" -> blocking.toString, "result" -> result.toString)
val input = parameterFile map { pf =>
Some(FileUtils.readFileToString(new File(pf), StandardCharsets.UTF_8))
} getOrElse Some(parameters.toJson.toString)
val resp = requestEntity(POST, path, paramMap, input)
val rr = new RestResult(resp.status.intValue, getTransactionId(resp), getRespData(resp), blocking)
// If the statusCode does not not equal to expectedExitCode, it is acceptable that the statusCode
// equals to 200 for the case that either blocking or result is set to true.
if (!isStatusCodeExpected(expectedExitCode, rr.statusCode.intValue)) {
if (blocking || result) {
validateStatusCode(OK.intValue, rr.statusCode.intValue)
} else {
rr.statusCode.intValue shouldBe expectedExitCode
}
}
rr
}
}
object RestResult {

  /** Extracts `key` from `obj` as a string, or "" when the field is absent. */
  def getField(obj: JsObject, key: String): String =
    obj.fields.get(key).fold("")(_.convertTo[String])

  /** Extracts `key` from `obj` as a JSON object, or an empty object when absent. */
  def getFieldJsObject(obj: JsObject, key: String): JsObject =
    obj.fields.get(key).fold(JsObject.empty)(_.asJsObject)

  /** Extracts `key` from `obj` as a raw JSON value, or an empty object when absent. */
  def getFieldJsValue(obj: JsObject, key: String): JsValue =
    obj.fields.get(key).getOrElse(JsObject.empty)

  /** Extracts `key` from `obj` as a vector of JSON objects, or Vector(JsObject.empty) when absent. */
  def getFieldListJsObject(obj: JsObject, key: String): Vector[JsObject] =
    obj.fields.get(key).fold(Vector(JsObject.empty))(_.convertTo[Vector[JsObject]])

  /**
   * Maps an HTTP status to a CLI-style exit code: 0 for OK (or Accepted on a
   * non-blocking call), otherwise the status value folded into a byte.
   * NOTE(review): name keeps its historical typo ("Staus") for API compatibility.
   */
  def convertStausCodeToExitCode(statusCode: StatusCode, blocking: Boolean = false): Int =
    statusCode match {
      case OK => 0
      case Accepted if !blocking => 0
      case other => other.intValue % 256
    }

  /** Pulls the "error" field out of a JSON response body, or "" when there is none. */
  def convertHttpResponseToStderr(respData: String): String =
    Try(getField(respData.parseJson.asJsObject, "error")).getOrElse("")
}
/**
 * Wraps an HTTP response so it can masquerade as a CLI RunResult: the exit code
 * is derived from the status code and stderr from the JSON "error" field.
 *
 * @param statusCode the HTTP status returned by the controller
 * @param tid        the transaction id taken from the response headers
 * @param respData   the raw response body (expected to be JSON for the accessors below)
 * @param blocking   whether the originating invocation was blocking (affects exit-code mapping)
 */
class RestResult(val statusCode: StatusCode, val tid: String, val respData: String = "", blocking: Boolean = false)
    extends RunResult(
      RestResult.convertStausCodeToExitCode(statusCode, blocking),
      respData,
      RestResult.convertHttpResponseToStderr(respData)) {
  override def toString: String = {
    super.toString + s"""statusCode: $statusCode
                        |tid: $tid
                        |respData: $respData
                        |blocking: $blocking""".stripMargin
  }
  // Parses the body as a JSON object; throws if respData is not valid JSON.
  def respBody: JsObject = respData.parseJson.asJsObject
  def getField(key: String): String = {
    RestResult.getField(respBody, key)
  }
  def getFieldJsObject(key: String): JsObject = {
    RestResult.getFieldJsObject(respBody, key)
  }
  def getFieldJsValue(key: String): JsValue = {
    RestResult.getFieldJsValue(respBody, key)
  }
  def getFieldListJsObject(key: String): Vector[JsObject] = {
    RestResult.getFieldListJsObject(respBody, key)
  }
  // For bodies that are a JSON array of objects (e.g. list responses).
  def getBodyListJsObject: Vector[JsObject] = {
    respData.parseJson.convertTo[Vector[JsObject]]
  }
  // For bodies that are a JSON array of strings.
  def getBodyListString: Vector[String] = {
    respData.parseJson.convertTo[Vector[String]]
  }
}
/** Describes the backing action of an API gateway route, serializable to JSON. */
class ApiAction(val name: String,
                val namespace: String,
                val backendMethod: String = "POST",
                val backendUrl: String,
                val authkey: String) {

  /** Renders this action definition in the JSON shape the apimgmt actions expect. */
  def toJson: JsObject = {
    val fields = Map(
      "name" -> name.toJson,
      "namespace" -> namespace.toJson,
      "backendMethod" -> backendMethod.toJson,
      "backendUrl" -> backendUrl.toJson,
      "authkey" -> authkey.toJson)
    JsObject(fields)
  }
}
| jeremiaswerner/openwhisk | tests/src/test/scala/common/rest/WskRestOperations.scala | Scala | apache-2.0 | 58,135 |
package com.sksamuel.elastic4s
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
class IndexAndTypesTest extends AnyWordSpec with Matchers {

  "IndexAndTypes" should {
    "parse /" in {
      // "index/type" splits into the index name and a single type.
      val parsed = IndexAndTypes("indx/t1")
      parsed shouldBe IndexAndTypes("indx", Array("t1"))
    }
    "parse / and ," in {
      // A comma-separated suffix yields multiple types.
      val parsed = IndexAndTypes("indx/t1,t2")
      parsed shouldBe IndexAndTypes("indx", Array("t1", "t2"))
    }
  }
}
| stringbean/elastic4s | elastic4s-tests/src/test/scala/com/sksamuel/elastic4s/IndexAndTypesTest.scala | Scala | apache-2.0 | 430 |
object Test {
  // Applies `f` to the pair `v`; note `f` takes the tuple itself, not two arguments.
  def a2p[a, b, c](f: ((a, b)) => c, v: (a,b)): c = f(v)
  // NOTE(review): the lambda's parameter type cannot be inferred here; this file
  // appears to be a compiler "neg" test, so the resulting error is presumably intended.
  a2p(x => x._1, (2, 3))
}
| lrytz/scala | test/files/neg/t0214.scala | Scala | apache-2.0 | 98 |
import scala.annotation.tailrec

/** Computes n! using an accumulator so the recursion can run in constant stack space. */
def factorial(n: Int): BigInt = {
  // @tailrec makes the compiler reject this inner helper if it ever stops
  // being tail-recursive (e.g. after a careless edit).
  @tailrec
  def fact(n: BigInt, acc: BigInt): BigInt =
    if (n == 0 || n == 1) acc
    else fact(n - 1, acc * n)

  fact(n, 1)
}

println(factorial(1000))
package eu.inn.binders.tconfig.internal
import java.io.OutputStream
import scala.language.experimental.macros
import scala.language.reflectiveCalls
import scala.reflect.macros.Context
/**
 * Macro bundle backing the typesafe-config binders: expands calls into code
 * that looks up a config value and deserializes it at the call site.
 */
private [tconfig] trait ConfigMacroImpl {
  val c: Context
  import c.universe._
  // Expands to: fetch `path` from the prefix's `config`, obtain a serializer
  // factory, and unbind the value as O.
  def read[O: c.WeakTypeTag](path: c.Expr[String]): c.Tree = {
    val block = q"""{
      val t = ${c.prefix.tree}
      val c = t.config.getValue($path)
      val f = eu.inn.binders.tconfig.SerializerFactory.findFactory()
      val d = f.createDeserializer(Option(c), Option($path))
      d.unbind[${weakTypeOf[O]}]
    }"""
    //println(block)
    block
  }
  // Same as `read`, but deserializes the prefix's `configValue` directly
  // (no path lookup), so the deserializer gets no field-name hint.
  def readValue[O: c.WeakTypeTag]: c.Tree = {
    val block = q"""{
      val t = ${c.prefix.tree}
      val f = eu.inn.binders.tconfig.SerializerFactory.findFactory()
      val d = f.createDeserializer(Option(t.configValue), None)
      d.unbind[${weakTypeOf[O]}]
    }"""
    //println(block)
    block
  }
}
| InnovaCo/binders-typesafe-config | src/main/scala/eu/inn/binders/tconfig/internal/ConfigMacroImpl.scala | Scala | bsd-3-clause | 941 |
import _root_.io.gatling.core.scenario.Simulation
import ch.qos.logback.classic.{Level, LoggerContext}
import io.gatling.core.Predef._
import io.gatling.http.Predef._
import org.slf4j.LoggerFactory
import scala.concurrent.duration._
/**
* Performance test for the Item entity.
*/
// Load test: authenticates against the JWT endpoint, then exercises the Item
// CRUD API (list, create, read, delete) with 100 users ramped over one minute.
class ItemGatlingTest extends Simulation {
    val context: LoggerContext = LoggerFactory.getILoggerFactory.asInstanceOf[LoggerContext]
    // Log all HTTP requests
    //context.getLogger("io.gatling.http").setLevel(Level.valueOf("TRACE"))
    // Log failed HTTP requests
    //context.getLogger("io.gatling.http").setLevel(Level.valueOf("DEBUG"))
    // Target host is overridable via -DbaseURL=...
    val baseURL = Option(System.getProperty("baseURL")) getOrElse """http://127.0.0.1:8080"""
    val httpConf = http
        .baseURL(baseURL)
        .inferHtmlResources()
        .acceptHeader("*/*")
        .acceptEncodingHeader("gzip, deflate")
        .acceptLanguageHeader("fr,fr-fr;q=0.8,en-us;q=0.5,en;q=0.3")
        .connection("keep-alive")
        .userAgentHeader("Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:33.0) Gecko/20100101 Firefox/33.0")
    val headers_http = Map(
        "Accept" -> """application/json"""
    )
    val headers_http_authentication = Map(
        "Content-Type" -> """application/json""",
        "Accept" -> """application/json"""
    )
    // The bearer token captured at login is injected from the session.
    val headers_http_authenticated = Map(
        "Accept" -> """application/json""",
        "Authorization" -> "${access_token}"
    )
    val scn = scenario("Test the Item entity")
        // Sanity check: unauthenticated access must be rejected.
        .exec(http("First unauthenticated request")
        .get("/api/account")
        .headers(headers_http)
        .check(status.is(401))).exitHereIfFailed
        .pause(10)
        // Log in and save the Authorization header for subsequent requests.
        .exec(http("Authentication")
        .post("/api/authenticate")
        .headers(headers_http_authentication)
        .body(StringBody("""{"username":"admin", "password":"admin"}""")).asJSON
        .check(header.get("Authorization").saveAs("access_token"))).exitHereIfFailed
        .pause(1)
        .exec(http("Authenticated request")
        .get("/api/account")
        .headers(headers_http_authenticated)
        .check(status.is(200)))
        .pause(10)
        .repeat(2) {
            exec(http("Get all items")
            .get("/api/items")
            .headers(headers_http_authenticated)
            .check(status.is(200)))
            .pause(10 seconds, 20 seconds)
            // Create an item and remember its Location so it can be read and deleted.
            .exec(http("Create new item")
            .post("/api/items")
            .headers(headers_http_authenticated)
            .body(StringBody("""{"id":null, "price":null, "creationDate":"2020-01-01T00:00:00.000Z", "lastUpdateDate":"2020-01-01T00:00:00.000Z"}""")).asJSON
            .check(status.is(201))
            .check(headerRegex("Location", "(.*)").saveAs("new_item_url"))).exitHereIfFailed
            .pause(10)
            .repeat(5) {
                exec(http("Get created item")
                .get("${new_item_url}")
                .headers(headers_http_authenticated))
                .pause(10)
            }
            .exec(http("Delete created item")
            .delete("${new_item_url}")
            .headers(headers_http_authenticated))
            .pause(10)
        }
    val users = scenario("Users").exec(scn)
    setUp(
        users.inject(rampUsers(100) over (1 minutes))
    ).protocols(httpConf)
}
| pierre-filliolaud/indexcity | indexcity-report/src/test/gatling/simulations/ItemGatlingTest.scala | Scala | apache-2.0 | 3,322 |
/**
* Licensed to the Minutemen Group under one or more contributor license
* agreements. See the COPYRIGHT file distributed with this work for
* additional information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You may
* obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package silhouette.specs2
import org.specs2.concurrent.ExecutionEnv
import org.specs2.matcher.Matcher
import org.specs2.mutable.Specification
import scala.concurrent.Future
import scala.concurrent.duration._
/**
* Helper to wait with patience to a result.
*
* This is needed to prevent the tests for timeouts.
*/
trait WaitPatience {
  self: Specification =>

  // How many times a matcher is re-tried, and the delay granted per attempt.
  val retries: Int = 10
  val timeout: FiniteDuration = 1.second

  /** Adds `awaitWithPatience`, which retries the wrapped matcher with the settings above. */
  implicit class WaitWithPatienceFutureMatchable[T](m: Matcher[T])(implicit ee: ExecutionEnv)
    extends FutureMatchable[T](m)(ee) {
    def awaitWithPatience: Matcher[Future[T]] = await(retries, timeout)
  }
}
| mohiva/silhouette | modules/specs2/src/main/scala/silhouette/specs2/WaitPatience.scala | Scala | apache-2.0 | 1,426 |
/*
* This software is licensed under the GNU Affero General Public License, quoted below.
*
* This file is a part of PowerAPI.
*
* Copyright (C) 2011-2016 Inria, University of Lille 1.
*
* PowerAPI is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of
* the License, or (at your option) any later version.
*
* PowerAPI is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with PowerAPI.
*
* If not, please consult http://www.gnu.org/licenses/agpl-3.0.html.
*/
package org.powerapi.reporter
import java.util.UUID
import scala.concurrent.duration.DurationInt
import akka.util.Timeout
import org.joda.time.format.ISODateTimeFormat
import org.powerapi.UnitTest
import org.powerapi.core.Tick
import org.powerapi.core.power._
import org.powerapi.core.target.{Application, Process, Target}
import org.powerapi.module.PowerChannel.AggregatePowerReport
import org.scalatest.time.{Seconds, Span}
// Integration test: requires an InfluxDB instance on localhost:8086 with the
// credentials used below; creates, queries and drops a "test" database.
class InfluxDisplaySuite extends UnitTest {
  val timeout = Timeout(10.seconds)
  override def afterAll() = {
    system.terminate()
  }
  "An InfluxDisplay" should "write an AggPowerReport message in a database" in {
    val muid = UUID.randomUUID()
    // Anonymous stand-ins for the report pieces the display reads.
    val baseTick = new Tick {
      val topic = ""
      val timestamp = System.currentTimeMillis()
    }
    val baseTargets = Set[Target](Application("firefox"), Process(1), Process(2))
    val baseDevices = Set[String]("cpu", "gpu", "ssd")
    val basePower = 10.W
    val aggregatePowerReport = new AggregatePowerReport(muid) {
      override def ticks = Set(baseTick)
      override def targets = baseTargets
      override def devices = baseDevices
      override def power = basePower
    }
    val influxDisplay = new InfluxDisplay("localhost", 8086, "powerapi", "powerapi", "test", "event.powerapi")
    whenReady(influxDisplay.database.create(), timeout(Span(30, Seconds))) {
      _ =>
        influxDisplay.display(aggregatePowerReport)
        // The write is asynchronous, so poll until the record shows up (30s budget).
        awaitCond({
          whenReady(influxDisplay.database.query("SELECT * FROM \"event.powerapi\"")) {
            result =>
              result.series.size == 1 &&
              result.series.head.records.size == 1 &&
              ISODateTimeFormat.dateTimeParser().parseDateTime(result.series.head.records.head("time").toString).getMillis == baseTick.timestamp &&
              result.series.head.records.head("devices") == baseDevices.mkString(",") &&
              result.series.head.records.head("muid") == s"$muid" &&
              result.series.head.records.head("power") == basePower.toMilliWatts &&
              result.series.head.records.head("targets") == baseTargets.mkString(",")
          }
        }, 30.seconds, 1.seconds)
        // Clean up the test database regardless of the assertions above.
        whenReady(influxDisplay.database.drop(), timeout(Span(30, Seconds))) {
          _ =>
            assert(true)
        }
    }
  }
}
| Spirals-Team/powerapi | powerapi-core/src/test/scala/org/powerapi/reporter/InfluxDisplaySuite.scala | Scala | agpl-3.0 | 3,204 |
/*
* Copyright 2011-2022 GatlingCorp (https://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.core.check.xpath
import scala.jdk.CollectionConverters._
import io.gatling.commons.validation._
import io.gatling.commons.validation.Validation.NoneSuccess
import io.gatling.core.check._
import net.sf.saxon.s9api.XdmNode
object XPathExtractors {

  /**
   * Extractor for the string value of the `occurrence`-th node matching `path`,
   * yielding None when there are fewer matches.
   */
  def find(
      path: String,
      namespaces: Map[String, String],
      occurrence: Int,
      xmlParsers: XmlParsers
  ): FindCriterionExtractor[XdmNode, (String, Map[String, String]), String] =
    new FindCriterionExtractor[XdmNode, (String, Map[String, String]), String](
      "xpath",
      (path, namespaces),
      occurrence,
      document => {
        val matched = xmlParsers.evaluateXPath(path, namespaces, document)
        if (matched.size > occurrence)
          Some(matched.itemAt(occurrence).getStringValue).success
        else
          NoneSuccess
      }
    )

  /** Extractor for the string values of every node matching `path`, or None when nothing matches. */
  def findAll(
      path: String,
      namespaces: Map[String, String],
      xmlParsers: XmlParsers
  ): FindAllCriterionExtractor[XdmNode, (String, Map[String, String]), String] =
    new FindAllCriterionExtractor[XdmNode, (String, Map[String, String]), String](
      "xpath",
      (path, namespaces),
      document => {
        val matched = xmlParsers.evaluateXPath(path, namespaces, document).asScala
        if (matched.isEmpty)
          NoneSuccess
        else
          // toVector materialises the results eagerly so no lazy wrapper keeps a
          // reference to the underlying DOM alive.
          Some(matched.map(_.getStringValue).toVector).success
      }
    )

  /** Extractor counting how many nodes match `path`. */
  def count(path: String, namespaces: Map[String, String], xmlParsers: XmlParsers): CountCriterionExtractor[XdmNode, (String, Map[String, String])] =
    new CountCriterionExtractor[XdmNode, (String, Map[String, String])](
      "xpath",
      (path, namespaces),
      document => Some(xmlParsers.evaluateXPath(path, namespaces, document).size).success
    )
}
| gatling/gatling | gatling-core/src/main/scala/io/gatling/core/check/xpath/XPathExtractors.scala | Scala | apache-2.0 | 2,591 |
object birthday_who {

  /** Prints the four-line "Happy Birthday" song addressed to `person`. */
  def happyBirthday(person: String): Unit = {
    println("Happy Birthday to you!")
    println("Happy Birthday to you!")
    println(s"Happy Birthday, dear $person.")
    println("Happy Birthday to you!")
  }

  /** Asks for a name on stdin and sings to it. */
  def main(args: Array[String]): Unit = {
    println("Who would you like to sing Happy Birthday to?")
    // Predef.readLine is deprecated; read from scala.io.StdIn instead.
    val userName = scala.io.StdIn.readLine()
    happyBirthday(userName)
  }
}
/*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2015-2021 Andre White.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.truthencode.ddo.model.feats
import io.truthencode.ddo.model.classes.HeroicCharacterClass
import io.truthencode.ddo.model.classes.HeroicCharacterClass.Monk
import io.truthencode.ddo.support.requisite.{FeatRequisiteImpl, GrantsToClass, RequiresAnyOfClass}
/**
* Created by adarr on 3/17/2017.
*/
/** Passive feat granted to (and requiring) Monk level 1. */
trait ArmorClassBonus
  extends FeatRequisiteImpl with Passive with GrantsToClass with RequiresAnyOfClass {

  // The same (class, level) pair both grants the feat and gates it.
  private val monkLevelOne = Seq(Monk -> 1)

  override def grantToClass: Seq[(HeroicCharacterClass, Int)] = monkLevelOne

  override def anyOfClass: Seq[(HeroicCharacterClass, Int)] = monkLevelOne
}
| adarro/ddo-calc | subprojects/common/ddo-core/src/main/scala/io/truthencode/ddo/model/feats/ArmorClassBonus.scala | Scala | apache-2.0 | 1,264 |
/*
* Copyright (C) 2007-2008 Artima, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Example code from:
*
* Programming in Scala (First Edition, Version 6)
* by Martin Odersky, Lex Spoon, Bill Venners
*
* http://booksites.artima.com/programming_in_scala
*/
/** An immutable rational number n/d; results are not reduced to lowest terms. */
class Rational(n: Int, d: Int) {

  // A rational number is undefined for a zero denominator.
  require(d != 0)

  val numer: Int = n
  val denom: Int = d

  override def toString = s"$numer/$denom"

  /** (a/b) + (c/d) = (a*d + c*b) / (b*d). */
  def add(that: Rational): Rational = {
    val sumNumer = numer * that.denom + that.numer * denom
    val sumDenom = denom * that.denom
    new Rational(sumNumer, sumDenom)
  }

  /** Compares by cross-multiplication, avoiding division. */
  def lessThan(that: Rational) = numer * that.denom < that.numer * denom

  /** Returns the larger of this and `that`. */
  def max(that: Rational) = if (lessThan(that)) that else this
}
object Main {
  /** Demonstrates Rational: construction, comparison and max. */
  def main(args: Array[String]): Unit = {
    val oneHalf = new Rational(1, 2)
    val twoThirds = new Rational(2, 3)
    println(s"oneHalf [$oneHalf]")
    println(s"twoThirds [$twoThirds]")
    println(s"oneHalf.lessThan(twoThirds) [${oneHalf.lessThan(twoThirds)}]")
    println(s"oneHalf.max(twoThirds) [${oneHalf.max(twoThirds)}]")
  }
}
| peachyy/scalastu | functional-objects/ex6/Rational.scala | Scala | apache-2.0 | 1,618 |
package gridscale.ssh
import squants.time.TimeConversions._
import scala.language.postfixOps
// Smoke test: submits a shell job over SSH to localhost:2222 (e.g. a docker
// sshd with root/root) and prints its stdout.
object TestSSH extends App {
  import gridscale._
  import gridscale.authentication._
  import gridscale.effectaside._
  def job = SSHJobDescription(command = s"""echo -n greatings `whoami`""", workDirectory = "/tmp/")
  val localhost = SSHServer("localhost", port = 2222)(UserPassword("root", "root"))
  // Submit, poll until the job ends, capture stdout, then clean up the job.
  def prg(implicit system: Effect[System], ssh: Effect[SSH]) = {
    val jobId = submit(localhost, job)
    waitUntilEnded(() ⇒ state(localhost, jobId))
    val out = stdOut(localhost, jobId)
    clean(localhost, jobId)
    out
  }
  implicit val system: Effect[System] = System()
  implicit val ssh: Effect[SSH] = SSH()
  // Always close the SSH effect, even when the program fails.
  try println(prg)
  finally ssh().close()
}
| openmole/gridscale | examples/ssh/src/main/scala/gridscale.ssh/TestSSH.scala | Scala | agpl-3.0 | 772 |
import java.lang.reflect._
import anns._
// Fixture classes for the reflection assertions below:
// A has no annotations, B a Java one, C a single Ann_0, D a repeated Ann_0.
class A
@java.lang.Deprecated class B
@Ann_0(name = "C", value = "see") class C
@Ann_0(name = "D", value = "dee") @Ann_0(name = "D", value = "dye") class D
// Fixture exercising annotations on a constructor, fields, methods and parameters
// (single and repeated), inspected reflectively by the companion object below.
class Test @Ann_0(name = "<init>", value = "constructor") @Ann_0(name = "<init>", value = "initializer") () {
  @Ann_0(name = "x", value = "eks") val x = 1
  @Ann_0(name = "y", value = "why") @Ann_0(name = "y", value = "wye") val y = 2
  @Ann_0(name = "t", value = "tee") def t = 1
  @Ann_0(name = "u", value = "you") @Ann_0(name = "u", value = "yew") def u = 2
  def meh(
    @Ann_0(name = "1", value = "one") `1`: Int,
    @Ann_0(name = "2", value = "two") @Ann_0(name = "2", value = "tew") `2`: Int,
  ) = ()
  // todo: annotations on types
  // todo? annotations on packages
}
// Dumps the runtime-visible annotations of the fixture classes, fields, methods
// and parameters via Java reflection, for comparison against a check file.
object Test extends App {
  val cls_test = classOf[Test]
  prints {
    List(classOf[A], classOf[B], classOf[C], classOf[D])
      .map(cls => s"${cls.getName}: ${anns(cls)}")
  }
  prints {
    List("x", "y")
      .map(cls_test.getDeclaredField)
      .map(f => s"${f.getName}: ${anns(f)}")
  }
  prints {
    List("t", "u")
      .map(cls_test.getDeclaredMethod(_))
      .map(m => s"${m.getName}: ${anns(m)}")
  }
  prints {
    cls_test
      .getDeclaredMethod("meh", classOf[Int], classOf[Int])
      .getParameters.toList
      .map(p => s"${p.getName}: ${anns(p)}")
  }
  println {
    anns(cls_test.getConstructor()).map(_.toString)
  } ; println()
  // Filters out the ScalaSignature annotation the compiler adds to every class.
  def anns(ae: AnnotatedElement) =
    ae.getAnnotations.toList.filterNot(_.isInstanceOf[reflect.ScalaSignature])
  def prints(l: List[String]) = { println(l mkString "\\n") ; println() }
}
| martijnhoekstra/scala | test/files/run/t9529/Test_1.scala | Scala | apache-2.0 | 1,638 |
package org.broadinstitute.dsde.workbench.sam
package api
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.model.headers.OAuth2BearerToken
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import akka.http.scaladsl.server.directives.OnSuccessMagnet._
import cats.effect.IO
import com.typesafe.scalalogging.LazyLogging
import org.broadinstitute.dsde.workbench.model.google.ServiceAccountSubjectId
import org.broadinstitute.dsde.workbench.model._
import org.broadinstitute.dsde.workbench.sam.api.StandardUserInfoDirectives._
import org.broadinstitute.dsde.workbench.sam.dataAccess.{DirectoryDAO, RegistrationDAO}
import org.broadinstitute.dsde.workbench.sam.service.UserService._
import org.broadinstitute.dsde.workbench.sam.util.SamRequestContext
import scala.concurrent.ExecutionContext
import scala.util.Try
import scala.util.matching.Regex
/**
 * Akka-http directives that turn the OIDC headers set by the auth proxy
 * (apache mod_auth_openidc / Azure B2C) into Sam user identities.
 */
trait StandardUserInfoDirectives extends UserInfoDirectives with LazyLogging with SamRequestContextDirectives {
  implicit val executionContext: ExecutionContext

  /** Resolves the incoming OIDC headers to an existing, registered user. */
  def requireUserInfo(samRequestContext: SamRequestContext): Directive1[UserInfo] = requireOidcHeaders.flatMap { oidcHeaders =>
    onSuccess {
      getUserInfo(directoryDAO, registrationDAO, oidcHeaders, samRequestContext).unsafeToFuture()
    }
  }

  /** Builds a not-yet-persisted WorkbenchUser from the headers (for user registration). */
  def requireCreateUser(samRequestContext: SamRequestContext): Directive1[WorkbenchUser] = requireOidcHeaders.map(buildWorkbenchUser)

  private def buildWorkbenchUser(oidcHeaders: OIDCHeaders): WorkbenchUser = {
    // google id can either be in the external id or google id from azure headers, favor the external id as the source
    val googleSubjectId = (oidcHeaders.externalId.left.toOption ++ oidcHeaders.googleSubjectIdFromAzure).headOption
    val azureB2CId = oidcHeaders.externalId.toOption // .right is missing (compared to .left above) since Either is Right biased

    WorkbenchUser(
      genWorkbenchUserId(System.currentTimeMillis()),
      googleSubjectId,
      oidcHeaders.email,
      azureB2CId)
  }

  /**
    * Utility function that knows how to convert all the various headers into OIDCHeaders
    */
  private def requireOidcHeaders: Directive1[OIDCHeaders] = {
    (headerValueByName(accessTokenHeader).as(OAuth2BearerToken) &
      externalIdFromHeaders &
      expiresInFromHeader &
      headerValueByName(emailHeader).as(WorkbenchEmail) &
      optionalHeaderValueByName(googleIdFromAzureHeader).map(_.map(GoogleSubjectId))).as(OIDCHeaders)
  }

  private def expiresInFromHeader: Directive1[Long] = {
    // gets expiresInHeader as a string and converts it to Long raising an exception if it can't
    optionalHeaderValueByName(expiresInHeader).flatMap {
      case Some(expiresInString) =>
        Try(expiresInString.toLong).fold(
          t => failWith(new WorkbenchExceptionWithErrorReport(ErrorReport(StatusCodes.BadRequest, s"expiresIn $expiresInString can't be converted to Long", t))).toDirective,
          expiresInLong => provide(expiresInLong)
        )
      // Missing header defaults to 0 (no known expiry).
      case None => provide(0)
    }
  }

  private def externalIdFromHeaders: Directive1[Either[GoogleSubjectId, AzureB2CId]] = headerValueByName(userIdHeader).map { idString =>
    Try(BigInt(idString)).fold(
      _ => Right(AzureB2CId(idString)), // could not parse id as a Long, treat id as b2c id which are uuids
      _ => Left(GoogleSubjectId(idString)) // id is a number which is what google subject ids look like
    )
  }
}
object StandardUserInfoDirectives {
  // Matches Google service-account emails (*.iam.gserviceaccount.com).
  val SAdomain: Regex = "(\\\\S+@\\\\S+\\\\.iam\\\\.gserviceaccount\\\\.com$)".r
  // Header names populated by the fronting auth proxy.
  val accessTokenHeader = "OIDC_access_token"
  val expiresInHeader = "OIDC_CLAIM_expires_in"
  val emailHeader = "OIDC_CLAIM_email"
  val userIdHeader = "OIDC_CLAIM_user_id"
  val googleIdFromAzureHeader = "OAUTH2_CLAIM_google_id"

  /**
   * Resolves OIDC headers to a registered user:
   * - a Google service-account email is first checked as a pet SA and mapped to its owner;
   * - other Google subject ids are looked up directly;
   * - Azure B2C ids are looked up, backfilling the B2C id from the Google id when needed.
   */
  def getUserInfo(directoryDAO: DirectoryDAO, registrationDAO: RegistrationDAO, oidcHeaders: OIDCHeaders, samRequestContext: SamRequestContext): IO[UserInfo] = {
    oidcHeaders match {
      case OIDCHeaders(_, Left(googleSubjectId), _, saEmail@WorkbenchEmail(SAdomain(_)), _) =>
        // If it's a PET account, we treat it as its owner
        directoryDAO.getUserFromPetServiceAccount(ServiceAccountSubjectId(googleSubjectId.value), samRequestContext).flatMap {
          case Some(pet) => IO.pure(UserInfo(oidcHeaders.token, pet.id, pet.email, oidcHeaders.expiresIn))
          case None => lookUpByGoogleSubjectId(googleSubjectId, directoryDAO, samRequestContext).map(uid => UserInfo(oidcHeaders.token, uid, saEmail, oidcHeaders.expiresIn))
        }

      case OIDCHeaders(_, Left(googleSubjectId), _, _, _) =>
        lookUpByGoogleSubjectId(googleSubjectId, directoryDAO, samRequestContext).map(uid => UserInfo(oidcHeaders.token, uid, oidcHeaders.email, oidcHeaders.expiresIn))

      case OIDCHeaders(_, Right(azureB2CId), _, _, _) =>
        loadUserMaybeUpdateAzureB2CId(azureB2CId, oidcHeaders.googleSubjectIdFromAzure, directoryDAO, registrationDAO, samRequestContext).map(user => UserInfo(oidcHeaders.token, user.id, oidcHeaders.email, oidcHeaders.expiresIn))
    }
  }

  // Loads the user by B2C id; when absent but a Google id is present, links the
  // B2C id onto the existing Google-identified user, then retries the load.
  private def loadUserMaybeUpdateAzureB2CId(azureB2CId: AzureB2CId, maybeGoogleSubjectId: Option[GoogleSubjectId], directoryDAO: DirectoryDAO, registrationDAO: RegistrationDAO, samRequestContext: SamRequestContext) = {
    for {
      maybeUser <- directoryDAO.loadUserByAzureB2CId(azureB2CId, samRequestContext)
      maybeUserAgain <- (maybeUser, maybeGoogleSubjectId) match {
        case (None, Some(googleSubjectId)) =>
          updateUserAzureB2CId(azureB2CId, googleSubjectId, directoryDAO, registrationDAO, samRequestContext)
        case _ => IO.pure(maybeUser)
      }
    } yield maybeUserAgain.getOrElse(throw new WorkbenchExceptionWithErrorReport(ErrorReport(StatusCodes.Forbidden, s"Azure Id $azureB2CId not found in sam")))
  }

  // Writes the B2C id onto the user found via the Google subject id (in both the
  // directory and the registration store), then reloads by B2C id.
  private def updateUserAzureB2CId(azureB2CId: AzureB2CId, googleSubjectId: GoogleSubjectId, directoryDAO: DirectoryDAO, registrationDAO: RegistrationDAO, samRequestContext: SamRequestContext) = {
    for {
      maybeSubject <- directoryDAO.loadSubjectFromGoogleSubjectId(googleSubjectId, samRequestContext)
      _ <- maybeSubject match {
        case Some(userId: WorkbenchUserId) =>
          directoryDAO.setUserAzureB2CId(userId, azureB2CId, samRequestContext)
            .flatMap(_ => registrationDAO.setUserAzureB2CId(userId, azureB2CId, samRequestContext))
        case _ => IO.unit
      }
      maybeUser <- directoryDAO.loadUserByAzureB2CId(azureB2CId, samRequestContext)
    } yield {
      maybeUser
    }
  }

  // Maps a Google subject id to a user id; 403 when unknown, 409 when the
  // subject exists but is not a user (e.g. a group).
  private def lookUpByGoogleSubjectId(googleSubjectId: GoogleSubjectId, directoryDAO: DirectoryDAO, samRequestContext: SamRequestContext): IO[WorkbenchUserId] =
    for {
      subject <- directoryDAO.loadSubjectFromGoogleSubjectId(googleSubjectId, samRequestContext)
      userInfo <- subject match {
        case Some(uid: WorkbenchUserId) => IO.pure(uid)
        case Some(_) =>
          IO.raiseError(new WorkbenchExceptionWithErrorReport(ErrorReport(StatusCodes.Conflict, s"subjectId $googleSubjectId is not a WorkbenchUser")))
        case None =>
          IO.raiseError(new WorkbenchExceptionWithErrorReport(ErrorReport(StatusCodes.Forbidden, s"Google Id $googleSubjectId not found in sam")))
      }
    } yield userInfo
}
| broadinstitute/sam | src/main/scala/org/broadinstitute/dsde/workbench/sam/api/StandardUserInfoDirectives.scala | Scala | bsd-3-clause | 7,478 |
package bad.robot.radiate
import java.net.URL
object UrlSyntax {

  /** Enriches [[java.net.URL]] with small convenience operations. */
  implicit class UrlOps(url: URL) {

    /** Returns a copy of this URL with `port` filled in when the URL does not
      * carry an explicit port (getPort == -1); otherwise returns the URL
      * unchanged. */
    def withDefaultPort(port: Int): URL = {
      if (url.getPort == -1) new URL(url.getProtocol, url.getHost, port, url.getFile)
      else url
    }

    /** Appends a path segment, guaranteeing exactly one '/' between the base
      * URL and the segment. Fixes a join bug: previously a base URL ending in
      * '/' produced a double slash ("http://host//path"). Spaces in the
      * segment are percent-encoded. */
    def /(path: String): URL = {
      val segment = if (path.startsWith("/")) path else s"/$path"
      // stripSuffix removes a trailing '/' on the base so we never emit "//".
      new URL(url.toString.stripSuffix("/") + segment.replace(" ", "%20"))
    }
  }

  /** Implicitly parse a string into a [[java.net.URL]]. */
  implicit def stringToUrl(url: String): URL = new URL(url)
}
| tobyweston/radiate | src/main/scala/bad/robot/radiate/UrlSyntax.scala | Scala | apache-2.0 | 476 |
package org.toxicblend.collider.messages
import akka.actor.ActorRef
case class PrepareWork(client: ActorRef, settings: Map[String, String])
case class PrepareSlaveWorkers(val settings: Map[String, String])
case object PrepareWorkAck
case object SystemStatusQuery
case class SystemStatusResult(val availableWorkers: Int)
case class RegisterSlaveManager(val aSlaveManager: ActorRef)
case object RegisterSlaveManagerAck
case class RegisterSlaveWorker(val aWorker: ActorRef) // ??
case object RegisterSlaveWorkerAck // ??
case class WorkRequest(val from: ActorRef)
case class WorkResult(val from: ActorRef, val result: String)
case class Work(val work: String)
| toxicblend/toxicblendcollider | src/main/scala/org/toxicblend/collider/messages/Messages.scala | Scala | gpl-3.0 | 662 |
/*
Copyright 2013 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.summingbird.scalding.store
import com.twitter.summingbird.batch.store.HDFSMetadata
import cascading.flow.FlowDef
import com.twitter.bijection.Injection
import cascading.flow.FlowDef
import com.twitter.scalding.{Dsl, Mode, TDsl, TypedPipe, Hdfs => HdfsMode, TupleSetter}
import com.twitter.scalding.commons.source.VersionedKeyValSource
import com.twitter.summingbird.scalding.batch.BatchedStore
import com.twitter.summingbird.scalding.{Try, FlowProducer, Scalding}
import com.twitter.algebird.monad.Reader
import com.twitter.summingbird.scalding._
import com.twitter.summingbird.batch.{BatchID, Batcher, Timestamp }
import scala.util.{ Try => ScalaTry }
/**
* Scalding implementation of the batch read and write components of a
* store that uses the VersionedKeyValSource from scalding-commons.
*
* @author Oscar Boykin
* @author Sam Ritchie
* @author Ashu Singhal
*/
object VersionedBatchStore {
def apply[K, V, K2, V2](rootPath: String, versionsToKeep: Int)
(pack: (BatchID, (K, V)) => (K2, V2))
(unpack: ((K2, V2)) => (K, V))(
implicit
batcher: Batcher,
injection: Injection[(K2, V2), (Array[Byte], Array[Byte])],
ordering: Ordering[K]): VersionedBatchStore[K, V, K2, V2] =
new VersionedBatchStore(rootPath, versionsToKeep, batcher)(pack)(unpack)
}
/**
* Allows subclasses to share the means of reading version numbers but
* plug in methods to actually read or write the data.
*/
abstract class VersionedBatchStoreBase[K, V](val rootPath: String) extends BatchedStore[K, V] {
/**
* Returns a snapshot of the store's (K, V) pairs aggregated up to
* (but not including!) the time covered by the supplied batchID.
*
* Aggregating the readLast for a particular batchID with the
* stream stored for the same batchID will return the aggregate up
* to (but not including) batchID.next. Streams deal with inclusive
* upper bound.
*/
override def readLast(exclusiveUB: BatchID, mode: Mode): Try[(BatchID, FlowProducer[TypedPipe[(K, V)]])] = {
mode match {
case hdfs: HdfsMode =>
lastBatch(exclusiveUB, hdfs)
.map { Right(_) }
.getOrElse {
Left(List("No last batch available < %s for VersionedBatchStore(%s)".format(exclusiveUB, rootPath)))
}
case _ => Left(List("Mode: %s not supported for VersionedBatchStore(%s)".format(mode, rootPath)))
}
}
/**
* These functions convert back and forth between a specific
* BatchID and the earliest time of the BatchID just after it.
*
* The version numbers are the exclusive upper-bound of time
* covered by this store, while the batchIDs are the inclusive
* upper bound. Put another way, all events that occured before the
* version are included in this store.
*/
def batchIDToVersion(b: BatchID): Long = batcher.earliestTimeOf(b.next).milliSinceEpoch
def versionToBatchID(ver: Long): BatchID = batcher.batchOf(Timestamp(ver)).prev
protected def lastBatch(exclusiveUB: BatchID, mode: HdfsMode): Option[(BatchID, FlowProducer[TypedPipe[(K,V)]])] = {
val meta = HDFSMetadata(mode.conf, rootPath)
/*
* The deprecated Summingbird builder API coordinated versioning
* through a _summingbird.json dropped into each version of this
* store's VersionedStore.
*
* The new API (as of 0.1.0) coordinates this state via the actual
* version numbers within the VersionedStore. This function
* resolves the BatchID out of a version by first checking the
* metadata inside of the version; if the metadata exists, it
* takes preference over the version number (which was garbage,
* just wall clock time, in the deprecated API). If the metadata
* does NOT exist we know that the version is meaningful and
* convert it to a batchID.
*
* TODO (https://github.com/twitter/summingbird/issues/95): remove
* this when all internal Twitter jobs have run for a while with
* the new version format.
*/
def versionToBatchIDCompat(ver: Long): BatchID = {
/**
* Old style writes the UPPER BOUND batchID, so all times
* are in a batch LESS than the value in the file.
*/
meta(ver)
.get[String]
.flatMap { str => ScalaTry(BatchID(str).prev) }
.map { oldbatch =>
val newBatch = versionToBatchID(ver)
if(newBatch > oldbatch) {
println("## WARNING ##")
println("in BatchStore(%s)".format(rootPath))
println("Old-style version number is ahead of what the new-style would be.")
println("Until batchID: %s (%s) you will see this warning"
.format(newBatch, batcher.earliestTimeOf(newBatch)))
println("##---------##")
}
oldbatch
}
.getOrElse(versionToBatchID(ver))
}
meta
.versions.map { ver => (versionToBatchIDCompat(ver), readVersion(ver)) }
.filter { _._1 < exclusiveUB }
.reduceOption { (a, b) => if (a._1 > b._1) a else b }
}
protected def readVersion(v: Long): FlowProducer[TypedPipe[(K, V)]]
}
/*
* TODO (https://github.com/twitter/summingbird/issues/94): it looks
* like when we get the mappable/directory this happens at a different
* time (not atomically) with getting the meta-data. This seems like
* something we need to fix: atomically get meta-data and open the
* Mappable. The source parameter is pass-by-name to avoid needing
* the hadoop Configuration object when running the storm job.
*/
class VersionedBatchStore[K, V, K2, V2](rootPath: String, versionsToKeep: Int, override val batcher: Batcher)
(pack: (BatchID, (K, V)) => (K2, V2))
(unpack: ((K2, V2)) => (K, V))(
implicit @transient injection: Injection[(K2, V2), (Array[Byte], Array[Byte])], override val ordering: Ordering[K])
extends VersionedBatchStoreBase[K, V](rootPath) {
/** Make sure not to keep more than versionsToKeep when we write out.
* If this is out of sync with VersionedKeyValSource we can have issues
*/
override def select(b: List[BatchID]): List[BatchID] = b.takeRight(versionsToKeep)
/**
* writeLast receives an INCLUSIVE upper bound on batchID and a
* pipe of all key-value pairs aggregated up to (and including)
* that batchID. (Yes, this is confusing, since a number of other
* methods talk about the EXCLUSIVE upper bound.)
*
* This implementation of writeLast sinks all key-value pairs out
* into a VersionedStore directory whose tagged version is the
* EXCLUSIVE upper bound on batchID, or "batchID.next".
*/
override def writeLast(batchID: BatchID, lastVals: TypedPipe[(K, V)])(implicit flowDef: FlowDef, mode: Mode): Unit = {
import Dsl._
val batchVersion = batchIDToVersion(batchID)
/**
* The Builder API used to not specify a sinkVersion, leading to
* versions tagged with the wall clock time. When builder API
* users migrate over to the new code, they can run into a
* situation where the new version created has a lower version
* than the current maximum version in the directory.
*
* This behavior clashes with the current VersionedState
* implementation, which decides what data to source by querying
* meta.mostRecentVersion. If mostRecentVersion doesn't change
* from run to run, the job will process the same data over and
* over.
*
* To solve this issue and assist with migrations, if the
* existing max version in the directory has a timestamp that's
* greater than that of the batchID being committed, we add a
* single millisecond to the current version, guaranteeing that
* we're writing a new max version (but only bumping a tiny bit
* forward).
*
* After a couple of job runs the batchID version should start
* winning.
*/
val newVersion: Option[Long] = mode match {
case m: HdfsMode => {
val meta = HDFSMetadata(m.conf, rootPath)
meta.mostRecentVersion.map(_.version)
.filter(_ > batchVersion)
.map(_ + 1L)
.orElse(Some(batchVersion))
}
case _ => Some(batchVersion)
}
lastVals.map(pack(batchID, _))
.toPipe((0,1))
.write(VersionedKeyValSource[K2, V2](rootPath,
sourceVersion=None,
sinkVersion=newVersion,
maxFailures=0,
versionsToKeep=versionsToKeep))
}
/**
* Returns a FlowProducer that supplies all data for the given
* specific version within this store's rootPath.
*/
protected def readVersion(v: Long): FlowProducer[TypedPipe[(K, V)]] = Reader { (flowMode: (FlowDef, Mode)) =>
val mappable = VersionedKeyValSource[K2, V2](rootPath, sourceVersion=Some(v))
TypedPipe.from(mappable)(flowMode._1, flowMode._2)
.map(unpack)
}
}
| surabhiiyer/summingbird | summingbird-scalding/src/main/scala/com/twitter/summingbird/scalding/store/VersionedBatchStore.scala | Scala | apache-2.0 | 9,406 |
package ee.cone.c4actor
import java.util.UUID
import com.typesafe.scalalogging.LazyLogging
import ee.cone.c4di.{c4, c4multi, provide}
import ee.cone.c4actor.Types._
object SnapshotUtilImpl extends SnapshotUtil {
  // Valid snapshot path: "<subdir>/<16-hex offset>-<uuid>[-key-value...]".
  private val NameRegex =
    """(snapshot[a-z_]+)/([0-9a-f]{16})-([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})([-0-9a-z]+)?""".r

  // Parses the trailing "-key-value-key-value..." flags segment into headers;
  // yields None when the segment does not split into whole key/value pairs.
  private def parseHeaders(kvs: String): Option[List[RawHeader]] = {
    val parts = kvs.split("-").toList.tail // leading '-' yields an empty first element
    if (parts.size % 2 == 0)
      Some(parts.grouped(2).toList.collect { case key :: value :: Nil => RawHeader(key, value) })
    else
      None
  }

  /** Extracts offset, uuid and optional headers from a snapshot's relative path;
    * None when the path does not look like a snapshot. */
  def hashFromName: RawSnapshot=>Option[SnapshotInfo] = {
    case raw@RawSnapshot(NameRegex(subDirStr, offsetHex, uuid, flags)) =>
      Option(flags) match {
        case None => Some(SnapshotInfo(subDirStr, offsetHex, uuid, Nil, raw))
        case Some(kvs) => parseHeaders(kvs).map(headers => SnapshotInfo(subDirStr, offsetHex, uuid, headers, raw))
      }
    case _ =>
      None
  }

  /** Content hash used in snapshot names: a type-3 UUID over the raw bytes. */
  def hashFromData: Array[Byte]=>String = bytes => UUID.nameUUIDFromBytes(bytes).toString
}
@c4("SnapshotUtilImplApp") final class SnapshotUtilProvider {
@provide def provide: Seq[SnapshotUtil] = List(SnapshotUtilImpl)
}
import SnapshotUtilImpl._
//case class Snapshot(offset: NextOffset, uuid: String, raw: RawSnapshot)
@c4multi("SnapshotUtilImplApp") final class SnapshotSaverImpl(subDirStr: String)(inner: RawSnapshotSaver) extends SnapshotSaver {
  /** Persists `data` under a name derived from the offset, the content hash and
    * any headers, verifying the name round-trips through hashFromName before
    * delegating the actual write to `inner`. */
  def save(offset: NextOffset, data: Array[Byte], headers: List[RawHeader]): RawSnapshot = {
    val headerSuffix = headers.map(h => s"-${h.key}-${h.value}").mkString
    val relativePath = s"$subDirStr/$offset-${hashFromData(data)}$headerSuffix"
    val snapshot = RawSnapshot(relativePath)
    // Guard: a name that hashFromName cannot parse would be unloadable later.
    assert(hashFromName(snapshot).nonEmpty, s"Not a valid name ${snapshot.relativePath}")
    inner.save(snapshot, data)
    snapshot
  }
}
@c4("SnapshotLoaderImplApp") final class SnapshotLoaderImpl(raw: RawSnapshotLoader) extends SnapshotLoader with LazyLogging {
  /** Loads a snapshot as a raw event; None when the name is not a valid
    * snapshot name or the loaded bytes fail the content-hash check. */
  def load(snapshot: RawSnapshot): Option[RawEvent] = {
    logger.debug(s"Loading raw snapshot [${snapshot.relativePath}]")
    val res = hashFromName(snapshot).flatMap { info => // parse first, securing fs access
      Option(raw.load(snapshot))
        .filter(data => hashFromData(data.toByteArray) == info.uuid) // integrity check
        .map(data => SimpleRawEvent(info.offset, data, info.headers))
    }
    logger.debug(s"Loaded raw snapshot ${res.nonEmpty}")
    res
  }
}
@c4("SnapshotLoaderFactoryImplApp") final class SnapshotLoaderFactoryImpl extends SnapshotLoaderFactory {
def create(raw: RawSnapshotLoader): SnapshotLoader =
new SnapshotLoaderImpl(raw)
} | conecenter/c4proto | base_lib/src/main/scala/ee/cone/c4actor/SnapshotManagerImpl.scala | Scala | apache-2.0 | 2,637 |
/* Global.scala
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* Copyright (C) 2015 Universiteit Gent
*
* This file is part of the Rasbeb project, an interactive web
* application for Bebras competitions.
*
* Corresponding author:
*
* Kris Coolsaet
* Department of Applied Mathematics, Computer Science and Statistics
* Ghent University
* Krijgslaan 281-S9
* B-9000 GENT Belgium
*
* The Rasbeb Web Application is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The Rasbeb Web Application is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with the Rasbeb Web Application (file LICENSE in the
* distribution). If not, see <http://www.gnu.org/licenses/>.
*
*/
import db.DataAccess
import play.api.db.DB
import play.api.libs.mailer.{SMTPConfiguration, SMTPMailer}
import play.api.{Application, GlobalSettings, Mode}
import play.libs.mailer.MailerClient
import util.Mail
/**
* Adapts the global settings object to our needs.
*/
object Global extends GlobalSettings {
// lazy val smtpEchoServer = new SmtpEchoServer()
// we have chosen the Scala version because the Java version does not have a Mode parameter
// in onLoadConfig. Bug?
/**
* Merge additional config files into the application, depending on the current mode
*/
/* No longer used: config file must now be given as startup parameter
override def onLoadConfig(config: Configuration, path: File, classloader: ClassLoader, mode: Mode.Mode): Configuration = {
// see http://stackoverflow.com/questions/9723224/how-to-manage-application-conf-in-several-environments-with-play-2-0
config ++ Configuration(ConfigFactory.load(mode.toString.toLowerCase + ".conf"))
}
*/
/**
* Initialize the data access provider for this application
* and the mail server
*/
override def onStart(app: Application) {
Mail.setupMailerClient(app.configuration);
app.mode match {
case Mode.Dev =>
DataAccess.setProviderFromDataSource(DB.getDataSource("dev")(app))
// smtpEchoServer.start() // using mock=yes functionality of play.mailer
case Mode.Prod =>
DataAccess.setProviderFromDataSource(DB.getDataSource("prod")(app))
case Mode.Test =>
DataAccess.setProviderForTesting()
}
}
/**
* Stop the mail server
*/
/*
override def onStop(app: Application) {
app.mode match {
case Mode.Dev =>
// smtpEchoServer.stop() // TODO use mock=yes functionality of play.mailer
case Mode.Prod =>
// currently nothing needs to be done
case Mode.Test =>
// currently nothing needs to be done
}
}
*/
}
| kcoolsae/Rasbeb | webapp/app/Global.scala | Scala | agpl-3.0 | 3,134 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.tools.nsc
/** The main class for NSC, a compiler for the programming
* language Scala.
*/
object MainBench extends Driver with EvalLoop {
lazy val theCompiler = Global(settings, reporter)
override def newCompiler() = theCompiler
val NIter = 50
val NBest = 10
override def main(args: Array[String]) = {
val times = new Array[Long](NIter)
var start = System.nanoTime()
for (i <- 0 until NIter) {
if (i == NIter-1) {
theCompiler.settings.Ystatistics.value = List("all")
theCompiler.settings.YhotStatisticsEnabled.value = true
}
process(args)
val end = System.nanoTime()
val duration = (end-start)/1000000
println(s"${duration}ms")
times(i) = duration
start = end
}
val avg = times.sorted.take(NBest).sum / NBest
println(s"avg shortest $NBest times ${avg}ms")
}
}
| scala/scala | src/compiler/scala/tools/nsc/MainBench.scala | Scala | apache-2.0 | 1,182 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package controllers.template
import controllers.{AssetsBuilder, AssetsMetadata}
import javax.inject.{Inject, Singleton}
import play.api.http.HttpErrorHandler
@Singleton
class Template @Inject()(errorHandler: HttpErrorHandler, meta: AssetsMetadata) extends AssetsBuilder(errorHandler, meta)
| hmrc/govuk-template | src/main/scala/controllers/template/Template.scala | Scala | apache-2.0 | 897 |
package smarthouse.restapi.models.db
import java.util.Date
import smarthouse.restapi.models.DeviceEntity
import smarthouse.restapi.utils.DatabaseService
trait DeviceEntityTable {
protected val databaseService: DatabaseService
import databaseService.driver.api._
implicit val JavaUtilDateMapper =
MappedColumnType.base[java.util.Date, java.sql.Timestamp](
d => new java.sql.Timestamp(d.getTime),
d => new java.util.Date(d.getTime))
class Devices(tag: Tag) extends Table[DeviceEntity](tag, "devices") {
def id = column[Option[Long]]("id", O.PrimaryKey, O.AutoInc)
def name = column[String]("name")
def identifier = column[String]("identifier")
def created = column[Date]("created")
def * = (id, name, identifier, created) <> ((DeviceEntity.apply _).tupled, DeviceEntity.unapply)
}
protected val devices = TableQuery[Devices]
}
| andrewobukhov/smart-house | src/main/scala/smarthouse/restapi/models/db/DeviceEntityTable.scala | Scala | mit | 889 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.k8s.integrationtest
import org.apache.spark.deploy.k8s.integrationtest.TestConfig.{getTestImageRepo, getTestImageTag}
private[spark] trait PythonTestsSuite { k8sSuite: KubernetesSuite =>
import PythonTestsSuite._
import KubernetesSuite.k8sTestTag
test("Run PySpark on simple pi.py example", k8sTestTag) {
sparkAppConf
.set("spark.kubernetes.container.image", s"${getTestImageRepo}/spark-py:${getTestImageTag}")
runSparkApplicationAndVerifyCompletion(
appResource = PYSPARK_PI,
mainClass = "",
expectedLogOnCompletion = Seq("Pi is roughly 3"),
appArgs = Array("5"),
driverPodChecker = doBasicDriverPyPodCheck,
executorPodChecker = doBasicExecutorPyPodCheck,
appLocator = appLocator,
isJVM = false)
}
test("Run PySpark with Python2 to test a pyfiles example", k8sTestTag) {
sparkAppConf
.set("spark.kubernetes.container.image", s"${getTestImageRepo}/spark-py:${getTestImageTag}")
.set("spark.kubernetes.pyspark.pythonVersion", "2")
runSparkApplicationAndVerifyCompletion(
appResource = PYSPARK_FILES,
mainClass = "",
expectedLogOnCompletion = Seq(
"Python runtime version check is: True",
"Python environment version check is: True"),
appArgs = Array("python"),
driverPodChecker = doBasicDriverPyPodCheck,
executorPodChecker = doBasicExecutorPyPodCheck,
appLocator = appLocator,
isJVM = false,
pyFiles = Some(PYSPARK_CONTAINER_TESTS))
}
test("Run PySpark with Python3 to test a pyfiles example", k8sTestTag) {
sparkAppConf
.set("spark.kubernetes.container.image", s"${getTestImageRepo}/spark-py:${getTestImageTag}")
.set("spark.kubernetes.pyspark.pythonVersion", "3")
runSparkApplicationAndVerifyCompletion(
appResource = PYSPARK_FILES,
mainClass = "",
expectedLogOnCompletion = Seq(
"Python runtime version check is: True",
"Python environment version check is: True"),
appArgs = Array("python3"),
driverPodChecker = doBasicDriverPyPodCheck,
executorPodChecker = doBasicExecutorPyPodCheck,
appLocator = appLocator,
isJVM = false,
pyFiles = Some(PYSPARK_CONTAINER_TESTS))
}
}
private[spark] object PythonTestsSuite {
  // Location of the bundled PySpark example scripts inside the container image.
  val CONTAINER_LOCAL_PYSPARK: String = "local:///opt/spark/examples/src/main/python/"
  val PYSPARK_PI: String = s"${CONTAINER_LOCAL_PYSPARK}pi.py"
  val PYSPARK_FILES: String = s"${CONTAINER_LOCAL_PYSPARK}pyfiles.py"
  val PYSPARK_CONTAINER_TESTS: String = s"${CONTAINER_LOCAL_PYSPARK}py_container_checks.py"
}
| rikima/spark | resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/PythonTestsSuite.scala | Scala | apache-2.0 | 3,424 |
/*
* Copyright (c) 2012-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.enrich.common
package enrichments
// Specs2
import org.specs2.Specification
import org.specs2.matcher.DataTables
import org.specs2.scalaz.ValidationMatchers
// Joda
import org.joda.time.DateTime
import org.joda.time.DateTimeZone
// Scalaz
import scalaz._
import Scalaz._
class ExtractEventTypeSpec extends Specification with DataTables with ValidationMatchers {
def is = s2"""
This is a specification to test the extractEventType function
extractEventType should return the event name for any valid event code $e1
extractEventType should return a validation failure for any invalid event code $e2
formatCollectorTstamp should validate collector timestamps $e3
extractTimestamp should validate timestamps $e4
"""
val FieldName = "e"
def err: (String) => String = input => "Field [%s]: [%s] is not a recognised event code".format(FieldName, input)
def e1 =
"SPEC NAME" || "INPUT VAL" | "EXPECTED OUTPUT" |
"transaction" !! "tr" ! "transaction" |
"transaction item" !! "ti" ! "transaction_item" |
"page view" !! "pv" ! "page_view" |
"page ping" !! "pp" ! "page_ping" |
"unstructured event" !! "ue" ! "unstruct" |
"structured event" !! "se" ! "struct" |
"structured event (legacy)" !! "ev" ! "struct" |
"ad impression (legacy)" !! "ad" ! "ad_impression" |> { (_, input, expected) =>
EventEnrichments.extractEventType(FieldName, input) must beSuccessful(expected)
}
def e2 =
"SPEC NAME" || "INPUT VAL" | "EXPECTED OUTPUT" |
"null" !! null ! err("null") |
"empty string" !! "" ! err("") |
"unrecognized #1" !! "e" ! err("e") |
"unrecognized #2" !! "evnt" ! err("evnt") |> { (_, input, expected) =>
EventEnrichments.extractEventType(FieldName, input) must beFailing(expected)
}
val SeventiesTstamp = Some(new DateTime(0, DateTimeZone.UTC))
val BCTstamp = SeventiesTstamp.map(_.minusYears(2000))
val FarAwayTstamp = SeventiesTstamp.map(_.plusYears(10000))
def e3 =
// format: off
"SPEC NAME" || "INPUT VAL" | "EXPECTED OUTPUT" |
"None" !! None ! "No collector_tstamp set".fail |
"Negative timestamp" !! BCTstamp ! "Collector timestamp [-63113904000000] formatted as [-0030-01-01 00:00:00.000] which isn't Redshift-compatible".fail |
">10k timestamp" !! FarAwayTstamp ! "Collector timestamp [315569520000000] formatted as [11970-01-01 00:00:00.000] which isn't Redshift-compatible".fail |
"Valid timestamp" !! SeventiesTstamp ! "1970-01-01 00:00:00.000".success |> {
// format: on
(_, input, expected) =>
EventEnrichments.formatCollectorTstamp(input) must_== (expected)
}
def e4 =
"SPEC NAME" || "INPUT VAL" | "EXPECTED OUTPUT" |
"Not long" !! ("f", "v") ! "Field [f]: [v] is not in the expected format (ms since epoch)".fail |
"Too long" !! ("f", "1111111111111111") ! "Field [f]: [1111111111111111] is formatted as [37179-09-17 07:18:31.111] which isn't Redshift-compatible".fail |
"Valid ts" !! ("f", "1") ! "1970-01-01 00:00:00.001".success |> { (_, input, expected) =>
EventEnrichments.extractTimestamp(input._1, input._2) must_== (expected)
}
}
class DerivedTimestampSpec extends Specification with DataTables with ValidationMatchers {
def is =
"This is a specification to test the getDerivedTimestamp function" ^
p ^
"getDerivedTimestamp should correctly calculate the derived timestamp " ! e1 ^
end
def e1 =
"SPEC NAME" || "DVCE_CREATED_TSTAMP" | "DVCE_SENT_TSTAMP" | "COLLECTOR_TSTAMP" | "TRUE_TSTAMP" | "EXPECTED DERIVED_TSTAMP" |
"No dvce_sent_tstamp" !! "2014-04-29 12:00:54.555" ! null ! "2014-04-29 09:00:54.000" ! null ! "2014-04-29 09:00:54.000" |
"No dvce_created_tstamp" !! null ! null ! "2014-04-29 09:00:54.000" ! null ! "2014-04-29 09:00:54.000" |
"No collector_tstamp" !! null ! null ! null ! null ! null |
"dvce_sent_tstamp before dvce_created_tstamp" !! "2014-04-29 09:00:54.001" ! "2014-04-29 09:00:54.000" ! "2014-04-29 09:00:54.000" ! null ! "2014-04-29 09:00:54.000" |
"dvce_sent_tstamp after dvce_created_tstamp" !! "2014-04-29 09:00:54.000" ! "2014-04-29 09:00:54.001" ! "2014-04-29 09:00:54.000" ! null ! "2014-04-29 09:00:53.999" |
"true_tstamp override" !! "2014-04-29 09:00:54.001" ! "2014-04-29 09:00:54.000" ! "2014-04-29 09:00:54.000" ! "2000-01-01 00:00:00.000" ! "2000-01-01 00:00:00.000" |> {
(_, created, sent, collected, truth, expected) =>
EventEnrichments.getDerivedTimestamp(Option(sent), Option(created), Option(collected), Option(truth)) must beSuccessful(
Option(expected))
}
}
| RetentionGrid/snowplow | 3-enrich/scala-common-enrich/src/test/scala/com.snowplowanalytics.snowplow.enrich.common/enrichments/eventEnrichmentSpecs.scala | Scala | apache-2.0 | 6,364 |
package com.github.marsojm.odds
import org.scalatest.FunSuite
class Rules$Test extends FunSuite {

  // Convenience builder for a five-card hand.
  private def hand(cards: Card*): List[Card] = cards.toList

  test("testThreeOfAKind with valid three of a kind in hand") {
    // Three aces present -> rule should hold.
    val cards = hand(
      new Card(Ace(), Clubs()),
      new Card(Ace(), Hearts()),
      new Card(Seven(), Spades()),
      new Card(Ace(), Diamonds()),
      new Card(Jack(), Clubs()))
    assert(Rules.threeOfAKind(cards))
  }

  test("testThreeOfAKind with invalid three of a kind in hand") {
    // Only a pair of aces -> rule must not hold.
    val cards = hand(
      new Card(King(), Clubs()),
      new Card(Ace(), Hearts()),
      new Card(Seven(), Spades()),
      new Card(Ace(), Diamonds()),
      new Card(Jack(), Clubs()))
    assert(!Rules.threeOfAKind(cards))
  }

  test("testPair with valid pair in hand") {
    // Two aces present -> rule should hold.
    val cards = hand(
      new Card(Two(), Clubs()),
      new Card(Ace(), Hearts()),
      new Card(Seven(), Spades()),
      new Card(Ace(), Diamonds()),
      new Card(Jack(), Clubs()))
    assert(Rules.pair(cards))
  }

  test("testPair with invalid pair in hand") {
    // All ranks distinct -> rule must not hold.
    val cards = hand(
      new Card(Two(), Clubs()),
      new Card(Ace(), Hearts()),
      new Card(Seven(), Spades()),
      new Card(Queen(), Diamonds()),
      new Card(Jack(), Clubs()))
    assert(!Rules.pair(cards))
  }
}
| marsojm/odds | src/test/java/com/github/marsojm/odds/Rules$Test.scala | Scala | mit | 1,223 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import java.util.concurrent.Callable
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.{CatalystTypeConverters, InternalRow, QualifiedTableName, TableIdentifier}
import org.apache.spark.sql.catalyst.CatalystTypeConverters.convertToScala
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.catalog.{CatalogRelation, CatalogUtils}
import org.apache.spark.sql.catalyst.expressions
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.planning.PhysicalOperation
import org.apache.spark.sql.catalyst.plans.logical.{InsertIntoTable, LogicalPlan, Project}
import org.apache.spark.sql.catalyst.plans.physical.{HashPartitioning, UnknownPartitioning}
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.execution.{RowDataSourceScanExec, SparkPlan}
import org.apache.spark.sql.execution.command._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.sources._
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
/**
* Replaces generic operations with specific variants that are designed to work with Spark
* SQL Data Sources.
*
* Note that, this rule must be run after `PreprocessTableCreation` and
* `PreprocessTableInsertion`.
*/
case class DataSourceAnalysis(conf: SQLConf) extends Rule[LogicalPlan] {

  def resolver: Resolver = conf.resolver

  // Visible for testing.
  /**
   * Builds the projection for an INSERT with static partition values: the static partition
   * columns are appended (as literal casts) between the normal columns and the remaining
   * dynamic partition columns, so the SELECT output matches the table layout.
   *
   * Preconditions validated here: column counts line up, every provided key is a partition
   * column, each static partition has exactly one value, and all static partitions precede
   * the dynamic ones.
   */
  def convertStaticPartitions(
      sourceAttributes: Seq[Attribute],
      providedPartitions: Map[String, Option[String]],
      targetAttributes: Seq[Attribute],
      targetPartitionSchema: StructType): Seq[NamedExpression] = {

    // Caller guarantees at least one static (valued) partition.
    assert(providedPartitions.exists(_._2.isDefined))

    val staticPartitions = providedPartitions.flatMap {
      case (partKey, Some(partValue)) => (partKey, partValue) :: Nil
      case (_, None) => Nil
    }

    // The sum of the number of static partition columns and columns provided in the SELECT
    // clause needs to match the number of columns of the target table.
    if (staticPartitions.size + sourceAttributes.size != targetAttributes.size) {
      throw new AnalysisException(
        s"The data to be inserted needs to have the same number of " +
          s"columns as the target table: target table has ${targetAttributes.size} " +
          s"column(s) but the inserted data has ${sourceAttributes.size + staticPartitions.size} " +
          s"column(s), which contain ${staticPartitions.size} partition column(s) having " +
          s"assigned constant values.")
    }

    if (providedPartitions.size != targetPartitionSchema.fields.size) {
      throw new AnalysisException(
        s"The data to be inserted needs to have the same number of " +
          s"partition columns as the target table: target table " +
          s"has ${targetPartitionSchema.fields.size} partition column(s) but the inserted " +
          s"data has ${providedPartitions.size} partition columns specified.")
    }

    // Every statically-valued key must name an actual partition column (case handled by
    // the session's resolver).
    staticPartitions.foreach {
      case (partKey, partValue) =>
        if (!targetPartitionSchema.fields.exists(field => resolver(field.name, partKey))) {
          throw new AnalysisException(
            s"$partKey is not a partition column. Partition columns are " +
              s"${targetPartitionSchema.fields.map(_.name).mkString("[", ",", "]")}")
        }
    }

    // For each partition column (in schema order): Some(literal alias) if a static value was
    // supplied, None if it is dynamic. Multiple values for one column is an error.
    val partitionList = targetPartitionSchema.fields.map { field =>
      val potentialSpecs = staticPartitions.filter {
        case (partKey, partValue) => resolver(field.name, partKey)
      }
      if (potentialSpecs.size == 0) {
        None
      } else if (potentialSpecs.size == 1) {
        val partValue = potentialSpecs.head._2
        Some(Alias(Cast(Literal(partValue), field.dataType), field.name)())
      } else {
        throw new AnalysisException(
          s"Partition column ${field.name} have multiple values specified, " +
            s"${potentialSpecs.mkString("[", ", ", "]")}. Please only specify a single value.")
      }
    }

    // We first drop all leading static partitions using dropWhile and check if there is
    // any static partition appearing after dynamic partitions.
    partitionList.dropWhile(_.isDefined).collectFirst {
      case Some(_) =>
        throw new AnalysisException(
          s"The ordering of partition columns is " +
            s"${targetPartitionSchema.fields.map(_.name).mkString("[", ",", "]")}. " +
            "All partition columns having constant values need to appear before other " +
            "partition columns that do not have an assigned constant value.")
    }

    assert(partitionList.take(staticPartitions.size).forall(_.isDefined))
    // normal columns ++ static partition literals ++ dynamic partition columns.
    val projectList =
      sourceAttributes.take(targetAttributes.size - targetPartitionSchema.fields.size) ++
        partitionList.take(staticPartitions.size).map(_.get) ++
        sourceAttributes.takeRight(targetPartitionSchema.fields.size - staticPartitions.size)

    projectList
  }

  /** Rewrites CreateTable / InsertIntoTable over data source tables into runnable commands. */
  override def apply(plan: LogicalPlan): LogicalPlan = plan transform {
    case CreateTable(tableDesc, mode, None) if DDLUtils.isDatasourceTable(tableDesc) =>
      CreateDataSourceTableCommand(tableDesc, ignoreIfExists = mode == SaveMode.Ignore)

    case CreateTable(tableDesc, mode, Some(query))
        if query.resolved && DDLUtils.isDatasourceTable(tableDesc) =>
      CreateDataSourceTableAsSelectCommand(tableDesc, mode, query)

    case InsertIntoTable(l @ LogicalRelation(_: InsertableRelation, _, _),
        parts, query, overwrite, false) if parts.isEmpty =>
      InsertIntoDataSourceCommand(l, query, overwrite)

    case InsertIntoTable(
        l @ LogicalRelation(t: HadoopFsRelation, _, table), parts, query, overwrite, false) =>
      // If the InsertIntoTable command is for a partitioned HadoopFsRelation and
      // the user has specified static partitions, we add a Project operator on top of the query
      // to include those constant column values in the query result.
      //
      // Example:
      // Let's say that we have a table "t", which is created by
      // CREATE TABLE t (a INT, b INT, c INT) USING parquet PARTITIONED BY (b, c)
      // The statement of "INSERT INTO TABLE t PARTITION (b=2, c) SELECT 1, 3"
      // will be converted to "INSERT INTO TABLE t PARTITION (b, c) SELECT 1, 2, 3".
      //
      // Basically, we will put those partition columns having an assigned value back
      // into the SELECT clause. The output of the SELECT clause is organized as
      // normal_columns static_partitioning_columns dynamic_partitioning_columns.
      // static_partitioning_columns are partitioning columns having assigned
      // values in the PARTITION clause (e.g. b in the above example).
      // dynamic_partitioning_columns are partitioning columns that are not assigned
      // values in the PARTITION clause (e.g. c in the above example).
      val actualQuery = if (parts.exists(_._2.isDefined)) {
        val projectList = convertStaticPartitions(
          sourceAttributes = query.output,
          providedPartitions = parts,
          targetAttributes = l.output,
          targetPartitionSchema = t.partitionSchema)
        Project(projectList, query)
      } else {
        query
      }

      // Sanity check
      if (t.location.rootPaths.size != 1) {
        throw new AnalysisException("Can only write data to relations with a single path.")
      }

      val outputPath = t.location.rootPaths.head
      val inputPaths = actualQuery.collect {
        case LogicalRelation(r: HadoopFsRelation, _, _) => r.location.rootPaths
      }.flatten

      val mode = if (overwrite) SaveMode.Overwrite else SaveMode.Append
      // Overwriting a path we are also reading would destroy the input mid-query.
      if (overwrite && inputPaths.contains(outputPath)) {
        throw new AnalysisException(
          "Cannot overwrite a path that is also being read from.")
      }

      val partitionSchema = actualQuery.resolve(
        t.partitionSchema, t.sparkSession.sessionState.analyzer.resolver)
      val staticPartitions = parts.filter(_._2.nonEmpty).map { case (k, v) => k -> v.get }

      InsertIntoHadoopFsRelationCommand(
        outputPath,
        staticPartitions,
        partitionSchema,
        t.bucketSpec,
        t.fileFormat,
        t.options,
        actualQuery,
        mode,
        table,
        Some(t.location))
  }
}
/**
* Replaces [[CatalogRelation]] with data source table if its table provider is not hive.
*/
class FindDataSourceTable(sparkSession: SparkSession) extends Rule[LogicalPlan] {

  /**
   * Resolves `r` into a [[LogicalRelation]], going through the session catalog's
   * `tableRelationCache` so repeated lookups of the same table reuse the resolved relation.
   */
  private def readDataSourceTable(r: CatalogRelation): LogicalPlan = {
    val table = r.tableMeta
    val qualifiedTableName = QualifiedTableName(table.database, table.identifier.table)
    val cache = sparkSession.sessionState.catalog.tableRelationCache
    val plan = cache.get(qualifiedTableName, new Callable[LogicalPlan]() {
      override def call(): LogicalPlan = {
        val pathOption = table.storage.locationUri.map("path" -> CatalogUtils.URIToString(_))
        val dataSource =
          DataSource(
            sparkSession,
            // In older versions (prior to 2.1) of Spark, the table schema can be empty and
            // should be inferred at runtime. We should still support it.
            userSpecifiedSchema = if (table.schema.isEmpty) None else Some(table.schema),
            partitionColumns = table.partitionColumnNames,
            bucketSpec = table.bucketSpec,
            className = table.provider.get,
            options = table.storage.properties ++ pathOption,
            catalogTable = Some(table))
        // checkFilesExist = false: the files are validated lazily at execution time.
        LogicalRelation(dataSource.resolveRelation(checkFilesExist = false), table)
      }
    }).asInstanceOf[LogicalRelation]

    if (r.output.isEmpty) {
      // It's possible that the table schema is empty and needs to be inferred at runtime. For
      // this case, we don't need to change the output of the cached plan.
      plan
    } else {
      // Reuse the attribute ids from the incoming relation so references stay valid.
      plan.copy(output = r.output)
    }
  }

  /** Replaces [[CatalogRelation]]s over data source tables with resolved relations. */
  override def apply(plan: LogicalPlan): LogicalPlan = plan transform {
    case i @ InsertIntoTable(r: CatalogRelation, _, _, _, _)
        if DDLUtils.isDatasourceTable(r.tableMeta) =>
      i.copy(table = readDataSourceTable(r))

    case r: CatalogRelation if DDLUtils.isDatasourceTable(r.tableMeta) =>
      readDataSourceTable(r)
  }
}
/**
* A Strategy for planning scans over data sources defined using the sources API.
*/
object DataSourceStrategy extends Strategy with Logging {

  /**
   * Plans a scan for each of the `sources` scan traits, from most to least capable:
   * [[CatalystScan]] (raw Catalyst predicates), [[PrunedFilteredScan]] (column pruning +
   * filter push-down), [[PrunedScan]] (column pruning only), [[TableScan]] (full scan).
   */
  def apply(plan: LogicalPlan): Seq[execution.SparkPlan] = plan match {
    case PhysicalOperation(projects, filters, l @ LogicalRelation(t: CatalystScan, _, _)) =>
      pruneFilterProjectRaw(
        l,
        projects,
        filters,
        (requestedColumns, allPredicates, _) =>
          toCatalystRDD(l, requestedColumns, t.buildScan(requestedColumns, allPredicates))) :: Nil

    case PhysicalOperation(projects, filters, l @ LogicalRelation(t: PrunedFilteredScan, _, _)) =>
      pruneFilterProject(
        l,
        projects,
        filters,
        (a, f) => toCatalystRDD(l, a, t.buildScan(a.map(_.name).toArray, f))) :: Nil

    case PhysicalOperation(projects, filters, l @ LogicalRelation(t: PrunedScan, _, _)) =>
      pruneFilterProject(
        l,
        projects,
        filters,
        (a, _) => toCatalystRDD(l, a, t.buildScan(a.map(_.name).toArray))) :: Nil

    case l @ LogicalRelation(baseRelation: TableScan, _, _) =>
      RowDataSourceScanExec(
        l.output,
        toCatalystRDD(l, baseRelation.buildScan()),
        baseRelation,
        UnknownPartitioning(0),
        Map.empty,
        None) :: Nil

    case _ => Nil
  }

  // Get the bucket ID based on the bucketing values.
  // Restriction: Bucket pruning works iff the bucketing column has one and only one column.
  def getBucketId(bucketColumn: Attribute, numBuckets: Int, value: Any): Int = {
    // Evaluate the same hash expression used for write-side bucketing so read-side pruning
    // agrees with file layout.
    val mutableRow = new SpecificInternalRow(Seq(bucketColumn.dataType))
    mutableRow(0) = Cast(Literal(value), bucketColumn.dataType).eval(null)
    val bucketIdGeneration = UnsafeProjection.create(
      HashPartitioning(bucketColumn :: Nil, numBuckets).partitionIdExpression :: Nil,
      bucketColumn :: Nil)
    bucketIdGeneration(mutableRow).getInt(0)
  }

  // Based on Public API.
  private def pruneFilterProject(
      relation: LogicalRelation,
      projects: Seq[NamedExpression],
      filterPredicates: Seq[Expression],
      scanBuilder: (Seq[Attribute], Array[Filter]) => RDD[InternalRow]) = {
    pruneFilterProjectRaw(
      relation,
      projects,
      filterPredicates,
      (requestedColumns, _, pushedFilters) => {
        scanBuilder(requestedColumns, pushedFilters.toArray)
      })
  }

  // Based on Catalyst expressions. The `scanBuilder` function accepts three arguments:
  //
  //  1. A `Seq[Attribute]`, containing all required column attributes. Used to handle relation
  //     traits that support column pruning (e.g. `PrunedScan` and `PrunedFilteredScan`).
  //
  //  2. A `Seq[Expression]`, containing all gathered Catalyst filter expressions, only used for
  //     `CatalystScan`.
  //
  //  3. A `Seq[Filter]`, containing all data source `Filter`s that are converted from (possibly a
  //     subset of) Catalyst filter expressions and can be handled by `relation`. Used to handle
  //     relation traits (`CatalystScan` excluded) that support filter push-down (e.g.
  //     `PrunedFilteredScan` and `HadoopFsRelation`).
  //
  // Note that 2 and 3 shouldn't be used together.
  private def pruneFilterProjectRaw(
      relation: LogicalRelation,
      projects: Seq[NamedExpression],
      filterPredicates: Seq[Expression],
      scanBuilder: (Seq[Attribute], Seq[Expression], Seq[Filter]) => RDD[InternalRow]): SparkPlan = {

    val projectSet = AttributeSet(projects.flatMap(_.references))
    val filterSet = AttributeSet(filterPredicates.flatMap(_.references))

    val candidatePredicates = filterPredicates.map { _ transform {
      case a: AttributeReference => relation.attributeMap(a) // Match original case of attributes.
    }}

    val (unhandledPredicates, pushedFilters, handledFilters) =
      selectFilters(relation.relation, candidatePredicates)

    // A set of column attributes that are only referenced by pushed down filters. We can eliminate
    // them from requested columns.
    val handledSet = {
      val handledPredicates = filterPredicates.filterNot(unhandledPredicates.contains)
      val unhandledSet = AttributeSet(unhandledPredicates.flatMap(_.references))
      AttributeSet(handledPredicates.flatMap(_.references)) --
        (projectSet ++ unhandledSet).map(relation.attributeMap)
    }

    // Combines all Catalyst filter `Expression`s that are either not convertible to data source
    // `Filter`s or cannot be handled by `relation`.
    val filterCondition = unhandledPredicates.reduceLeftOption(expressions.And)

    // These metadata values make scan plans uniquely identifiable for equality checking.
    // TODO(SPARK-17701) using strings for equality checking is brittle
    val metadata: Map[String, String] = {
      val pairs = ArrayBuffer.empty[(String, String)]

      // Mark filters which are completely handled by the underlying DataSource with an Asterisk.
      if (pushedFilters.nonEmpty) {
        val markedFilters = for (filter <- pushedFilters) yield {
          if (handledFilters.contains(filter)) s"*$filter" else s"$filter"
        }
        pairs += ("PushedFilters" -> markedFilters.mkString("[", ", ", "]"))
      }
      pairs += ("ReadSchema" ->
        StructType.fromAttributes(projects.map(_.toAttribute)).catalogString)
      pairs.toMap
    }

    if (projects.map(_.toAttribute) == projects &&
        projectSet.size == projects.size &&
        filterSet.subsetOf(projectSet)) {
      // When it is possible to just use column pruning to get the right projection and
      // when the columns of this projection are enough to evaluate all filter conditions,
      // just do a scan followed by a filter, with no extra project.
      val requestedColumns = projects
        // Safe due to if above.
        .asInstanceOf[Seq[Attribute]]
        // Match original case of attributes.
        .map(relation.attributeMap)
        // Don't request columns that are only referenced by pushed filters.
        .filterNot(handledSet.contains)

      val scan = RowDataSourceScanExec(
        projects.map(_.toAttribute),
        scanBuilder(requestedColumns, candidatePredicates, pushedFilters),
        relation.relation, UnknownPartitioning(0), metadata,
        relation.catalogTable.map(_.identifier))
      filterCondition.map(execution.FilterExec(_, scan)).getOrElse(scan)
    } else {
      // Don't request columns that are only referenced by pushed filters.
      val requestedColumns =
        (projectSet ++ filterSet -- handledSet).map(relation.attributeMap).toSeq

      val scan = RowDataSourceScanExec(
        requestedColumns,
        scanBuilder(requestedColumns, candidatePredicates, pushedFilters),
        relation.relation, UnknownPartitioning(0), metadata,
        relation.catalogTable.map(_.identifier))
      execution.ProjectExec(
        projects, filterCondition.map(execution.FilterExec(_, scan)).getOrElse(scan))
    }
  }

  /**
   * Convert RDD of Row into RDD of InternalRow with objects in catalyst types
   */
  private[this] def toCatalystRDD(
      relation: LogicalRelation,
      output: Seq[Attribute],
      rdd: RDD[Row]): RDD[InternalRow] = {
    if (relation.relation.needConversion) {
      execution.RDDConversions.rowToRowRdd(rdd, output.map(_.dataType))
    } else {
      // Relation already produces InternalRow; cast is safe per the needConversion contract.
      rdd.asInstanceOf[RDD[InternalRow]]
    }
  }

  /**
   * Convert RDD of Row into RDD of InternalRow with objects in catalyst types
   */
  private[this] def toCatalystRDD(relation: LogicalRelation, rdd: RDD[Row]): RDD[InternalRow] = {
    toCatalystRDD(relation, relation.output, rdd)
  }

  /**
   * Tries to translate a Catalyst [[Expression]] into data source [[Filter]].
   *
   * @return a `Some[Filter]` if the input [[Expression]] is convertible, otherwise a `None`.
   */
  protected[sql] def translateFilter(predicate: Expression): Option[Filter] = {
    predicate match {
      case expressions.EqualTo(a: Attribute, Literal(v, t)) =>
        Some(sources.EqualTo(a.name, convertToScala(v, t)))
      case expressions.EqualTo(Literal(v, t), a: Attribute) =>
        Some(sources.EqualTo(a.name, convertToScala(v, t)))

      case expressions.EqualNullSafe(a: Attribute, Literal(v, t)) =>
        Some(sources.EqualNullSafe(a.name, convertToScala(v, t)))
      case expressions.EqualNullSafe(Literal(v, t), a: Attribute) =>
        Some(sources.EqualNullSafe(a.name, convertToScala(v, t)))

      // Comparisons with the literal on the left are flipped (a > lit <=> lit < a).
      case expressions.GreaterThan(a: Attribute, Literal(v, t)) =>
        Some(sources.GreaterThan(a.name, convertToScala(v, t)))
      case expressions.GreaterThan(Literal(v, t), a: Attribute) =>
        Some(sources.LessThan(a.name, convertToScala(v, t)))

      case expressions.LessThan(a: Attribute, Literal(v, t)) =>
        Some(sources.LessThan(a.name, convertToScala(v, t)))
      case expressions.LessThan(Literal(v, t), a: Attribute) =>
        Some(sources.GreaterThan(a.name, convertToScala(v, t)))

      case expressions.GreaterThanOrEqual(a: Attribute, Literal(v, t)) =>
        Some(sources.GreaterThanOrEqual(a.name, convertToScala(v, t)))
      case expressions.GreaterThanOrEqual(Literal(v, t), a: Attribute) =>
        Some(sources.LessThanOrEqual(a.name, convertToScala(v, t)))

      case expressions.LessThanOrEqual(a: Attribute, Literal(v, t)) =>
        Some(sources.LessThanOrEqual(a.name, convertToScala(v, t)))
      case expressions.LessThanOrEqual(Literal(v, t), a: Attribute) =>
        Some(sources.GreaterThanOrEqual(a.name, convertToScala(v, t)))

      case expressions.InSet(a: Attribute, set) =>
        val toScala = CatalystTypeConverters.createToScalaConverter(a.dataType)
        Some(sources.In(a.name, set.toArray.map(toScala)))

      // Because we only convert In to InSet in Optimizer when there are more than certain
      // items. So it is possible we still get an In expression here that needs to be pushed
      // down.
      case expressions.In(a: Attribute, list) if !list.exists(!_.isInstanceOf[Literal]) =>
        val hSet = list.map(e => e.eval(EmptyRow))
        val toScala = CatalystTypeConverters.createToScalaConverter(a.dataType)
        Some(sources.In(a.name, hSet.toArray.map(toScala)))

      case expressions.IsNull(a: Attribute) =>
        Some(sources.IsNull(a.name))
      case expressions.IsNotNull(a: Attribute) =>
        Some(sources.IsNotNull(a.name))

      // If only one side of an And converts, that side alone is still a sound filter
      // (it is implied by the conjunction), hence reduceOption rather than requiring both.
      case expressions.And(left, right) =>
        (translateFilter(left) ++ translateFilter(right)).reduceOption(sources.And)

      // For Or, BOTH sides must convert or the disjunction would be weakened incorrectly.
      case expressions.Or(left, right) =>
        for {
          leftFilter <- translateFilter(left)
          rightFilter <- translateFilter(right)
        } yield sources.Or(leftFilter, rightFilter)

      case expressions.Not(child) =>
        translateFilter(child).map(sources.Not)

      case expressions.StartsWith(a: Attribute, Literal(v: UTF8String, StringType)) =>
        Some(sources.StringStartsWith(a.name, v.toString))

      case expressions.EndsWith(a: Attribute, Literal(v: UTF8String, StringType)) =>
        Some(sources.StringEndsWith(a.name, v.toString))

      case expressions.Contains(a: Attribute, Literal(v: UTF8String, StringType)) =>
        Some(sources.StringContains(a.name, v.toString))

      case _ => None
    }
  }

  /**
   * Selects Catalyst predicate [[Expression]]s which are convertible into data source [[Filter]]s
   * and can be handled by `relation`.
   *
   * @return A triplet of `Seq[Expression]`, `Seq[Filter]`, and `Set[Filter]`. The first element
   *         contains all Catalyst predicate [[Expression]]s that are either not convertible or
   *         cannot be handled by `relation`. The second element contains all converted data source
   *         [[Filter]]s that will be pushed down to the data source. The third element contains
   *         all [[Filter]]s that are completely filtered at the DataSource.
   */
  protected[sql] def selectFilters(
      relation: BaseRelation,
      predicates: Seq[Expression]): (Seq[Expression], Seq[Filter], Set[Filter]) = {

    // For conciseness, all Catalyst filter expressions of type `expressions.Expression` below are
    // called `predicate`s, while all data source filters of type `sources.Filter` are simply called
    // `filter`s.

    // A map from original Catalyst expressions to corresponding translated data source filters.
    // If a predicate is not in this map, it means it cannot be pushed down.
    val translatedMap: Map[Expression, Filter] = predicates.flatMap { p =>
      translateFilter(p).map(f => p -> f)
    }.toMap

    val pushedFilters: Seq[Filter] = translatedMap.values.toSeq

    // Catalyst predicate expressions that cannot be converted to data source filters.
    val nonconvertiblePredicates = predicates.filterNot(translatedMap.contains)

    // Data source filters that cannot be handled by `relation`. An unhandled filter means
    // the data source cannot guarantee the rows returned can pass the filter.
    // As a result we must return it so Spark can plan an extra filter operator.
    val unhandledFilters = relation.unhandledFilters(translatedMap.values.toArray).toSet
    val unhandledPredicates = translatedMap.filter { case (p, f) =>
      unhandledFilters.contains(f)
    }.keys
    val handledFilters = pushedFilters.toSet -- unhandledFilters

    (nonconvertiblePredicates ++ unhandledPredicates, pushedFilters, handledFilters)
  }
}
| JerryLead/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala | Scala | apache-2.0 | 24,557 |
package org.milvus.packrat.samples
import scala.collection.mutable
// This file was generated automatically. Do not edit.
/**
 * Base trait for generated parsers. Implementations supply `parseLongest` (longest match
 * from a start position) and `codeForExternalSymbol` (token-name to integer code); this
 * trait derives the exact-match `parse` variants over ints, chars, and token strings.
 */
trait Parser[ParseTree] {
  sealed abstract class ParseResult(val success: Boolean)
  case object ParseFailure extends ParseResult(false)
  case class ParseSuccess(pos: Int, parse: ParseTree) extends ParseResult(true)

  /** Longest parse starting at `from`, never reading past `to`. */
  def parseLongest(from: Int, to: Int, input: Seq[Int]): ParseResult

  /** Integer code for an external symbol name (e.g. a POS tag). */
  def codeForExternalSymbol(name: String): Int

  /** Accepts only parses that consume the input exactly up to `to`. */
  def parse(from: Int, to: Int, input: Seq[Int]): Option[ParseTree] =
    parseLongest(from, to, input) match {
      case ParseSuccess(endPos, tree) if endPos == to => Some(tree)
      case _ => None
    }

  /** Parses the whole input sequence of codes. */
  def parse(input: Seq[Int]): Option[ParseTree] = parse(0, input.size, input)

  /** Parses characters by their integer code points. */
  def parseChars(input: Seq[Char]): Option[ParseTree] =
    parse(input.map(ch => ch.toInt))

  /** Parses token strings by mapping each through `codeForExternalSymbol`. */
  def parseTokens(input: Seq[String]): Option[ParseTree] =
    parse(input.map(tok => codeForExternalSymbol(tok)))
}
/** Marker trait for all nodes of a shallow (chunk) parse tree. */
sealed trait ShallowParse
object ShallowParser extends Parser[ShallowParse] {
  /** Internal tree node: a grammar category label plus its parsed daughters in order. */
  abstract class NonTerminal(val name: String, val dtrs: Seq[ShallowParse]) extends ShallowParse
  object NonTerminal {
    def unapply(nt: NonTerminal): Option[(String, Seq[ShallowParse])] = Some((nt.name, nt.dtrs))
  }
  // One case class per grammar category of the shallow grammar.
  case class S(override val dtrs: ShallowParse*) extends NonTerminal("S", dtrs)
  case class CHUNK(override val dtrs: ShallowParse*) extends NonTerminal("CHUNK", dtrs)
  case class NP(override val dtrs: ShallowParse*) extends NonTerminal("NP", dtrs)
  case class AP(override val dtrs: ShallowParse*) extends NonTerminal("AP", dtrs)
  case class ADJ(override val dtrs: ShallowParse*) extends NonTerminal("ADJ", dtrs)
  case class NBAR(override val dtrs: ShallowParse*) extends NonTerminal("NBAR", dtrs)
  case class NOUNS(override val dtrs: ShallowParse*) extends NonTerminal("NOUNS", dtrs)
  case class PP(override val dtrs: ShallowParse*) extends NonTerminal("PP", dtrs)
  case class IMPERATIVE(override val dtrs: ShallowParse*) extends NonTerminal("IMPERATIVE", dtrs)
  case class QUESTION(override val dtrs: ShallowParse*) extends NonTerminal("QUESTION", dtrs)
  case class VERB(override val dtrs: ShallowParse*) extends NonTerminal("VERB", dtrs)
  // Leaf node recording the input position a terminal matched at.
  case class Position(pos: Int) extends ShallowParse
  // Internal result of one parse attempt: end position plus the categories recognized so far.
  sealed abstract class Result(val success: Boolean,
                               val pos: Int,
                               val cats: mutable.Buffer[ShallowParse])
  case class Success(override val pos: Int, override val cats: mutable.Buffer[ShallowParse])
      extends Result(true, pos, cats)
  case object Failure extends Result(false, 0, mutable.Buffer())
  /**
   * External (terminal) symbols: Penn-Treebank-style POS tags, each mapped to a unique
   * negative integer code used in the generated match conditions below.
   */
  sealed abstract class ExternalSymbol(val name: String, val index: Int)
  // -1 is used for unknown tokens; no grammar rule matches it, so unknown tokens never parse.
  case object UnknownSymbol extends ExternalSymbol("", -1)
  case object PRP extends ExternalSymbol("PRP", -2)
  case object WDT extends ExternalSymbol("WDT", -3)
  case object PDT extends ExternalSymbol("PDT", -4)
  case object DT extends ExternalSymbol("DT", -5)
  case object RBR extends ExternalSymbol("RBR", -6)
  case object CD extends ExternalSymbol("CD", -7)
  case object JJ extends ExternalSymbol("JJ", -8)
  case object JJS extends ExternalSymbol("JJS", -9)
  case object JJR extends ExternalSymbol("JJR", -10)
  case object NN extends ExternalSymbol("NN", -11)
  case object NNS extends ExternalSymbol("NNS", -12)
  case object NNP extends ExternalSymbol("NNP", -13)
  case object NNPS extends ExternalSymbol("NNPS", -14)
  case object IN extends ExternalSymbol("IN", -15)
  case object VB extends ExternalSymbol("VB", -16)
  case object VBD extends ExternalSymbol("VBD", -17)
  case object VBG extends ExternalSymbol("VBG", -18)
  case object VBN extends ExternalSymbol("VBN", -19)
  case object VBP extends ExternalSymbol("VBP", -20)
  case object VBZ extends ExternalSymbol("VBZ", -21)
  // Lookup table from tag name to symbol, built once from the symbol list above.
  private val externalSymbolMap: Map[String, ExternalSymbol] =
    Seq(PRP,
        WDT,
        PDT,
        DT,
        RBR,
        CD,
        JJ,
        JJS,
        JJR,
        NN,
        NNS,
        NNP,
        NNPS,
        IN,
        VB,
        VBD,
        VBG,
        VBN,
        VBP,
        VBZ).foldLeft(Map[String, ExternalSymbol]())((map, sym) => map + (sym.name -> sym))
  /** Code for a POS-tag name; unknown names map to UnknownSymbol's code (-1). */
  override def codeForExternalSymbol(sym: String) =
    externalSymbolMap
      .getOrElse(sym, UnknownSymbol)
      .index
  /** Entry point: the start symbol is S; on success the single S node is returned. */
  override def parseLongest(from: Int, to: Int, input: Seq[Int]): ParseResult = {
    val res = parseS(from, to, input)
    if (res.success) ParseSuccess(res.pos, res.cats(0))
    else ParseFailure
  }
  /** S ::= IMPERATIVE | QUESTION | CHUNK+  (ordered choice; CHUNK+ matched greedily). */
  def parseS(from: Int, to: Int, input: Seq[Int]): Result = {
    val res = {
      val res = parseIMPERATIVE(from, to, input)
      if (res.success) res
      else {
        val res = parseQUESTION(from, to, input)
        if (res.success) res
        else {
          val res = parseCHUNK(from, to, input)
          if (!res.success) {
            res
          } else {
            // First CHUNK matched; greedily consume as many further CHUNKs as possible.
            val dtrs = res.cats
            var pos0 = res.pos
            var keepGoing = true
            while (keepGoing && pos0 < to) {
              val res = parseCHUNK(pos0, to, input)
              if (!res.success) {
                keepGoing = false
              } else {
                dtrs ++= res.cats
                pos0 = res.pos
              }
            }
            Success(pos0, dtrs)
          }
        }
      }
    }
    if (res.success) Success(res.pos, mutable.Buffer[ShallowParse](S(res.cats: _*))) else Failure
  }
  /** CHUNK ::= PP | NP  (ordered choice: PP tried first). */
  def parseCHUNK(from: Int, to: Int, input: Seq[Int]): Result = {
    val res = {
      val res = parsePP(from, to, input)
      if (res.success) res
      else parseNP(from, to, input)
    }
    if (res.success) Success(res.pos, mutable.Buffer[ShallowParse](CHUNK(res.cats: _*)))
    else Failure
  }
  /** NP ::= PRP | (WDT | PDT? DT)? NBAR  (token codes: PRP=-2, WDT=-3, PDT=-4, DT=-5). */
  def parseNP(from: Int, to: Int, input: Seq[Int]): Result = {
    val res = {
      val res = {
        val ch = input(from)
        if (ch == -2) Success(from + 1, mutable.Buffer[ShallowParse](Position(from))) else Failure
      }
      if (res.success) res
      else {
        val dtrs = mutable.Buffer[ShallowParse]()
        val pos1_0 = from
        // Optional determiner part: WDT, or PDT? DT; falls back to empty on no match.
        val res1 = {
          val res = {
            val res = {
              val ch = input(pos1_0)
              if (ch == -3) Success(pos1_0 + 1, mutable.Buffer[ShallowParse](Position(pos1_0)))
              else Failure
            }
            if (res.success) res
            else {
              val dtrs = mutable.Buffer[ShallowParse]()
              val pos2_0 = pos1_0
              val res1 = {
                val res = {
                  val ch = input(pos2_0)
                  if (ch == -4) Success(pos2_0 + 1, mutable.Buffer[ShallowParse](Position(pos2_0)))
                  else Failure
                }
                if (res.success) res else Success(pos2_0, mutable.Buffer[ShallowParse]())
              }
              if (res1.success && res1.pos < input.size) {
                dtrs ++= res1.cats
                val pos2_1 = res1.pos
                val res2 = {
                  val ch = input(pos2_1)
                  if (ch == -5) Success(pos2_1 + 1, mutable.Buffer[ShallowParse](Position(pos2_1)))
                  else Failure
                }
                if (res2.success) {
                  dtrs ++= res2.cats
                  Success(res2.pos, dtrs)
                } else {
                  Failure
                }
              } else Failure
            }
          }
          if (res.success) res else Success(pos1_0, mutable.Buffer[ShallowParse]())
        }
        if (res1.success && res1.pos < input.size) {
          dtrs ++= res1.cats
          val pos1_1 = res1.pos
          val res2 = parseNBAR(pos1_1, to, input)
          if (res2.success) {
            dtrs ++= res2.cats
            Success(res2.pos, dtrs)
          } else {
            Failure
          }
        } else Failure
      }
    }
    if (res.success) Success(res.pos, mutable.Buffer[ShallowParse](NP(res.cats: _*))) else Failure
  }
  /** AP ::= RBR? ADJ? CD? ADJ*  (token codes: RBR=-6, CD=-7; each part optional). */
  def parseAP(from: Int, to: Int, input: Seq[Int]): Result = {
    val res = {
      val dtrs = mutable.Buffer[ShallowParse]()
      val pos3_0 = from
      // Optional RBR.
      val res1 = {
        val res = {
          val ch = input(pos3_0)
          if (ch == -6) Success(pos3_0 + 1, mutable.Buffer[ShallowParse](Position(pos3_0)))
          else Failure
        }
        if (res.success) res else Success(pos3_0, mutable.Buffer[ShallowParse]())
      }
      if (res1.success && res1.pos < input.size) {
        dtrs ++= res1.cats
        val pos3_1 = res1.pos
        // Optional ADJ.
        val res2 = {
          val res = parseADJ(pos3_1, to, input)
          if (res.success) res else Success(pos3_1, mutable.Buffer[ShallowParse]())
        }
        if (res2.success && res2.pos < input.size) {
          dtrs ++= res2.cats
          val pos3_2 = res2.pos
          // Optional CD.
          val res3 = {
            val res = {
              val ch = input(pos3_2)
              if (ch == -7) Success(pos3_2 + 1, mutable.Buffer[ShallowParse](Position(pos3_2)))
              else Failure
            }
            if (res.success) res else Success(pos3_2, mutable.Buffer[ShallowParse]())
          }
          if (res3.success && res3.pos < input.size) {
            dtrs ++= res3.cats
            val pos3_3 = res3.pos
            // Zero or more trailing ADJs.
            val res4 = {
              val dtrs = mutable.Buffer[ShallowParse]()
              var pos4 = pos3_3
              var keepGoing = true
              while (keepGoing && pos4 < to) {
                val res = parseADJ(pos4, to, input)
                if (!res.success) {
                  keepGoing = false
                } else {
                  dtrs ++= res.cats
                  pos4 = res.pos
                }
              }
              Success(pos4, dtrs)
            }
            if (res4.success) {
              dtrs ++= res4.cats
              Success(res4.pos, dtrs)
            } else {
              Failure
            }
          } else Failure
        } else Failure
      } else Failure
    }
    if (res.success) Success(res.pos, mutable.Buffer[ShallowParse](AP(res.cats: _*))) else Failure
  }
  /** ADJ ::= JJ | JJS | JJR  (token codes -8, -9, -10). */
  def parseADJ(from: Int, to: Int, input: Seq[Int]): Result = {
    val res = {
      val res = {
        val ch = input(from)
        if (ch == -8) Success(from + 1, mutable.Buffer[ShallowParse](Position(from))) else Failure
      }
      if (res.success) res
      else {
        val res = {
          val ch = input(from)
          if (ch == -9) Success(from + 1, mutable.Buffer[ShallowParse](Position(from))) else Failure
        }
        if (res.success) res
        else {
          val ch = input(from)
          if (ch == -10) Success(from + 1, mutable.Buffer[ShallowParse](Position(from)))
          else Failure
        }
      }
    }
    if (res.success) Success(res.pos, mutable.Buffer[ShallowParse](ADJ(res.cats: _*))) else Failure
  }
  /** NBAR ::= AP? NOUNS  (optional adjective phrase followed by a noun run). */
  def parseNBAR(from: Int, to: Int, input: Seq[Int]): Result = {
    val res = {
      val dtrs = mutable.Buffer[ShallowParse]()
      val pos5_0 = from
      // Optional AP; falls back to empty on no match.
      val res1 = {
        val res = parseAP(pos5_0, to, input)
        if (res.success) res else Success(pos5_0, mutable.Buffer[ShallowParse]())
      }
      if (res1.success && res1.pos < input.size) {
        dtrs ++= res1.cats
        val pos5_1 = res1.pos
        val res2 = parseNOUNS(pos5_1, to, input)
        if (res2.success) {
          dtrs ++= res2.cats
          Success(res2.pos, dtrs)
        } else {
          Failure
        }
      } else Failure
    }
    if (res.success) Success(res.pos, mutable.Buffer[ShallowParse](NBAR(res.cats: _*))) else Failure
  }
  /** NOUNS ::= (NN | NNS | NNP | NNPS)+  (token codes -11..-14; one required, then greedy). */
  def parseNOUNS(from: Int, to: Int, input: Seq[Int]): Result = {
    val res = {
      // Mandatory first noun.
      val res = {
        val res = {
          val ch = input(from)
          if (ch == -11) Success(from + 1, mutable.Buffer[ShallowParse](Position(from)))
          else Failure
        }
        if (res.success) res
        else {
          val res = {
            val ch = input(from)
            if (ch == -12) Success(from + 1, mutable.Buffer[ShallowParse](Position(from)))
            else Failure
          }
          if (res.success) res
          else {
            val res = {
              val ch = input(from)
              if (ch == -13) Success(from + 1, mutable.Buffer[ShallowParse](Position(from)))
              else Failure
            }
            if (res.success) res
            else {
              val ch = input(from)
              if (ch == -14) Success(from + 1, mutable.Buffer[ShallowParse](Position(from)))
              else Failure
            }
          }
        }
      }
      if (!res.success) {
        res
      } else {
        // Greedily consume additional nouns.
        val dtrs = res.cats
        var pos6 = res.pos
        var keepGoing = true
        while (keepGoing && pos6 < to) {
          val res = {
            val res = {
              val ch = input(pos6)
              if (ch == -11) Success(pos6 + 1, mutable.Buffer[ShallowParse](Position(pos6)))
              else Failure
            }
            if (res.success) res
            else {
              val res = {
                val ch = input(pos6)
                if (ch == -12) Success(pos6 + 1, mutable.Buffer[ShallowParse](Position(pos6)))
                else Failure
              }
              if (res.success) res
              else {
                val res = {
                  val ch = input(pos6)
                  if (ch == -13) Success(pos6 + 1, mutable.Buffer[ShallowParse](Position(pos6)))
                  else Failure
                }
                if (res.success) res
                else {
                  val ch = input(pos6)
                  if (ch == -14) Success(pos6 + 1, mutable.Buffer[ShallowParse](Position(pos6)))
                  else Failure
                }
              }
            }
          }
          if (!res.success) {
            keepGoing = false
          } else {
            dtrs ++= res.cats
            pos6 = res.pos
          }
        }
        Success(pos6, dtrs)
      }
    }
    if (res.success) Success(res.pos, mutable.Buffer[ShallowParse](NOUNS(res.cats: _*)))
    else Failure
  }
  /** PP ::= IN NP  (token code IN=-15). */
  def parsePP(from: Int, to: Int, input: Seq[Int]): Result = {
    val res = {
      val dtrs = mutable.Buffer[ShallowParse]()
      val pos7_0 = from
      val res1 = {
        val ch = input(pos7_0)
        if (ch == -15) Success(pos7_0 + 1, mutable.Buffer[ShallowParse](Position(pos7_0)))
        else Failure
      }
      if (res1.success && res1.pos < input.size) {
        dtrs ++= res1.cats
        val pos7_1 = res1.pos
        val res2 = parseNP(pos7_1, to, input)
        if (res2.success) {
          dtrs ++= res2.cats
          Success(res2.pos, dtrs)
        } else {
          Failure
        }
      } else Failure
    }
    if (res.success) Success(res.pos, mutable.Buffer[ShallowParse](PP(res.cats: _*))) else Failure
  }
  /** IMPERATIVE ::= VERB CHUNK+  (a leading verb followed by one or more chunks). */
  def parseIMPERATIVE(from: Int, to: Int, input: Seq[Int]): Result = {
    val res = {
      val dtrs = mutable.Buffer[ShallowParse]()
      val pos8_0 = from
      val res1 = parseVERB(pos8_0, to, input)
      if (res1.success && res1.pos < input.size) {
        dtrs ++= res1.cats
        val pos8_1 = res1.pos
        // One mandatory CHUNK, then greedily consume further CHUNKs.
        val res2 = {
          val res = parseCHUNK(pos8_1, to, input)
          if (!res.success) {
            res
          } else {
            val dtrs = res.cats
            var pos9 = res.pos
            var keepGoing = true
            while (keepGoing && pos9 < to) {
              val res = parseCHUNK(pos9, to, input)
              if (!res.success) {
                keepGoing = false
              } else {
                dtrs ++= res.cats
                pos9 = res.pos
              }
            }
            Success(pos9, dtrs)
          }
        }
        if (res2.success) {
          dtrs ++= res2.cats
          Success(res2.pos, dtrs)
        } else {
          Failure
        }
      } else Failure
    }
    if (res.success) Success(res.pos, mutable.Buffer[ShallowParse](IMPERATIVE(res.cats: _*)))
    else Failure
  }
/**
 * Parses a QUESTION: CHUNK VERB CHUNK* VERB? CHUNK*.
 * Each mandatory element must leave at least one unread token behind it
 * (the `pos < input.size` guards), so a QUESTION cannot end the input on
 * one of its leading elements. On success the daughters are wrapped in a
 * QUESTION node.
 */
def parseQUESTION(from: Int, to: Int, input: Seq[Int]): Result = {
  val res = {
    val dtrs = mutable.Buffer[ShallowParse]()
    val pos10_0 = from
    // Mandatory leading CHUNK.
    val res1 = parseCHUNK(pos10_0, to, input)
    if (res1.success && res1.pos < input.size) {
      dtrs ++= res1.cats
      val pos10_1 = res1.pos
      // Mandatory VERB.
      val res2 = parseVERB(pos10_1, to, input)
      if (res2.success && res2.pos < input.size) {
        dtrs ++= res2.cats
        val pos10_2 = res2.pos
        // CHUNK* — greedy, never fails.
        val res3 = {
          val dtrs = mutable.Buffer[ShallowParse]()
          var pos11 = pos10_2
          var keepGoing = true
          while (keepGoing && pos11 < to) {
            val res = parseCHUNK(pos11, to, input)
            if (!res.success) {
              keepGoing = false
            } else {
              dtrs ++= res.cats
              pos11 = res.pos
            }
          }
          Success(pos11, dtrs)
        }
        if (res3.success && res3.pos < input.size) {
          dtrs ++= res3.cats
          val pos10_3 = res3.pos
          // VERB? — optional: on failure, succeed with no daughters and no advance.
          val res4 = {
            val res = parseVERB(pos10_3, to, input)
            if (res.success) res else Success(pos10_3, mutable.Buffer[ShallowParse]())
          }
          if (res4.success && res4.pos < input.size) {
            dtrs ++= res4.cats
            val pos10_4 = res4.pos
            // Trailing CHUNK* — greedy, never fails.
            val res5 = {
              val dtrs = mutable.Buffer[ShallowParse]()
              var pos12 = pos10_4
              var keepGoing = true
              while (keepGoing && pos12 < to) {
                val res = parseCHUNK(pos12, to, input)
                if (!res.success) {
                  keepGoing = false
                } else {
                  dtrs ++= res.cats
                  pos12 = res.pos
                }
              }
              Success(pos12, dtrs)
            }
            if (res5.success) {
              dtrs ++= res5.cats
              Success(res5.pos, dtrs)
            } else {
              Failure
            }
          } else Failure
        } else Failure
      } else Failure
    } else Failure
  }
  if (res.success) Success(res.pos, mutable.Buffer[ShallowParse](QUESTION(res.cats: _*)))
  else Failure
}
/**
 * Parses a VERB: any single terminal token whose code is one of
 * -16, -17, -18, -19, -20 or -21. On a match, one token is consumed and
 * the resulting Position daughter is wrapped in a VERB node.
 */
def parseVERB(from: Int, to: Int, input: Seq[Int]): Result = {
  val verbTokens = Set(-16, -17, -18, -19, -20, -21)
  val tokenResult =
    if (verbTokens.contains(input(from)))
      Success(from + 1, mutable.Buffer[ShallowParse](Position(from)))
    else
      Failure
  if (tokenResult.success)
    Success(tokenResult.pos, mutable.Buffer[ShallowParse](VERB(tokenResult.cats: _*)))
  else
    Failure
}
}
| twgoetz/packrat | src/main/scala/org/milvus/packrat/samples/ShallowParser.scala | Scala | apache-2.0 | 19,853 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import scala.collection.mutable
import kafka.utils.Logging
import kafka.cluster.Broker
import kafka.metrics.KafkaMetricsGroup
import com.yammer.metrics.core.Gauge
/**
 * Manages the pool of fetcher threads that replicate data from source brokers.
 * Partitions are hashed onto one of `numFetchers` fetcher slots per source
 * broker; all map mutation is serialised through `mapLock`.
 *
 * @param name         human-readable name, used as the logging identifier
 * @param metricPrefix prefix for the (currently disabled) gauge metric names
 * @param numFetchers  number of fetcher threads per source broker
 */
abstract class AbstractFetcherManager(protected val name: String, metricPrefix: String, numFetchers: Int = 1)
  extends Logging with KafkaMetricsGroup {
  // map of (source brokerid, fetcher Id per source broker) => fetcher
  private val fetcherThreadMap = new mutable.HashMap[BrokerAndFetcherId, AbstractFetcherThread]
  // Guards all reads and writes of fetcherThreadMap.
  private val mapLock = new Object
  this.logIdent = "[" + name + "] "
  /* newGauge(
  metricPrefix + "-MaxLag",
  new Gauge[Long] {
  // current max lag across all fetchers/topics/partitions
  def value = fetcherThreadMap.foldLeft(0L)((curMaxAll, fetcherThreadMapEntry) => {
  fetcherThreadMapEntry._2.fetcherLagStats.stats.foldLeft(0L)((curMaxThread, fetcherLagStatsEntry) => {
  curMaxThread.max(fetcherLagStatsEntry._2.lag)
  }).max(curMaxAll)
  })
  }
  ) */
  /* newGauge(
  metricPrefix + "-MinFetchRate",
  {
  new Gauge[Double] {
  // current min fetch rate across all fetchers/topics/partitions
  //def value = {
  //val headRate: Double =
  // fetcherThreadMap.headOption.map(_._2.fetcherStats.requestRate.oneMinuteRate).getOrElse(0)
  //fetcherThreadMap.foldLeft(headRate)((curMinAll, fetcherThreadMapEntry) => {
  // fetcherThreadMapEntry._2.fetcherStats.requestRate.oneMinuteRate.min(curMinAll)
  //})
  // }
  }
  }
  ) */

  /**
   * Deterministically assigns a topic partition to one of the `numFetchers`
   * fetcher slots. The hash expression can be negative (String.hashCode is
   * signed), so the remainder is normalised into [0, numFetchers); a raw `%`
   * would yield negative ids and silently allow up to 2 * numFetchers
   * distinct fetcher threads per source broker.
   */
  private def getFetcherId(topic: String, partitionId: Int) : Int = {
    val rem = (topic.hashCode() + 31 * partitionId) % numFetchers
    if (rem < 0) rem + numFetchers else rem
  }

  // to be defined in subclass to create a specific fetcher
  def createFetcherThread(fetcherId: Int, sourceBroker: Broker): AbstractFetcherThread

  /**
   * Registers a partition with the fetcher thread responsible for it,
   * lazily creating and starting that thread on first use.
   */
  def addFetcher(topic: String, partitionId: Int, initialOffset: Long, sourceBroker: Broker) {
    mapLock synchronized {
      var fetcherThread: AbstractFetcherThread = null
      val key = new BrokerAndFetcherId(sourceBroker, getFetcherId(topic, partitionId))
      fetcherThreadMap.get(key) match {
        case Some(f) => fetcherThread = f
        case None =>
          fetcherThread = createFetcherThread(key.fetcherId, sourceBroker)
          fetcherThreadMap.put(key, fetcherThread)
          fetcherThread.start
      }
      fetcherThread.addPartition(topic, partitionId, initialOffset)
      info("Adding fetcher for partition [%s,%d], initOffset %d to broker %d with fetcherId %d"
        .format(topic, partitionId, initialOffset, sourceBroker.id, key.fetcherId))
    }
  }

  /**
   * Removes a partition from every fetcher thread. The source broker is not
   * known here, so all threads are asked; removePartition on threads that do
   * not own the partition is expected to be a no-op.
   */
  def removeFetcher(topic: String, partitionId: Int) {
    info("Removing fetcher for partition [%s,%d]".format(topic, partitionId))
    mapLock synchronized {
      for ((key, fetcher) <- fetcherThreadMap) {
        fetcher.removePartition(topic, partitionId)
      }
    }
  }

  /** Shuts down and forgets every fetcher thread that no longer owns any partition. */
  def shutdownIdleFetcherThreads() {
    mapLock synchronized {
      val keysToBeRemoved = new mutable.HashSet[BrokerAndFetcherId]
      for ((key, fetcher) <- fetcherThreadMap) {
        if (fetcher.partitionCount <= 0) {
          fetcher.shutdown()
          keysToBeRemoved += key
        }
      }
      fetcherThreadMap --= keysToBeRemoved
    }
  }

  /** Shuts down all fetcher threads unconditionally and clears the registry. */
  def closeAllFetchers() {
    mapLock synchronized {
      for ( (_, fetcher) <- fetcherThreadMap) {
        fetcher.shutdown()
      }
      fetcherThreadMap.clear()
    }
  }
}
case class BrokerAndFetcherId(broker: Broker, fetcherId: Int)
| kavink92/kafka-0.8.0-beta1-src | core/src/main/scala/kafka/server/AbstractFetcherManager.scala | Scala | apache-2.0 | 4,315 |
package coursier.cli
import caseapp.core.app.CommandsEntryPoint
import caseapp.RemainingArgs
import coursier.cache.CacheUrl
import coursier.cli.internal.{Argv0, PathUtil}
import coursier.cli.setup.{Setup, SetupOptions}
import coursier.install.InstallDir
import coursier.jniutils.ModuleFileName
import java.nio.file.Paths
import java.util.Scanner
import scala.util.control.NonFatal
import scala.util.Properties
import caseapp.core.help.HelpFormat
/**
 * Main entry point of the coursier CLI: declares the sub-commands and handles
 * platform-specific startup (Windows ANSI/JNI setup, GraalVM -J argument
 * handling, first-run setup when launched as a non-installed Windows binary).
 */
object Coursier extends CommandsEntryPoint {

  // True when running as a GraalVM native image (set by the image builder).
  private def isGraalvmNativeImage: Boolean =
    sys.props.contains("org.graalvm.nativeimage.imagecode")

  // Program name as invoked, falling back to "coursier".
  lazy val progName = (new Argv0).get("coursier")

  override val description =
    """|Coursier is the Scala application and artifact manager.
       |It can install Scala applications and setup your Scala development environment.
       |It can also download and cache artifacts from the web.""".stripMargin

  val commands = Seq(
    bootstrap.Bootstrap,
    channel.Channel,
    coursier.cli.complete.Complete,
    fetch.Fetch,
    get.Get,
    install.Install,
    jvm.Java,
    jvm.JavaHome,
    launch.Launch,
    install.List,
    publish.Publish,
    resolve.Resolve,
    search.Search,
    setup.Setup,
    install.Uninstall,
    install.Update
  )

  override def enableCompleteCommand = true
  override def enableCompletionsCommand = true

  // True for a Windows native-image launcher whose binary is not on PATH,
  // i.e. a launcher that was downloaded but not installed yet.
  private def isNonInstalledLauncherWindows: Boolean =
    Properties.isWin && isGraalvmNativeImage && {
      val p = Paths.get(ModuleFileName.get())
      !PathUtil.isInPath(p)
    }

  // First-run experience: run the setup command, then wait for ENTER so a
  // double-clicked console window does not close before the user reads it.
  private def runSetup(): Unit = {
    Setup.run(SetupOptions(banner = Some(true)), RemainingArgs(Nil, Nil))
    // https://stackoverflow.com/questions/26184409/java-console-prompt-for-enter-input-before-moving-on/26184535#26184535
    println("Press \\"ENTER\\" to continue...")
    val scanner = new Scanner(System.in)
    scanner.nextLine()
  }

  override def main(args: Array[String]): Unit = {
    if (Properties.isWin && isGraalvmNativeImage)
      // The DLL loaded by LoadWindowsLibrary is statically linked in
      // the coursier native image, no need to manually load it.
      coursier.jniutils.LoadWindowsLibrary.assumeInitialized()
    if (System.console() != null && Properties.isWin) {
      // Enable ANSI escape sequences on the Windows console, via JNI when
      // available, otherwise via the pure-JVM fallback.
      val useJni = coursier.paths.Util.useJni()
      try if (useJni)
        coursier.jniutils.WindowsAnsiTerminal.enableAnsiOutput()
      else
        io.github.alexarchambault.windowsansi.WindowsAnsi.setup()
      catch {
        case NonFatal(e) =>
          // ANSI setup failure is non-fatal unless explicitly requested via system property.
          val doThrow = java.lang.Boolean.getBoolean("coursier.windows-ansi.throw-exception")
          if (doThrow || java.lang.Boolean.getBoolean("coursier.windows-ansi.verbose"))
            System.err.println(s"Error setting up Windows terminal for ANSI escape codes: $e")
          if (doThrow)
            throw e
      }
    }
    CacheUrl.setupProxyAuth()
    val csArgs =
      if (isGraalvmNativeImage) {
        // process -J* args ourselves
        val (jvmArgs, csArgs0) = args.partition(_.startsWith("-J"))
        for (jvmArg <- jvmArgs) {
          val arg = jvmArg.stripPrefix("-J")
          if (arg.startsWith("-D"))
            arg.stripPrefix("-D").split("=", 2) match {
              case Array(k) => System.setProperty(k, "")
              case Array(k, v) => System.setProperty(k, v)
            }
          else
            System.err.println(s"Warning: ignoring unhandled -J argument: $jvmArg")
        }
        csArgs0
      }
      else
        args
    if (csArgs.nonEmpty)
      super.main(csArgs)
    else if (isNonInstalledLauncherWindows)
      runSetup()
    else {
      // No arguments: print help and exit with an error status.
      println(help.help(helpFormat, showHidden = false))
      sys.exit(1)
    }
  }

  override def helpFormat: HelpFormat =
    HelpFormat.default()
      .withSortedCommandGroups(Some(CommandGroup.order))
}
| coursier/coursier | modules/cli/src/main/scala/coursier/cli/Coursier.scala | Scala | apache-2.0 | 3,823 |
package models.base
import net.scalytica.symbiotic.api.types.IdOps
import net.scalytica.symbiotic.api.types.PartyBaseTypes.OrgId
import play.api.libs.json._
case class SymbioticOrgId(value: String) extends OrgId
/**
 * Companion providing play-json (de)serialisation for [[SymbioticOrgId]]:
 * ids are represented as plain JSON strings.
 */
object SymbioticOrgId extends IdOps[SymbioticOrgId] {

  /** Reads a JSON string and lifts it into a [[SymbioticOrgId]]. */
  implicit val reads: Reads[SymbioticOrgId] = Reads { json =>
    json.validate[String] match {
      case err: JsError        => err
      case JsSuccess(strId, _) => JsSuccess(asId(strId))
    }
  }

  /** Writes any OrgId as its raw string value. */
  implicit val writes: Writes[SymbioticOrgId] =
    Writes[OrgId](id => JsString(id.value))

  override implicit def asId(s: String): SymbioticOrgId = SymbioticOrgId(s)
}
| kpmeen/symbiotic | examples/symbiotic-server/app/models/base/SymbioticOrgId.scala | Scala | apache-2.0 | 634 |
/*
* Copyright 2009-2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.linkedin.norbert
package logging
/**
 * A mixin trait which provides a <code>Logger</code> instance.
 *
 * The logger is created eagerly via <code>Logger(this)</code>, so its name
 * is derived from the concrete class mixing in this trait.
 */
trait Logging {
  protected val log: Logger = Logger(this)
}
| thesiddharth/norbert | cluster/src/main/scala/com/linkedin/norbert/logging/Logging.scala | Scala | apache-2.0 | 777 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.regression
import scala.util.Random
import org.apache.spark.SparkFunSuite
import org.apache.spark.ml.classification.LogisticRegressionSuite._
import org.apache.spark.ml.feature.Instance
import org.apache.spark.ml.feature.LabeledPoint
import org.apache.spark.ml.linalg.{BLAS, DenseVector, Vector, Vectors}
import org.apache.spark.ml.param.ParamsSuite
import org.apache.spark.ml.util.{DefaultReadWriteTest, MLTestingUtils}
import org.apache.spark.ml.util.TestingUtils._
import org.apache.spark.mllib.random._
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.sql.{DataFrame, Row}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.FloatType
class GeneralizedLinearRegressionSuite
extends SparkFunSuite with MLlibTestSparkContext with DefaultReadWriteTest {
// Fixed RNG seed so the generated datasets (and R reference values) are reproducible.
private val seed: Int = 42
// Synthetic datasets, one per (family, link) combination; populated in beforeAll().
@transient var datasetGaussianIdentity: DataFrame = _
@transient var datasetGaussianLog: DataFrame = _
@transient var datasetGaussianInverse: DataFrame = _
@transient var datasetBinomial: DataFrame = _
@transient var datasetPoissonLog: DataFrame = _
@transient var datasetPoissonIdentity: DataFrame = _
@transient var datasetPoissonSqrt: DataFrame = _
@transient var datasetGammaInverse: DataFrame = _
@transient var datasetGammaIdentity: DataFrame = _
@transient var datasetGammaLog: DataFrame = _
// Generates one synthetic dataset per (family, link) combination used by the
// tests below. All datasets share the same seed, coefficients and feature
// distribution so results are deterministic and comparable with the R
// reference values embedded in each test.
override def beforeAll(): Unit = {
  super.beforeAll()
  import GeneralizedLinearRegressionSuite._
  datasetGaussianIdentity = spark.createDataFrame(
    sc.parallelize(generateGeneralizedLinearRegressionInput(
      intercept = 2.5, coefficients = Array(2.2, 0.6), xMean = Array(2.9, 10.5),
      xVariance = Array(0.7, 1.2), nPoints = 10000, seed, noiseLevel = 0.01,
      family = "gaussian", link = "identity"), 2))
  datasetGaussianLog = spark.createDataFrame(
    sc.parallelize(generateGeneralizedLinearRegressionInput(
      intercept = 0.25, coefficients = Array(0.22, 0.06), xMean = Array(2.9, 10.5),
      xVariance = Array(0.7, 1.2), nPoints = 10000, seed, noiseLevel = 0.01,
      family = "gaussian", link = "log"), 2))
  datasetGaussianInverse = spark.createDataFrame(
    sc.parallelize(generateGeneralizedLinearRegressionInput(
      intercept = 2.5, coefficients = Array(2.2, 0.6), xMean = Array(2.9, 10.5),
      xVariance = Array(0.7, 1.2), nPoints = 10000, seed, noiseLevel = 0.01,
      family = "gaussian", link = "inverse"), 2))
  // Binomial labels come from the multinomial logistic generator (2 classes).
  datasetBinomial = {
    val nPoints = 10000
    val coefficients = Array(-0.57997, 0.912083, -0.371077, -0.819866, 2.688191)
    val xMean = Array(5.843, 3.057, 3.758, 1.199)
    val xVariance = Array(0.6856, 0.1899, 3.116, 0.581)
    val testData =
      generateMultinomialLogisticInput(coefficients, xMean, xVariance,
        addIntercept = true, nPoints, seed)
    spark.createDataFrame(sc.parallelize(testData, 2))
  }
  datasetPoissonLog = spark.createDataFrame(
    sc.parallelize(generateGeneralizedLinearRegressionInput(
      intercept = 0.25, coefficients = Array(0.22, 0.06), xMean = Array(2.9, 10.5),
      xVariance = Array(0.7, 1.2), nPoints = 10000, seed, noiseLevel = 0.01,
      family = "poisson", link = "log"), 2))
  datasetPoissonIdentity = spark.createDataFrame(
    sc.parallelize(generateGeneralizedLinearRegressionInput(
      intercept = 2.5, coefficients = Array(2.2, 0.6), xMean = Array(2.9, 10.5),
      xVariance = Array(0.7, 1.2), nPoints = 10000, seed, noiseLevel = 0.01,
      family = "poisson", link = "identity"), 2))
  datasetPoissonSqrt = spark.createDataFrame(
    sc.parallelize(generateGeneralizedLinearRegressionInput(
      intercept = 2.5, coefficients = Array(2.2, 0.6), xMean = Array(2.9, 10.5),
      xVariance = Array(0.7, 1.2), nPoints = 10000, seed, noiseLevel = 0.01,
      family = "poisson", link = "sqrt"), 2))
  datasetGammaInverse = spark.createDataFrame(
    sc.parallelize(generateGeneralizedLinearRegressionInput(
      intercept = 2.5, coefficients = Array(2.2, 0.6), xMean = Array(2.9, 10.5),
      xVariance = Array(0.7, 1.2), nPoints = 10000, seed, noiseLevel = 0.01,
      family = "gamma", link = "inverse"), 2))
  datasetGammaIdentity = spark.createDataFrame(
    sc.parallelize(generateGeneralizedLinearRegressionInput(
      intercept = 2.5, coefficients = Array(2.2, 0.6), xMean = Array(2.9, 10.5),
      xVariance = Array(0.7, 1.2), nPoints = 10000, seed, noiseLevel = 0.01,
      family = "gamma", link = "identity"), 2))
  datasetGammaLog = spark.createDataFrame(
    sc.parallelize(generateGeneralizedLinearRegressionInput(
      intercept = 0.25, coefficients = Array(0.22, 0.06), xMean = Array(2.9, 10.5),
      xVariance = Array(0.7, 1.2), nPoints = 10000, seed, noiseLevel = 0.01,
      family = "gamma", link = "log"), 2))
}
/**
 * Enable the ignored test to export the dataset into CSV format,
 * so we can validate the training accuracy compared with R's glm and glmnet package.
 */
ignore("export test data into CSV format") {
  // Each dataset is written as "label,feature1,feature2,..." one row per line,
  // repartitioned to a single file for easy loading in R.
  datasetGaussianIdentity.rdd.map { case Row(label: Double, features: Vector) =>
    label + "," + features.toArray.mkString(",")
  }.repartition(1).saveAsTextFile(
    "target/tmp/GeneralizedLinearRegressionSuite/datasetGaussianIdentity")
  datasetGaussianLog.rdd.map { case Row(label: Double, features: Vector) =>
    label + "," + features.toArray.mkString(",")
  }.repartition(1).saveAsTextFile(
    "target/tmp/GeneralizedLinearRegressionSuite/datasetGaussianLog")
  datasetGaussianInverse.rdd.map { case Row(label: Double, features: Vector) =>
    label + "," + features.toArray.mkString(",")
  }.repartition(1).saveAsTextFile(
    "target/tmp/GeneralizedLinearRegressionSuite/datasetGaussianInverse")
  datasetBinomial.rdd.map { case Row(label: Double, features: Vector) =>
    label + "," + features.toArray.mkString(",")
  }.repartition(1).saveAsTextFile(
    "target/tmp/GeneralizedLinearRegressionSuite/datasetBinomial")
  datasetPoissonLog.rdd.map { case Row(label: Double, features: Vector) =>
    label + "," + features.toArray.mkString(",")
  }.repartition(1).saveAsTextFile(
    "target/tmp/GeneralizedLinearRegressionSuite/datasetPoissonLog")
  datasetPoissonIdentity.rdd.map { case Row(label: Double, features: Vector) =>
    label + "," + features.toArray.mkString(",")
  }.repartition(1).saveAsTextFile(
    "target/tmp/GeneralizedLinearRegressionSuite/datasetPoissonIdentity")
  datasetPoissonSqrt.rdd.map { case Row(label: Double, features: Vector) =>
    label + "," + features.toArray.mkString(",")
  }.repartition(1).saveAsTextFile(
    "target/tmp/GeneralizedLinearRegressionSuite/datasetPoissonSqrt")
  datasetGammaInverse.rdd.map { case Row(label: Double, features: Vector) =>
    label + "," + features.toArray.mkString(",")
  }.repartition(1).saveAsTextFile(
    "target/tmp/GeneralizedLinearRegressionSuite/datasetGammaInverse")
  datasetGammaIdentity.rdd.map { case Row(label: Double, features: Vector) =>
    label + "," + features.toArray.mkString(",")
  }.repartition(1).saveAsTextFile(
    "target/tmp/GeneralizedLinearRegressionSuite/datasetGammaIdentity")
  datasetGammaLog.rdd.map { case Row(label: Double, features: Vector) =>
    label + "," + features.toArray.mkString(",")
  }.repartition(1).saveAsTextFile(
    "target/tmp/GeneralizedLinearRegressionSuite/datasetGammaLog")
}
// Standard ML Param sanity checks for both the estimator and a model instance.
test("params") {
  ParamsSuite.checkParams(new GeneralizedLinearRegression)
  val model = new GeneralizedLinearRegressionModel("genLinReg", Vectors.dense(0.0), 0.0)
  ParamsSuite.checkParams(model)
}
// Verifies the documented default values of every estimator param, then fits
// a gaussian/identity model and checks the fitted model's basic properties.
test("generalized linear regression: default params") {
  val glr = new GeneralizedLinearRegression
  assert(glr.getLabelCol === "label")
  assert(glr.getFeaturesCol === "features")
  assert(glr.getPredictionCol === "prediction")
  assert(glr.getFitIntercept)
  assert(glr.getTol === 1E-6)
  assert(!glr.isDefined(glr.weightCol))
  assert(glr.getRegParam === 0.0)
  assert(glr.getSolver == "irls")
  // TODO: Construct model directly instead of via fitting.
  val model = glr.setFamily("gaussian").setLink("identity")
    .fit(datasetGaussianIdentity)
  // copied model must have the same parent.
  MLTestingUtils.checkCopy(model)
  assert(model.getFeaturesCol === "features")
  assert(model.getPredictionCol === "prediction")
  assert(model.intercept !== 0.0)
  assert(model.hasParent)
  assert(model.getFamily === "gaussian")
  assert(model.getLink === "identity")
}
// Fits gaussian-family GLMs for each supported link (identity/log/inverse),
// with and without intercept, and compares the coefficients against R's glm
// output (R script inlined below). Also checks that the prediction and
// linkPrediction columns match values recomputed from the coefficients.
test("generalized linear regression: gaussian family against glm") {
  /*
  R code:
  f1 <- data$V1 ~ data$V2 + data$V3 - 1
  f2 <- data$V1 ~ data$V2 + data$V3
  data <- read.csv("path", header=FALSE)
  for (formula in c(f1, f2)) {
  model <- glm(formula, family="gaussian", data=data)
  print(as.vector(coef(model)))
  }
  [1] 2.2960999 0.8087933
  [1] 2.5002642 2.2000403 0.5999485
  data <- read.csv("path", header=FALSE)
  model1 <- glm(f1, family=gaussian(link=log), data=data, start=c(0,0))
  model2 <- glm(f2, family=gaussian(link=log), data=data, start=c(0,0,0))
  print(as.vector(coef(model1)))
  print(as.vector(coef(model2)))
  [1] 0.23069326 0.07993778
  [1] 0.25001858 0.22002452 0.05998789
  data <- read.csv("path", header=FALSE)
  for (formula in c(f1, f2)) {
  model <- glm(formula, family=gaussian(link=inverse), data=data)
  print(as.vector(coef(model)))
  }
  [1] 2.3010179 0.8198976
  [1] 2.4108902 2.2130248 0.6086152
  */
  // Expected (intercept, coef1, coef2) per (link, fitIntercept) combination,
  // in iteration order of the loops below.
  val expected = Seq(
    Vectors.dense(0.0, 2.2960999, 0.8087933),
    Vectors.dense(2.5002642, 2.2000403, 0.5999485),
    Vectors.dense(0.0, 0.23069326, 0.07993778),
    Vectors.dense(0.25001858, 0.22002452, 0.05998789),
    Vectors.dense(0.0, 2.3010179, 0.8198976),
    Vectors.dense(2.4108902, 2.2130248, 0.6086152))
  import GeneralizedLinearRegression._
  var idx = 0
  for ((link, dataset) <- Seq(("identity", datasetGaussianIdentity), ("log", datasetGaussianLog),
    ("inverse", datasetGaussianInverse))) {
    for (fitIntercept <- Seq(false, true)) {
      val trainer = new GeneralizedLinearRegression().setFamily("gaussian").setLink(link)
        .setFitIntercept(fitIntercept).setLinkPredictionCol("linkPrediction")
      val model = trainer.fit(dataset)
      val actual = Vectors.dense(model.intercept, model.coefficients(0), model.coefficients(1))
      assert(actual ~= expected(idx) absTol 1e-4, "Model mismatch: GLM with gaussian family, " +
        s"$link link and fitIntercept = $fitIntercept.")
      // Recompute eta = X * beta + intercept and verify both output columns.
      val familyLink = new FamilyAndLink(Gaussian, Link.fromName(link))
      model.transform(dataset).select("features", "prediction", "linkPrediction").collect()
        .foreach {
          case Row(features: DenseVector, prediction1: Double, linkPrediction1: Double) =>
            val eta = BLAS.dot(features, model.coefficients) + model.intercept
            val prediction2 = familyLink.fitted(eta)
            val linkPrediction2 = eta
            assert(prediction1 ~= prediction2 relTol 1E-5, "Prediction mismatch: GLM with " +
              s"gaussian family, $link link and fitIntercept = $fitIntercept.")
            assert(linkPrediction1 ~= linkPrediction2 relTol 1E-5, "Link Prediction mismatch: " +
              s"GLM with gaussian family, $link link and fitIntercept = $fitIntercept.")
        }
      idx += 1
    }
  }
}
// Fits gaussian-family GLMs with ridge regularisation (regParam 0.0/0.1/1.0),
// with and without intercept, and compares coefficients against R's glmnet
// output (alpha=0, i.e. pure L2; R script inlined below).
test("generalized linear regression: gaussian family against glmnet") {
  /*
  R code:
  library(glmnet)
  data <- read.csv("path", header=FALSE)
  label = data$V1
  features = as.matrix(data.frame(data$V2, data$V3))
  for (intercept in c(FALSE, TRUE)) {
  for (lambda in c(0.0, 0.1, 1.0)) {
  model <- glmnet(features, label, family="gaussian", intercept=intercept,
  lambda=lambda, alpha=0, thresh=1E-14)
  print(as.vector(coef(model)))
  }
  }
  [1] 0.0000000 2.2961005 0.8087932
  [1] 0.0000000 2.2130368 0.8309556
  [1] 0.0000000 1.7176137 0.9610657
  [1] 2.5002642 2.2000403 0.5999485
  [1] 3.1106389 2.0935142 0.5712711
  [1] 6.7597127 1.4581054 0.3994266
  */
  // Expected (intercept, coef1, coef2) per (fitIntercept, regParam), in loop order.
  val expected = Seq(
    Vectors.dense(0.0, 2.2961005, 0.8087932),
    Vectors.dense(0.0, 2.2130368, 0.8309556),
    Vectors.dense(0.0, 1.7176137, 0.9610657),
    Vectors.dense(2.5002642, 2.2000403, 0.5999485),
    Vectors.dense(3.1106389, 2.0935142, 0.5712711),
    Vectors.dense(6.7597127, 1.4581054, 0.3994266))
  var idx = 0
  for (fitIntercept <- Seq(false, true);
    regParam <- Seq(0.0, 0.1, 1.0)) {
    val trainer = new GeneralizedLinearRegression().setFamily("gaussian")
      .setFitIntercept(fitIntercept).setRegParam(regParam)
    val model = trainer.fit(datasetGaussianIdentity)
    val actual = Vectors.dense(model.intercept, model.coefficients(0), model.coefficients(1))
    assert(actual ~= expected(idx) absTol 1e-4, "Model mismatch: GLM with gaussian family, " +
      s"fitIntercept = $fitIntercept and regParam = $regParam.")
    idx += 1
  }
}
// Fits binomial-family GLMs for each supported link (logit/probit/cloglog),
// with and without intercept, and compares the four coefficients against R's
// glm output (R script inlined below). Also validates the prediction and
// linkPrediction columns against values recomputed from the coefficients.
test("generalized linear regression: binomial family against glm") {
  /*
  R code:
  f1 <- data$V1 ~ data$V2 + data$V3 + data$V4 + data$V5 - 1
  f2 <- data$V1 ~ data$V2 + data$V3 + data$V4 + data$V5
  data <- read.csv("path", header=FALSE)
  for (formula in c(f1, f2)) {
  model <- glm(formula, family="binomial", data=data)
  print(as.vector(coef(model)))
  }
  [1] -0.3560284 1.3010002 -0.3570805 -0.7406762
  [1] 2.8367406 -0.5896187 0.8931655 -0.3925169 -0.7996989
  for (formula in c(f1, f2)) {
  model <- glm(formula, family=binomial(link=probit), data=data)
  print(as.vector(coef(model)))
  }
  [1] -0.2134390 0.7800646 -0.2144267 -0.4438358
  [1] 1.6995366 -0.3524694 0.5332651 -0.2352985 -0.4780850
  for (formula in c(f1, f2)) {
  model <- glm(formula, family=binomial(link=cloglog), data=data)
  print(as.vector(coef(model)))
  }
  [1] -0.2832198 0.8434144 -0.2524727 -0.5293452
  [1] 1.5063590 -0.4038015 0.6133664 -0.2687882 -0.5541758
  */
  // Expected (intercept, coef1..coef4) per (link, fitIntercept), in loop order.
  val expected = Seq(
    Vectors.dense(0.0, -0.3560284, 1.3010002, -0.3570805, -0.7406762),
    Vectors.dense(2.8367406, -0.5896187, 0.8931655, -0.3925169, -0.7996989),
    Vectors.dense(0.0, -0.2134390, 0.7800646, -0.2144267, -0.4438358),
    Vectors.dense(1.6995366, -0.3524694, 0.5332651, -0.2352985, -0.4780850),
    Vectors.dense(0.0, -0.2832198, 0.8434144, -0.2524727, -0.5293452),
    Vectors.dense(1.5063590, -0.4038015, 0.6133664, -0.2687882, -0.5541758))
  import GeneralizedLinearRegression._
  var idx = 0
  for ((link, dataset) <- Seq(("logit", datasetBinomial), ("probit", datasetBinomial),
    ("cloglog", datasetBinomial))) {
    for (fitIntercept <- Seq(false, true)) {
      val trainer = new GeneralizedLinearRegression().setFamily("binomial").setLink(link)
        .setFitIntercept(fitIntercept).setLinkPredictionCol("linkPrediction")
      val model = trainer.fit(dataset)
      val actual = Vectors.dense(model.intercept, model.coefficients(0), model.coefficients(1),
        model.coefficients(2), model.coefficients(3))
      assert(actual ~= expected(idx) absTol 1e-4, "Model mismatch: GLM with binomial family, " +
        s"$link link and fitIntercept = $fitIntercept.")
      // Recompute eta = X * beta + intercept and verify both output columns.
      val familyLink = new FamilyAndLink(Binomial, Link.fromName(link))
      model.transform(dataset).select("features", "prediction", "linkPrediction").collect()
        .foreach {
          case Row(features: DenseVector, prediction1: Double, linkPrediction1: Double) =>
            val eta = BLAS.dot(features, model.coefficients) + model.intercept
            val prediction2 = familyLink.fitted(eta)
            val linkPrediction2 = eta
            assert(prediction1 ~= prediction2 relTol 1E-5, "Prediction mismatch: GLM with " +
              s"binomial family, $link link and fitIntercept = $fitIntercept.")
            assert(linkPrediction1 ~= linkPrediction2 relTol 1E-5, "Link Prediction mismatch: " +
              s"GLM with binomial family, $link link and fitIntercept = $fitIntercept.")
        }
      idx += 1
    }
  }
}
// Fits poisson-family GLMs for each supported link (log/identity/sqrt),
// with and without intercept, and compares coefficients against R's glm
// output (R script inlined below). Also validates the prediction and
// linkPrediction columns against values recomputed from the coefficients.
test("generalized linear regression: poisson family against glm") {
  /*
  R code:
  f1 <- data$V1 ~ data$V2 + data$V3 - 1
  f2 <- data$V1 ~ data$V2 + data$V3
  data <- read.csv("path", header=FALSE)
  for (formula in c(f1, f2)) {
  model <- glm(formula, family="poisson", data=data)
  print(as.vector(coef(model)))
  }
  [1] 0.22999393 0.08047088
  [1] 0.25022353 0.21998599 0.05998621
  data <- read.csv("path", header=FALSE)
  for (formula in c(f1, f2)) {
  model <- glm(formula, family=poisson(link=identity), data=data)
  print(as.vector(coef(model)))
  }
  [1] 2.2929501 0.8119415
  [1] 2.5012730 2.1999407 0.5999107
  data <- read.csv("path", header=FALSE)
  for (formula in c(f1, f2)) {
  model <- glm(formula, family=poisson(link=sqrt), data=data)
  print(as.vector(coef(model)))
  }
  [1] 2.2958947 0.8090515
  [1] 2.5000480 2.1999972 0.5999968
  */
  // Expected (intercept, coef1, coef2) per (link, fitIntercept), in loop order.
  val expected = Seq(
    Vectors.dense(0.0, 0.22999393, 0.08047088),
    Vectors.dense(0.25022353, 0.21998599, 0.05998621),
    Vectors.dense(0.0, 2.2929501, 0.8119415),
    Vectors.dense(2.5012730, 2.1999407, 0.5999107),
    Vectors.dense(0.0, 2.2958947, 0.8090515),
    Vectors.dense(2.5000480, 2.1999972, 0.5999968))
  import GeneralizedLinearRegression._
  var idx = 0
  for ((link, dataset) <- Seq(("log", datasetPoissonLog), ("identity", datasetPoissonIdentity),
    ("sqrt", datasetPoissonSqrt))) {
    for (fitIntercept <- Seq(false, true)) {
      val trainer = new GeneralizedLinearRegression().setFamily("poisson").setLink(link)
        .setFitIntercept(fitIntercept).setLinkPredictionCol("linkPrediction")
      val model = trainer.fit(dataset)
      val actual = Vectors.dense(model.intercept, model.coefficients(0), model.coefficients(1))
      assert(actual ~= expected(idx) absTol 1e-4, "Model mismatch: GLM with poisson family, " +
        s"$link link and fitIntercept = $fitIntercept.")
      // Recompute eta = X * beta + intercept and verify both output columns.
      val familyLink = new FamilyAndLink(Poisson, Link.fromName(link))
      model.transform(dataset).select("features", "prediction", "linkPrediction").collect()
        .foreach {
          case Row(features: DenseVector, prediction1: Double, linkPrediction1: Double) =>
            val eta = BLAS.dot(features, model.coefficients) + model.intercept
            val prediction2 = familyLink.fitted(eta)
            val linkPrediction2 = eta
            assert(prediction1 ~= prediction2 relTol 1E-5, "Prediction mismatch: GLM with " +
              s"poisson family, $link link and fitIntercept = $fitIntercept.")
            assert(linkPrediction1 ~= linkPrediction2 relTol 1E-5, "Link Prediction mismatch: " +
              s"GLM with poisson family, $link link and fitIntercept = $fitIntercept.")
        }
      idx += 1
    }
  }
}
// Fits gamma-family GLMs for each supported link (inverse/identity/log),
// with and without intercept, and compares coefficients against R's glm
// output (R script inlined below). Also validates the prediction and
// linkPrediction columns against values recomputed from the coefficients.
test("generalized linear regression: gamma family against glm") {
  /*
  R code:
  f1 <- data$V1 ~ data$V2 + data$V3 - 1
  f2 <- data$V1 ~ data$V2 + data$V3
  data <- read.csv("path", header=FALSE)
  for (formula in c(f1, f2)) {
  model <- glm(formula, family="Gamma", data=data)
  print(as.vector(coef(model)))
  }
  [1] 2.3392419 0.8058058
  [1] 2.3507700 2.2533574 0.6042991
  data <- read.csv("path", header=FALSE)
  for (formula in c(f1, f2)) {
  model <- glm(formula, family=Gamma(link=identity), data=data)
  print(as.vector(coef(model)))
  }
  [1] 2.2908883 0.8147796
  [1] 2.5002406 2.1998346 0.6000059
  data <- read.csv("path", header=FALSE)
  for (formula in c(f1, f2)) {
  model <- glm(formula, family=Gamma(link=log), data=data)
  print(as.vector(coef(model)))
  }
  [1] 0.22958970 0.08091066
  [1] 0.25003210 0.21996957 0.06000215
  */
  // Expected (intercept, coef1, coef2) per (link, fitIntercept), in loop order.
  val expected = Seq(
    Vectors.dense(0.0, 2.3392419, 0.8058058),
    Vectors.dense(2.3507700, 2.2533574, 0.6042991),
    Vectors.dense(0.0, 2.2908883, 0.8147796),
    Vectors.dense(2.5002406, 2.1998346, 0.6000059),
    Vectors.dense(0.0, 0.22958970, 0.08091066),
    Vectors.dense(0.25003210, 0.21996957, 0.06000215))
  import GeneralizedLinearRegression._
  var idx = 0
  for ((link, dataset) <- Seq(("inverse", datasetGammaInverse),
    ("identity", datasetGammaIdentity), ("log", datasetGammaLog))) {
    for (fitIntercept <- Seq(false, true)) {
      val trainer = new GeneralizedLinearRegression().setFamily("gamma").setLink(link)
        .setFitIntercept(fitIntercept).setLinkPredictionCol("linkPrediction")
      val model = trainer.fit(dataset)
      val actual = Vectors.dense(model.intercept, model.coefficients(0), model.coefficients(1))
      assert(actual ~= expected(idx) absTol 1e-4, "Model mismatch: GLM with gamma family, " +
        s"$link link and fitIntercept = $fitIntercept.")
      // Recompute eta = X * beta + intercept and verify both output columns.
      val familyLink = new FamilyAndLink(Gamma, Link.fromName(link))
      model.transform(dataset).select("features", "prediction", "linkPrediction").collect()
        .foreach {
          case Row(features: DenseVector, prediction1: Double, linkPrediction1: Double) =>
            val eta = BLAS.dot(features, model.coefficients) + model.intercept
            val prediction2 = familyLink.fitted(eta)
            val linkPrediction2 = eta
            assert(prediction1 ~= prediction2 relTol 1E-5, "Prediction mismatch: GLM with " +
              s"gamma family, $link link and fitIntercept = $fitIntercept.")
            assert(linkPrediction1 ~= linkPrediction2 relTol 1E-5, "Link Prediction mismatch: " +
              s"GLM with gamma family, $link link and fitIntercept = $fitIntercept.")
        }
      idx += 1
    }
  }
}
test("glm summary: gaussian family with weight") {
// Validates the training summary (coefficients, all four residual types,
// standard errors, t/p values, dispersion, deviances, degrees of freedom, AIC)
// of a weighted gaussian GLM against reference values produced by R's glm().
/*
R code:
A <- matrix(c(0, 1, 2, 3, 5, 7, 11, 13), 4, 2)
b <- c(17, 19, 23, 29)
w <- c(1, 2, 3, 4)
df <- as.data.frame(cbind(A, b))
*/
val datasetWithWeight = spark.createDataFrame(sc.parallelize(Seq(
Instance(17.0, 1.0, Vectors.dense(0.0, 5.0).toSparse),
Instance(19.0, 2.0, Vectors.dense(1.0, 7.0)),
Instance(23.0, 3.0, Vectors.dense(2.0, 11.0)),
Instance(29.0, 4.0, Vectors.dense(3.0, 13.0))
), 2))
/*
R code:
model <- glm(formula = "b ~ .", family="gaussian", data = df, weights = w)
summary(model)
Deviance Residuals:
1 2 3 4
1.920 -1.358 -1.109 0.960
Coefficients:
Estimate Std. Error t value Pr(>|t|)
(Intercept) 18.080 9.608 1.882 0.311
V1 6.080 5.556 1.094 0.471
V2 -0.600 1.960 -0.306 0.811
(Dispersion parameter for gaussian family taken to be 7.68)
Null deviance: 202.00 on 3 degrees of freedom
Residual deviance: 7.68 on 1 degrees of freedom
AIC: 18.783
Number of Fisher Scoring iterations: 2
residuals(model, type="pearson")
1 2 3 4
1.920000 -1.357645 -1.108513 0.960000
residuals(model, type="working")
1 2 3 4
1.92 -0.96 -0.64 0.48
residuals(model, type="response")
1 2 3 4
1.92 -0.96 -0.64 0.48
*/
val trainer = new GeneralizedLinearRegression()
.setWeightCol("weight")
val model = trainer.fit(datasetWithWeight)
val coefficientsR = Vectors.dense(Array(6.080, -0.600))
val interceptR = 18.080
val devianceResidualsR = Array(1.920, -1.358, -1.109, 0.960)
val pearsonResidualsR = Array(1.920000, -1.357645, -1.108513, 0.960000)
val workingResidualsR = Array(1.92, -0.96, -0.64, 0.48)
val responseResidualsR = Array(1.92, -0.96, -0.64, 0.48)
// Std errors / t values / p values are ordered (V1, V2, intercept): Spark lists
// coefficient statistics first and the intercept last, unlike the R printout.
val seCoefR = Array(5.556, 1.960, 9.608)
val tValsR = Array(1.094, -0.306, 1.882)
val pValsR = Array(0.471, 0.811, 0.311)
val dispersionR = 7.68
val nullDevianceR = 202.00
val residualDevianceR = 7.68
val residualDegreeOfFreedomNullR = 3
val residualDegreeOfFreedomR = 1
val aicR = 18.783
assert(model.hasSummary)
val summary = model.summary
assert(summary.isInstanceOf[GeneralizedLinearRegressionTrainingSummary])
val devianceResiduals = summary.residuals()
.select(col("devianceResiduals"))
.collect()
.map(_.getDouble(0))
val pearsonResiduals = summary.residuals("pearson")
.select(col("pearsonResiduals"))
.collect()
.map(_.getDouble(0))
val workingResiduals = summary.residuals("working")
.select(col("workingResiduals"))
.collect()
.map(_.getDouble(0))
val responseResiduals = summary.residuals("response")
.select(col("responseResiduals"))
.collect()
.map(_.getDouble(0))
assert(model.coefficients ~== coefficientsR absTol 1E-3)
assert(model.intercept ~== interceptR absTol 1E-3)
devianceResiduals.zip(devianceResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
pearsonResiduals.zip(pearsonResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
workingResiduals.zip(workingResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
responseResiduals.zip(responseResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
summary.coefficientStandardErrors.zip(seCoefR).foreach{ x =>
assert(x._1 ~== x._2 absTol 1E-3) }
summary.tValues.zip(tValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) }
summary.pValues.zip(pValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) }
assert(summary.dispersion ~== dispersionR absTol 1E-3)
assert(summary.nullDeviance ~== nullDevianceR absTol 1E-3)
assert(summary.deviance ~== residualDevianceR absTol 1E-3)
assert(summary.residualDegreeOfFreedom === residualDegreeOfFreedomR)
assert(summary.residualDegreeOfFreedomNull === residualDegreeOfFreedomNullR)
assert(summary.aic ~== aicR absTol 1E-3)
assert(summary.solver === "irls")
// Re-evaluating the fitted model on the same data must reproduce the
// training summary's statistics exactly (===, no tolerance).
val summary2: GeneralizedLinearRegressionSummary = model.evaluate(datasetWithWeight)
assert(summary.predictions.columns.toSet === summary2.predictions.columns.toSet)
assert(summary.predictionCol === summary2.predictionCol)
assert(summary.rank === summary2.rank)
assert(summary.degreesOfFreedom === summary2.degreesOfFreedom)
assert(summary.residualDegreeOfFreedom === summary2.residualDegreeOfFreedom)
assert(summary.residualDegreeOfFreedomNull === summary2.residualDegreeOfFreedomNull)
assert(summary.nullDeviance === summary2.nullDeviance)
assert(summary.deviance === summary2.deviance)
assert(summary.dispersion === summary2.dispersion)
assert(summary.aic === summary2.aic)
}
test("glm summary: binomial family with weight") {
// Validates the training summary of a weighted, intercept-free binomial GLM
// against R's glm() ("b ~ . -1" matches setFitIntercept(false) below).
/*
R code:
A <- matrix(c(0, 1, 2, 3, 5, 2, 1, 3), 4, 2)
b <- c(1, 0, 1, 0)
w <- c(1, 2, 3, 4)
df <- as.data.frame(cbind(A, b))
*/
val datasetWithWeight = spark.createDataFrame(sc.parallelize(Seq(
Instance(1.0, 1.0, Vectors.dense(0.0, 5.0).toSparse),
Instance(0.0, 2.0, Vectors.dense(1.0, 2.0)),
Instance(1.0, 3.0, Vectors.dense(2.0, 1.0)),
Instance(0.0, 4.0, Vectors.dense(3.0, 3.0))
), 2))
/*
R code:
model <- glm(formula = "b ~ . -1", family="binomial", data = df, weights = w)
summary(model)
Deviance Residuals:
1 2 3 4
1.273 -1.437 2.533 -1.556
Coefficients:
Estimate Std. Error z value Pr(>|z|)
V1 -0.30217 0.46242 -0.653 0.513
V2 -0.04452 0.37124 -0.120 0.905
(Dispersion parameter for binomial family taken to be 1)
Null deviance: 13.863 on 4 degrees of freedom
Residual deviance: 12.524 on 2 degrees of freedom
AIC: 16.524
Number of Fisher Scoring iterations: 5
residuals(model, type="pearson")
1 2 3 4
1.117731 -1.162962 2.395838 -1.189005
residuals(model, type="working")
1 2 3 4
2.249324 -1.676240 2.913346 -1.353433
residuals(model, type="response")
1 2 3 4
0.5554219 -0.4034267 0.6567520 -0.2611382
*/
val trainer = new GeneralizedLinearRegression()
.setFamily("binomial")
.setWeightCol("weight")
.setFitIntercept(false)
val model = trainer.fit(datasetWithWeight)
val coefficientsR = Vectors.dense(Array(-0.30217, -0.04452))
// No intercept is fitted, so the model's intercept must be exactly zero.
val interceptR = 0.0
val devianceResidualsR = Array(1.273, -1.437, 2.533, -1.556)
val pearsonResidualsR = Array(1.117731, -1.162962, 2.395838, -1.189005)
val workingResidualsR = Array(2.249324, -1.676240, 2.913346, -1.353433)
val responseResidualsR = Array(0.5554219, -0.4034267, 0.6567520, -0.2611382)
val seCoefR = Array(0.46242, 0.37124)
val tValsR = Array(-0.653, -0.120)
val pValsR = Array(0.513, 0.905)
val dispersionR = 1.0
val nullDevianceR = 13.863
val residualDevianceR = 12.524
val residualDegreeOfFreedomNullR = 4
val residualDegreeOfFreedomR = 2
val aicR = 16.524
val summary = model.summary
val devianceResiduals = summary.residuals()
.select(col("devianceResiduals"))
.collect()
.map(_.getDouble(0))
val pearsonResiduals = summary.residuals("pearson")
.select(col("pearsonResiduals"))
.collect()
.map(_.getDouble(0))
val workingResiduals = summary.residuals("working")
.select(col("workingResiduals"))
.collect()
.map(_.getDouble(0))
val responseResiduals = summary.residuals("response")
.select(col("responseResiduals"))
.collect()
.map(_.getDouble(0))
assert(model.coefficients ~== coefficientsR absTol 1E-3)
assert(model.intercept ~== interceptR absTol 1E-3)
devianceResiduals.zip(devianceResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
pearsonResiduals.zip(pearsonResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
workingResiduals.zip(workingResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
responseResiduals.zip(responseResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
summary.coefficientStandardErrors.zip(seCoefR).foreach{ x =>
assert(x._1 ~== x._2 absTol 1E-3) }
summary.tValues.zip(tValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) }
summary.pValues.zip(pValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) }
assert(summary.dispersion ~== dispersionR absTol 1E-3)
assert(summary.nullDeviance ~== nullDevianceR absTol 1E-3)
assert(summary.deviance ~== residualDevianceR absTol 1E-3)
assert(summary.residualDegreeOfFreedom === residualDegreeOfFreedomR)
assert(summary.residualDegreeOfFreedomNull === residualDegreeOfFreedomNullR)
assert(summary.aic ~== aicR absTol 1E-3)
assert(summary.solver === "irls")
}
test("glm summary: poisson family with weight") {
// Validates the training summary of a weighted poisson GLM (with intercept)
// against reference values from R's glm().
/*
R code:
A <- matrix(c(0, 1, 2, 3, 5, 7, 11, 13), 4, 2)
b <- c(2, 8, 3, 9)
w <- c(1, 2, 3, 4)
df <- as.data.frame(cbind(A, b))
*/
val datasetWithWeight = spark.createDataFrame(sc.parallelize(Seq(
Instance(2.0, 1.0, Vectors.dense(0.0, 5.0).toSparse),
Instance(8.0, 2.0, Vectors.dense(1.0, 7.0)),
Instance(3.0, 3.0, Vectors.dense(2.0, 11.0)),
Instance(9.0, 4.0, Vectors.dense(3.0, 13.0))
), 2))
/*
R code:
model <- glm(formula = "b ~ .", family="poisson", data = df, weights = w)
summary(model)
Deviance Residuals:
1 2 3 4
-0.28952 0.11048 0.14839 -0.07268
Coefficients:
Estimate Std. Error z value Pr(>|z|)
(Intercept) 6.2999 1.6086 3.916 8.99e-05 ***
V1 3.3241 1.0184 3.264 0.00110 **
V2 -1.0818 0.3522 -3.071 0.00213 **
---
Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
(Dispersion parameter for poisson family taken to be 1)
Null deviance: 15.38066 on 3 degrees of freedom
Residual deviance: 0.12333 on 1 degrees of freedom
AIC: 41.803
Number of Fisher Scoring iterations: 3
residuals(model, type="pearson")
1 2 3 4
-0.28043145 0.11099310 0.14963714 -0.07253611
residuals(model, type="working")
1 2 3 4
-0.17960679 0.02813593 0.05113852 -0.01201650
residuals(model, type="response")
1 2 3 4
-0.4378554 0.2189277 0.1459518 -0.1094638
*/
val trainer = new GeneralizedLinearRegression()
.setFamily("poisson")
.setWeightCol("weight")
.setFitIntercept(true)
val model = trainer.fit(datasetWithWeight)
val coefficientsR = Vectors.dense(Array(3.3241, -1.0818))
val interceptR = 6.2999
val devianceResidualsR = Array(-0.28952, 0.11048, 0.14839, -0.07268)
val pearsonResidualsR = Array(-0.28043145, 0.11099310, 0.14963714, -0.07253611)
val workingResidualsR = Array(-0.17960679, 0.02813593, 0.05113852, -0.01201650)
val responseResidualsR = Array(-0.4378554, 0.2189277, 0.1459518, -0.1094638)
// Ordered (V1, V2, intercept): Spark puts the intercept statistic last.
val seCoefR = Array(1.0184, 0.3522, 1.6086)
val tValsR = Array(3.264, -3.071, 3.916)
// The last entry approximates R's 8.99e-05 within the 1E-3 tolerance used below.
val pValsR = Array(0.00110, 0.00213, 0.00009)
val dispersionR = 1.0
val nullDevianceR = 15.38066
val residualDevianceR = 0.12333
val residualDegreeOfFreedomNullR = 3
val residualDegreeOfFreedomR = 1
val aicR = 41.803
val summary = model.summary
val devianceResiduals = summary.residuals()
.select(col("devianceResiduals"))
.collect()
.map(_.getDouble(0))
val pearsonResiduals = summary.residuals("pearson")
.select(col("pearsonResiduals"))
.collect()
.map(_.getDouble(0))
val workingResiduals = summary.residuals("working")
.select(col("workingResiduals"))
.collect()
.map(_.getDouble(0))
val responseResiduals = summary.residuals("response")
.select(col("responseResiduals"))
.collect()
.map(_.getDouble(0))
assert(model.coefficients ~== coefficientsR absTol 1E-3)
assert(model.intercept ~== interceptR absTol 1E-3)
devianceResiduals.zip(devianceResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
pearsonResiduals.zip(pearsonResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
workingResiduals.zip(workingResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
responseResiduals.zip(responseResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
summary.coefficientStandardErrors.zip(seCoefR).foreach{ x =>
assert(x._1 ~== x._2 absTol 1E-3) }
summary.tValues.zip(tValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) }
summary.pValues.zip(pValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) }
assert(summary.dispersion ~== dispersionR absTol 1E-3)
assert(summary.nullDeviance ~== nullDevianceR absTol 1E-3)
assert(summary.deviance ~== residualDevianceR absTol 1E-3)
assert(summary.residualDegreeOfFreedom === residualDegreeOfFreedomR)
assert(summary.residualDegreeOfFreedomNull === residualDegreeOfFreedomNullR)
assert(summary.aic ~== aicR absTol 1E-3)
assert(summary.solver === "irls")
}
test("glm summary: gamma family with weight") {
// Validates the training summary of a weighted gamma GLM against R's glm().
// Unlike poisson/binomial, gamma's dispersion is estimated (not fixed at 1).
/*
R code:
A <- matrix(c(0, 1, 2, 3, 5, 7, 11, 13), 4, 2)
b <- c(2, 8, 3, 9)
w <- c(1, 2, 3, 4)
df <- as.data.frame(cbind(A, b))
*/
val datasetWithWeight = spark.createDataFrame(sc.parallelize(Seq(
Instance(2.0, 1.0, Vectors.dense(0.0, 5.0).toSparse),
Instance(8.0, 2.0, Vectors.dense(1.0, 7.0)),
Instance(3.0, 3.0, Vectors.dense(2.0, 11.0)),
Instance(9.0, 4.0, Vectors.dense(3.0, 13.0))
), 2))
/*
R code:
model <- glm(formula = "b ~ .", family="Gamma", data = df, weights = w)
summary(model)
Deviance Residuals:
1 2 3 4
-0.26343 0.05761 0.12818 -0.03484
Coefficients:
Estimate Std. Error t value Pr(>|t|)
(Intercept) -0.81511 0.23449 -3.476 0.178
V1 -0.72730 0.16137 -4.507 0.139
V2 0.23894 0.05481 4.359 0.144
(Dispersion parameter for Gamma family taken to be 0.07986091)
Null deviance: 2.937462 on 3 degrees of freedom
Residual deviance: 0.090358 on 1 degrees of freedom
AIC: 23.202
Number of Fisher Scoring iterations: 4
residuals(model, type="pearson")
1 2 3 4
-0.24082508 0.05839241 0.13135766 -0.03463621
residuals(model, type="working")
1 2 3 4
0.091414181 -0.005374314 -0.027196998 0.001890910
residuals(model, type="response")
1 2 3 4
-0.6344390 0.3172195 0.2114797 -0.1586097
*/
val trainer = new GeneralizedLinearRegression()
.setFamily("gamma")
.setWeightCol("weight")
val model = trainer.fit(datasetWithWeight)
val coefficientsR = Vectors.dense(Array(-0.72730, 0.23894))
val interceptR = -0.81511
val devianceResidualsR = Array(-0.26343, 0.05761, 0.12818, -0.03484)
val pearsonResidualsR = Array(-0.24082508, 0.05839241, 0.13135766, -0.03463621)
val workingResidualsR = Array(0.091414181, -0.005374314, -0.027196998, 0.001890910)
val responseResidualsR = Array(-0.6344390, 0.3172195, 0.2114797, -0.1586097)
// Ordered (V1, V2, intercept): Spark puts the intercept statistic last.
val seCoefR = Array(0.16137, 0.05481, 0.23449)
val tValsR = Array(-4.507, 4.359, -3.476)
val pValsR = Array(0.139, 0.144, 0.178)
val dispersionR = 0.07986091
val nullDevianceR = 2.937462
val residualDevianceR = 0.090358
val residualDegreeOfFreedomNullR = 3
val residualDegreeOfFreedomR = 1
val aicR = 23.202
val summary = model.summary
val devianceResiduals = summary.residuals()
.select(col("devianceResiduals"))
.collect()
.map(_.getDouble(0))
val pearsonResiduals = summary.residuals("pearson")
.select(col("pearsonResiduals"))
.collect()
.map(_.getDouble(0))
val workingResiduals = summary.residuals("working")
.select(col("workingResiduals"))
.collect()
.map(_.getDouble(0))
val responseResiduals = summary.residuals("response")
.select(col("responseResiduals"))
.collect()
.map(_.getDouble(0))
assert(model.coefficients ~== coefficientsR absTol 1E-3)
assert(model.intercept ~== interceptR absTol 1E-3)
devianceResiduals.zip(devianceResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
pearsonResiduals.zip(pearsonResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
workingResiduals.zip(workingResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
responseResiduals.zip(responseResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
summary.coefficientStandardErrors.zip(seCoefR).foreach{ x =>
assert(x._1 ~== x._2 absTol 1E-3) }
summary.tValues.zip(tValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) }
summary.pValues.zip(pValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) }
assert(summary.dispersion ~== dispersionR absTol 1E-3)
assert(summary.nullDeviance ~== nullDevianceR absTol 1E-3)
assert(summary.deviance ~== residualDevianceR absTol 1E-3)
assert(summary.residualDegreeOfFreedom === residualDegreeOfFreedomR)
assert(summary.residualDegreeOfFreedomNull === residualDegreeOfFreedomNullR)
assert(summary.aic ~== aicR absTol 1E-3)
assert(summary.solver === "irls")
}
test("read/write") {
  // Round-trip the estimator and a fitted model through save/load, verifying
  // that the learned intercept and coefficients survive unchanged.
  def assertSameFit(
      original: GeneralizedLinearRegressionModel,
      restored: GeneralizedLinearRegressionModel): Unit = {
    assert(original.intercept === restored.intercept)
    assert(original.coefficients.toArray === restored.coefficients.toArray)
  }
  val estimator = new GeneralizedLinearRegression()
  testEstimatorAndModelReadWrite(
    estimator, datasetPoissonLog, GeneralizedLinearRegressionSuite.allParamSettings,
    assertSameFit)
}
test("should support all NumericType labels and not support other types") {
  // A single iteration suffices: only label-type handling is under test here,
  // not convergence of the fit.
  val estimator = new GeneralizedLinearRegression().setMaxIter(1)
  MLTestingUtils.checkNumericTypes[
      GeneralizedLinearRegressionModel, GeneralizedLinearRegression](
    estimator, spark, isClassification = false) { (refModel, castModel) =>
    // The model fit on a cast label column must match the reference fit exactly.
    assert(refModel.intercept === castModel.intercept)
    assert(refModel.coefficients === castModel.coefficients)
  }
}
test("glm accepts Dataset[LabeledPoint]") {
  // Fitting on a typed Dataset (rather than a DataFrame) must compile and run.
  val session = spark
  import session.implicits._
  val trainer = new GeneralizedLinearRegression().setFamily("gaussian")
  trainer.fit(datasetGaussianIdentity.as[LabeledPoint])
}
test("evaluate with labels that are not doubles") {
  // Evaluate with a dataset whose label column is not DoubleType, to verify that
  // evaluate() casts the label correctly before computing summary statistics.
  // (Fixes comment typo "Evaulate" and the misleading local name
  // "longLabelDataset" — the cast below is to FloatType, not a long type.)
  val dataset = spark.createDataFrame(sc.parallelize(Seq(
    Instance(17.0, 1.0, Vectors.dense(0.0, 5.0).toSparse),
    Instance(19.0, 1.0, Vectors.dense(1.0, 7.0)),
    Instance(23.0, 1.0, Vectors.dense(2.0, 11.0)),
    Instance(29.0, 1.0, Vectors.dense(3.0, 13.0))
  ), 2))
  val trainer = new GeneralizedLinearRegression()
    .setMaxIter(1)
  val model = trainer.fit(dataset)
  assert(model.hasSummary)
  val summary = model.summary
  // Same data, but with the label column cast to FloatType.
  val floatLabelDataset = dataset.select(col(model.getLabelCol).cast(FloatType),
    col(model.getFeaturesCol))
  val evalSummary = model.evaluate(floatLabelDataset)
  // The calculations below involve pattern matching with Label as a double;
  // the float-labelled evaluation must agree with the training summary.
  assert(evalSummary.nullDeviance === summary.nullDeviance)
  assert(evalSummary.deviance === summary.deviance)
  assert(evalSummary.aic === summary.aic)
}
}
object GeneralizedLinearRegressionSuite {
/**
* Mapping from all Params to valid settings which differ from the defaults.
* This is useful for tests which need to exercise all Params, such as save/load.
* This excludes input columns to simplify some tests.
*/
val allParamSettings: Map[String, Any] = Map(
"family" -> "poisson",
"link" -> "log",
"fitIntercept" -> true,
"maxIter" -> 2, // intentionally small
"tol" -> 0.8,
"regParam" -> 0.01,
"predictionCol" -> "myPrediction")
/**
* Generates GLM test data: features drawn uniformly with the given per-feature
* mean/variance, labels computed as inverse-link(eta) plus centered noise from a
* family-matched generator.
*
* NOTE(review): the family match below has no "binomial" case and the link match
* no cases beyond identity/log/sqrt/inverse, so unsupported values throw
* MatchError — presumably intentional for this suite's usage; confirm if reused.
*/
def generateGeneralizedLinearRegressionInput(
intercept: Double,
coefficients: Array[Double],
xMean: Array[Double],
xVariance: Array[Double],
nPoints: Int,
seed: Int,
noiseLevel: Double,
family: String,
link: String): Seq[LabeledPoint] = {
val rnd = new Random(seed)
// Uniform draw on xMean(i) +/- sqrt(3 * xVariance(i)); a uniform of width
// sqrt(12 * v) has variance v, so this matches the requested mean/variance.
def rndElement(i: Int) = {
(rnd.nextDouble() - 0.5) * math.sqrt(12.0 * xVariance(i)) + xMean(i)
}
// `mean` is the noise generator's expected value; it is subtracted below so
// the added noise is centered at zero.
val (generator, mean) = family match {
case "gaussian" => (new StandardNormalGenerator, 0.0)
case "poisson" => (new PoissonGenerator(1.0), 1.0)
case "gamma" => (new GammaGenerator(1.0, 1.0), 1.0)
}
generator.setSeed(seed)
(0 until nPoints).map { _ =>
val features = Vectors.dense(coefficients.indices.map(rndElement).toArray)
val eta = BLAS.dot(Vectors.dense(coefficients), features) + intercept
// mu = inverse-link(eta); e.g. for the sqrt link the inverse is eta^2.
val mu = link match {
case "identity" => eta
case "log" => math.exp(eta)
case "sqrt" => math.pow(eta, 2.0)
case "inverse" => 1.0 / eta
}
val label = mu + noiseLevel * (generator.nextValue() - mean)
// Return LabeledPoints with DenseVector
LabeledPoint(label, features)
}
}
}
| gioenn/xSpark | mllib/src/test/scala/org/apache/spark/ml/regression/GeneralizedLinearRegressionSuite.scala | Scala | apache-2.0 | 45,475 |
package gitbucket.core.controller
import gitbucket.core.settings.html
import gitbucket.core.model.WebHook
import gitbucket.core.service.{RepositoryService, AccountService, WebHookService, ProtectedBranchService, CommitStatusService}
import gitbucket.core.service.WebHookService._
import gitbucket.core.util._
import gitbucket.core.util.JGitUtil._
import gitbucket.core.util.ControlUtil._
import gitbucket.core.util.Implicits._
import gitbucket.core.util.Directory._
import io.github.gitbucket.scalatra.forms._
import org.apache.commons.io.FileUtils
import org.scalatra.i18n.Messages
import org.eclipse.jgit.api.Git
import org.eclipse.jgit.lib.Constants
import org.eclipse.jgit.lib.ObjectId
import gitbucket.core.model.WebHookContentType
/**
 * Concrete controller for the per-repository settings pages; all behavior lives
 * in [[RepositorySettingsControllerBase]], with services mixed in here.
 */
class RepositorySettingsController extends RepositorySettingsControllerBase
with RepositoryService with AccountService with WebHookService with ProtectedBranchService with CommitStatusService
with OwnerAuthenticator with UsersAuthenticator
trait RepositorySettingsControllerBase extends ControllerBase {
self: RepositoryService with AccountService with WebHookService with ProtectedBranchService with CommitStatusService
with OwnerAuthenticator with UsersAuthenticator =>
// for repository options
/**
 * Form values posted from the repository "Options" settings page.
 * issuesOption / wikiOption select a feature mode (validated by `featureOption`
 * in the mapping below); the external URLs presumably apply when the
 * corresponding option selects an external service — confirm in the options view.
 */
case class OptionsForm(
repositoryName: String,
description: Option[String],
isPrivate: Boolean,
issuesOption: String,
externalIssuesUrl: Option[String],
wikiOption: String,
externalWikiUrl: Option[String],
allowFork: Boolean
)
// Validation for OptionsForm: repository names are limited to 100 chars,
// identifier-shaped, and checked for collisions via `renameRepositoryName`;
// external URLs are capped at 200 chars.
val optionsForm = mapping(
"repositoryName" -> trim(label("Repository Name" , text(required, maxlength(100), identifier, renameRepositoryName))),
"description" -> trim(label("Description" , optional(text()))),
"isPrivate" -> trim(label("Repository Type" , boolean())),
"issuesOption" -> trim(label("Issues Option" , text(required, featureOption))),
"externalIssuesUrl" -> trim(label("External Issues URL", optional(text(maxlength(200))))),
"wikiOption" -> trim(label("Wiki Option" , text(required, featureOption))),
"externalWikiUrl" -> trim(label("External Wiki URL" , optional(text(maxlength(200))))),
"allowFork" -> trim(label("Allow Forking" , boolean()))
)(OptionsForm.apply)
// for default branch
// Form carrying the branch name selected as the repository's default branch.
case class DefaultBranchForm(defaultBranch: String)
val defaultBranchForm = mapping(
"defaultBranch" -> trim(label("Default Branch" , text(required, maxlength(100))))
)(DefaultBranchForm.apply)
// // for collaborator addition
// case class CollaboratorForm(userName: String)
//
// val collaboratorForm = mapping(
// "userName" -> trim(label("Username", text(required, collaborator)))
// )(CollaboratorForm.apply)
// for web hook url addition
// Form for creating/editing a web hook: target URL, subscribed events,
// payload content type and optional secret token.
case class WebHookForm(url: String, events: Set[WebHook.Event], ctype: WebHookContentType, token: Option[String])
// `update` controls the duplicate-URL check (see the `webHook` constraint below):
// on edit the URL must already exist, on create it must not.
def webHookForm(update:Boolean) = mapping(
"url" -> trim(label("url", text(required, webHook(update)))),
"events" -> webhookEvents,
"ctype" -> label("ctype", text()),
"token" -> optional(trim(label("token", text(maxlength(100)))))
)(
// ctype arrives as text and is converted to the WebHookContentType enum here.
(url, events, ctype, token) => WebHookForm(url, events, WebHookContentType.valueOf(ctype), token)
)
// for transfer ownership
// Form carrying the user/group name that will become the new repository owner;
// `transferUser` validates the target account.
case class TransferOwnerShipForm(newOwner: String)
val transferForm = mapping(
"newOwner" -> trim(label("New owner", text(required, transferUser)))
)(TransferOwnerShipForm.apply)
/**
* Redirects the bare settings URL to the Options page (repository owner only).
*/
get("/:owner/:repository/settings")(ownerOnly { repository =>
redirect(s"/${repository.owner}/${repository.name}/settings/options")
})
/**
* Display the Options page, with any flash message from a prior save.
*/
get("/:owner/:repository/settings/options")(ownerOnly {
html.options(_, flash.get("info"))
})
/**
* Save the repository options. If the repository name changed, also renames the
* database records and moves both the git and wiki directories on disk.
*/
post("/:owner/:repository/settings/options", optionsForm)(ownerOnly { (form, repository) =>
saveRepositoryOptions(
repository.owner,
repository.name,
form.description,
// A forked repository keeps its current visibility; only non-forks may
// change between private and public here.
repository.repository.parentUserName.map { _ =>
repository.repository.isPrivate
} getOrElse form.isPrivate,
form.issuesOption,
form.externalIssuesUrl,
form.wikiOption,
form.externalWikiUrl,
form.allowFork
)
// Change repository name
if(repository.name != form.repositoryName){
// Update database
renameRepository(repository.owner, repository.name, repository.owner, form.repositoryName)
// Move git repository
defining(getRepositoryDir(repository.owner, repository.name)){ dir =>
FileUtils.moveDirectory(dir, getRepositoryDir(repository.owner, form.repositoryName))
}
// Move wiki repository
defining(getWikiRepositoryDir(repository.owner, repository.name)){ dir =>
FileUtils.moveDirectory(dir, getWikiRepositoryDir(repository.owner, form.repositoryName))
}
// NOTE(review): unlike the delete handler below, the rename does not touch the
// temporary directory — confirm nothing stale is left under the old name.
}
flash += "info" -> "Repository settings has been updated."
redirect(s"/${repository.owner}/${form.repositoryName}/settings/options")
})
/**
 * Branch settings page: lists the repository's protected-branch configuration.
 * (Fixes the misspelled local `protecteions` and removes a stray semicolon.)
 */
get("/:owner/:repository/settings/branches")(ownerOnly { repository =>
  val protections = getProtectedBranchList(repository.owner, repository.name)
  html.branches(repository, protections, flash.get("info"))
})
/**
 * Update the repository's default branch. Unknown branch names are ignored and
 * the user is sent back to the options page; otherwise the database record is
 * updated and the git HEAD ref is re-linked to the chosen branch.
 */
post("/:owner/:repository/settings/update_default_branch", defaultBranchForm)(ownerOnly { (form, repository) =>
  // Idiomatic membership test instead of `.find(_ == x).isEmpty`.
  if(!repository.branchList.contains(form.defaultBranch)){
    redirect(s"/${repository.owner}/${repository.name}/settings/options")
  } else {
    saveRepositoryDefaultBranch(repository.owner, repository.name, form.defaultBranch)
    // Change repository HEAD
    using(Git.open(getRepositoryDir(repository.owner, repository.name))) { git =>
      git.getRepository.updateRef(Constants.HEAD, true).link(Constants.R_HEADS + form.defaultBranch)
    }
    flash += "info" -> "Repository default branch has been updated."
    redirect(s"/${repository.owner}/${repository.name}/settings/branches")
  }
})
/**
 * Branch-protection settings page for a single branch. Unknown branches redirect
 * back to the branch list. The "known contexts" offered in the UI are the union
 * of commit-status contexts seen in the last week and those already required.
 */
get("/:owner/:repository/settings/branches/:branch")(ownerOnly { repository =>
  import gitbucket.core.api._
  val branch = params("branch")
  // Idiomatic membership test instead of `.find(_ == x).isEmpty`.
  if(!repository.branchList.contains(branch)){
    redirect(s"/${repository.owner}/${repository.name}/settings/branches")
  } else {
    val protection = ApiBranchProtection(getProtectedBranchInfo(repository.owner, repository.name, branch))
    val lastWeeks = getRecentStatuesContexts(repository.owner, repository.name, org.joda.time.LocalDateTime.now.minusWeeks(1).toDate).toSet
    val knownContexts = (lastWeeks ++ protection.status.contexts).toSeq.sortBy(identity)
    html.branchprotection(repository, branch, protection, knownContexts, flash.get("info"))
  }
})
/**
* Display the Collaborators page. Whether the owner is a group account changes
* how the page renders (groups manage membership differently).
*/
get("/:owner/:repository/settings/collaborators")(ownerOnly { repository =>
html.collaborators(
getCollaborators(repository.owner, repository.name),
getAccountByUserName(repository.owner).get.isGroupAccount,
repository)
})
// Replace the full collaborator list. The "collaborators" parameter is a
// comma-separated list of "userName:role" pairs.
post("/:owner/:repository/settings/collaborators")(ownerOnly { repository =>
val collaborators = params("collaborators")
removeCollaborators(repository.owner, repository.name)
collaborators.split(",").withFilter(_.nonEmpty).map { collaborator =>
// NOTE(review): throws MatchError if an entry is not exactly "user:role" —
// presumably the UI always sends well-formed values; confirm before hardening.
val userName :: role :: Nil = collaborator.split(":").toList
addCollaborator(repository.owner, repository.name, userName, role)
}
redirect(s"/${repository.owner}/${repository.name}/settings/collaborators")
})
/**
* Display the web hook list page.
*/
get("/:owner/:repository/settings/hooks")(ownerOnly { repository =>
html.hooks(getWebHooks(repository.owner, repository.name), repository, flash.get("info"))
})
/**
* Display the web hook creation page, pre-populated with an empty hook
* (FORM content type, no token) subscribed to the Push event.
*/
get("/:owner/:repository/settings/hooks/new")(ownerOnly { repository =>
val webhook = WebHook(repository.owner, repository.name, "", WebHookContentType.FORM, None)
html.edithooks(webhook, Set(WebHook.Push), repository, flash.get("info"), true)
})
/**
* Register a new web hook URL (webHookForm(false) rejects already-registered URLs).
*/
post("/:owner/:repository/settings/hooks/new", webHookForm(false))(ownerOnly { (form, repository) =>
addWebHook(repository.owner, repository.name, form.url, form.events, form.ctype, form.token)
flash += "info" -> s"Webhook ${form.url} created"
redirect(s"/${repository.owner}/${repository.name}/settings/hooks")
})
/**
* Delete the web hook identified by the "url" query parameter.
* NOTE(review): this is a state-changing GET (no CSRF token) — confirm whether
* this matches the project's other delete endpoints or should be a POST.
*/
get("/:owner/:repository/settings/hooks/delete")(ownerOnly { repository =>
deleteWebHook(repository.owner, repository.name, params("url"))
flash += "info" -> s"Webhook ${params("url")} deleted"
redirect(s"/${repository.owner}/${repository.name}/settings/hooks")
})
/**
* Send a test request to a web hook URL and report the request/response as JSON.
* Builds a dummy push payload from up to 4 recent commits on the default branch,
* fires it through the normal web hook machinery, then awaits both the outgoing
* request and the remote response (20s each), mapping failures to error strings.
*/
ajaxPost("/:owner/:repository/settings/hooks/test")(ownerOnly { repository =>
// Flatten HTTP headers into [[name, value], ...] pairs for JSON serialization.
def _headers(h: Array[org.apache.http.Header]): Array[Array[String]] = h.map { h => Array(h.getName, h.getValue) }
using(Git.open(getRepositoryDir(repository.owner, repository.name))){ git =>
import scala.collection.JavaConverters._
import scala.concurrent.duration._
import scala.concurrent._
import scala.util.control.NonFatal
import org.apache.http.util.EntityUtils
import scala.concurrent.ExecutionContext.Implicits.global
// The hook under test is described entirely by request parameters; it need
// not be one of the registered hooks.
val url = params("url")
val token = Some(params("token"))
val ctype = WebHookContentType.valueOf(params("ctype"))
val dummyWebHookInfo = WebHook(repository.owner, repository.name, url, ctype, token)
val dummyPayload = {
val ownerAccount = getAccountByUserName(repository.owner).get
// Up to 4 newest commits; empty repositories produce an empty list.
val commits = if(JGitUtil.isEmpty(git)) List.empty else git.log
.add(git.getRepository.resolve(repository.repository.defaultBranch))
.setMaxCount(4)
.call.iterator.asScala.map(new CommitInfo(_)).toList
// NOTE(review): drop(1) excludes the newest commit from the "pushed" list
// while newId below still points at it — confirm this asymmetry is intended.
val pushedCommit = commits.drop(1)
WebHookPushPayload(
git = git,
sender = ownerAccount,
refName = "refs/heads/" + repository.repository.defaultBranch,
repositoryInfo = repository,
commits = pushedCommit,
repositoryOwner = ownerAccount,
oldId = commits.lastOption.map(_.id).map(ObjectId.fromString).getOrElse(ObjectId.zeroId()),
newId = commits.headOption.map(_.id).map(ObjectId.fromString).getOrElse(ObjectId.zeroId())
)
}
val (webHook, json, reqFuture, resFuture) = callWebHook(WebHook.Push, List(dummyWebHookInfo), dummyPayload).head
// Convert anticipated failures into an {"error": ...} map for the client.
val toErrorMap: PartialFunction[Throwable, Map[String,String]] = {
case e: java.net.UnknownHostException => Map("error"-> ("Unknown host " + e.getMessage))
case e: java.lang.IllegalArgumentException => Map("error"-> ("invalid url"))
case e: org.apache.http.client.ClientProtocolException => Map("error"-> ("invalid url"))
case NonFatal(e) => Map("error"-> (e.getClass + " "+ e.getMessage))
}
contentType = formats("json")
// The "responce" key is misspelled but is part of the JSON contract with the
// client-side script — do not rename without changing the consumer.
org.json4s.jackson.Serialization.write(Map(
"url" -> url,
"request" -> Await.result(reqFuture.map(req => Map(
"headers" -> _headers(req.getAllHeaders),
"payload" -> json
)).recover(toErrorMap), 20 seconds),
"responce" -> Await.result(resFuture.map(res => Map(
"status" -> res.getStatusLine(),
"body" -> EntityUtils.toString(res.getEntity()),
"headers" -> _headers(res.getAllHeaders())
)).recover(toErrorMap), 20 seconds)
))
}
})
/**
* Display the web hook edit page for the hook matching the "url" parameter,
* or 404 if no such hook is registered.
*/
get("/:owner/:repository/settings/hooks/edit")(ownerOnly { repository =>
getWebHook(repository.owner, repository.name, params("url")).map{ case (webhook, events) =>
html.edithooks(webhook, events, repository, flash.get("info"), false)
} getOrElse NotFound()
})
/**
* Update web hook settings (webHookForm(true) requires the URL to already exist).
*/
post("/:owner/:repository/settings/hooks/edit", webHookForm(true))(ownerOnly { (form, repository) =>
updateWebHook(repository.owner, repository.name, form.url, form.events, form.ctype, form.token)
flash += "info" -> s"webhook ${form.url} updated"
redirect(s"/${repository.owner}/${repository.name}/settings/hooks")
})
/**
* Display the danger zone (transfer / delete / GC operations).
*/
get("/:owner/:repository/settings/danger")(ownerOnly {
html.danger(_, flash.get("info"))
})
/**
* Transfer repository ownership: under a per-repository lock, rename the owner
* in the database and move the git and wiki directories to the new owner's path.
* A no-op when the new owner equals the current one.
*/
post("/:owner/:repository/settings/transfer", transferForm)(ownerOnly { (form, repository) =>
// Change repository owner
if(repository.owner != form.newOwner){
// Lock keyed by the repository to serialize concurrent filesystem/database moves.
LockUtil.lock(s"${repository.owner}/${repository.name}"){
// Update database
renameRepository(repository.owner, repository.name, form.newOwner, repository.name)
// Move git repository
defining(getRepositoryDir(repository.owner, repository.name)){ dir =>
FileUtils.moveDirectory(dir, getRepositoryDir(form.newOwner, repository.name))
}
// Move wiki repository
defining(getWikiRepositoryDir(repository.owner, repository.name)){ dir =>
FileUtils.moveDirectory(dir, getWikiRepositoryDir(form.newOwner, repository.name))
}
}
}
redirect(s"/${form.newOwner}/${repository.name}")
})
  /**
   * Delete the repository.
   *
   * Removes database records first, then the git, wiki, and temporary
   * directories, under the repository lock. Afterwards redirects to the
   * (former) owner's page.
   */
  post("/:owner/:repository/settings/delete")(ownerOnly { repository =>
    LockUtil.lock(s"${repository.owner}/${repository.name}") {
      deleteRepository(repository.owner, repository.name)

      FileUtils.deleteDirectory(getRepositoryDir(repository.owner, repository.name))
      FileUtils.deleteDirectory(getWikiRepositoryDir(repository.owner, repository.name))
      FileUtils.deleteDirectory(getTemporaryDir(repository.owner, repository.name))
    }
    redirect(s"/${repository.owner}")
  })
  /**
   * Run GC
   *
   * Executes JGit garbage collection on the bare repository while holding the
   * repository lock, then redirects back to the danger zone with a message.
   */
  post("/:owner/:repository/settings/gc")(ownerOnly { repository =>
    LockUtil.lock(s"${repository.owner}/${repository.name}") {
      using(Git.open(getRepositoryDir(repository.owner, repository.name))) { git =>
        git.gc();
      }
    }
    flash += "info" -> "Garbage collection has been executed."
    redirect(s"/${repository.owner}/${repository.name}/settings/danger")
  })
  /**
   * Provides duplication check for web hook url.
   *
   * With `needExists = true` the url must already be registered (edit form);
   * with `needExists = false` it must not be (registration form). Returns an
   * error message when the registration state does not match the expectation.
   */
  private def webHook(needExists: Boolean): Constraint = new Constraint() {
    override def validate(name: String, value: String, messages: Messages): Option[String] =
      if (getWebHook(params("owner"), params("repository"), value).isDefined != needExists) {
        Some(if (needExists) {
          "URL had not been registered yet."
        } else {
          "URL had been registered already."
        })
      } else {
        None
      }
  }
private def webhookEvents = new ValueType[Set[WebHook.Event]]{
def convert(name: String, params: Map[String, String], messages: Messages): Set[WebHook.Event] = {
WebHook.Event.values.flatMap { t =>
params.get(name + "." + t.name).map(_ => t)
}.toSet
}
def validate(name: String, params: Map[String, String], messages: Messages): Seq[(String, String)] = if(convert(name,params,messages).isEmpty){
Seq(name -> messages("error.required").format(name))
} else {
Nil
}
}
// /**
// * Provides Constraint to validate the collaborator name.
// */
// private def collaborator: Constraint = new Constraint(){
// override def validate(name: String, value: String, messages: Messages): Option[String] =
// getAccountByUserName(value) match {
// case None => Some("User does not exist.")
//// case Some(x) if(x.isGroupAccount)
//// => Some("User does not exist.")
// case Some(x) if(x.userName == params("owner") || getCollaborators(params("owner"), params("repository")).contains(x.userName))
// => Some(value + " is repository owner.") // TODO also group members?
// case _ => None
// }
// }
/**
* Duplicate check for the rename repository name.
*/
private def renameRepositoryName: Constraint = new Constraint(){
override def validate(name: String, value: String, params: Map[String, String], messages: Messages): Option[String] =
params.get("repository").filter(_ != value).flatMap { _ =>
params.get("owner").flatMap { userName =>
getRepositoryNamesOfUser(userName).find(_ == value).map(_ => "Repository already exists.")
}
}
}
/**
*
*/
private def featureOption: Constraint = new Constraint(){
override def validate(name: String, value: String, params: Map[String, String], messages: Messages): Option[String] =
if(Seq("DISABLE", "PRIVATE", "PUBLIC", "ALL").contains(value)) None else Some("Option is invalid.")
}
  /**
   * Provides Constraint to validate the repository transfer user.
   *
   * Rejects unknown users, the current owner, and users that already own a
   * repository with the same name.
   */
  private def transferUser: Constraint = new Constraint() {
    override def validate(name: String, value: String, messages: Messages): Option[String] =
      getAccountByUserName(value) match {
        case None => Some("User does not exist.")
        case Some(x) => if (x.userName == params("owner")) {
          Some("This is current repository owner.")
        } else {
          // Valid (None) unless the target user already has a same-named repository.
          params.get("repository").flatMap { repositoryName =>
            getRepositoryNamesOfUser(x.userName).find(_ == repositoryName).map { _ => "User already has same repository." }
          }
        }
      }
  }
}
| zhoffice/gitbucket | src/main/scala/gitbucket/core/controller/RepositorySettingsController.scala | Scala | apache-2.0 | 17,700 |
package org.jetbrains.plugins.scala.dfa.lattice
/** Type class evidence that the lattice `L` has a least element (bottom). */
trait HasBottom[+L] {
  /** The least element of `L`. */
  def bottom: L
}
/** Mix-in syntax for summoning a lattice's bottom element via [[HasBottom]]. */
trait HasBottomOps {
  final def latticeBottom[L](implicit provider: HasBottom[L]): L = provider.bottom
}
object HasBottomOps extends HasBottomOps | JetBrains/intellij-scala | scala/dfa/src/org/jetbrains/plugins/scala/dfa/lattice/HasBottom.scala | Scala | apache-2.0 | 237 |
package ru.pavlenov.scala.homework.rosalind.spectrometry
import ru.pavlenov.scala.libs.peptide.AminoAcid
import ru.pavlenov.scala.utils.File
/**
* ⓭ + 14
* Какой сам? by Pavlenov Semen 15.07.14.
* Matching a Spectrum to a Protein
* http://rosalind.info/problems/prsm/
*
* Given:
* A positive integer n followed by a collection of n protein strings s1, s2, ..., sn and a multiset R of positive numbers (corresponding to the complete spectrum of some unknown protein string).
*
* Return:
* The maximum multiplicity of R⊖S[sk] taken over all strings sk, followed by the string sk for which this maximum multiplicity occurs (you may output any such value if multiple solutions exist).
*/
object Prsm {
  /**
   * Solves the PRSM problem: reads the dataset, then prints the maximum
   * multiplicity followed by the protein string achieving it.
   */
  def start() {
    println("Matching a Spectrum to a Protein")
    println("from http://rosalind.info/problems/prsm/")
    println("==========================")

    // Dataset layout: line 0 = n, lines 1..n = protein strings,
    // remaining lines = observed complete-spectrum masses.
    val data = File.fromData(this)
    val n = data(0).toInt
    val aa = new Array[AminoAcid](n)
    val masses = new Array[Double](data.length-n-1)
    for (i <- 1 to n) aa(i-1) = AminoAcid(data(i))
    for (i <- n+1 until data.length ) masses(i-n-1) = data(i).toDouble

    // println(n)
    // println(aa.mkString(", "))
    // println(masses.mkString(", "))

    // Track the protein whose complete spectrum best matches the measured
    // masses, i.e. the largest multiplicity in the Minkowski difference.
    var max = 0
    var str = ""
    for (a <- aa) {
      val cs = a.completeSpectrum()
      val md = AminoAcid.MinkowskiDiff(masses, cs)
      // Most frequent difference value; grouping by the uppercased string form
      // is presumably a normalisation of the mass difference -- TODO confirm.
      val m = md.groupBy(c => c.toUpperCase).map(e => (e._1, e._2.length)).maxBy(e => e._2)
      // ">=" keeps the last protein on ties; any optimal answer is accepted.
      if (m._2 >= max) { max = m._2; str = a.toString }
    }

    println(max)
    println(str)
  }
} | laser13/rosalind | src/scala/ru/pavlenov/scala/homework/rosalind/spectrometry/Prsm.scala | Scala | apache-2.0 | 1,591 |
package com.geishatokyo.tezcatlipoca.util
import collection.mutable.HashMap
import collection.mutable.SynchronizedMap
import java.lang.reflect.Modifier
/**
* Created with IntelliJ IDEA.
* User: takezou
* Date: 12/06/22
* Time: 1:43
* To change this template use File | Settings | File Templates.
*/
/**
 * Reflection helper that discovers Scala-style properties: a public zero-arg
 * getter `foo` paired with a public single-arg setter `foo_$eq` whose
 * parameter type equals the getter's return type.
 */
object PropFinder {

  /**
   * Lists every public getter/setter property pair of `clazz` as a [[Prop]],
   * attaching the backing field when one is declared on the class or any of
   * its superclasses.
   */
  def listUpProps(clazz: Class[_]): Seq[Prop] = {
    val methods = clazz.getMethods
    // Key setters by "name:typeName" so getters are matched on both name and type.
    val maybeSetters = methods.withFilter(m => {
      Modifier.isPublic(m.getModifiers) &&
        (m.getName.endsWith("_$eq") && m.getParameterTypes.length == 1)
    }).map(m => {
      val name = m.getName
      // Drop the 4-character "_$eq" suffix to recover the property name.
      (name.substring(0, name.length - 4) + ":" + m.getParameterTypes()(0).getName) -> m
    }).toMap

    methods.withFilter(m => {
      Modifier.isPublic(m.getModifiers) &&
        m.getParameterTypes().length == 0 &&
        maybeSetters.contains(m.getName + ":" + m.getReturnType.getName)
    }).map(getter => {
      val name = getter.getName
      // Fixes the old TODO: an inherited/overridden field is declared on a
      // superclass, so search the whole class hierarchy, not just `clazz`.
      val field = findDeclaredField(clazz, name)
      Prop(getter, maybeSetters(name + ":" + getter.getReturnType.getName), field)
    }).toSeq
  }

  /** Finds a field named `name` in `clazz` or the nearest superclass that declares it. */
  private def findDeclaredField(clazz: Class[_], name: String): Option[java.lang.reflect.Field] =
    Iterator.iterate[Class[_]](clazz)(_.getSuperclass)
      .takeWhile(_ != null)
      .map { c =>
        try Some(c.getDeclaredField(name))
        catch { case _: NoSuchFieldException => None }
      }
      .collectFirst { case Some(f) => f }
}
| takezoux2/tezcatlipoca | src/main/scala/com/geishatokyo/tezcatlipoca/util/PropFinder.scala | Scala | mit | 1,293 |
/*
* Copyright (C) 2005, The Beangle Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.beangle.commons.text.escape
/** Escape javascript
* borrow from freemarker.template.utility.StringUtil
*/
object JavascriptEscaper {

  // Classification results for a character: NO_ESC -> copy through unchanged,
  // ESC_HEXA -> emit \xNN or \uNNNN, ESC_BACKSLASH -> emit backslash + the char.
  // Any value above 0x20 is the literal letter of a short escape ('n', 'r', ...).
  private val NO_ESC = 0
  private val ESC_HEXA = 1
  private val ESC_BACKSLASH = 3

  /**
   * Escapes `s` for embedding inside a JavaScript string literal; with
   * `json = true` the output also stays valid JSON (no \xNN and no \'
   * escapes). Returns the original string instance when nothing needed
   * escaping.
   */
  def escape(s: String, json: Boolean): String = {
    val ln = s.length
    // Allocated lazily at the first character that needs escaping; until then
    // the input can be returned as-is.
    var sb: StringBuilder = null
    for (i <- 0 until ln) {
      val c = s.charAt(i)
      if (!(c > '>' && c < 0x7F && c != '\\') && c != ' ' && !(c >= 0xA0 && c < 0x2028)) { // skip common chars
        val escapeType: Int =
          if (c <= 0x1F) { // control chars range 1
            c match {
              case '\n' => 'n'
              case '\r' => 'r'
              case '\f' => 'f'
              case '\b' => 'b'
              case '\t' => 't'
              case _ => ESC_HEXA
            }
          }
          else if (c == '"') ESC_BACKSLASH
          else if (c == '\'') if json then NO_ESC else ESC_BACKSLASH
          else if (c == '\\') ESC_BACKSLASH
          // '/' is only escaped directly after '<' (guards against "</script>").
          else if (c == '/' && (i == 0 || s.charAt(i - 1) == '<')) ESC_BACKSLASH
          else if (c == '>') { // against "]]> and "-->"
            var dangerous = false
            if (i == 0) dangerous = true
            else {
              val prevC = s.charAt(i - 1)
              if (prevC == ']' || prevC == '-') if (i == 1) dangerous = true
              else {
                val prevPrevC = s.charAt(i - 2)
                dangerous = prevPrevC == prevC
              }
              else dangerous = false
            }
            if dangerous then (if json then ESC_HEXA else ESC_BACKSLASH) else NO_ESC
          } else {
            if (c == '<') { // against "<!"
              var dangerous = false
              if (i == ln - 1) dangerous = true
              else {
                val nextC = s.charAt(i + 1)
                dangerous = nextC == '!' || nextC == '?'
              }
              if dangerous then ESC_HEXA else NO_ESC
            } else if ((c >= 0x7F && c <= 0x9F) || (c == 0x2028 || c == 0x2029)) ESC_HEXA //control chars range 2 or UNICODE line terminators
            else NO_ESC
          }
        if (escapeType != NO_ESC) { // If needs escaping
          if (sb == null) {
            sb = new StringBuilder(ln + 6)
            // Copy the untouched prefix before the first escaped character.
            sb.append(s.substring(0, i))
          }
          sb.append('\\')
          if (escapeType > 0x20) sb.append(escapeType.toChar)
          else if (escapeType == ESC_HEXA) if (!json && c < 0x100) {
            sb.append('x')
            sb.append(toHexDigit(c >> 4))
            sb.append(toHexDigit(c & 0xF))
          }
          else {
            sb.append('u')
            val cp = c
            sb.append(toHexDigit((cp >> 12) & 0xF))
            sb.append(toHexDigit((cp >> 8) & 0xF))
            sb.append(toHexDigit((cp >> 4) & 0xF))
            sb.append(toHexDigit(cp & 0xF))
          }
          else sb.append(c) // escapeType == ESC_BACKSLASH
        } else {
          if (sb != null) sb.append(c)
        }
      } else {
        if (sb != null) sb.append(c)
      }
    }
    // for each characters}
    if (sb == null) s else sb.toString
  }

  /** Maps 0-15 to the uppercase hexadecimal digit '0'-'9' / 'A'-'F'. */
  private def toHexDigit(d: Int): Char = (if d < 0xA then d + '0' else d - 0xA + 'A').asInstanceOf[Char]
}
| beangle/commons | text/src/main/scala/org/beangle/commons/text/escape/JavascriptEscaper.scala | Scala | lgpl-3.0 | 3,948 |
/*
* #%L
* Core runtime for OOXOO
* %%
* Copyright (C) 2006 - 2017 Open Design Flow
* %%
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* #L%
*/
package com.idyria.osi.ooxoo.core
import com.idyria.osi.ooxoo.core.buffers.structural.io.sax.StAXIOBuffer
import com.idyria.osi.ooxoo.core.buffers.structural.XList
import com.idyria.osi.ooxoo.core.buffers.structural.xelement
import com.idyria.osi.ooxoo.core.buffers.datatypes.XSDStringBuffer
import com.idyria.osi.ooxoo.core.buffers.structural.xattribute
import com.idyria.osi.ooxoo.core.buffers.structural.ElementBuffer
import org.scalatest.funsuite.AnyFunSuite
import java.io.ByteArrayOutputStream
/** Serialization tests: element buffers streamed to XML via StAXIOBuffer. */
class StreamOutTest extends AnyFunSuite {

  // Fixture with one of every mapping flavour: attributes, single and repeated
  // sub-elements, plus an unannotated field that must be ignored by the writer.
  @xelement
  class TestRoot extends ElementBuffer {

    // Base Fields
    //---------------------
    @xattribute
    var attr1: XSDStringBuffer = null

    @xattribute
    var attr2: XSDStringBuffer = null

    @xelement(name = "subStringMultiple")
    var subStringMultiple: XList[XSDStringBuffer] = XList[XSDStringBuffer] { new XSDStringBuffer }

    @xelement
    var subSingle: TestRootSub = null

    @xelement
    var subMultiple = XList { new TestRootSub }

    @xelement
    var subStringSingle: XSDStringBuffer = null

    // No annotation: must not appear in the serialized output.
    var irrelevantField: XSDStringBuffer = null
  }

  @xelement()
  class TestRootSub extends ElementBuffer {
    @xattribute
    var attr1: XSDStringBuffer = "Default Value"
  }

  // Fixture using lazy vals for sub-elements, one of them with a custom name.
  @xelement
  class LazyValTestRoot extends ElementBuffer {

    // Base Fields
    //---------------------
    @xattribute
    var a: XSDStringBuffer = null

    @xattribute
    var b: XSDStringBuffer = null

    @xelement(name = "subStringMultiple")
    var subStringMultiple: XList[XSDStringBuffer] = XList[XSDStringBuffer] { new XSDStringBuffer }

    @xelement
    lazy val c: TestRootSub = new TestRootSub

    @xelement(name = "TestRootSubD")
    lazy val d: TestRootSub = new TestRootSub
  }

  test("Stream out with lazy val") {

    var root = new LazyValTestRoot
    // Force only `c`; `d` stays un-initialized (output content not asserted here).
    root.c

    //-- Add IO
    var outStream = new ByteArrayOutputStream
    StAXIOBuffer.writeToOutputStream(root, outStream)

    // Results
    //---------------
    assert(outStream.toByteArray().length > 0, "Data must not be empty")
    println("Result: " + new String(outStream.toByteArray()))
  }

  test("Stream out a simple element") {

    //-- Instanciate Root
    var root = new TestRoot
    root.attr1 = "test"
    root.attr2 = "test"
    root.subStringSingle = "test"

    root.subStringMultiple += root.subStringMultiple.createBuffer
    root.subStringMultiple.last.dataFromString("testM")
    root.subStringMultiple += root.subStringMultiple.createBuffer
    root.subStringMultiple.last.dataFromString("testM")
    root.subStringMultiple += root.subStringMultiple.createBuffer
    root.subStringMultiple.last.dataFromString("testM")

    root.subSingle = new TestRootSub
    root.subMultiple += new TestRootSub
    root.subMultiple += new TestRootSub

    //-- Add IO
    var outStream = new ByteArrayOutputStream
    StAXIOBuffer.writeToOutputStream(root, outStream)

    // Results
    //---------------
    assert(outStream.toByteArray().length > 0, "Data must not be empty")
    println("Result: " + new String(outStream.toByteArray()))
    // After writing, the IO buffer must have been detached from the chain.
    assert(root.getNextBuffer == null, "IO Buffer must have dissappeard")
  }

  // Element that is simultaneously a simple data type (string content) and an
  // element with an attribute.
  @xelement(name = "ElementSimpleDataType")
  class ElementSimpleDataType extends XSDStringBuffer with ElementBuffer {
    @xattribute
    var attr: XSDStringBuffer = null
  }

  test("Streamout with SimpleDataType as element") {

    var elt = new ElementSimpleDataType
    elt.data = "testelt"
    elt.attr = "testattribute"

    // Streamout
    var outStream = new ByteArrayOutputStream
    StAXIOBuffer.writeToOutputStream(elt, outStream)
    println("Result: " + new String(outStream.toByteArray()))

    assert(outStream.toByteArray().length > 0, "Data must not be empty")
    // Output must be one ElementSimpleDataType element carrying at least one attribute.
    assert(new String(outStream.toByteArray()).matches(".*<ElementSimpleDataType .+</ElementSimpleDataType>"))
  }
}
| richnou/ooxoo-core | ooxoo-core/src/test/scala_old/com/idyria/osi/ooxoo/core/StreamOutTest.scala | Scala | agpl-3.0 | 4,655 |
package knub.master_thesis.welda
/**
 * Topic-model hyper-parameter bundle: per-topic `alpha` priors, the symmetric
 * word prior `beta`, its precomputed total `betaSum` (presumably
 * beta * vocabulary size -- TODO confirm against the sampler), and the number
 * of topics.
 */
case class TopicModelInfo(alpha: Array[Double], beta: Double, betaSum: Double, numTopics: Int)
| knub/master-thesis | code/scala/src/main/scala/knub/master_thesis/welda/TopicModelInfo.scala | Scala | apache-2.0 | 129 |
package com.codacy.client.stash
import play.api.data.validation.ValidationError
object JsResultHelper {
  /** Wraps a message into a Play `ValidationError` for use in JsResult failures. */
  def error(error: String): ValidationError = ValidationError(error)
}
| codacy/stash-scala-client | src/main/play_json_2.5-/com/codacy/client/stash/JsResultHelper.scala | Scala | apache-2.0 | 179 |
/* sbt -- Simple Build Tool
* Copyright 2009, 2010 Mark Harrah
*/
package sbt
package compiler
import xsbti.ArtifactInfo
import scala.util
import java.io.File
import CompilerArguments.{abs, absString, BootClasspathOption}
/** Forms the list of options that is passed to the compiler from the required inputs and other options.
 * The directory containing scala-library.jar and scala-compiler.jar (scalaLibDirectory) is required in
 * order to add these jars to the boot classpath. The 'scala.home' property must be unset because Scala
 * puts jars in that directory on the bootclasspath. Because we use multiple Scala versions,
 * this would lead to compiling against the wrong library jar.*/
final class CompilerArguments(scalaInstance: xsbti.compile.ScalaInstance, cp: xsbti.compile.ClasspathOptions)
{
  /**
   * Assembles the full compiler command line in this order: user options,
   * -d (when an output directory is given), -bootclasspath (when auto-boot is
   * enabled), -classpath, then the absolute source paths.
   */
  def apply(sources: Seq[File], classpath: Seq[File], outputDirectory: Option[File], options: Seq[String]): Seq[String] =
  {
    checkScalaHomeUnset()
    val cpWithCompiler = finishClasspath(classpath)
    // Scala compiler's treatment of empty classpath is troublesome (as of 2.9.1).
    // We append a random dummy element as workaround.
    val dummy = "dummy_" + Integer.toHexString(util.Random.nextInt)
    val classpathOption = Seq("-classpath", if(cpWithCompiler.isEmpty) dummy else absString(cpWithCompiler))
    val outputOption = outputDirectory map {out => Seq("-d", out.getAbsolutePath)} getOrElse Seq()
    options ++ outputOption ++ bootClasspathOption(hasLibrary(classpath)) ++ classpathOption ++ abs(sources)
  }

  /** Filters the Scala library from the classpath (when configured) and appends the compiler/extra jars (when configured). */
  def finishClasspath(classpath: Seq[File]): Seq[File] =
    filterLibrary(classpath) ++ include(cp.compiler, scalaInstance.compilerJar) ++ include(cp.extra, scalaInstance.otherJars : _*)

  private def include(flag: Boolean, jars: File*) = if(flag) jars else Nil

  // Absolute paths, sorted lexicographically.
  protected def abs(files: Seq[File]) = files.map(_.getAbsolutePath).sortWith(_ < _)

  // See the class scaladoc for why 'scala.home' must not be set.
  protected def checkScalaHomeUnset()
  {
    val scalaHome = System.getProperty("scala.home")
    assert((scalaHome eq null) || scalaHome.isEmpty, "'scala.home' should not be set (was " + scalaHome + ")")
  }

  def createBootClasspathFor(classpath: Seq[File]) = createBootClasspath(hasLibrary(classpath))

  /** Add the correct Scala library jar to the boot classpath if `addLibrary` is true.*/
  def createBootClasspath(addLibrary: Boolean) =
  {
    val originalBoot = System.getProperty("sun.boot.class.path", "")
    if(addLibrary)
    {
      val newBootPrefix = if(originalBoot.isEmpty) "" else originalBoot + File.pathSeparator
      newBootPrefix + scalaInstance.libraryJar.getAbsolutePath
    }
    else
      originalBoot
  }

  def filterLibrary(classpath: Seq[File]) = if(cp.filterLibrary) classpath filterNot isScalaLibrary else classpath

  def hasLibrary(classpath: Seq[File]) = classpath exists isScalaLibrary

  // A jar counts as "the Scala library" when its name contains the library
  // artifact id or exactly matches this instance's library jar file name.
  private[this] val isScalaLibrary: File => Boolean = file => {
    val name = file.getName
    (name contains ArtifactInfo.ScalaLibraryID) || file.getName == scalaInstance.libraryJar.getName
  }

  def bootClasspathOption(addLibrary: Boolean) = if(cp.autoBoot) Seq(BootClasspathOption, createBootClasspath(addLibrary)) else Nil

  def bootClasspath(addLibrary: Boolean) = if(cp.autoBoot) IO.parseClasspath(createBootClasspath(addLibrary)) else Nil

  def bootClasspathFor(classpath: Seq[File]) = bootClasspath(hasLibrary(classpath))
}
object CompilerArguments
{
  /** Compiler flag used to pass an explicit boot classpath to scalac. */
  val BootClasspathOption = "-bootclasspath"

  /** Absolute path of every file, preserving input order. */
  def abs(files: Seq[File]): Seq[String] = for(file <- files) yield file.getAbsolutePath
  def abs(files: Set[File]): Seq[String] = abs(files.toSeq)

  /** Absolute paths joined with the platform path separator. */
  def absString(files: Seq[File]): String = abs(files) mkString File.pathSeparator
  def absString(files: Set[File]): String = absString(files.toSeq)
}
| jamesward/xsbt | compile/CompilerArguments.scala | Scala | bsd-3-clause | 3,634 |
package com.wbillingsley.handy
/**
* Indicates a type has a "kind" string.
*
* Primarily this is used where we have collections of slightly disparate objects.
* For example, a course might contain many different kinds of assessment item. Although at
* runtime, we can use the type of the object to determine what it is, when we receive it in
* serialised form (from a database, or from JSON) we need to know what kind it is in order
* to know what method to call to recreate the object.
*
* The simple solution I often use is to put it in a string -- that way it can easily be
* inspected from the JSON or the object alike.
*/
trait HasKind {
  /** Discriminator string identifying the concrete kind of this value. */
  def kind: String
}
/** Placeholder [[HasKind]] value used where no real content is available. */
object EmptyKind extends HasKind {
  val kind = "empty"
}
/** Wraps a failure so it can travel through kind-dispatched collections. */
case class FaultyKind(t: Throwable) extends HasKind {
  val kind = "faulty"
}
| wbillingsley/handy | handy/src/main/scala/com/wbillingsley/handy/HasKind.scala | Scala | mit | 813 |
package com.airport
/**
* The aircraft protocol used for interaction with aircraft modeled as an actor domain
* with messaging as the communication.
*/
object AircraftProtocol {

  // Sealed root of all commands an aircraft actor understands.
  sealed trait AircraftProtocolMessage

  final case class ChangeAltitude(altitude: Double) extends AircraftProtocolMessage
  final case class ChangeSpeed(speed: Double) extends AircraftProtocolMessage
  final case class ChangeHeading(heading: Double) extends AircraftProtocolMessage
  final case class BoardPassenger(passenger: Passenger) extends AircraftProtocolMessage
  final case class AddWeather(weather: Weather) extends AircraftProtocolMessage

  // NOTE(review): OK does not extend AircraftProtocolMessage, so it sits
  // outside the sealed command hierarchy -- confirm this is intentional
  // (e.g. an acknowledgement reply rather than a command).
  final case object OK
}
| ironfish/reactive-application-development-scala | chapter5_001_airport/src/main/scala/com/airport/bAircraftProtocol.scala | Scala | apache-2.0 | 658 |
package abeel.genometools.faq
import java.util.Properties
import java.io.File
import atk.util.Tool
import net.sf.samtools.SAMFileReader
import scala.collection.JavaConversions._
import be.abeel.util.FrequencyMap
import be.abeel.util.FrequencyMapUtils
import abeel.genometools.Main
import java.util.HashMap
import atk.compbio.fastq.FastQFile
import atk.compbio.DNAString
import atk.compbio.fastq.FastAFile
import java.io.PrintWriter
import atk.io.NixWriter
object Faq2GC extends Main {
  // CLI options: input FASTA/FASTQ file, optional output path, input-format
  // flag, and the GC window length in bases (default 1000).
  case class Config(val inputFile: File = null, val outputFile: File = null, val fastq: Boolean = false, val window: Int = 1000)
  // Shown in the genometools command listing.
  override val description = "Tool to draw GC statistics from fasta or fastq file."

  override val version = """
2016/10/04 Initial version included in genometools
"""
  /** Parses the command line with scopt and runs the GC computation. */
  override def main(args: Array[String]) {
    val parser = new scopt.OptionParser[Config]("java -jar genometools.jar faq2gc") {
      opt[File]('i', "input") required () action { (x, c) => c.copy(inputFile = x) } text ("Input file. By default FASTA formatted. If you have a FASTQ, use the --fq flag")
      opt[Unit]("fq") action { (x, c) => c.copy(fastq = true) } text ("If you have a FASTQ file, use this flag")
      opt[File]('o', "output") action { (x, c) => c.copy(outputFile = x) } text ("File where you want the output to be written")
      opt[Int]('w', "window") action { (x, c) => c.copy(window = x) } text ("Window length, default = " + new Config().window)
    }
    parser.parse(args, Config()) map { config =>
      assume(config.inputFile != null)
      processFile(config)
    }
  }
  /**
   * Streams the input sequences, cuts each into non-overlapping windows of
   * the configured length, and tallies windows per rounded GC percentage.
   * Trailing windows shorter than the window length, and windows containing
   * characters other than A/C/G/T, are skipped silently.
   */
  private def processFile(config: Config) {
    val gcIt = if (config.fastq)
      FastQFile(config.inputFile).map(fr => fr.seq).map(seq => seq.grouped(config.window))
    else
      FastAFile(config.inputFile).map(fr => fr.seq).map(seq => seq.grouped(config.window))

    // Histogram: rounded GC percentage -> number of windows.
    val map = scala.collection.mutable.Map[Long, Int]().withDefaultValue(0)
    for (sq <- gcIt.flatten) {
      if (sq.size == config.window) {
        val nt=sq.toUpperCase().groupBy(identity).mapValues { _.size }
        val gc=nt.getOrElse('C', 0)+nt.getOrElse('G', 0)
        val at=nt.getOrElse('A', 0)+nt.getOrElse('T', 0)
        // Only count windows made up entirely of unambiguous bases.
        if(at+gc == config.window){
          val fract=math.round((gc*100.0)/(at+gc))
          map(fract) +=1
        }else{
          // Window contains N or other ambiguity codes: deliberately ignored.
        }
      }
    }
    // Default output path is "<input>.gc" next to the input file.
    val pw = if(config.outputFile!=null) new NixWriter(config.outputFile, config) else new NixWriter(config.inputFile+".gc",config)
    map.toList.sortBy(_._1).map { case (x, y) => pw.println(x + "\\t" + y) }
    pw.close
  }
} | AbeelLab/genometools | scala/abeel/genometools/faq/Faq2GC.scala | Scala | gpl-3.0 | 2,632 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package play.api.libs.concurrent
import java.util.concurrent.atomic.AtomicBoolean
import akka.Done
import akka.actor.ActorSystem
import akka.actor.CoordinatedShutdown
import akka.actor.CoordinatedShutdown._
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import com.typesafe.config.ConfigValueFactory
import org.specs2.mutable.Specification
import play.api.inject.DefaultApplicationLifecycle
import play.api.internal.libs.concurrent.CoordinatedShutdownSupport
import play.api.Configuration
import play.api.Environment
import play.api.PlayException
import scala.concurrent.Await
import scala.concurrent.Future
import scala.concurrent.duration._
class ActorSystemProviderSpec extends Specification {
  // Akka's upper bound for phase timeouts (~Int.MaxValue millis); Play falls
  // back to it when no explicit shutdown timeout is configured.
  val akkaMaxDuration = (Int.MaxValue / 1000).seconds

  val akkaTimeoutKey = "akka.coordinated-shutdown.phases.actor-system-terminate.timeout"
  val playTimeoutKey = "play.akka.shutdown-timeout"
  val akkaExitJvmKey = "akka.coordinated-shutdown.exit-jvm"

  "ActorSystemProvider" should {
    s"use '$playTimeoutKey'" in {
      testTimeout(s"$playTimeoutKey = 12s", 12.seconds)
    }

    s"use Akka's max duration if '$playTimeoutKey = null' " in {
      testTimeout(s"$playTimeoutKey = null", akkaMaxDuration)
    }

    // The Play key always wins over the raw Akka key.
    s"use Akka's max duration when no '$playTimeoutKey' is defined, ignoring '$akkaTimeoutKey'" in {
      testTimeout(s"$akkaTimeoutKey = 21s", akkaMaxDuration)
    }

    s"use Akka's max duration when '$playTimeoutKey = null', ignoring '$akkaTimeoutKey'" in {
      testTimeout(s"$playTimeoutKey = null\\n$akkaTimeoutKey = 17s", akkaMaxDuration)
    }

    s"fail to start if '$akkaExitJvmKey = on'" in {
      withConfiguration { config =>
        ConfigFactory.parseString(s"$akkaExitJvmKey = on").withFallback(config)
      }(identity) must throwA[PlayException]
    }

    s"start as expected if '$akkaExitJvmKey = off'" in {
      withConfiguration { config =>
        ConfigFactory.parseString(s"$akkaExitJvmKey = off").withFallback(config)
      } { actorSystem =>
        actorSystem.dispatcher must not beNull
      }
    }

    s"start as expected with the default configuration for $akkaExitJvmKey" in {
      withConfiguration(identity) { actorSystem =>
        actorSystem.dispatcher must not beNull
      }
    }

    "run all the phases for coordinated shutdown" in {
      // The default phases of Akka CoordinatedShutdown are ordered as a DAG by defining the
      // dependencies between the phases. That means we don't need to test each phase, but
      // just the first and the last one. We are then adding a custom phase so that we
      // can assert that Play is correctly executing CoordinatedShutdown.

      // First phase is PhaseBeforeServiceUnbind
      val phaseBeforeServiceUnbindExecuted = new AtomicBoolean(false)
      // Last phase is PhaseActorSystemTerminate
      val phaseActorSystemTerminateExecuted = new AtomicBoolean(false)

      val config = Configuration
        .load(Environment.simple())
        .underlying
        // Add a custom phase which executes after the last one defined by Akka.
        .withValue(
          "akka.coordinated-shutdown.phases.custom-defined-phase.depends-on",
          ConfigValueFactory.fromIterable(java.util.Arrays.asList("actor-system-terminate"))
        )

      // Custom phase CustomDefinedPhase
      val PhaseCustomDefinedPhase = "custom-defined-phase"
      val phaseCustomDefinedPhaseExecuted = new AtomicBoolean(false)

      val actorSystem = ActorSystemProvider.start(getClass.getClassLoader, Configuration(config))
      val cs = new CoordinatedShutdownProvider(actorSystem, new DefaultApplicationLifecycle()).get

      // Each task just flips its flag and completes immediately.
      def run(atomicBoolean: AtomicBoolean) = () => {
        atomicBoolean.set(true)
        Future.successful(Done)
      }

      cs.addTask(PhaseBeforeServiceUnbind, "test-BeforeServiceUnbindExecuted")(run(phaseBeforeServiceUnbindExecuted))
      cs.addTask(PhaseActorSystemTerminate, "test-ActorSystemTerminateExecuted")(run(phaseActorSystemTerminateExecuted))
      cs.addTask(PhaseCustomDefinedPhase, "test-PhaseCustomDefinedPhaseExecuted")(run(phaseCustomDefinedPhaseExecuted))

      CoordinatedShutdownSupport.syncShutdown(actorSystem, CoordinatedShutdown.UnknownReason)

      phaseBeforeServiceUnbindExecuted.get() must equalTo(true)
      phaseActorSystemTerminateExecuted.get() must equalTo(true)
      phaseCustomDefinedPhaseExecuted.get() must equalTo(true)
    }
  }

  // Starts an ActorSystem from the (possibly reconfigured) application config,
  // runs `block` against it, and always shuts the system down afterwards.
  private def withConfiguration[T](reconfigure: Config => Config)(block: ActorSystem => T): T = {
    val config = reconfigure(Configuration.load(Environment.simple()).underlying)
    val actorSystem = ActorSystemProvider.start(getClass.getClassLoader, Configuration(config))
    try block(actorSystem)
    finally {
      Await.ready(CoordinatedShutdown(actorSystem).run(CoordinatedShutdown.UnknownReason), 5.seconds)
    }
  }

  // Applies `configString` on top of the config (with the Play timeout key
  // removed) and asserts the effective actor-system-terminate timeout.
  private def testTimeout(configString: String, expected: Duration) = {
    withConfiguration { config =>
      config.withoutPath(playTimeoutKey).withFallback(ConfigFactory.parseString(configString))
    } { actorSystem =>
      val akkaTimeout = actorSystem.settings.config.getDuration(akkaTimeoutKey)
      Duration.fromNanos(akkaTimeout.toNanos) must equalTo(expected)
    }
  }
}
| benmccann/playframework | core/play/src/test/scala/play/api/libs/concurrent/ActorSystemProviderSpec.scala | Scala | apache-2.0 | 5,346 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.scaladsl.persistence.cassandra
import akka.actor.ActorSystem
import akka.event.Logging
import akka.persistence.cassandra.session.CassandraSessionSettings
import akka.stream.scaladsl
import akka.Done
import akka.NotUsed
import com.datastax.driver.core._
import com.lightbend.lagom.internal.persistence.cassandra.CassandraKeyspaceConfig
import com.lightbend.lagom.internal.persistence.cassandra.CassandraReadSideSessionProvider
import scala.annotation.varargs
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
/**
* Data Access Object for Cassandra. The statements are expressed in
* <a href="http://docs.datastax.com/en/cql/3.3/cql/cqlIntro.html">Cassandra Query Language</a>
* (CQL) syntax.
*
* The configured keyspace is automatically created if it doesn't already exists. The keyspace
* is also set as the current keyspace, i.e. it doesn't have to be qualified in the statements.
*
* All methods are non-blocking.
*/
final class CassandraSession(
system: ActorSystem,
settings: CassandraSessionSettings,
executionContext: ExecutionContext
) {
  // Auxiliary constructor: reads the session settings from the
  // "lagom.persistence.read-side.cassandra" config section and runs callbacks
  // on the configured read-side dispatcher.
  def this(system: ActorSystem) =
    this(
      system,
      settings = CassandraSessionSettings(
        system.settings.config.getConfig(
          "lagom.persistence.read-side.cassandra"
        )
      ),
      executionContext = system.dispatchers.lookup(
        system.settings.config.getString(
          "lagom.persistence.read-side.use-dispatcher"
        )
      )
    )
  private val log = Logging.getLogger(system, getClass)

  // Validate the keyspace configuration of the read-side section up front.
  CassandraKeyspaceConfig.validateKeyspace("lagom.persistence.read-side.cassandra", system.settings.config, log)

  /**
   * Internal API: the underlying akka-persistence-cassandra session that all
   * public methods of this class delegate to.
   */
  private[lagom] val delegate: akka.persistence.cassandra.session.scaladsl.CassandraSession =
    CassandraReadSideSessionProvider(system, settings, executionContext)
  /**
   * The `Session` of the underlying
   * <a href="http://datastax.github.io/java-driver/">Datastax Java Driver</a>.
   * Can be used in case you need to do something that is not provided by the
   * API exposed by this class. Be careful to not use blocking calls.
   */
  def underlying(): Future[Session] =
    delegate.underlying()
  /**
   * See <a href="http://docs.datastax.com/en/cql/3.3/cql/cql_using/useCreateTableTOC.html">Creating a table</a>.
   *
   * The returned `Future` is completed when the table has been created,
   * or if the statement fails.
   */
  def executeCreateTable(stmt: String): Future[Done] =
    delegate.executeCreateTable(stmt)
  /**
   * Create a `PreparedStatement` that can be bound and used in
   * `executeWrite` or `select` multiple times.
   *
   * The returned `Future` completes with the prepared statement, or fails
   * if preparation fails.
   */
  def prepare(stmt: String): Future[PreparedStatement] =
    delegate.prepare(stmt)
  /**
   * Execute several statements in a batch. First you must [[#prepare]] the
   * statements and bind its parameters.
   *
   * See <a href="http://docs.datastax.com/en/cql/3.3/cql/cql_using/useBatchTOC.html">Batching data insertion and updates</a>.
   *
   * The configured write consistency level is used if a specific consistency
   * level has not been set on the `BatchStatement`.
   *
   * The returned `Future` is completed when the batch has been
   * successfully executed, or if it fails.
   */
  def executeWriteBatch(batch: BatchStatement): Future[Done] =
    delegate.executeWriteBatch(batch)
  /**
   * Execute one statement. First you must [[#prepare]] the
   * statement and bind its parameters.
   *
   * See <a href="http://docs.datastax.com/en/cql/3.3/cql/cql_using/useInsertDataTOC.html">Inserting and updating data</a>.
   *
   * The configured write consistency level is used if a specific consistency
   * level has not been set on the `Statement`.
   *
   * The returned `Future` is completed when the statement has been
   * successfully executed, or if it fails.
   */
  def executeWrite(stmt: Statement): Future[Done] =
    delegate.executeWrite(stmt)
/**
 * Prepare, bind and execute one statement in one go.
 *
 * See <a href="http://docs.datastax.com/en/cql/3.3/cql/cql_using/useInsertDataTOC.html">Inserting and updating data</a>.
 *
 * The configured write consistency level is used.
 *
 * The returned `Future` is completed when the statement has been
 * successfully executed, or failed if it fails.
 */
@varargs
def executeWrite(stmt: String, bindValues: AnyRef*): Future[Done] =
  delegate.executeWrite(stmt, bindValues: _*)
/**
 * Execute a select statement. First you must [[#prepare]] the
 * statement and bind its parameters.
 *
 * See <a href="http://docs.datastax.com/en/cql/3.3/cql/cql_using/useQueryDataTOC.html">Querying tables</a>.
 *
 * The configured read consistency level is used if a specific consistency
 * level has not been set on the `Statement`.
 *
 * You can return this `Source` as a response in a `ServiceCall`
 * and the elements will be streamed to the client.
 * Otherwise you have to connect a `Sink` that consumes the messages from
 * this `Source` and then `run` the stream.
 *
 * Delegates to the underlying Akka Persistence Cassandra session.
 */
def select(stmt: Statement): scaladsl.Source[Row, NotUsed] =
  delegate.select(stmt)
/**
 * Prepare, bind and execute a select statement in one go.
 *
 * See <a href="http://docs.datastax.com/en/cql/3.3/cql/cql_using/useQueryDataTOC.html">Querying tables</a>.
 *
 * The configured read consistency level is used.
 *
 * You can return this `Source` as a response in a `ServiceCall`
 * and the elements will be streamed to the client.
 * Otherwise you have to connect a `Sink` that consumes the messages from
 * this `Source` and then `run` the stream.
 *
 * Delegates to the underlying Akka Persistence Cassandra session.
 */
@varargs
def select(stmt: String, bindValues: AnyRef*): scaladsl.Source[Row, NotUsed] =
  delegate.select(stmt, bindValues: _*)
/**
 * Execute a select statement. First you must [[#prepare]] the statement and
 * bind its parameters. Only use this method when you know that the result
 * is small, e.g. includes a `LIMIT` clause. Otherwise you should use the
 * `select` method that returns a `Source`.
 *
 * The configured read consistency level is used if a specific consistency
 * level has not been set on the `Statement`.
 *
 * The returned `Future` is completed with the found rows.
 */
def selectAll(stmt: Statement): Future[Seq[Row]] =
  delegate.selectAll(stmt)
/**
 * Prepare, bind and execute a select statement in one go. Only use this method
 * when you know that the result is small, e.g. includes a `LIMIT` clause.
 * Otherwise you should use the `select` method that returns a `Source`.
 *
 * The configured read consistency level is used.
 *
 * The returned `Future` is completed with the found rows.
 */
@varargs
def selectAll(stmt: String, bindValues: AnyRef*): Future[Seq[Row]] =
  delegate.selectAll(stmt, bindValues: _*)
/**
 * Execute a select statement that returns one row. First you must [[#prepare]] the
 * statement and bind its parameters.
 *
 * The configured read consistency level is used if a specific consistency
 * level has not been set on the `Statement`.
 *
 * The returned `Future` is completed with the first row,
 * if any.
 */
def selectOne(stmt: Statement): Future[Option[Row]] =
  delegate.selectOne(stmt)
/**
 * Prepare, bind and execute a select statement that returns one row.
 *
 * The configured read consistency level is used.
 *
 * The returned `Future` is completed with the first row,
 * if any.
 */
@varargs
def selectOne(stmt: String, bindValues: AnyRef*): Future[Option[Row]] =
  delegate.selectOne(stmt, bindValues: _*)
}
| lagom/lagom | persistence-cassandra/scaladsl/src/main/scala/com/lightbend/lagom/scaladsl/persistence/cassandra/CassandraSession.scala | Scala | apache-2.0 | 7,762 |
package com.twitter.scrooge.testutil
import org.junit.runner.RunWith
import org.scalatest.fixture.{WordSpec => FixtureWordSpec}
import org.scalatestplus.junit.JUnitRunner
import org.scalatest.matchers.must.Matchers
import org.scalatest.wordspec.AnyWordSpec
import org.scalatestplus.jmock.JMockCycleFixture
import org.scalatestplus.mockito.MockitoSugar
/** Base spec for scrooge tests: JUnit-runnable WordSpec with must-matchers and Mockito sugar. */
@RunWith(classOf[JUnitRunner])
abstract class Spec extends AnyWordSpec with Matchers with MockitoSugar
/** Base spec for JMock-based tests: fixture WordSpec that supplies a fresh JMock cycle per test. */
@RunWith(classOf[JUnitRunner])
abstract class JMockSpec extends FixtureWordSpec with Matchers with JMockCycleFixture
| twitter/scrooge | scrooge-generator-tests/src/test/scala/com/twitter/scrooge/testutil/Spec.scala | Scala | apache-2.0 | 575 |
package com.hyenawarrior.OldNorseGrammar.grammar.morphology
import com.hyenawarrior.OldNorseGrammar.grammar.morphophonology.ProductiveTransforms.SieversLaw
import org.junit.Assert.assertEquals
import org.junit.Test
/**
 * Tests for [[SieversLaw.restore]].
 *
 * Created by HyenaWarrior on 2018.03.18..
 */
class SieversLawTest {

  // "ríkj" is restored to "ríkij".
  @Test
  def testRestore(): Unit = {
    val restored = SieversLaw.restore("ríkj")
    assertEquals("ríkij", restored.get)
  }

  // An already-restored form yields no result.
  @Test
  def testRestore2(): Unit = assertEquals(None, SieversLaw.restore("ríkij"))

  @Test
  def testRestore3(): Unit = assertEquals(None, SieversLaw.restore("ríkija"))

  // "ríkja" is restored to "ríkija".
  @Test
  def testRestore4(): Unit = {
    val restored = SieversLaw.restore("ríkja")
    assertEquals("ríkija", restored.get)
  }
}
| HyenaSoftware/IG-Dictionary | OldNorseGrammarEngine/src/test/scala/com/hyenawarrior/OldNorseGrammar/grammar/morphology/SieversLawTest.scala | Scala | lgpl-3.0 | 715 |
package net.acira.bexprp.visitors
import net.acira.bexprp.core._
/**
 * Visitor that substitutes bound variables for free occurrences, according to
 * the supplied `variableBindings` map, while traversing an expression tree.
 */
class VariableBinder(variableBindings: Map[Variable, Boolean]) extends TraversingVisitor[Expression] {

  /** If a binding exists for the variable, substitute its bound form; otherwise keep visiting. */
  private def bindIfPossible(expression: Expression): Expression = expression match {
    case v: Variable =>
      variableBindings.get(v) match {
        case Some(value) => BoundVariable(v.literal, value)
        case None        => v
      }
    case other => other.accept(this)
  }

  override def visitNot(expression: Not) = Not(bindIfPossible(expression.operand))

  override def visitAnd(expression: And) =
    And(bindIfPossible(expression.left), bindIfPossible(expression.right))

  override def visitOr(expression: Or) =
    Or(bindIfPossible(expression.left), bindIfPossible(expression.right))

  override def visitImplication(expression: Implication) =
    Implication(bindIfPossible(expression.left), bindIfPossible(expression.right))

  override def visitLeftImplication(expression: LeftImplication) =
    LeftImplication(bindIfPossible(expression.left), bindIfPossible(expression.right))

  override def visitEquivalence(expression: Equivalence) =
    Equivalence(bindIfPossible(expression.left), bindIfPossible(expression.right))

  override def visitConstant(expression: Constant) = expression

  override def visitBoundVariable(expression: BoundVariable) = bindIfPossible(expression)

  override def visitFreeVariable(expression: FreeVariable) = bindIfPossible(expression)
}
| leoschweizer/bexprp | src/main/scala/net/acira/bexprp/visitors/VariableBinder.scala | Scala | mit | 1,374 |
package com.sksamuel.elastic4s.requests.searches.queries
import com.sksamuel.elastic4s.ext.OptionImplicits._
import com.sksamuel.elastic4s.requests.searches.ScoreMode
/** Query matching parent documents whose child documents of the given `type`
  * match the wrapped `query`.
  *
  * All builder methods return an updated copy; this instance is never mutated.
  * Option fields are uniformly built with `.some` from `OptionImplicits` for
  * consistency (previously a mix of `Some(...)` and `.some`).
  */
case class HasChildQuery(`type`: String,
                         query: Query,
                         scoreMode: ScoreMode,
                         boost: Option[Double] = None,
                         ignoreUnmapped: Option[Boolean] = None,
                         innerHit: Option[InnerHit] = None,
                         minChildren: Option[Int] = None,
                         maxChildren: Option[Int] = None,
                         queryName: Option[String] = None)
    extends Query {

  def boost(boost: Double): HasChildQuery = copy(boost = boost.some)

  def ignoreUnmapped(ignoreUnmapped: Boolean): HasChildQuery = copy(ignoreUnmapped = ignoreUnmapped.some)

  // Convenience for setting both child-count bounds at once.
  def minMaxChildren(min: Int, max: Int): HasChildQuery = minChildren(min).maxChildren(max)
  def minChildren(min: Int): HasChildQuery = copy(minChildren = min.some)
  def maxChildren(max: Int): HasChildQuery = copy(maxChildren = max.some)

  def innerHit(innerHit: InnerHit): HasChildQuery = copy(innerHit = innerHit.some)

  // Create an inner hit with the default options.
  def innerHit(name: String): HasChildQuery = copy(innerHit = InnerHit(name).some)

  def queryName(queryName: String): HasChildQuery = copy(queryName = queryName.some)
}
| sksamuel/elastic4s | elastic4s-domain/src/main/scala/com/sksamuel/elastic4s/requests/searches/queries/HasChildQuery.scala | Scala | apache-2.0 | 1,393 |
/** Returns a new array with all strictly positive elements first, followed by
 *  the non-positive ones; relative order within each group is preserved.
 *  Uses a single stable `partition` pass instead of two `filter` passes. */
def orderByPositive(a: Array[Int]): Array[Int] = {
  val (positives, rest) = a.partition(_ > 0)
  positives ++ rest
}

val a = Array(11, 3, -10, 21, 22, -34, 0, -10, -40, 56, -32)
val result = orderByPositive(a)

// The input array is left untouched; only `result` is reordered.
println(a.mkString(", "))
println(result.mkString(", "))
| demiazz/scala-impatient | chapter-03/exercise-04/main.scala | Scala | unlicense | 225 |
package org.apache.bigtop.bigpetstore
import org.junit.Test
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest._
import scala.collection.mutable.Stack
/** Minimal example showing FlatSpec-style assertions from the scala-test library. */
@RunWith(classOf[JUnitRunner])
class ScalaTestSample extends FlatSpec with Matchers {

  "This test" should "show an example of what we can do with the scala-test library" in {
    val intStack = new Stack[Int]
    intStack.push(1)
    intStack.push(2)
    // A stack pops in LIFO order: the last value pushed comes out first.
    intStack.pop() should be(2)
    intStack.pop() should be(1)
  }
}
| mbukatov/bigtop | bigtop-bigpetstore/src/test/scala/org/apache/bigtop/bigpetstore/ScalaTestSample.scala | Scala | apache-2.0 | 489 |
import scala.reflect.ClassTag
// Exercises `Tuple.tail` across arities 1..25 for tuples built three ways:
// from arrays via `Tuple.fromArray`, as tuple literals, and as `*:` cons chains.
// Each arity must be spelled out literally because every tuple literal has a
// distinct static type.
object Test {
  def main(args: Array[String]): Unit = {
    // Prepends 0 to an n-element tuple built from an array, then drops it again.
    def testArray[T: ClassTag](n: Int, elem: Int => T): Unit = {
      val t: Int *: Tuple = 0 *: Tuple.fromArray(Array.tabulate(n)(elem))
      println(t.tail)
    }

    for (i <- 0 to 25)
      testArray(i, j => j)

    // Literal tuples of every arity from 1 to 25.
    println(Tuple1(1).tail)
    println((1, 2).tail)
    println((1, 2, 3).tail)
    println((1, 2, 3, 4).tail)
    println((1, 2, 3, 4, 5).tail)
    println((1, 2, 3, 4, 5, 6).tail)
    println((1, 2, 3, 4, 5, 6, 7).tail)
    println((1, 2, 3, 4, 5, 6, 7, 8).tail)
    println((1, 2, 3, 4, 5, 6, 7, 8, 9).tail)
    println((1, 2, 3, 4, 5, 6, 7, 8, 9, 10).tail)
    println((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11).tail)
    println((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12).tail)
    println((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13).tail)
    println((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14).tail)
    println((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15).tail)
    println((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16).tail)
    println((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17).tail)
    println((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18).tail)
    println((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19).tail)
    println((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20).tail)
    println((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21).tail)
    println((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22).tail)
    println((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23).tail)
    println((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24).tail)
    println((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25).tail)

    // `*:`-constructed tuples of every arity from 1 to 25.
    println((1 *: Tuple()).tail)
    println((1 *: 2 *: Tuple()).tail)
    println((1 *: 2 *: 3 *: Tuple()).tail)
    println((1 *: 2 *: 3 *: 4 *: Tuple()).tail)
    println((1 *: 2 *: 3 *: 4 *: 5 *: Tuple()).tail)
    println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: Tuple()).tail)
    println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: Tuple()).tail)
    println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: 8 *: Tuple()).tail)
    println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: 8 *: 9 *: Tuple()).tail)
    println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: 8 *: 9 *: 10 *: Tuple()).tail)
    println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: 8 *: 9 *: 10 *: 11 *: Tuple()).tail)
    println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: 8 *: 9 *: 10 *: 11 *: 12 *: Tuple()).tail)
    println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: 8 *: 9 *: 10 *: 11 *: 12 *: 13 *: Tuple()).tail)
    println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: 8 *: 9 *: 10 *: 11 *: 12 *: 13 *: 14 *: Tuple()).tail)
    println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: 8 *: 9 *: 10 *: 11 *: 12 *: 13 *: 14 *: 15 *: Tuple()).tail)
    println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: 8 *: 9 *: 10 *: 11 *: 12 *: 13 *: 14 *: 15 *: 16 *: Tuple()).tail)
    println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: 8 *: 9 *: 10 *: 11 *: 12 *: 13 *: 14 *: 15 *: 16 *: 17 *: Tuple()).tail)
    println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: 8 *: 9 *: 10 *: 11 *: 12 *: 13 *: 14 *: 15 *: 16 *: 17 *: 18 *: Tuple()).tail)
    println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: 8 *: 9 *: 10 *: 11 *: 12 *: 13 *: 14 *: 15 *: 16 *: 17 *: 18 *: 19 *: Tuple()).tail)
    println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: 8 *: 9 *: 10 *: 11 *: 12 *: 13 *: 14 *: 15 *: 16 *: 17 *: 18 *: 19 *: 20 *: Tuple()).tail)
    println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: 8 *: 9 *: 10 *: 11 *: 12 *: 13 *: 14 *: 15 *: 16 *: 17 *: 18 *: 19 *: 20 *: 21 *: Tuple()).tail)
    println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: 8 *: 9 *: 10 *: 11 *: 12 *: 13 *: 14 *: 15 *: 16 *: 17 *: 18 *: 19 *: 20 *: 21 *: 22 *: Tuple()).tail)
    println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: 8 *: 9 *: 10 *: 11 *: 12 *: 13 *: 14 *: 15 *: 16 *: 17 *: 18 *: 19 *: 20 *: 21 *: 22 *: 23 *: Tuple()).tail)
    println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: 8 *: 9 *: 10 *: 11 *: 12 *: 13 *: 14 *: 15 *: 16 *: 17 *: 18 *: 19 *: 20 *: 21 *: 22 *: 23 *: 24 *: Tuple()).tail)
    println((1 *: 2 *: 3 *: 4 *: 5 *: 6 *: 7 *: 8 *: 9 *: 10 *: 11 *: 12 *: 13 *: 14 *: 15 *: 16 *: 17 *: 18 *: 19 *: 20 *: 21 *: 22 *: 23 *: 24 *: 25 *: Tuple()).tail)
  }
}
| dotty-staging/dotty | tests/run-deep-subtype/Tuple-tail.scala | Scala | apache-2.0 | 4,367 |
package com.twitter.finagle.http.util
import org.scalatest.funsuite.AnyFunSuite
/** Verifies that `HeaderKeyOrdering` compares header names case-insensitively. */
class HeaderKeyOrderingTest extends AnyFunSuite {

  test("keys are case insensitive") {
    val lower = "coffee"
    val upper = "COFFEE"
    val mixed = "coFfEe"

    // Every casing of the same header name compares as equal.
    assert(HeaderKeyOrdering.compare(lower, upper) == 0)
    assert(HeaderKeyOrdering.compare(upper, mixed) == 0)
    assert(HeaderKeyOrdering.compare(lower, mixed) == 0)
  }

  test("different keys are not equal") {
    // Genuinely different names must not compare as equal.
    assert(HeaderKeyOrdering.compare("coffee", "covfefe") != 0)
  }
}
| twitter/finagle | finagle-base-http/src/test/scala/com/twitter/finagle/http/util/HeaderKeyOrderingTest.scala | Scala | apache-2.0 | 601 |
/*
* Copyright (c) 2014 Contributor. All rights reserved.
*/
package org.scalaide.debug.internal.expression
import scala.util.Failure
import scala.util.Success
import org.junit.Assert._
import org.junit.Test
import Names.Scala
/** Integration tests for [[ExpressionManager]]'s `compute` and `evaluateCondition`
  * entry points, run against the debug session provided by the companion. */
class ExpressionManagerTest extends BaseIntegrationTest(ExpressionManagerTest) {

  /**
   * Executes code using [[org.scalaide.debug.internal.expression.ExpressionManager]] and checks result.
   *
   * @param code to compile and run
   * @param expectedResult `Some(<string that should be returned>)` or `None` if error should be returned
   * @param expectedError `Some(<string that should exist in result>)` or `None` if correct result should be returned
   */
  protected final def withExpressionManager(code: String, expectedResult: Option[String], expectedError: Option[String]) = {
    // Local mutable state collects exactly one of: a result string or an error message.
    var result: Option[String] = None
    var error: String = null

    ExpressionManager.compute(code) match {
      case SuccessWithValue(_, outputText) =>
        result = Some(outputText)
      case SuccessWithoutValue(outputText) =>
        result = Some(outputText)
      case EvaluationFailure(errorMessage) =>
        error = errorMessage
    }

    // Error check is a substring match; result check is exact.
    expectedError.foreach(expected => assertTrue(s"'$error' does not contain '$expected'", error.contains(expected)))
    assertEquals(expectedResult, result)
  }

  /**
   * Executes condition using [[org.scalaide.debug.internal.expression.ExpressionManager]] and checks result.
   * Differs from `withExpressionManager` in calling `ExpressionManager.evaluateCondition` instead of `ExpressionManager.compute`.
   *
   * @param code to compile and run
   * @param expectedResult `Some(<boolean that should be returned>)` or `None` if error should be returned
   * @param expectedError `Some(<string that should exist in result>)` or `None` if correct result should be returned
   */
  private def evalConditionWithManager(code: String, expectedResult: Option[Boolean], expectedError: Option[String]): Unit = {
    var result: Option[Boolean] = None
    var error: String = null

    // Evaluation context (current frame location, classpath and thread) comes
    // from the live debug session held by the companion object.
    val location = companion.session.currentStackFrame.stackFrame.location
    val classPath = companion.session.debugTarget.classPath
    val threadRef = companion.session.currentStackFrame.thread.threadRef
    ExpressionManager.evaluateCondition(code, classPath, threadRef, location) match {
      case Success(shouldStop) => result = Some(shouldStop)
      case Failure(exception) => error = exception.getMessage
    }

    expectedError.foreach(expected => assertTrue(s"'$error' does not contain '$expected'", error.contains(expected)))
    assertEquals(expectedResult, result)
  }

  @Test
  def testDisplayNullResult(): Unit = withExpressionManager(
    code = "null",
    expectedError = None,
    expectedResult = Some(s"${Scala.nullLiteral} (of type: ${Scala.nullType})"))

  @Test
  def testDisplayNullValue(): Unit = withExpressionManager(
    code = "Libs.nullVal",
    expectedError = None,
    expectedResult = Some(s"${Scala.nullLiteral} (of type: ${Scala.nullType})"))

  // If in this test we'd use function returning Unit, some other tests in this class would fail (they work when we run them separately).
  // In this case there's error from compiler: <Cannot read source file> in scala.tools.nsc.transform.AddInterfaces$LazyImplClassType.implType$1(AddInterfaces.scala:190).
  // And there's no such problem during real work with expression evaluator installed in Eclipse.
  @Test
  def testDisplayUnitResult(): Unit = withExpressionManager(
    code = "print('a')",
    expectedError = None,
    expectedResult = Some(s"${Scala.unitLiteral} (of type: ${Scala.unitType})"))

  @Test
  def testDisplayIntResult(): Unit = withExpressionManager(
    code = "int",
    expectedError = None,
    expectedResult = Some(s"${TestValues.ValuesTestCase.int} (of type: ${Names.Java.primitives.int})"))

  @Test
  def testDisplayEmptyExpressionError(): Unit = withExpressionManager(
    code = "",
    expectedError = Some("Expression is empty"),
    expectedResult = None)

  @Test
  def testDisplayInvalidExpressionError(): Unit = withExpressionManager(
    code = "1 === 2",
    expectedError = Some(ExpressionException.reflectiveCompilationFailureMessage("")),
    expectedResult = None)

  @Test
  def testDisplayInvalidConditionError(): Unit = evalConditionWithManager(
    code = "1 === 2",
    expectedError = Some(ExpressionException.reflectiveCompilationFailureMessage("")),
    expectedResult = None)

  @Test
  def testDisplayInvalidExpressionErrorWithTypeIssue(): Unit = withExpressionManager(
    code = "List.alaString",
    expectedError = Some(ExpressionException.reflectiveCompilationFailureMessage("")),
    expectedResult = None)

  @Test
  def testDisplayInvalidConditionErrorWithTypeIssue(): Unit = evalConditionWithManager(
    code = "List.alaString",
    expectedError = Some(ExpressionException.reflectiveCompilationFailureMessage("")),
    expectedResult = None)

  @Test
  def testDisplayExceptionMessage(): Unit = withExpressionManager(
    code = "Seq(1, 2, 3).apply(4)",
    expectedError = Some("java.lang.IndexOutOfBoundsException: 4"),
    expectedResult = None)

  @Test
  def testDisplayMessageForLambdaWithoutInferredTypeInCondition(): Unit = evalConditionWithManager(
    code = "list.map(_ - 1)",
    expectedError = Some(ExpressionException.noBooleanJdiProxyExceptionMessage("scala.collection.immutable.$colon$colon")),
    expectedResult = None)

  @Test
  def testEqualsOnNonexistingField(): Unit = evalConditionWithManager(
    code = "uuula == 1",
    expectedError = Some(ExpressionException.notExistingField("uuula")),
    expectedResult = None)
}
object ExpressionManagerTest extends BaseIntegrationTestCompanion
| scala-ide/scala-ide | org.scala-ide.sdt.debug.expression.tests/src/org/scalaide/debug/internal/expression/ExpressionManagerTest.scala | Scala | bsd-3-clause | 5,727 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package vta.shell
import chisel3._
import vta.util.config._
import vta.interface.axi._
import vta.core._
/** IntelShell.
 *
 * The IntelShell is based on a VME, VCR and core. This creates a complete VTA
 * system that can be used for simulation or real hardware.
 */
class IntelShell(implicit p: Parameters) extends Module {
  val io = IO(new Bundle {
    val host = new AXIClient(p(ShellKey).hostParams)
    val mem = new AXIMaster(p(ShellKey).memParams)
  })

  val vcr = Module(new VCR)
  val vme = Module(new VME)
  val core = Module(new Core)

  // Control registers drive the core; the core issues memory traffic via the VME.
  core.io.vcr <> vcr.io.vcr
  vme.io.vme <> core.io.vme

  // vcr.io.host <> io.host
  // The host AXI port is wired field-by-field instead of with the bulk
  // connect above (left commented out for reference).

  // Write address channel: pass-through between host and VCR.
  io.host.aw.ready := vcr.io.host.aw.ready
  vcr.io.host.aw.valid := io.host.aw.valid
  vcr.io.host.aw.bits.addr := io.host.aw.bits.addr

  // Write data channel: pass-through.
  io.host.w.ready := vcr.io.host.w.ready
  vcr.io.host.w.valid := io.host.w.valid
  vcr.io.host.w.bits.data := io.host.w.bits.data
  vcr.io.host.w.bits.strb := io.host.w.bits.strb

  // Write response channel. NOTE(review): the response id echoes the write
  // *data* channel id (w.bits.id) rather than the write address id — confirm
  // this matches the host's AXI ID usage.
  vcr.io.host.b.ready := io.host.b.ready
  io.host.b.valid := vcr.io.host.b.valid
  io.host.b.bits.resp := vcr.io.host.b.bits.resp
  io.host.b.bits.id := io.host.w.bits.id

  // Read address channel: pass-through.
  io.host.ar.ready := vcr.io.host.ar.ready
  vcr.io.host.ar.valid := io.host.ar.valid
  vcr.io.host.ar.bits.addr := io.host.ar.bits.addr

  // Read data channel. The id echoes the current ar id combinationally, and
  // `last` is tied high below, i.e. every read response is a single beat.
  vcr.io.host.r.ready := io.host.r.ready
  io.host.r.valid := vcr.io.host.r.valid
  io.host.r.bits.data := vcr.io.host.r.bits.data
  io.host.r.bits.resp := vcr.io.host.r.bits.resp
  io.host.r.bits.id := io.host.ar.bits.id

  // Sideband fields not driven by the VCR.
  io.host.b.bits.user <> DontCare
  io.host.r.bits.user <> DontCare
  io.host.r.bits.last := 1.U

  io.mem <> vme.io.mem
}
| Huyuwei/tvm | vta/hardware/chisel/src/main/scala/shell/IntelShell.scala | Scala | apache-2.0 | 2,422 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api.scala.batch.table.stringexpr
import org.apache.flink.api.scala._
import org.apache.flink.table.api.TableEnvironment
import org.apache.flink.table.api.Types._
import org.apache.flink.table.api.scala._
import org.junit._
/** Each test builds the same projection/filter twice — once with the Scala
  * expression DSL and once with the equivalent string expression — and asserts
  * that both produce the same logical plan. */
class CastingStringExpressionTest {

  @Test
  def testNumericAutocastInArithmetic() {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tableEnv = TableEnvironment.getTableEnvironment(env)
    val table = env.fromElements(
      (1.toByte, 1.toShort, 1, 1L, 1.0f, 1.0d, 1L, 1001.1)).toTable(tableEnv)

    val t1 = table.select('_1 + 1, '_2 + 1, '_3 + 1L, '_4 + 1.0f,
      '_5 + 1.0d, '_6 + 1, '_7 + 1.0d, '_8 + '_1)
    val t2 = table.select("_1 + 1, _2 +" +
      " 1, _3 + 1L, _4 + 1.0f, _5 + 1.0d, _6 + 1, _7 + 1.0d, _8 + _1")

    val lPlan1 = t1.logicalPlan
    val lPlan2 = t2.logicalPlan

    Assert.assertEquals("Logical Plans do not match", lPlan1, lPlan2)
  }

  @Test
  @throws[Exception]
  def testNumericAutocastInComparison() {
    val env: ExecutionEnvironment = ExecutionEnvironment.getExecutionEnvironment
    val tableEnv = TableEnvironment.getTableEnvironment(env)
    val table = env.fromElements(
      (1.toByte, 1.toShort, 1, 1L, 1.0f, 1.0d),
      (2.toByte, 2.toShort, 2, 2L, 2.0f, 2.0d))
      .toTable(tableEnv, 'a, 'b, 'c, 'd, 'e, 'f)

    val t1 = table.filter('a > 1 && 'b > 1 && 'c > 1L &&
      'd > 1.0f && 'e > 1.0d && 'f > 1)
    val t2 = table
      .filter("a > 1 && b > 1 && c > 1L && d > 1.0f && e > 1.0d && f > 1")

    val lPlan1 = t1.logicalPlan
    val lPlan2 = t2.logicalPlan

    Assert.assertEquals("Logical Plans do not match", lPlan1, lPlan2)
  }

  @Test
  @throws[Exception]
  def testCasting() {
    val env: ExecutionEnvironment = ExecutionEnvironment.getExecutionEnvironment
    val tableEnv = TableEnvironment.getTableEnvironment(env)
    val table = env.fromElements((1, 0.0, 1L, true)).toTable(tableEnv)

    val t1 = table .select(
      // * -> String
      '_1.cast(STRING), '_2.cast(STRING), '_3.cast(STRING), '_4.cast(STRING),
      // NUMERIC TYPE -> Boolean
      '_1.cast(BOOLEAN), '_2.cast(BOOLEAN), '_3.cast(BOOLEAN),
      // NUMERIC TYPE -> NUMERIC TYPE
      '_1.cast(DOUBLE), '_2.cast(INT), '_3.cast(SHORT),
      // Boolean -> NUMERIC TYPE
      '_4.cast(DOUBLE), // identity casting
      '_1.cast(INT), '_2.cast(DOUBLE), '_3.cast(LONG), '_4.cast(BOOLEAN))
    val t2 = table.select(
      // * -> String
      "_1.cast(STRING), _2.cast(STRING), _3.cast(STRING), _4.cast(STRING)," +
      // NUMERIC TYPE -> Boolean
      "_1.cast(BOOLEAN), _2.cast(BOOLEAN), _3.cast(BOOLEAN)," +
      // NUMERIC TYPE -> NUMERIC TYPE
      "_1.cast(DOUBLE), _2.cast(INT), _3.cast(SHORT)," +
      // Boolean -> NUMERIC TYPE
      "_4.cast(DOUBLE)," +
      // identity casting
      "_1.cast(INT), _2.cast(DOUBLE), _3.cast(LONG), _4.cast(BOOLEAN)")

    val lPlan1 = t1.logicalPlan
    val lPlan2 = t2.logicalPlan

    Assert.assertEquals("Logical Plans do not match", lPlan1, lPlan2)
  }

  @Test
  @throws[Exception]
  def testCastFromString() {
    val env: ExecutionEnvironment = ExecutionEnvironment.getExecutionEnvironment
    val tableEnv = TableEnvironment.getTableEnvironment(env)
    val table = env.fromElements(("1", "true", "2.0")).toTable(tableEnv)

    val t1 = table .select('_1.cast(BYTE), '_1.cast(SHORT), '_1.cast(INT), '_1.cast(LONG),
      '_3.cast(DOUBLE), '_3.cast(FLOAT), '_2.cast(BOOLEAN))
    val t2 = table.select(
      "_1.cast(BYTE), _1.cast(SHORT), _1.cast(INT), _1.cast(LONG), " +
      "_3.cast(DOUBLE), _3.cast(FLOAT), _2.cast(BOOLEAN)")

    val lPlan1 = t1.logicalPlan
    val lPlan2 = t2.logicalPlan

    Assert.assertEquals("Logical Plans do not match", lPlan1, lPlan2)
  }
}
| WangTaoTheTonic/flink | flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/scala/batch/table/stringexpr/CastingStringExpressionTest.scala | Scala | apache-2.0 | 4,573 |
package spire.algebra
package partial
import spire.util.Opt
/**
 * A left partial action of a semigroupoid `G` on `P` is the implementation of
 * a method `partialActl(g, p)`, or `g ?|+|> p` returning `Opt[P]`, such that:
 *
 * 1. for all `g`, `h` in `G`, `p` in `P` such that `g |+|? h` and `h ?|+|> p` are defined,
 *
 *   `((g |+|? h).get ?|+|> p).get === (g ?|+|> (h ?|+|> p).get).get` with all operations
 *   defined.
 *
 * In addition, if `G` is a partial monoid, the following relation holds:
 *
 * 2. for all `g` in `G` and `p` in `P` such that `g ?|+|> p` is defined:
 *
 *   `(g.rightId ?|+|> p).get === p`, the operation `?|+|>` being defined.
 */
trait LeftPartialAction[P, G] extends Any {
  // Default: the action is defined exactly when `partialActl` returns a non-empty Opt.
  def actlIsDefined(g: G, p: P): Boolean = partialActl(g, p).nonEmpty
  def partialActl(g: G, p: P): Opt[P]
}
object LeftPartialAction {
  /** Summons the implicit `LeftPartialAction` instance. */
  @inline final def apply[P, G](implicit G: LeftPartialAction[P, G]): LeftPartialAction[P, G] = G

  /** Any total left action is a partial action that is always defined. */
  implicit def fromLeftAction[P, G](implicit G: LeftAction[P, G]): LeftPartialAction[P, G] =
    new LeftPartialAction[P, G] {
      override def actlIsDefined(g: G, p: P): Boolean = true
      def partialActl(g: G, p: P): Opt[P] = Opt(G.actl(g, p))
    }
}
/**
 * A right partial action of a semigroupoid `G` on `P` is the implementation of
 * a method `partialActr(p, g)`, or `p <|+|? g` returning `Opt[P]`, such that:
 *
 * 1. for all `g`, `h` in `G`, `p` in `P` such that `g |+|? h` and `p <|+|? g` are defined,
 *
 *   `(p <|+|? (g |+|? h).get).get === ((p <|+|? g).get |+|? h).get`,
 *   and all operations are defined.
 *
 * In addition, if `G` is a partial monoid, the following relation holds:
 *
 * 2. for all `g` in `G` and `p` in `P` such that `p <|+|? g` is defined:
 *
 *   `(p <|+|? g.leftId).get === p`, the operation `<|+|?` being defined.
 */
trait RightPartialAction[P, G] extends Any {
  // Default: the action is defined exactly when `partialActr` returns a non-empty Opt.
  def actrIsDefined(p: P, g: G): Boolean = partialActr(p, g).nonEmpty
  def partialActr(p: P, g: G): Opt[P]
}
object RightPartialAction {
  /** Summons the implicit `RightPartialAction` instance. */
  @inline final def apply[P, G](implicit G: RightPartialAction[P, G]): RightPartialAction[P, G] = G

  /** Any total right action is a partial action that is always defined. */
  implicit def fromRightAction[P, G](implicit G: RightAction[P, G]): RightPartialAction[P, G] =
    new RightPartialAction[P, G] {
      override def actrIsDefined(p: P, g: G): Boolean = true
      def partialActr(p: P, g: G): Opt[P] = Opt(G.actr(p, g))
    }
}
/**
 * A partial action is the combination of left and right partial actions, providing:
 *
 * - a method `partialActl(g, p)`, or `g ?|+|> p` returning `Opt[P]`, such that:
 *
 * 1. for all `g`, `h` in `G`, `p` in `P` such that `g |+|? h` and `h ?|+|> p` are defined,
 *
 *   `((g |+|? h).get ?|+|> p).get === (g ?|+|> (h ?|+|> p).get).get` with all operations
 *   defined.
 *
 * - a method `partialActr(p, g)`, or `p <|+|? g` returning `Opt[P]`, such that:
 *
 * 2. for all `g`, `h` in `G`, `p` in `P` such that `g |+|? h` and `p <|+|? g` are defined,
 *
 *   `(p <|+|? (g |+|? h).get).get === ((p <|+|? g).get |+|? h).get`,
 *   and all operations are defined.
 *
 * In addition, if `G` is a groupoid, the following relations holds:
 *
 * 3. for all `g` in `G` and `p` in `P` such that `g ?|+|> p` is defined:
 *
 *   `(g.rightId ?|+|> p).get === p`, the operation `?|+|>` being defined.
 *
 * 4. for all `g` in `G` and `p` in `P` such that `p <|+|? g` is defined:
 *
 *   `(p <|+|? g.leftId).get === p`, the operation `<|+|?` being defined.
 *
 * 5. for all `g` in `G` and `p` in `P` such that `g ?|+|> p` is defined:
 *
 *   `(g ?|+|> p).get === (p <|+|? g.inverse).get`
 *
 */
// Pure combination marker: inherits `partialActl` from LeftPartialAction and
// `partialActr` from RightPartialAction, adding no members of its own.
trait PartialAction[P, G] extends Any
  with LeftPartialAction[P, G] with RightPartialAction[P, G]
object PartialAction {
  /** Summons the implicit `PartialAction` instance. */
  @inline final def apply[P, G](implicit G: PartialAction[P, G]): PartialAction[P, G] = G

  /** Any total action is a partial action whose operations are always defined. */
  implicit def fromAction[P, G](implicit G: Action[P, G]): PartialAction[P, G] =
    new PartialAction[P, G] {
      override def actlIsDefined(g: G, p: P): Boolean = true
      def partialActl(g: G, p: P): Opt[P] = Opt(G.actl(g, p))
      override def actrIsDefined(p: P, g: G): Boolean = true
      def partialActr(p: P, g: G): Opt[P] = Opt(G.actr(p, g))
    }
}
| woparry/spire | core/src/main/scala/spire/algebra/partial/PartialAction.scala | Scala | mit | 4,176 |
package monocle.function
import monocle.TestUtil._
import monocle.std._
import monocle.syntax._
import org.specs2.scalaz.{ScalazMatchers, Spec}
import scalaz.Tree._
import scalaz.{IMap, IList, OneAnd}
import scalaz.std.string._
/** Examples of Monocle's `each`, which yields a Traversal over every element
  * of a container; `modify` updates all targeted elements at once. */
class EachExample extends Spec with ScalazMatchers {

  "Each can be used on Option" in {
    (Some(3) applyTraversal each modify( _ + 1)) ==== Some(4)
    // A None has no target, so modify is a no-op.
    ((None : Option[Int]) applyTraversal each modify( _ + 1)) ==== None
  }

  "Each can be used on List, IList, Vector, Stream and OneAnd" in {
    (List(1,2) applyTraversal each modify( _ + 1)) ==== List(2,3)
    (IList(1,2) applyTraversal each modify( _ + 1)) ==== IList(2,3)
    (Stream(1,2) applyTraversal each modify( _ + 1)) ==== Stream(2,3)
    (Vector(1,2) applyTraversal each modify( _ + 1)) ==== Vector(2,3)
    (OneAnd(1, List(2,3)) applyTraversal each modify( _ + 1)) ==== OneAnd(2, List(3,4))
  }

  "Each can be used on Map, IMap to update all values" in {
    // Keys are untouched; only the values are modified.
    (Map("One" -> 1, "Two" -> 2) applyTraversal each modify( _ + 1)) ==== Map("One" -> 2, "Two" -> 3)
    (IMap("One" -> 1, "Two" -> 2) applyTraversal each modify( _ + 1)) ==== IMap("One" -> 2, "Two" -> 3)
  }

  "Each can be used on tuple of same type" in {
    ((1, 2) applyTraversal each modify( _ + 1)) ==== ((2, 3))
    ((1, 2, 3) applyTraversal each modify( _ + 1)) ==== ((2, 3, 4))
    ((1, 2, 3, 4, 5, 6) applyTraversal each modify( _ + 1)) ==== ((2, 3, 4, 5, 6, 7))
  }

  "Each can be used on Tree" in {
    // Modifies the root label and all leaf labels.
    node(1, Stream(leaf(2), leaf(3))) applyTraversal each modify( _ + 1) must equal (node(2, Stream(leaf(3), leaf(4))))
    (node(1, Stream(leaf(2), leaf(3))) applyTraversal each getAll) ==== List(1,2,3)
  }
}
| CapeSepias/Monocle | example/src/test/scala/monocle/function/EachExample.scala | Scala | mit | 1,719 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.utils
import java.io._
import java.net._
import java.nio._
import java.nio.channels._
import java.util.Random
import java.util.Properties
import collection.mutable.Map
import collection.mutable.ListBuffer
import org.I0Itec.zkclient.ZkClient
import kafka.server._
import kafka.producer._
import kafka.message._
import kafka.api._
import kafka.cluster.Broker
import kafka.consumer.ConsumerConfig
import kafka.serializer.{StringEncoder, DefaultEncoder, Encoder}
import kafka.common.TopicAndPartition
import kafka.admin.AdminUtils
import kafka.producer.ProducerConfig
import junit.framework.AssertionFailedError
import junit.framework.Assert._
import org.apache.kafka.clients.producer.KafkaProducer
/**
* Utility functions to help with testing
*/
object TestUtils extends Logging {
// Base directory for all temporary test files/dirs (the JVM's java.io.tmpdir).
val IoTmpDir = System.getProperty("java.io.tmpdir")
// Character pools used by randomString for generated names/payloads.
val Letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
val Digits = "0123456789"
val LettersAndDigits = Letters + Digits
/* A consistent random number generator to make tests repeatable */
val seededRandom = new Random(192348092834L)
// Unseeded generator for things that only need uniqueness (temp dirs, topics).
val random = new Random()
/**
 * Choose `count` currently-available local ports.
 *
 * All sockets are bound (to port 0, i.e. an ephemeral port) before any is
 * closed, so the returned ports are pairwise distinct; every socket is
 * released again before returning.
 */
def choosePorts(count: Int): List[Int] = {
  val boundSockets = List.fill(count)(new ServerSocket(0))
  val freePorts = boundSockets.map(_.getLocalPort)
  boundSockets.foreach(_.close())
  freePorts
}

/**
 * Choose a single available port.
 */
def choosePort(): Int = choosePorts(1).head
/**
 * Create a temporary directory under the JVM temp dir.
 * The directory is scheduled for deletion on JVM exit.
 */
def tempDir(): File = {
  val f = new File(IoTmpDir, "kafka-" + random.nextInt(1000000))
  f.mkdirs()
  f.deleteOnExit()
  f
}

// Generate a unique-ish topic name for a test run.
def tempTopic(): String = "testTopic" + random.nextInt(1000000)

/**
 * Create a temporary directory under the given parent path.
 * The directory is scheduled for deletion on JVM exit.
 */
def tempRelativeDir(parent: String): File = {
  val f = new File(parent, "kafka-" + random.nextInt(1000000))
  f.mkdirs()
  f.deleteOnExit()
  f
}

/**
 * Create a temporary file, scheduled for deletion on JVM exit.
 */
def tempFile(): File = {
  val f = File.createTempFile("kafka", ".tmp")
  f.deleteOnExit()
  f
}

/**
 * Create a temporary file and return an open read/write file channel for it.
 */
def tempChannel(): FileChannel = new RandomAccessFile(tempFile(), "rw").getChannel()
/**
 * Create a kafka server instance with appropriate test settings
 * USING THIS IS A SIGN YOU ARE NOT WRITING A REAL UNIT TEST
 * @param config The configuration of the server
 * @param time Clock abstraction passed to the server (defaults to wall clock)
 * @return the started server; the caller is responsible for shutting it down
 */
def createServer(config: KafkaConfig, time: Time = SystemTime): KafkaServer = {
  val server = new KafkaServer(config, time)
  server.startup()
  server
}

/**
 * Create broker configs for `numConfigs` nodes, each bound to a freshly
 * chosen free port, with sequential broker ids starting at 0.
 */
def createBrokerConfigs(numConfigs: Int): List[Properties] = {
  for((port, node) <- choosePorts(numConfigs).zipWithIndex)
    yield createBrokerConfig(node, port)
}

// Render the configs as a "host:port,host:port,..." broker list string.
def getBrokerListStrFromConfigs(configs: Seq[KafkaConfig]): String = {
  configs.map(c => c.hostName + ":" + c.port).mkString(",")
}

/**
 * Create a test config for the given node id.
 * Uses a throwaway log dir and the shared test ZK connect string.
 */
def createBrokerConfig(nodeId: Int, port: Int = choosePort()): Properties = {
  val props = new Properties
  props.put("broker.id", nodeId.toString)
  props.put("host.name", "localhost")
  props.put("port", port.toString)
  props.put("log.dir", TestUtils.tempDir().getAbsolutePath)
  props.put("zookeeper.connect", TestZKUtils.zookeeperConnect)
  props.put("replica.socket.timeout.ms", "1500")
  props
}
/**
 * Create a topic in zookeeper.
 * Wait until the leader is elected and the metadata is propagated to all brokers.
 * @return for each partition index, the elected leader broker id (if any)
 */
def createTopic(zkClient: ZkClient, topic: String, numPartitions: Int = 1, replicationFactor: Int = 1,
                servers: Seq[KafkaServer]) : scala.collection.immutable.Map[Int, Option[Int]] = {
  // create topic
  AdminUtils.createTopic(zkClient, topic, numPartitions, replicationFactor)
  // wait until the update metadata request for new topic reaches all servers
  (0 until numPartitions).map { case i =>
    TestUtils.waitUntilMetadataIsPropagated(servers, topic, i)
    i -> TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, topic, i)
  }.toMap
}

/**
 * Create a topic in zookeeper using a customized replica assignment.
 * Wait until the leader is elected and the metadata is propagated to all brokers.
 * @return for each assigned partition, the elected leader broker id (if any)
 */
def createTopic(zkClient: ZkClient, topic: String, partitionReplicaAssignment: collection.Map[Int, Seq[Int]],
                servers: Seq[KafkaServer]) : scala.collection.immutable.Map[Int, Option[Int]] = {
  // create topic
  AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkClient, topic, partitionReplicaAssignment)
  // wait until the update metadata request for new topic reaches all servers
  partitionReplicaAssignment.keySet.map { case i =>
    TestUtils.waitUntilMetadataIsPropagated(servers, topic, i)
    i -> TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, topic, i)
  }.toMap
}

/**
 * Create a test config for a consumer.
 * @param consumerTimeout ms before a consumer iterator times out (-1 = block forever)
 */
def createConsumerProperties(zkConnect: String, groupId: String, consumerId: String,
                             consumerTimeout: Long = -1): Properties = {
  val props = new Properties
  props.put("zookeeper.connect", zkConnect)
  props.put("group.id", groupId)
  props.put("consumer.id", consumerId)
  props.put("consumer.timeout.ms", consumerTimeout.toString)
  props.put("zookeeper.session.timeout.ms", "6000")
  props.put("zookeeper.sync.time.ms", "200")
  props.put("auto.commit.interval.ms", "1000")
  props.put("rebalance.max.retries", "4")
  props.put("auto.offset.reset", "smallest")
  props.put("num.consumer.fetchers", "2")
  props
}
/**
 * Wrap the message in a message set
 * @param payload The bytes of the message
 * @param codec compression codec to apply (none by default)
 * @param key optional message key (null means no key)
 */
def singleMessageSet(payload: Array[Byte], codec: CompressionCodec = NoCompressionCodec, key: Array[Byte] = null) =
  new ByteBufferMessageSet(compressionCodec = codec, messages = new Message(payload, key))

/**
 * Generate an array of random bytes from the repeatable seeded generator.
 * @param numBytes The size of the array
 */
def randomBytes(numBytes: Int): Array[Byte] = {
  val bytes = new Array[Byte](numBytes)
  seededRandom.nextBytes(bytes)
  bytes
}
/**
 * Generate a random string of letters and digits of the given length,
 * drawn from the repeatable `seededRandom` generator.
 * @param len The length of the string
 * @return The random string
 */
def randomString(len: Int): String =
  Iterator
    .continually(LettersAndDigits.charAt(seededRandom.nextInt(LettersAndDigits.length)))
    .take(len)
    .mkString
/**
 * Check that the readable content (position() to limit()) of the two buffers
 * is identical. Neither buffer's position is advanced (absolute gets only).
 *
 * Fix: the second buffer was previously indexed relative to b1.position,
 * which reads the wrong bytes of b2 whenever the two positions differ.
 */
def checkEquals(b1: ByteBuffer, b2: ByteBuffer) {
  assertEquals("Buffers should have equal length", b1.limit - b1.position, b2.limit - b2.position)
  for(i <- 0 until b1.limit - b1.position)
    assertEquals("byte " + i + " byte not equal.", b1.get(b1.position + i), b2.get(b2.position + i))
}
/**
 * Throw an assertion failure if the two iterators are of differing lengths
 * or differ on any pairwise element. Both iterators are fully consumed.
 */
def checkEquals[T](expected: Iterator[T], actual: Iterator[T]) {
  var length = 0
  while(expected.hasNext && actual.hasNext) {
    length += 1
    assertEquals(expected.next, actual.next)
  }
  // If either side still has elements, drain it to report the total length,
  // then fail explicitly (previously assertFalse(msg, true) was used).
  if (expected.hasNext) {
    val total = length + expected.length
    fail("Iterators have uneven length-- first has more: " + total + " > " + length)
  }
  if (actual.hasNext) {
    val total = length + actual.length
    fail("Iterators have uneven length-- second has more: " + total + " > " + length)
  }
}
/**
 * Assert that the iterator yields exactly `expectedLength` elements.
 * The iterator is fully consumed in the process.
 */
def checkLength[T](s1: Iterator[T], expectedLength: Int) {
  // Iterator.length drains the iterator while counting, exactly like the
  // original hand-rolled while loop.
  assertEquals(expectedLength, s1.length)
}
/**
 * Throw an exception if the two iterators are of differing lengths or contain
 * different messages on their Nth element. Both iterators are consumed.
 */
def checkEquals[T](s1: java.util.Iterator[T], s2: java.util.Iterator[T]) {
  while(s1.hasNext && s2.hasNext)
    assertEquals(s1.next, s2.next)
  assertFalse("Iterators have uneven length--first has more", s1.hasNext)
  assertFalse("Iterators have uneven length--second has more", s2.hasNext)
}
/**
 * Concatenate several iterators into one, yielding all elements of the first,
 * then the second, and so on (like Iterator.++ chained over all arguments).
 *
 * Improvements over the previous version: no unreachable throw after the
 * while(true) loop, no parenthesized override of the parameterless hasNext,
 * and next() on an exhausted iterator now throws NoSuchElementException
 * (Iterator contract) instead of a NullPointerException.
 */
def stackedIterator[T](s: Iterator[T]*): Iterator[T] = {
  new Iterator[T] {
    private val topIterator = s.iterator
    private var cur: Iterator[T] = Iterator.empty

    // Advance `cur` past exhausted sub-iterators until one has an element
    // or we run out of sub-iterators.
    def hasNext: Boolean = {
      while (!cur.hasNext && topIterator.hasNext)
        cur = topIterator.next()
      cur.hasNext
    }

    def next(): T = {
      if (!hasNext)
        throw new NoSuchElementException("next on exhausted stacked iterator")
      cur.next()
    }
  }
}
/**
 * Create a hexidecimal string for the given bytes
 */
def hexString(bytes: Array[Byte]): String = hexString(ByteBuffer.wrap(bytes))

/**
 * Create a hexidecimal string for the readable bytes (position to limit) of
 * the given buffer, without consuming it.
 *
 * Fix: the loop previously ran for `limit` iterations starting at `position`,
 * reading past the limit whenever position > 0; it now covers exactly the
 * remaining bytes.
 *
 * Note: digits are not zero-padded, and a negative byte widens to a negative
 * Int so it formats as 8 hex chars — quirky, but preserved behavior.
 */
def hexString(buffer: ByteBuffer): String = {
  val builder = new StringBuilder("0x")
  for(i <- 0 until buffer.limit - buffer.position)
    builder.append(String.format("%x", Integer.valueOf(buffer.get(buffer.position + i))))
  builder.toString
}
/**
 * Create a producer with a few pre-configured properties.
 * If certain properties need to be overridden, they can be provided in producerProps.
 * Note the ordering: producerProps overrides are applied first, then the
 * serializer/partitioner classes are set, so the encoder/keyEncoder/partitioner
 * arguments win over same-named keys in producerProps.
 */
def createProducer[K, V](brokerList: String,
                         encoder: String = classOf[DefaultEncoder].getName,
                         keyEncoder: String = classOf[DefaultEncoder].getName,
                         partitioner: String = classOf[DefaultPartitioner].getName,
                         producerProps: Properties = null): Producer[K, V] = {
  val props: Properties = getProducerConfig(brokerList)
  //override any explicitly specified properties
  if (producerProps != null)
    props.putAll(producerProps)
  props.put("serializer.class", encoder)
  props.put("key.serializer.class", keyEncoder)
  props.put("partitioner.class", partitioner)
  new Producer[K, V](new ProducerConfig(props))
}
/**
 * Create a (new-client) KafkaProducer with a few pre-configured properties
 * suitable for tests.
 */
def createNewProducer(brokerList: String,
                      acks: Int = -1,
                      metadataFetchTimeout: Long = 3000L,
                      blockOnBufferFull: Boolean = true,
                      bufferSize: Long = 1024L * 1024L,
                      retries: Int = 0) : KafkaProducer = {
  import org.apache.kafka.clients.producer.ProducerConfig
  // Collect the settings first, then copy them into java Properties.
  val settings = Seq(
    ProducerConfig.BOOTSTRAP_SERVERS_CONFIG -> brokerList,
    ProducerConfig.ACKS_CONFIG -> acks.toString,
    ProducerConfig.METADATA_FETCH_TIMEOUT_CONFIG -> metadataFetchTimeout.toString,
    ProducerConfig.BLOCK_ON_BUFFER_FULL_CONFIG -> blockOnBufferFull.toString,
    ProducerConfig.BUFFER_MEMORY_CONFIG -> bufferSize.toString,
    ProducerConfig.RETRIES_CONFIG -> retries.toString,
    ProducerConfig.RETRY_BACKOFF_MS_CONFIG -> "1000")
  val producerProps = new Properties()
  settings.foreach { case (key, value) => producerProps.put(key, value) }
  new KafkaProducer(producerProps)
}
/**
 * Create a default producer config properties map with the given metadata broker list
 */
def getProducerConfig(brokerList: String): Properties = {
  val props = new Properties()
  props.put("metadata.broker.list", brokerList)
  props.put("message.send.max.retries", "5")
  props.put("retry.backoff.ms", "1000")
  props.put("request.timeout.ms", "2000")
  props.put("request.required.acks", "-1")
  props.put("send.buffer.bytes", "65536")
  props.put("connect.timeout.ms", "100000")
  props.put("reconnect.interval", "10000")
  props
}

// Config for a SyncProducer talking to localhost:port with String payloads.
def getSyncProducerConfig(port: Int): Properties = {
  val props = new Properties()
  props.put("host", "localhost")
  props.put("port", port.toString)
  props.put("request.timeout.ms", "500")
  props.put("request.required.acks", "1")
  props.put("serializer.class", classOf[StringEncoder].getName)
  props
}
/**
 * Overwrite the persistent ZK node at `path` with the given offset value.
 *
 * Fix: the ZkClient was previously created and never closed, leaking a ZK
 * connection (and its event threads) on every call.
 */
def updateConsumerOffset(config : ConsumerConfig, path : String, offset : Long) = {
  val zkClient = new ZkClient(config.zkConnect, config.zkSessionTimeoutMs, config.zkConnectionTimeoutMs, ZKStringSerializer)
  try {
    ZkUtils.updatePersistentPath(zkClient, path, offset.toString)
  } finally {
    zkClient.close()
  }
}
/**
 * Adapt an iterator of MessageAndOffset into an iterator over the bare
 * messages, preserving order and laziness.
 */
def getMessageIterator(iter: Iterator[MessageAndOffset]): Iterator[Message] =
  new IteratorTemplate[Message] {
    override def makeNext(): Message =
      if (iter.hasNext) iter.next.message
      else allDone()
  }
/**
 * Register a broker entry in ZK for each given id (host "localhost", port
 * 6667) and return the created Broker descriptors.
 */
def createBrokersInZk(zkClient: ZkClient, ids: Seq[Int]): Seq[Broker] = {
  val brokers = ids.map(id => new Broker(id, "localhost", 6667))
  brokers.foreach(b => ZkUtils.registerBrokerInZk(zkClient, b.id, b.host, b.port, 6000, jmxPort = -1))
  brokers
}

/**
 * Delete the ZK registration node of each given broker id.
 *
 * Fix: the path was previously built from the Broker's toString ("/.../" + b)
 * rather than its id, so a non-existent path was targeted and the broker
 * registrations were left behind.
 */
def deleteBrokersInZk(zkClient: ZkClient, ids: Seq[Int]): Seq[Broker] = {
  val brokers = ids.map(id => new Broker(id, "localhost", 6667))
  brokers.foreach(b => ZkUtils.deletePath(zkClient, ZkUtils.BrokerIdsPath + "/" + b.id))
  brokers
}
/**
 * Build n test message strings: "msg0", "msg1", ..., "msg(n-1)".
 */
def getMsgStrings(n: Int): Seq[String] =
  (0 until n).map(i => "msg" + i)
/**
 * Create a wired format request based on simple basic information
 */
def produceRequest(topic: String,
                   partition: Int,
                   message: ByteBufferMessageSet,
                   acks: Int = SyncProducerConfig.DefaultRequiredAcks,
                   timeout: Int = SyncProducerConfig.DefaultAckTimeoutMs,
                   correlationId: Int = 0,
                   clientId: String = SyncProducerConfig.DefaultClientId): ProducerRequest = {
  produceRequestWithAcks(Seq(topic), Seq(partition), message, acks, timeout, correlationId, clientId)
}

/**
 * Build a ProducerRequest carrying the same message set for the cross
 * product of the given topics and partitions.
 */
def produceRequestWithAcks(topics: Seq[String],
                           partitions: Seq[Int],
                           message: ByteBufferMessageSet,
                           acks: Int = SyncProducerConfig.DefaultRequiredAcks,
                           timeout: Int = SyncProducerConfig.DefaultAckTimeoutMs,
                           correlationId: Int = 0,
                           clientId: String = SyncProducerConfig.DefaultClientId): ProducerRequest = {
  val data = topics.flatMap(topic =>
    partitions.map(partition => (TopicAndPartition(topic, partition), message))
  )
  new ProducerRequest(correlationId, clientId, acks.toShort, timeout, Map(data:_*))
}
/**
 * Force the given brokers to become leaders of their partitions (per
 * leaderPerPartitionMap) by writing leader-and-ISR state directly into ZK.
 * Existing state, if any, has its leader replaced and its epoch/zkVersion
 * bumped. Per-partition errors are logged and swallowed.
 */
def makeLeaderForPartition(zkClient: ZkClient, topic: String,
                           leaderPerPartitionMap: scala.collection.immutable.Map[Int, Int],
                           controllerEpoch: Int) {
  leaderPerPartitionMap.foreach
  {
    leaderForPartition => {
      val partition = leaderForPartition._1
      val leader = leaderForPartition._2
      try{
        val currentLeaderAndIsrOpt = ZkUtils.getLeaderAndIsrForPartition(zkClient, topic, partition)
        var newLeaderAndIsr: LeaderAndIsr = null
        if(currentLeaderAndIsrOpt == None)
          // no existing state: new leader with itself as the sole ISR member
          newLeaderAndIsr = new LeaderAndIsr(leader, List(leader))
        else{
          // mutate the existing state in place and bump epochs so followers
          // treat the write as newer
          newLeaderAndIsr = currentLeaderAndIsrOpt.get
          newLeaderAndIsr.leader = leader
          newLeaderAndIsr.leaderEpoch += 1
          newLeaderAndIsr.zkVersion += 1
        }
        ZkUtils.updatePersistentPath(zkClient, ZkUtils.getTopicPartitionLeaderAndIsrPath(topic, partition),
          ZkUtils.leaderAndIsrZkData(newLeaderAndIsr, controllerEpoch))
      } catch {
        // NOTE(review): catching Throwable swallows even fatal errors; the
        // failure is only logged, so callers may proceed on stale ZK state.
        case oe: Throwable => error("Error while electing leader for partition [%s,%d]".format(topic, partition), oe)
      }
    }
  }
}
/**
 * If neither oldLeaderOpt nor newLeaderOpt is defined, wait until the leader of a partition is elected.
 * If oldLeaderOpt is defined, it waits until the new leader is different from the old leader.
 * If newLeaderOpt is defined, it waits until the new leader becomes the expected new leader.
 * Polls ZK every min(timeoutMs, 100) ms.
 * @return The new leader or assertion failure if timeout is reached.
 */
def waitUntilLeaderIsElectedOrChanged(zkClient: ZkClient, topic: String, partition: Int, timeoutMs: Long = 5000L,
                                      oldLeaderOpt: Option[Int] = None, newLeaderOpt: Option[Int] = None): Option[Int] = {
  require(!(oldLeaderOpt.isDefined && newLeaderOpt.isDefined), "Can't define both the old and the new leader")
  val startTime = System.currentTimeMillis()
  var isLeaderElectedOrChanged = false
  trace("Waiting for leader to be elected or changed for partition [%s,%d], older leader is %s, new leader is %s"
    .format(topic, partition, oldLeaderOpt, newLeaderOpt))
  var leader: Option[Int] = None
  while (!isLeaderElectedOrChanged && System.currentTimeMillis() < startTime + timeoutMs) {
    // check if leader is elected
    leader = ZkUtils.getLeaderForPartition(zkClient, topic, partition)
    leader match {
      case Some(l) =>
        if (newLeaderOpt.isDefined && newLeaderOpt.get == l) {
          trace("Expected new leader %d is elected for partition [%s,%d]".format(l, topic, partition))
          isLeaderElectedOrChanged = true
        } else if (oldLeaderOpt.isDefined && oldLeaderOpt.get != l) {
          trace("Leader for partition [%s,%d] is changed from %d to %d".format(topic, partition, oldLeaderOpt.get, l))
          isLeaderElectedOrChanged = true
        } else if (!oldLeaderOpt.isDefined) {
          // no constraint given: any elected leader satisfies the wait
          trace("Leader %d is elected for partition [%s,%d]".format(l, topic, partition))
          isLeaderElectedOrChanged = true
        } else {
          trace("Current leader for partition [%s,%d] is %d".format(topic, partition, l))
        }
      case None =>
        trace("Leader for partition [%s,%d] is not elected yet".format(topic, partition))
    }
    Thread.sleep(timeoutMs.min(100L))
  }
  if (!isLeaderElectedOrChanged)
    fail("Timing out after %d ms since leader is not elected or changed for partition [%s,%d]"
      .format(timeoutMs, topic, partition))
  return leader
}
/**
 * Execute the given block. If it throws an assert error, retry with an
 * exponentially growing sleep (capped at +1000 ms per step). Repeat until
 * no error is thrown or the time limit elapses, then rethrow.
 */
def retry(maxWaitMs: Long)(block: => Unit) {
  val startTime = System.currentTimeMillis()
  var backoff = 1L
  var succeeded = false
  while (!succeeded) {
    try {
      block
      succeeded = true
    } catch {
      case e: AssertionFailedError =>
        val elapsed = System.currentTimeMillis - startTime
        if (elapsed > maxWaitMs)
          throw e
        info("Attempt failed, sleeping for " + backoff + ", and then retrying.")
        Thread.sleep(backoff)
        backoff += math.min(backoff, 1000)
    }
  }
}
/**
 * Wait until the given condition is true or throw an exception if the given wait time elapses.
 * The condition is polled every min(waitTime, 100) ms.
 * @return always true on success (failure raises via fail())
 */
def waitUntilTrue(condition: () => Boolean, msg: String, waitTime: Long = 5000L): Boolean = {
  val startTime = System.currentTimeMillis()
  while (true) {
    if (condition())
      return true
    if (System.currentTimeMillis() > startTime + waitTime)
      fail(msg)
    Thread.sleep(waitTime.min(100L))
  }
  // should never hit here
  throw new RuntimeException("unexpected error")
}
/**
 * True iff this server currently holds the local leader replica for the
 * given topic partition.
 */
def isLeaderLocalOnBroker(topic: String, partitionId: Int, server: KafkaServer): Boolean =
  server.replicaManager
    .getPartition(topic, partitionId)
    .exists(partition => partition.leaderReplicaIfLocal.isDefined)
/**
 * Serialize a request into a ByteBuffer prefixed by its two-byte request id,
 * rewound so it is ready for reading.
 * NOTE(review): assumes request.requestId is defined — .get throws otherwise.
 */
def createRequestByteBuffer(request: RequestOrResponse): ByteBuffer = {
  val byteBuffer = ByteBuffer.allocate(request.sizeInBytes + 2)
  byteBuffer.putShort(request.requestId.get)
  request.writeTo(byteBuffer)
  byteBuffer.rewind()
  byteBuffer
}
/**
 * Wait until a valid leader is propagated to the metadata cache in each broker.
 * It assumes that the leader propagated to each broker is the same.
 * @param servers The list of servers that the metadata should reach to
 * @param topic The topic name
 * @param partition The partition Id
 * @param timeout The amount of time waiting on this condition before assert to fail
 * @return The leader of the partition.
 */
def waitUntilMetadataIsPropagated(servers: Seq[KafkaServer], topic: String, partition: Int, timeout: Long = 5000L): Int = {
  var leader: Int = -1
  TestUtils.waitUntilTrue(() =>
    // every server must have partition state carrying a valid leader broker id
    servers.foldLeft(true) {
      (result, server) =>
        val partitionStateOpt = server.apis.metadataCache.getPartitionInfo(topic, partition)
        partitionStateOpt match {
          case None => false
          case Some(partitionState) =>
            // captured so the last observed leader can be returned below
            leader = partitionState.leaderIsrAndControllerEpoch.leaderAndIsr.leader
            result && Request.isValidBrokerId(leader)
        }
    },
    "Partition [%s,%d] metadata not propagated after %d ms".format(topic, partition, timeout),
    waitTime = timeout)
  leader
}
/**
 * Overwrite `size` random bytes starting at `position` in the given file.
 *
 * Fix: the file handle is now closed even if a seek/write fails.
 * (random.nextInt(255) yields 0..254; kept as-is to preserve behavior.)
 */
def writeNonsenseToFile(fileName: File, position: Long, size: Int) {
  val file = new RandomAccessFile(fileName, "rw")
  try {
    file.seek(position)
    for(i <- 0 until size)
      file.writeByte(random.nextInt(255))
  } finally {
    file.close()
  }
}

/**
 * Append `size` random bytes to the end of the given file.
 * Fix: the stream is now closed even if a write fails.
 */
def appendNonsenseToFile(fileName: File, size: Int) {
  val file = new FileOutputStream(fileName, true)
  try {
    for(i <- 0 until size)
      file.write(random.nextInt(255))
  } finally {
    file.close()
  }
}
/**
 * Assert that the in-sync replica set of the partition is a subset of the
 * given assigned replicas (i.e. no "phantom" ISR members are left behind
 * after a reassignment).
 */
def checkForPhantomInSyncReplicas(zkClient: ZkClient, topic: String, partitionToBeReassigned: Int, assignedReplicas: Seq[Int]) {
  val inSyncReplicas = ZkUtils.getInSyncReplicasForPartition(zkClient, topic, partitionToBeReassigned)
  // in sync replicas should not have any replica that is not in the new assigned replicas
  val phantomInSyncReplicas = inSyncReplicas.toSet -- assignedReplicas.toSet
  assertTrue("All in sync replicas %s must be in the assigned replica list %s".format(inSyncReplicas, assignedReplicas),
    phantomInSyncReplicas.size == 0)
}

/**
 * Wait until the reassigned partition is fully replicated: the ISR size
 * matches the assignment, a leader exists, and the leader broker reports no
 * under-replicated partitions. Each stage fails the test on timeout.
 */
def ensureNoUnderReplicatedPartitions(zkClient: ZkClient, topic: String, partitionToBeReassigned: Int, assignedReplicas: Seq[Int],
                                      servers: Seq[KafkaServer]) {
  TestUtils.waitUntilTrue(() => {
      val inSyncReplicas = ZkUtils.getInSyncReplicasForPartition(zkClient, topic, partitionToBeReassigned)
      inSyncReplicas.size == assignedReplicas.size
    },
    "Reassigned partition [%s,%d] is under replicated".format(topic, partitionToBeReassigned))
  var leader: Option[Int] = None
  TestUtils.waitUntilTrue(() => {
      leader = ZkUtils.getLeaderForPartition(zkClient, topic, partitionToBeReassigned)
      leader.isDefined
    },
    "Reassigned partition [%s,%d] is unavailable".format(topic, partitionToBeReassigned))
  TestUtils.waitUntilTrue(() => {
      // leader.get is safe here: the previous wait guarantees it is defined
      val leaderBroker = servers.filter(s => s.config.brokerId == leader.get).head
      leaderBroker.replicaManager.underReplicatedPartitionCount() == 0
    },
    "Reassigned partition [%s,%d] is under-replicated as reported by the leader %d".format(topic, partitionToBeReassigned, leader.get))
}

// True iff an admin partition reassignment is currently in flight (its ZK path exists).
def checkIfReassignPartitionPathExists(zkClient: ZkClient): Boolean = {
  ZkUtils.pathExists(zkClient, ZkUtils.ReassignPartitionsPath)
}
}
// Shared ZK connect string for tests; a free port is chosen once at class-load time.
// NOTE(review): choosePort() releases the port again, so another process could
// grab it before ZooKeeper binds it — a known test-infrastructure race.
object TestZKUtils {
  val zookeeperConnect = "127.0.0.1:" + TestUtils.choosePort()
}
// Encodes an Int message as its decimal string's bytes (platform default charset).
class IntEncoder(props: VerifiableProperties = null) extends Encoder[Int] {
  override def toBytes(n: Int) = n.toString.getBytes
}

// Routes a String message to a partition by its length modulo partition count.
class StaticPartitioner(props: VerifiableProperties = null) extends Partitioner{
  def partition(data: Any, numPartitions: Int): Int = {
    (data.asInstanceOf[String].length % numPartitions)
  }
}

// Routes by hashCode modulo partition count.
// NOTE(review): hashCode can be negative, which would yield a negative result here.
class HashPartitioner(props: VerifiableProperties = null) extends Partitioner {
  def partition(data: Any, numPartitions: Int): Int = {
    (data.hashCode % numPartitions)
  }
}

// Interprets the message itself (an Int) as the target partition number.
class FixedValuePartitioner(props: VerifiableProperties = null) extends Partitioner {
  def partition(data: Any, numPartitions: Int): Int = data.asInstanceOf[Int]
}
| stealthly/kafka | core/src/test/scala/unit/kafka/utils/TestUtils.scala | Scala | apache-2.0 | 25,957 |
package thangiee.riotapi.matchlist
/**
 * One page of a summoner's match history.
 *
 * NOTE(review): field semantics follow the Riot "matchlist" endpoint naming
 * (start/end indices of this page plus the total game count) — confirm
 * against the current API reference.
 *
 * @param endIndex   index one past the last entry in this page
 * @param matches    the match references contained in this page
 * @param startIndex index of the first entry in this page
 * @param totalGames total number of games on record
 */
case class MatchList(
  endIndex: Int,
  matches: List[MatchReference],
  startIndex: Int,
  totalGames: Int
)
package org.jetbrains.plugins.scala
package lang
package psi
package stubs
package elements
package signatures
import com.intellij.lang.ASTNode
import com.intellij.psi.stubs.IndexSink
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.ScClassParameter
import org.jetbrains.plugins.scala.lang.psi.impl.statements.params.ScClassParameterImpl
import org.jetbrains.plugins.scala.lang.psi.stubs.index.ScalaIndexKeys.CLASS_PARAMETER_NAME_KEY
/**
* User: Alexander Podkhalyuzin
* Date: 19.10.2008
*/
/**
 * Stub element type for Scala class parameters. Creates the PSI element
 * either from a stub or from an AST node, and indexes the parameter name so
 * it can be resolved via the class-parameter name index.
 */
class ScClassParameterElementType extends ScParamElementType[ScClassParameter]("class parameter") {

  override def createPsi(stub: ScParameterStub): ScClassParameter =
    new ScClassParameterImpl(stub)

  override def createElement(node: ASTNode): ScClassParameter =
    new ScClassParameterImpl(node)

  override def indexStub(stub: ScParameterStub, sink: IndexSink): Unit =
    this.indexStub(Array(stub.getName), sink, CLASS_PARAMETER_NAME_KEY)
}
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package play.filters.gzip
import javax.inject.Inject
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import akka.util.ByteString
import play.api.Application
import play.api.http.{ HttpChunk, HttpEntity, HttpFilters, HttpProtocol }
import play.api.inject._
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.routing.{ Router, SimpleRouterImpl }
import play.api.test._
import play.api.mvc.{ Cookie, DefaultActionBuilder, Result }
import play.api.mvc.Results._
import java.util.zip.GZIPInputStream
import java.io.ByteArrayInputStream
import org.apache.commons.io.IOUtils
import scala.concurrent.Future
import scala.util.Random
import org.specs2.matcher.DataTables
object GzipFilterSpec {
  // Routes every incoming request to an action that returns the injected canned Result.
  class ResultRouter @Inject() (action: DefaultActionBuilder, result: Result) extends SimpleRouterImpl({ case _ => action(result) })
  // Filter chain containing only the GzipFilter under test.
  class Filters @Inject() (gzipFilter: GzipFilter) extends HttpFilters {
    def filters = Seq(gzipFilter)
  }
}
class GzipFilterSpec extends PlaySpecification with DataTables {
sequential
import GzipFilterSpec._
"The GzipFilter" should {
"gzip responses" in withApplication(Ok("hello")) { implicit app =>
checkGzippedBody(makeGzipRequest(app), "hello")(app.materializer)
}
"""gzip a response if (and only if) it is accepted and preferred by the request.
|Although not explicitly mentioned in RFC 2616 sect. 14.3, the default qvalue
|is assumed to be 1 for all mentioned codings. If no "*" is present, unmentioned
|codings are assigned a qvalue of 0, except the identity coding which gets q=0.001,
|which is the lowest possible acceptable qvalue.
|This seems to be the most consistent behaviour with respect to the other "accept"
|header fields described in sect 14.1-5.""".stripMargin in withApplication(Ok("meep")) { implicit app =>
val (plain, gzipped) = (None, Some("gzip"))
"Accept-Encoding of request" || "Response" |
//------------------------------------++------------+
"gzip" !! gzipped |
"compress,gzip" !! gzipped |
"compress, gzip" !! gzipped |
"gzip,compress" !! gzipped |
"deflate, gzip,compress" !! gzipped |
"gzip, compress" !! gzipped |
"identity, gzip, compress" !! gzipped |
"GZip" !! gzipped |
"*" !! gzipped |
"*;q=0" !! plain |
"*; q=0" !! plain |
"*;q=0.000" !! plain |
"gzip;q=0" !! plain |
"gzip; q=0.00" !! plain |
"*;q=0, gZIP" !! gzipped |
"compress;q=0.1, *;q=0, gzip" !! gzipped |
"compress;q=0.1, *;q=0, gzip;q=0.005" !! gzipped |
"compress, gzip;q=0.001" !! gzipped |
"compress, gzip;q=0.002" !! gzipped |
"compress;q=1, *;q=0, gzip;q=0.000" !! plain |
"compress;q=1, *;q=0" !! plain |
"identity" !! plain |
"gzip;q=0.5, identity" !! plain |
"gzip;q=0.5, identity;q=1" !! plain |
"gzip;q=0.6, identity;q=0.5" !! gzipped |
"*;q=0.7, gzip;q=0.6, identity;q=0.4" !! gzipped |
"" !! plain |> {
(codings, expectedEncoding) =>
header(CONTENT_ENCODING, requestAccepting(app, codings)) must be equalTo (expectedEncoding)
}
}
"not gzip empty responses" in withApplication(Ok) { implicit app =>
checkNotGzipped(makeGzipRequest(app), "")(app.materializer)
}
"not gzip responses when not requested" in withApplication(Ok("hello")) { implicit app =>
checkNotGzipped(route(app, FakeRequest()).get, "hello")(app.materializer)
}
"not gzip HEAD requests" in withApplication(Ok) { implicit app =>
checkNotGzipped(route(app, FakeRequest("HEAD", "/").withHeaders(ACCEPT_ENCODING -> "gzip")).get, "")(app.materializer)
}
"not gzip no content responses" in withApplication(NoContent) { implicit app =>
checkNotGzipped(makeGzipRequest(app), "")(app.materializer)
}
"not gzip not modified responses" in withApplication(NotModified) { implicit app =>
checkNotGzipped(makeGzipRequest(app), "")(app.materializer)
}
"gzip content type which is on the whiteList" in withApplication(Ok("hello").as("text/css"), whiteList = contentTypes) { implicit app =>
checkGzippedBody(makeGzipRequest(app), "hello")(app.materializer)
}
"gzip content type which is on the whiteList ignoring case" in withApplication(Ok("hello").as("TeXt/CsS"), whiteList = List("TExT/HtMl", "tExT/cSs")) { implicit app =>
checkGzippedBody(makeGzipRequest(app), "hello")(app.materializer)
}
"gzip uppercase content type which is on the whiteList" in withApplication(Ok("hello").as("TEXT/CSS"), whiteList = contentTypes) { implicit app =>
checkGzippedBody(makeGzipRequest(app), "hello")(app.materializer)
}
"gzip content type with charset which is on the whiteList" in withApplication(Ok("hello").as("text/css; charset=utf-8"), whiteList = contentTypes) { implicit app =>
checkGzippedBody(makeGzipRequest(app), "hello")(app.materializer)
}
"don't gzip content type which is not on the whiteList" in withApplication(Ok("hello").as("text/plain"), whiteList = contentTypes) { implicit app =>
checkNotGzipped(makeGzipRequest(app), "hello")(app.materializer)
}
"don't gzip content type with charset which is not on the whiteList" in withApplication(Ok("hello").as("text/plain; charset=utf-8"), whiteList = contentTypes) { implicit app =>
checkNotGzipped(makeGzipRequest(app), "hello")(app.materializer)
}
"don't gzip content type which is on the blackList" in withApplication(Ok("hello").as("text/css"), blackList = contentTypes) { implicit app =>
checkNotGzipped(makeGzipRequest(app), "hello")(app.materializer)
}
"don't gzip content type with charset which is on the blackList" in withApplication(Ok("hello").as("text/css; charset=utf-8"), blackList = contentTypes) { implicit app =>
checkNotGzipped(makeGzipRequest(app), "hello")(app.materializer)
}
"gzip content type which is not on the blackList" in withApplication(Ok("hello").as("text/plain"), blackList = contentTypes) { implicit app =>
checkGzippedBody(makeGzipRequest(app), "hello")(app.materializer)
}
"gzip content type with charset which is not on the blackList" in withApplication(Ok("hello").as("text/plain; charset=utf-8"), blackList = contentTypes) { implicit app =>
checkGzippedBody(makeGzipRequest(app), "hello")(app.materializer)
}
"ignore blackList if there is a whiteList" in withApplication(Ok("hello").as("text/css; charset=utf-8"), whiteList = contentTypes, blackList = contentTypes) { implicit app =>
checkGzippedBody(makeGzipRequest(app), "hello")(app.materializer)
}
"gzip 'text/html' content type when using media range 'text/*' in the whiteList" in withApplication(Ok("hello").as("text/css"), whiteList = List("text/*")) { implicit app =>
checkGzippedBody(makeGzipRequest(app), "hello")(app.materializer)
}
"don't gzip 'application/javascript' content type when using media range 'text/*' in the whiteList" in withApplication(Ok("hello").as("application/javascript"), whiteList = List("text/*")) { implicit app =>
checkNotGzipped(makeGzipRequest(app), "hello")(app.materializer)
}
"gzip chunked responses" in withApplication(Ok.chunked(Source(List("foo", "bar")))) { implicit app =>
val result = makeGzipRequest(app)
checkGzippedBody(result, "foobar")(app.materializer)
await(result).body must beAnInstanceOf[HttpEntity.Chunked]
}
val body = Random.nextString(1000)
"a streamed body" should {
val entity = HttpEntity.Streamed(Source.single(ByteString(body)), Some(1000), None)
"not buffer more than the configured threshold" in withApplication(
Ok.sendEntity(entity), chunkedThreshold = 512) { implicit app =>
val result = makeGzipRequest(app)
checkGzippedBody(result, body)(app.materializer)
await(result).body must beAnInstanceOf[HttpEntity.Chunked]
}
"preserve original headers, cookies, flash and session values" in {
"when buffer is less than configured threshold" in withApplication(
Ok.sendEntity(entity)
.withHeaders(SERVER -> "Play")
.withCookies(Cookie("cookieName", "cookieValue"))
.flashing("flashName" -> "flashValue")
.withSession("sessionName" -> "sessionValue"),
chunkedThreshold = 2048 // body size is 1000
) { implicit app =>
val result = makeGzipRequest(app)
checkGzipped(result)
header(SERVER, result) must beSome("Play")
cookies(result).get("cookieName") must beSome.which(cookie => cookie.value == "cookieValue")
flash(result).get("flashName") must beSome.which(value => value == "flashValue")
session(result).get("sessionName") must beSome.which(value => value == "sessionValue")
}
"when buffer more than configured threshold" in withApplication(
Ok.sendEntity(entity)
.withHeaders(SERVER -> "Play")
.withCookies(Cookie("cookieName", "cookieValue"))
.flashing("flashName" -> "flashValue")
.withSession("sessionName" -> "sessionValue"),
chunkedThreshold = 512
) { implicit app =>
val result = makeGzipRequest(app)
checkGzippedBody(result, body)(app.materializer)
header(SERVER, result) must beSome("Play")
cookies(result).get("cookieName") must beSome.which(cookie => cookie.value == "cookieValue")
flash(result).get("flashName") must beSome.which(value => value == "flashValue")
session(result).get("sessionName") must beSome.which(value => value == "sessionValue")
}
}
// HTTP/1.0 has no chunked transfer encoding, so even past the threshold the
// filter must produce a streamed entity with no content length rather than
// a chunked one.
"not fallback to a chunked body when HTTP 1.0 is being used and the chunked threshold is exceeded" in withApplication(
  Ok.sendEntity(entity), chunkedThreshold = 512) { implicit app =>
  val result = route(app, gzipRequest.withVersion(HttpProtocol.HTTP_1_0)).get
  checkGzippedBody(result, body)(app.materializer)
  val entity = await(result).body
  entity must beLike {
    // Make sure it's a streamed entity with no content length
    case HttpEntity.Streamed(_, None, None) => ok
  }
}
}
"a chunked body" should {
  // Minimal chunked stream: one data chunk followed by the terminating LastChunk.
  val chunkedBody = Source.fromIterator(() =>
    Seq[HttpChunk](HttpChunk.Chunk(ByteString("First chunk")), HttpChunk.LastChunk(FakeHeaders())).iterator
  )
  val entity = HttpEntity.Chunked(chunkedBody, Some("text/plain"))
  "preserve original headers, cookie, flash and session values" in withApplication(
    Ok.sendEntity(entity)
      .withHeaders(SERVER -> "Play")
      .withCookies(Cookie("cookieName", "cookieValue"))
      .flashing("flashName" -> "flashValue")
      .withSession("sessionName" -> "sessionValue")
  ) { implicit app =>
    val result = makeGzipRequest(app)
    checkGzipped(result)
    header(SERVER, result) must beSome("Play")
    cookies(result).get("cookieName") must beSome.which(cookie => cookie.value == "cookieValue")
    flash(result).get("flashName") must beSome.which(value => value == "flashValue")
    session(result).get("sessionName") must beSome.which(value => value == "sessionValue")
  }
}
"a strict body" should {
  // A strict body is gzipped in place and stays strict regardless of the threshold.
  "zip a strict body even if it exceeds the threshold" in withApplication(Ok(body), 512) { implicit app =>
    val result = makeGzipRequest(app)
    checkGzippedBody(result, body)(app.materializer)
    await(result).body must beAnInstanceOf[HttpEntity.Strict]
  }
  "preserve original headers, cookie, flash and session values" in withApplication(
    Ok("hello")
      .withHeaders(SERVER -> "Play")
      .withCookies(Cookie("cookieName", "cookieValue"))
      .flashing("flashName" -> "flashValue")
      .withSession("sessionName" -> "sessionValue")
  ) { implicit app =>
    val result = makeGzipRequest(app)
    checkGzipped(result)
    header(SERVER, result) must beSome("Play")
    cookies(result).get("cookieName") must beSome.which(cookie => cookie.value == "cookieValue")
    flash(result).get("flashName") must beSome.which(value => value == "flashValue")
    session(result).get("sessionName") must beSome.which(value => value == "sessionValue")
  }
  // The filter must append to an existing Vary header, not replace it.
  "preserve original Vary header values" in withApplication(Ok("hello").withHeaders(VARY -> "original")) { implicit app =>
    val result = makeGzipRequest(app)
    checkGzipped(result)
    header(VARY, result) must beSome.which(header => header contains "original,")
  }
  // Accept-Encoding must appear exactly once in Vary, even if it was already
  // present in a different letter case.
  "preserve original Vary header values and not duplicate case-insensitive ACCEPT-ENCODING" in withApplication(Ok("hello").withHeaders(VARY -> "original,ACCEPT-encoding")) { implicit app =>
    val result = makeGzipRequest(app)
    checkGzipped(result)
    header(VARY, result) must beSome.which(header => header.split(",").count(_.toLowerCase(java.util.Locale.ENGLISH) == ACCEPT_ENCODING.toLowerCase(java.util.Locale.ENGLISH)) == 1)
  }
}
}
/**
 * Runs `block` against a Guice application that always answers with `result`,
 * with the gzip filter installed and configured from the given parameters.
 * The buffer size is pinned at 512 so tests can control chunking behaviour
 * purely through `chunkedThreshold`.
 */
def withApplication[T](result: Result, chunkedThreshold: Int = 1024, whiteList: List[String] = List.empty, blackList: List[String] = List.empty)(block: Application => T): T = {
  val gzipSettings = Seq(
    "play.filters.gzip.chunkedThreshold" -> chunkedThreshold,
    "play.filters.gzip.bufferSize" -> 512,
    "play.filters.gzip.contentType.whiteList" -> whiteList,
    "play.filters.gzip.contentType.blackList" -> blackList
  )
  val app = new GuiceApplicationBuilder()
    .configure(gzipSettings: _*)
    .overrides(
      bind[Result].to(result),
      bind[Router].to[ResultRouter],
      bind[HttpFilters].to[Filters]
    )
    .build
  running(app)(block(app))
}
// Content types exercised by the white/black-list tests.
val contentTypes = List("text/html", "text/css", "application/javascript")
// A request advertising gzip support.
def gzipRequest = FakeRequest().withHeaders(ACCEPT_ENCODING -> "gzip")
// Routes a gzip-accepting request through the application under test.
def makeGzipRequest(app: Application) = route(app, gzipRequest).get
// Routes a request carrying an arbitrary Accept-Encoding header value.
def requestAccepting(app: Application, codings: String) = route(app, FakeRequest().withHeaders(ACCEPT_ENCODING -> codings)).get
/**
 * Decompresses a gzipped byte string and decodes the result as UTF-8.
 *
 * The stream is closed in a `finally` block so the underlying resources are
 * released even when decompression fails; the original implementation leaked
 * the stream if `IOUtils.toString` threw.
 */
def gunzip(bytes: ByteString): String = {
  val is = new GZIPInputStream(new ByteArrayInputStream(bytes.toArray))
  try IOUtils.toString(is, "UTF-8")
  finally is.close()
}
/** Asserts that the response declares gzip as its content encoding. */
def checkGzipped(result: Future[Result]) = {
  header(CONTENT_ENCODING, result) aka "Content encoding header" must beSome("gzip")
}
/**
 * Asserts the response is gzip-encoded, that any declared content length
 * matches the actual number of body bytes, and that gunzipping the body
 * yields `body`.
 */
def checkGzippedBody(result: Future[Result], body: String)(implicit mat: Materializer) = {
  checkGzipped(result)
  val resultBody = contentAsBytes(result)
  // Entities without a declared length (streamed/chunked) skip the length check.
  await(result).body.contentLength.foreach { cl =>
    resultBody.length must_== cl
  }
  gunzip(resultBody) must_== body
}
/** Asserts the response was left uncompressed and its body equals `body`. */
def checkNotGzipped(result: Future[Result], body: String)(implicit mat: Materializer) = {
  header(CONTENT_ENCODING, result) must beNone
  contentAsString(result) must_== body
}
}
| Shruti9520/playframework | framework/src/play-filters-helpers/src/test/scala/play/filters/gzip/GzipFilterSpec.scala | Scala | apache-2.0 | 15,275 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server.metadata
import java.util
import java.util.Properties
import java.util.concurrent.ConcurrentHashMap
import java.util.function.BiFunction
import org.apache.kafka.common.config.ConfigResource
import org.apache.kafka.common.config.ConfigResource.Type
import scala.jdk.CollectionConverters._
/**
 * A ConfigRepository backed by an in-memory map.
 *
 * Entries are immutable snapshots: every update replaces the stored
 * HashMap for a resource wholesale, so readers never observe a map
 * being mutated.
 */
class CachedConfigRepository extends ConfigRepository {

  private val configMap = new ConcurrentHashMap[ConfigResource, util.HashMap[String, String]]

  /**
   * Set the topic config for the given topic name and the given key to the given value.
   *
   * @param topicName the name of the topic for which the config will be set
   * @param key the key identifying the topic config to set
   * @param value the value to set for the topic config with null implying a removal
   */
  def setTopicConfig(topicName: String, key: String, value: String): Unit =
    setConfig(new ConfigResource(Type.TOPIC, topicName), key, value)

  /**
   * Set the broker config for the given broker ID and the given key to the given value.
   *
   * @param brokerId the ID of the broker for which the config will be set
   * @param key the key identifying the broker config to set
   * @param value the value to set for the broker config with null implying a removal
   */
  def setBrokerConfig(brokerId: Int, key: String, value: String): Unit =
    setConfig(new ConfigResource(Type.BROKER, brokerId.toString), key, value)

  /**
   * Set the config for the given resource and the given key to the given value.
   *
   * @param configResource the resource for which the config will be set
   * @param key the key identifying the resource config to set
   * @param value the value to set for the resource config with null implying a removal
   */
  def setConfig(configResource: ConfigResource, key: String, value: String): Unit = {
    val remap: BiFunction[ConfigResource, util.HashMap[String, String], util.HashMap[String, String]] =
      (_, current) => {
        if (value == null) {
          // Removal: drop the key from a copy; map to null (no entry) when
          // nothing remains or nothing was stored to begin with.
          if (current == null) {
            null
          } else {
            val updated = new util.HashMap[String, String](current)
            updated.remove(key)
            if (updated.isEmpty) null else updated
          }
        } else {
          // Insert/overwrite: copy the existing entries (if any) into a fresh
          // map and add the new key, leaving the previous snapshot untouched.
          val updated =
            if (current == null) new util.HashMap[String, String](1)
            else new util.HashMap[String, String](current)
          updated.put(key, value)
          updated
        }
      }
    configMap.compute(configResource, remap)
  }

  override def config(configResource: ConfigResource): Properties = {
    val properties = new Properties()
    val current = configMap.get(configResource)
    if (current != null) {
      current.entrySet.iterator.asScala.foreach { entry =>
        properties.put(entry.getKey, entry.getValue)
      }
    }
    properties
  }

  /** Remove every config stored for the given resource. */
  def remove(configResource: ConfigResource): Unit = {
    configMap.remove(configResource)
  }
}
| Chasego/kafka | core/src/main/scala/kafka/server/metadata/CachedConfigRepository.scala | Scala | apache-2.0 | 4,180 |
// Class whose deprecated auxiliary constructor delegates to the primary
// constructor with a fixed argument.
class App(arg: String) {
  @deprecated("..") def this() = this("foo")
}
| yusuke2255/dotty | tests/pos/t1147.scala | Scala | bsd-3-clause | 83 |
package io.cumulus.controllers
import cats.data.EitherT
import cats.implicits._
import com.github.ghik.silencer.silent
import io.cumulus.Settings
import io.cumulus.utils.Base16
import io.cumulus.validation.AppError
import io.cumulus.models.fs.Path
import io.cumulus.models.user.session.SharingSession
import io.cumulus.services.{SessionService, SharingService, StorageService}
import play.api.mvc.{Action, AnyContent, ControllerComponents}
import scala.concurrent.{ExecutionContext, Future}
/**
 * Sharing visitor controller. This controller handles all the unauthenticated
 * operations on shared elements.
 */
class SharingPublicController(
  cc: ControllerComponents,
  sharingService: SharingService,
  storageService: StorageService,
  val sessionService: SessionService
)(implicit
  val ec: ExecutionContext,
  val settings: Settings
) extends Api(cc) with DownloadSupport with StreamSupport {

  /**
   * Gets a sharing for an unauthenticated user.
   * @param path The paths within the sharing, '/' for the root element.
   * @param reference The reference of the sharing.
   * @param key The unique cipher key of the sharing.
   */
  def get(path: Path, reference: String, key: String): Action[AnyContent] =
    Action.async { implicit request =>
      sharingService
        .findSharedNode(reference, path, key)
        .map(_.map {
          // Only the fs node is returned; the sharing and user are discarded.
          case (_, _, node) =>
            node
        })
        .toResult
    }

  /**
   * Downloads the root element of the sharing.
   * @param reference The reference of the sharing.
   * @param name The name of the sharing, only used for display.
   * @param key The unique cipher key of the sharing.
   * @param forceDownload True to force download, otherwise content will be opened directly in the browser.
   */
  // @silent suppresses the unused-parameter warning for `name`, which exists
  // only so the URL is human-readable.
  @silent
  def downloadRoot(reference: String, name: String, key: String, forceDownload: Option[Boolean]): Action[AnyContent] =
    download("/", reference, key, forceDownload)

  /**
   * Downloads a shared file for an unauthenticated user.
   * @param path The paths within the sharing, '/' for the root element.
   * @param reference The reference of the sharing.
   * @param key The unique cipher key of the sharing.
   * @param forceDownload True to force download, otherwise content will be opened directly in the browser.
   */
  def download(path: Path, reference: String, key: String, forceDownload: Option[Boolean]): Action[AnyContent] =
    Action.async { implicit request =>
      for {
        // Get the sharing, the user and the file
        res <- EitherT(sharingService.findSharedFile(reference, path, key))
        (sharing, user, file) = res
        // Decode the key & generate a session
        decodedKey <- EitherT.fromEither[Future](Base16.decode(key).toRight(AppError.validation("validation.sharing.invalid-key")))
        session = SharingSession(user, sharing, decodedKey)
        // Get the file's content, honouring any HTTP Range header
        maybeRange <- EitherT.fromEither[Future](headerRange(request, file))
        content <- EitherT.fromEither[Future](storageService.downloadFile(file, maybeRange)(session))
        // Create the response: stream for partial content, download otherwise
        result <- EitherT.pure[Future, AppError](
          maybeRange match {
            case Some(range) =>
              streamFile(file, content, range)
            case None =>
              downloadFile(file, content, forceDownload.getOrElse(false))
          }
        )
      } yield result
    }

}
| Cumulus-Cloud/cumulus | server/cumulus-server/src/main/scala/io/cumulus/controllers/SharingPublicController.scala | Scala | mit | 3,340 |
package filodb.coordinator.queryplanner
import scala.concurrent.duration.FiniteDuration
import kamon.Kamon
import monix.eval.Task
import monix.execution.Scheduler
import filodb.core.query.QueryContext
import filodb.query.{LogicalPlan, QueryResponse}
import filodb.query.exec.ExecPlan
/**
 * Abstraction for Query Planning. QueryPlanners can be composed using decorator pattern to add capabilities.
 */
trait QueryPlanner {

  /**
   * Converts a logical plan to execution plan.
   *
   * @param logicalPlan Logical plan after converting PromQL -> AST -> LogicalPlan
   * @param qContext holder for additional query parameters
   * @return materialized Execution Plan which can be dispatched
   */
  def materialize(logicalPlan: LogicalPlan, qContext: QueryContext): ExecPlan

  /**
   * Trigger orchestration of the ExecPlan. It sends the ExecPlan to the destination where it will be executed.
   *
   * @param execPlan the materialized plan to dispatch
   * @param parentSpan tracing span to propagate into the dispatch
   * @return an async task yielding the query response
   */
  def dispatchExecPlan(execPlan: ExecPlan,
                       parentSpan: kamon.trace.Span)
                      (implicit sched: Scheduler, timeout: FiniteDuration): Task[QueryResponse] = {
    // Please note that the following needs to be wrapped inside `runWithSpan` so that the context will be propagated
    // across threads. Note that task/observable will not run on the thread where span is present since
    // kamon uses thread-locals.
    // The second argument (false) leaves finishing the span to the caller.
    Kamon.runWithSpan(parentSpan, false) {
      execPlan.dispatcher.dispatch(execPlan)
    }
  }
}
| tuplejump/FiloDB | coordinator/src/main/scala/filodb.coordinator/queryplanner/QueryPlanner.scala | Scala | apache-2.0 | 1,465 |
package edu.depauw.scales.music;
import javax.sound.sampled.{Clip => JClip, AudioSystem, DataLine}
import java.io.File
/*
 * Constructor takes the absolute pathname of the audio file.
 * If count is 0, loop clip as needed for duration;
 * otherwise, loop clip count times (possibly beyond duration)
 */
case class Clip(count : Int, f : String) extends Step {
  val stream = AudioSystem.getAudioInputStream(new File(f))
  val numBytes = stream.getFrameLength.toInt * stream.getFormat.getFrameSize
  val format = stream.getFormat
  val info = new DataLine.Info(classOf[JClip], format)
  val buffer = new Array[Byte](numBytes)

  // InputStream.read may return fewer bytes than requested, so loop until the
  // buffer is full or the stream ends. The original single read() call could
  // leave the tail of the buffer unfilled for large files. Also removed the
  // leftover debug printlns that dumped buffer bytes and the audio format.
  locally {
    var offset = 0
    var n = 0
    while (offset < numBytes && n >= 0) {
      n = stream.read(buffer, offset, numBytes - offset)
      if (n > 0) offset += n
    }
    stream.close()
  }

  val beats = 1.0

  /**
   * Plays the clip on a fresh line. Looping behaviour follows the class
   * contract above; a Done message stops and releases the line.
   */
  def act(actor : StepActor) {
    val clip = AudioSystem.getLine(info).asInstanceOf[JClip]
    clip.open(format, buffer, 0, numBytes)
    if (count == 0) {
      clip.loop(JClip.LOOP_CONTINUOUSLY)
      actor.director ! Request(actor, actor.startTime + actor.duration, Done)
    } else {
      // Clip.loop(n) plays the clip n + 1 times in total.
      clip.loop(count - 1)
      actor.director ! Request(actor, actor.startTime + clip.getMicrosecondLength / 1000, Done)
    }
    actor.director ! Request(actor.parent, actor.startTime + actor.duration, Done)
    actor.react {
      case Done => {
        clip.stop()
        clip.close()
      }
    }
  }
}
| bhoward/EscalatorOld | Scales2/src/edu/depauw/scales/music/SampledClip.scala | Scala | apache-2.0 | 1,411 |
package org.aja.tej.examples.sparksql.dataframe
import org.aja.tej.utils.TejUtils
import org.apache.spark.sql._
import org.apache.spark.sql.types._
/**
* Created by mageswaran on 6/2/16.
*/
// Demonstrates DataFrame.explode on a nested array column.
// NOTE(review): saveAsParquetFile/parquetFile/explode are from the Spark 1.x
// API surface — presumably deprecated on newer Spark; confirm before reuse.
object ExplodeExample extends App {

  val sc = TejUtils.getSparkContext(this.getClass.getSimpleName)
  val sqlContext = new SQLContext(sc)

  import sqlContext.implicits._

  // Nested schema: a department together with its employees.
  case class Employee(firstName: String, lastName: String, email: String)
  case class Department(id: String, name: String)
  case class DepartmentWithEmployees(department: Department, employees: Seq[Employee])

  val employee1 = new Employee("michael", "armbrust", "abc123@prodigy.net")
  val employee2 = new Employee("chris", "fregly", "def456@compuserve.net")

  val department1 = new Department("123456", "Engineering")
  val department2 = new Department("123456", "Psychology")

  val departmentWithEmployees1 = new DepartmentWithEmployees(department1, Seq(employee1, employee2))
  val departmentWithEmployees2 = new DepartmentWithEmployees(department2, Seq(employee1, employee2))

  // Round-trip through parquet so the DataFrame is read back with a nested schema.
  val departmentWithEmployeesRDD = sc.parallelize(Seq(departmentWithEmployees1, departmentWithEmployees2))
  departmentWithEmployeesRDD.toDF().saveAsParquetFile("dwe.parquet")

  val departmentWithEmployeesDF = sqlContext.parquetFile("dwe.parquet")

  departmentWithEmployeesDF.printSchema()
  //  root
  //  |-- department: struct (nullable = true)
  //  |    |-- id: string (nullable = true)
  //  |    |-- name: string (nullable = true)
  //  |-- employees: array (nullable = true)
  //  |    |-- element: struct (containsNull = true)
  //  |    |    |-- firstName: string (nullable = true)
  //  |    |    |-- lastName: string (nullable = true)
  //  |    |    |-- email: string (nullable = true)

  departmentWithEmployeesDF.show()
  //  +--------------------+--------------------+
  //  |          department|           employees|
  //  +--------------------+--------------------+
  //  | [123456,Psychology]|[[michael,armbrus...|
  //  |[123456,Engineering]|[[michael,armbrus...|
  //  +--------------------+--------------------+

  // This would be replaced by explodeArray()
  // Each array element becomes its own row, with the employee fields appended
  // as new top-level columns.
  val explodedDepartmentWithEmployeesDF = departmentWithEmployeesDF.explode(departmentWithEmployeesDF("employees")) {
    case Row(employee: Seq[Row]) => employee.map(employee =>
      Employee(employee(0).asInstanceOf[String], employee(1).asInstanceOf[String], employee(2).asInstanceOf[String])
    )
  }
  explodedDepartmentWithEmployeesDF.foreach(println)
  //  [[123456,Psychology],WrappedArray([michael,armbrust,abc123@prodigy.net], [chris,fregly,def456@compuserve.net]),michael,armbrust,abc123@prodigy.net]
  //  [[123456,Engineering],WrappedArray([michael,armbrust,abc123@prodigy.net], [chris,fregly,def456@compuserve.net]),michael,armbrust,abc123@prodigy.net]
  //  [[123456,Psychology],WrappedArray([michael,armbrust,abc123@prodigy.net], [chris,fregly,def456@compuserve.net]),chris,fregly,def456@compuserve.net]
  //  [[123456,Engineering],WrappedArray([michael,armbrust,abc123@prodigy.net], [chris,fregly,def456@compuserve.net]),chris,fregly,def456@compuserve.net]

  //  println(explodedDepartmentWithEmployeesDF.getClass.getTypeName)
  //  org.apache.spark.sql.DataFrame

  explodedDepartmentWithEmployeesDF.printSchema()
  //  root
  //  |-- department: struct (nullable = true)
  //  |    |-- id: string (nullable = true)
  //  |    |-- name: string (nullable = true)
  //  |-- employees: array (nullable = true)
  //  |    |-- element: struct (containsNull = true)
  //  |    |    |-- firstName: string (nullable = true)
  //  |    |    |-- lastName: string (nullable = true)
  //  |    |    |-- email: string (nullable = true)
  //  |-- firstName: string (nullable = true)
  //  |-- lastName: string (nullable = true)
  //  |-- email: string (nullable = true)

  explodedDepartmentWithEmployeesDF.show()
  //  +--------------------+--------------------+---------+--------+--------------------+
  //  |          department|           employees|firstName|lastName|               email|
  //  +--------------------+--------------------+---------+--------+--------------------+
  //  | [123456,Psychology]|[[michael,armbrus...|  michael|armbrust|  abc123@prodigy.net|
  //  | [123456,Psychology]|[[michael,armbrus...|    chris|  fregly|def456@compuserve...|
  //  |[123456,Engineering]|[[michael,armbrus...|  michael|armbrust|  abc123@prodigy.net|
  //  |[123456,Engineering]|[[michael,armbrus...|    chris|  fregly|def456@compuserve...|
  //  +--------------------+--------------------+---------+--------+--------------------+

  println("explodedDepartmentWithEmployeesDF1")

  // Same explode, using the $ column interpolator instead of df("...").
  departmentWithEmployeesDF.explode($"employees") {
    case Row(employee: Seq[Row]) => employee.map(employee =>
      Employee(employee(0).asInstanceOf[String], employee(1).asInstanceOf[String], employee(2).asInstanceOf[String])
    )
  }.show()
  //  +--------------------+--------------------+---------+--------+--------------------+
  //  |          department|           employees|firstName|lastName|               email|
  //  +--------------------+--------------------+---------+--------+--------------------+
  //  | [123456,Psychology]|[[michael,armbrus...|  michael|armbrust|  abc123@prodigy.net|
  //  | [123456,Psychology]|[[michael,armbrus...|    chris|  fregly|def456@compuserve...|
  //  |[123456,Engineering]|[[michael,armbrus...|  michael|armbrust|  abc123@prodigy.net|
  //  |[123456,Engineering]|[[michael,armbrus...|    chris|  fregly|def456@compuserve...|
  //  +--------------------+--------------------+---------+--------+--------------------+
}
| Mageswaran1989/aja | src/examples/scala/org/aja/tej/examples/sparksql/dataframe/ExplodeExample.scala | Scala | apache-2.0 | 5,603 |
/*
* Copyright 2016 Frugal Mechanic (http://frugalmechanic.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package fm.common.rich
import org.scalatest.{FunSuite,Matchers}
import org.scalatest.concurrent.ScalaFutures
import fm.common.Implicits._
// Exercises the TraversableOnce enrichments provided by fm.common.Implicits.
class TestRichTraversableOnce extends FunSuite with Matchers with ScalaFutures {
  test("foreachWithIndex") {
    var count: Int = 0
    // Each element's value equals its index, so the callback can verify both.
    Vector("0","1","2","3","4","5").foreachWithIndex{ (elem: String, idx: Int) =>
      idx shouldBe elem.toInt
      count += 1
    }
    count shouldBe 6
  }

  test("foreachWithLongIndex") {
    var count: Int = 0
    Vector("0","1","2","3","4","5").foreachWithLongIndex{ (elem: String, idx: Long) =>
      idx shouldBe elem.toLong
      count += 1
    }
    count shouldBe 6
  }

  // minOption/maxOption return None on empty input instead of throwing.
  test("minOption") {
    Vector.empty[Int].minOption shouldBe None
    Vector(1,2,3).minOption shouldBe Some(1)
    Vector(3,2,1).minOption shouldBe Some(1)
  }

  test("maxOption") {
    Vector.empty[Int].maxOption shouldBe None
    Vector(1,2,3).maxOption shouldBe Some(3)
    Vector(3,2,1).maxOption shouldBe Some(3)
  }

  // Like mkString, but produces "" (no start/end markers) for an empty collection.
  test("mkStringOrBlank") {
    Vector().mkStringOrBlank("[start]", "[sep]", "[end]") shouldBe ""
    Vector("foo").mkStringOrBlank("[start]", "[sep]", "[end]") shouldBe "[start]foo[end]"
    Vector("foo", "bar").mkStringOrBlank("[start]", "[sep]", "[end]") shouldBe "[start]foo[sep]bar[end]"
  }

  test("countBy") {
    Vector(1,2,2,3,3,3,4,4,4,4).countBy{ i => i} shouldBe Map(1 -> 1, 2 -> 2, 3 -> 3, 4 -> 4)
  }

  // Groups consecutive runs of elements sharing the same predicate result.
  test("collapseBy") {
    Vector(2,4,6,1,3,2,5,7).collapseBy{ _ % 2 == 0 } shouldBe Vector((true,Vector(2, 4, 6)), (false,Vector(1, 3)), (true,Vector(2)), (false,Vector(5, 7)))
  }

  test("findMapped") {
    Vector(1,2,3,4).findMapped{ i: Int => if (i % 2 == 0) Some("foo") else None } shouldBe Some("foo")
    Vector(1,2,3,4).findMapped{ i: Int => if (i == 123) Some("foo") else None } shouldBe None
  }

  // Failing in Scala.JS due to the usage of Futures:
  //  test("findMappedFuture") {
  //    Vector(1,2,3,4).findMappedFuture{ i: Int => if (i == 2) Future.successful(Some("foo")) else Future.successful(None) }.futureValue shouldBe Some("foo")
  //    Vector(1,2,3).findMappedFuture{ i: Int => if (i == 123) Future.successful(Some("foo")) else Future.successful(None) }.futureValue shouldBe None
  //
  //    // Only the Future for the first element should be run.
  //    Vector(1,2,3,4).findMappedFuture{ i: Int => if (i == 1) Future.successful(Some("foo")) else { System.exit(-1); ??? } }.futureValue shouldBe Some("foo")
  //  }
}
| frugalmechanic/fm-common | shared/src/test/scala/fm/common/rich/TestRichTraversableOnce.scala | Scala | apache-2.0 | 3,078 |
package codesniffer.deckard.search
import java.io.File
import java.util
import codesniffer.api.Node
import codesniffer.api.body.MethodDeclaration
import codesniffer.api.expr.ThisExpr
import codesniffer.api.stmt.EmptyStmt
import codesniffer.deckard.{WeightedVec, CharacVec, Indexer, MemWriter}
import codesniffer.deckard.vgen._
import scala.collection.convert.wrapAsScala._
import scala.concurrent._
import scala.util.{Failure, Success}
/**
 * Created by Bowen Cai on 5/15/2015.
 *
 * Matches method-level characteristic vectors of a library against those of
 * one or more applications and reports the closest clone pairs.
 */
object CrossMatch {

  // Results ordered by distance (ascending).
  // NOTE(review): keyed by the Double distance, so two pairs with an identical
  // distance overwrite each other in the TreeMap — confirm this is acceptable.
  type SortedList = util.TreeMap[Double, (CharacVec[String], CharacVec[String])]

  /**
   * Scans `path` (a file or a directory, recursively) and produces one
   * characteristic vector per method with more than 20 counted nodes.
   * Vectors from different projects share the coordinate space of `idx`.
   */
  def vgen(path: String, idx: Indexer[String], cfg: DirScanConfig): MemWriter[String] = {
    val dir = new File(path)
    require(dir.exists() && dir.canRead)
    val vs = new MemWriter[String]
    vs.sizeHint(1024)
    val scanner = new SrcScanner(new Context[String](cfg, null, null, idx, vs))
    // save exact source to vector, for manually check
    //    scanner.methodVisitor.before = (m: MethodDeclaration, ctx: Context[String]) => new WeightedVec(BasicVecGen.newVec(m, ctx))
    val mv = new SkipLocksVecGen[String]
    scanner.methodVisitor = mv
    mv.classVisitor = scanner.classVisitor
    scanner.classVisitor.setMethodVisitor(mv)
    // Keep only vectors with enough nodes; attach the method source for
    // manual inspection of reported clones.
    mv.after = (m, ctx) => {
      val v = ctx.data.get.asInstanceOf[CharacVec[String]]
      if (v.count > 20) {
        v.data = Some(m.toString.intern())
        ctx.vecWriter.write(v)
      }
    }
    dir match {
      case where if where.isDirectory => scanner.scanDir(where, recursive = true)
      case src if src.isFile => scanner.processFile(src)
    }
    System.gc()
    vs
  }

  /**
   * Compares one library vector against every application vector, recording
   * pairs whose distance is below the threshold into `result`.
   * Vectors whose node counts differ by 60 or more are skipped outright.
   */
  def findMatch(vLib: CharacVec[String], vApps: MemWriter[String], result: SortedList): SortedList = {
    val c1 = vLib.count
    val threshold = 18
    for (vApp <- vApps) {
      val c2 = vApp.count
      if (math.abs(c1 - c2) < 60) {
        val dist = vLib.distance(vApp)
        if (dist < threshold)
          result.put(dist, (vLib, vApp.asInstanceOf[CharacVec[String]]))
      }
    }
    result
  }

  def main(args: Array[String]): Unit = {
    // Hard-coded local paths; the commandline parsing below is disabled.
    var path2lib: String = "E:\\\\research\\\\top\\\\guava\\\\guava\\\\src"
    //    val path2lib = "E:\\\\research\\\\top\\\\guava\\\\guava\\\\src\\\\com\\\\google\\\\common\\\\collect"
    //    var path2lib: String = "E:\\\\research\\\\top\\\\jdk-1.7\\\\java\\\\util\\\\concurrent"
    var path2Apps = Array("E:\\\\\\\\research\\\\\\\\top\\\\\\\\h2-1.4.187-sources")//,
    //    val path2Apps = Array("E:\\\\research\\\\top\\\\h2-1.4.187-sources\\\\org\\\\h2\\\\api")
    //    "E:\\\\research\\\\top\\\\derby")//,
    //    "E:\\\\research\\\\top\\\\Openfire",
    //    "E:\\\\research\\\\top\\\\spring-framework",
    //    "D:\\\\Program Files\\\\adt-bundle-windows-x86_64-20130219\\\\sdk\\\\sources\\\\android-19")
    var resultSize = 20
    //    if (args != null && args.length > 2) {
    //      resultSize = Integer.parseInt(args(0))
    //      path2lib = args(1)
    //      path2Apps = args.drop(2)
    //    } else {
    //      println("Usage: <result_size> <path-to-library> <path-to-app> <path-to-app> ...")
    //      System.exit(1)
    //    }
    println(s"Matching library $path2lib against applications:")
    path2Apps.foreach(println)

    /**
     **************************************************************************
     * prepare
     *
     */
    val appCount = path2Apps.length
    val procCount = Runtime.getRuntime.availableProcessors()
    implicit val _exe = ExecutionContext.fromExecutor(java.util.concurrent.Executors.newFixedThreadPool(procCount)).prepare()

    // Ignore empty statements and bare `this` expressions when vectorizing.
    val _nodeFilter = (node: Node)=>node.isInstanceOf[EmptyStmt] || node.isInstanceOf[ThisExpr]
    val fileNameFilter = (name: String) => (
      name.equals("package-info.java") // filter out package file
        || name.endsWith("Tests.java") // filter out test file
        || name.endsWith("Test.java") // filter out test file
      )

    val _appConfig = new DirScanConfig
    _appConfig.filterDirName = fileNameFilter
    _appConfig.filterNode = _nodeFilter

    val _libConfig = new DirScanConfig
    _libConfig.filterDirName = fileNameFilter
    _libConfig.filterNode = _nodeFilter
    // in a library, only public method is open to use
    //    _libConfig.filterMethod = (m: MethodDeclaration) => !Modifier.isPublic(m.getModifiers)

    // common indexer, ensure vectors from different projects are generated in same coordinate
    val _indexer = new Indexer[String]

    /**
     **************************************************************************
     * config & generate vectors
     *
     */
    var t0 = System.currentTimeMillis()
    val vsLib = vgen(path2lib, _indexer, _libConfig)
    var t1 = System.currentTimeMillis()
    println(s"library searched, ${vsLib.size} vectors generated, time ${t1 - t0} ms")

    val vsApps = new Array[MemWriter[String]](appCount)
    val results = (0 until appCount).map(_=>new SortedList).toArray

    t0 = System.currentTimeMillis()
    // One task per application; each task lazily vectorizes its own app on
    // first use (each array slot is touched by exactly one task).
    val tasks = for (i <- 0 until appCount) yield future[Unit] {
      for (vLib <- vsLib) {
        val _a = vsApps(i)
        val vApp = if (_a != null) _a
        else {
          val _vs = vgen(path2Apps(i), _indexer, _appConfig)
          vsApps(i) = _vs
          _vs
        }
        findMatch(vLib.asInstanceOf[CharacVec[String]], vApp, results(i))
      }
      // Trim the result list to roughly 1.5x the requested size.
      while (results(i).size() * 2 > resultSize * 3) results(i).pollLastEntry()
    }

    Future.sequence(tasks) onComplete {
      case Success(r) =>
        t1 = System.currentTimeMillis()
        println(s"find ${results.foldLeft(0) { (s, e) => s + e.size() }} clone pair, time ${t1 - t0} ms")
        var i = 0
        while (i < path2Apps.length) {
          val result = results(i)
          println(s"Result for application ${path2Apps(i)}")
          var rank = 1
          for ((dist, pair) <- result) {
            println(s"Rank $rank, distance $dist\\r\\nnode count: ${pair._1.count}, ${pair._1.location}\\r\\n${pair._1.data.get}")
            println(s"node count: ${pair._2.count}, ${pair._2.location}\\r\\n${pair._2.data.get}")
            println()
            rank += 1
          }
          println("\\r\\n")
          i += 1
        }
        System.exit(0)
      case Failure(t) =>
        println(s"Search failed, error:$t")
    }
  }
}
| xkommando/CodeSniffer | deckard/src/main/scala/codesniffer/deckard/search/CrossMatch.scala | Scala | lgpl-3.0 | 6,344 |
package com.evalonlabs.myinbox.http
import com.evalonlabs.net.{CustomInitializer, NettyServer}
import com.evalonlabs.monitoring.Logging
import io.netty.channel.socket.SocketChannel
import io.netty.bootstrap.ServerBootstrap
import io.netty.channel.{ChannelOption, ChannelPipeline, ChannelHandlerContext}
import io.netty.buffer.ByteBuf
import io.netty.handler.codec.http.{HttpObjectAggregator, HttpResponseEncoder, HttpRequestDecoder}
/**
 * Netty-backed HTTP server that logs lifecycle and connection events.
 * The actual request handling happens in the pipeline built by the
 * supplied CustomInitializer.
 */
class HttpServer(name: String, port: Integer, initializer: CustomInitializer) extends NettyServer(name, port, initializer) with Logging {

  /** Configures socket options on the bootstrap before the server binds. */
  def onStart(bootstrap: ServerBootstrap) = {
    bootstrap.option(ChannelOption.SO_BACKLOG, Int.box(1024))
    bootstrap.option(ChannelOption.SO_REUSEADDR, Boolean.box(true))
    bootstrap.option(ChannelOption.TCP_NODELAY, Boolean.box(true))
    bootstrap.option(ChannelOption.SO_KEEPALIVE, Boolean.box(true))
  }

  /**
   * Called when the server shuts down.
   * NOTE(review): the by-name callback `f` is never evaluated here — confirm
   * whether it is intended to run on stop.
   */
  def onStop(f: => Any) = {
    // Fixed log message typo (was "Server Stoped").
    logger.info("Server stopped")
  }

  def onConnectionOpened(ctx: ChannelHandlerContext) = {
    logger.debug("Connection Opened " + ctx.channel().remoteAddress())
  }

  def onConnectionClosed(ctx: ChannelHandlerContext) = {
    logger.debug("Connection Closed " + ctx.channel().remoteAddress())
  }

  /** Logs server-side errors with the remote peer, message and stack trace. */
  def onServerError(ctx: ChannelHandlerContext, cause: Throwable) = {
    // Previously concatenated the stack-trace Array directly, which logged
    // its useless toString; render the frames instead.
    logger.debug("Server Error" + ctx.channel().remoteAddress() + cause.getMessage + cause.getStackTrace.mkString("\n\t", "\n\t", ""))
  }

  /** No-op: inbound bytes are consumed by the pipeline's handler instead. */
  def onReceived(ctx: ChannelHandlerContext, buf: ByteBuf) = {
  }
}
object HttpService {

  /**
   * Builds an HttpServer on `port` with a standard Netty HTTP pipeline:
   * request decoding, response encoding, message aggregation and the
   * application handler, in that order.
   */
  def apply(port: Integer): HttpServer =
    new HttpServer("http", port, new CustomInitializer {
      override def apply(ch: SocketChannel): Any = {
        val pipeline: ChannelPipeline = ch.pipeline()
        pipeline.addLast("decoder", new HttpRequestDecoder())
        pipeline.addLast("http_encoder", new HttpResponseEncoder())
        // Aggregates request fragments into full messages, up to 64 KiB.
        pipeline.addLast("aggregator", new HttpObjectAggregator(65536))
        pipeline.addLast("handler", new HttpHandler(null))
        pipeline
      }
    })
} | epappas/myinbox | http/src/main/scala/com/evalonlabs/myinbox/http/HttpServer.scala | Scala | mit | 1,998 |
package me.gregd.cineworld.wiring
import com.typesafe.scalalogging.LazyLogging
import me.gregd.cineworld.config._
import slick.jdbc.PostgresProfile
import slick.jdbc.PostgresProfile.api._
import slick.migration.api._
// Creates the Postgres tables the application needs, via slick-migration-api.
object DatabaseInitialisation extends LazyLogging {

  type DB = PostgresProfile.backend.DatabaseDef

  implicit val dialect: PostgresDialect = new PostgresDialect()

  // NOTE(review): the table name is spliced in with #${...} (no escaping), so
  // it assumes ListingsTableName is a trusted, validated value — confirm.
  def createListings(listingsTableName: ListingsTableName): DBIO[Unit] = DBIO.seq(
    sqlu"""
      create table if not exists #${listingsTableName.value} (
        cinema_id text not null,
        date text not null,
        listings text not null,
        modified TIMESTAMPTZ not null default now(),
        primary key (cinema_id, date)
      )
    """
  )

  val createCinemas: DBIO[Unit] = DBIO.seq(
    sqlu"""
      create table if not exists cinemas (
        id varchar,
        chain varchar not null,
        json varchar not null,
        modified TIMESTAMPTZ not null default now(),
        primary key (id)
      )
    """
  )

  /** Runs all table creations; both are idempotent (create ... if not exists). */
  def migrate(listingsTableName: ListingsTableName): DBIO[Unit] =
    DBIO.seq(
      createListings(listingsTableName),
      createCinemas
    )
}
| Grogs/cinema-service | domain/src/main/scala/me/gregd/cineworld/wiring/DatabaseInitialisation.scala | Scala | gpl-3.0 | 1,193 |
/*
* OpenURP, Open University Resouce Planning
*
* Copyright (c) 2013-2014, OpenURP Software.
*
* OpenURP is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* OpenURP is distributed in the hope that it will be useful.
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Beangle. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openurp.edu.attendance.ws.domain
import java.sql.Date
import java.util.Calendar
import org.beangle.commons.lang.Dates.toDate
import org.junit.runner.RunWith
import org.scalatest.{FunSpec, Matchers}
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class TimeTest extends FunSpec with Matchers {

  // Exercises WeekStates.build on dates near year boundaries.
  // Fixed: the describe label previously read "WeekStats", a typo for the
  // WeekStates object actually under test.
  describe("WeekStates") {
    it("build") {
      val cal = Calendar.getInstance()

      // Current date: smoke-check that build runs; result only printed.
      var rs = WeekStates.build(toDate(cal))
      println(rs)

      // 2014-12-30: expected to yield two entries in the second component —
      // presumably because that week straddles the 2014/2015 boundary;
      // confirm against the WeekStates implementation.
      cal.setTime(Date.valueOf("2014-12-30"))
      rs = WeekStates.build(toDate(cal))
      assert(rs._2.size == 2)
      println(rs)

      // 2014-01-05: start-of-year boundary case; result only printed,
      // no assertion in the original.
      cal.setTime(Date.valueOf("2014-01-05"))
      rs = WeekStates.build(toDate(cal))
      println(rs)
    }
  }
} | openurp/edu-core | attendance/ws/src/test/scala/org/openurp/edu/attendance/ws/domain/WeekStateTest.scala | Scala | gpl-3.0 | 1,462 |
package com.scalaAsm.x86
package Instructions
package x87
// Description: Compare Real
// Category: general/compar
// Ties the FCOM mnemonic into the instruction-definition DSL.
trait FCOM extends InstructionDefinition {
  val mnemonic = "FCOM"
}

// Entry point exposing the zero- and one-operand FCOM forms defined in FCOMImpl.
object FCOM extends ZeroOperands[FCOM] with OneOperand[FCOM] with FCOMImpl
trait FCOMImpl extends FCOM {

  // One-operand form with a 32-bit memory operand: opcode D8, ModRM reg field /2.
  implicit object _0 extends OneOp[m32] {
    val opcode: OneOpcode = 0xD8 /+ 2
    val format = RmFormat
    // ST(0) participates implicitly in every FCOM comparison — TODO confirm
    // that is what hasImplicitOperand encodes in this DSL.
    override def hasImplicitOperand = true
  }

  // Zero-explicit-operand form; shares the D8 /2 base opcode.
  implicit object _1 extends NoOp{
    val opcode: OneOpcode = 0xD8 /+ 2
    override def hasImplicitOperand = true
  }

  // One-operand form with a 64-bit memory operand: opcode DC, ModRM reg field /2.
  implicit object _2 extends OneOp[m64] {
    val opcode: OneOpcode = 0xDC /+ 2
    val format = RmFormat
    override def hasImplicitOperand = true
  }
}
| bdwashbu/scala-x86-inst | src/main/scala/com/scalaAsm/x86/Instructions/x87/FCOM.scala | Scala | apache-2.0 | 727 |
/*
* Copyright 2015 RONDHUIT Co.,LTD.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.nlp4l.repl
import java.io._
import java.nio.file.{Path, Files, FileSystems}
import org.apache.http.client.methods.HttpGet
import org.apache.http.impl.client.HttpClients
import org.apache.http.util.EntityUtils
/**
 * Utility object for handling corpus downloads.
 *
 * Fetches well-known NLP corpora over HTTP and unpacks them under
 * [[Corpora.CORPORA_ROOT]] by shelling out to `tar`/`unzip`.
 */
object Corpora {

  val CORPORA_ROOT = "corpora"

  // corpora identifiers
  val LDCC = "ldcc"
  val BROWN = "brown"
  val REUTERS = "reuters"

  // corpus id -> (base URL, archive file name, local extraction directory)
  val corpora = Map(
    LDCC -> ("http://www.rondhuit.com/download/", "ldcc-20140209.tar.gz", FileSystems.getDefault.getPath(CORPORA_ROOT, LDCC)),
    BROWN -> ("https://ia600503.us.archive.org/21/items/BrownCorpus/", "brown.zip", FileSystems.getDefault.getPath(CORPORA_ROOT, BROWN)),
    REUTERS -> ("http://www.daviddlewis.com/resources/testcollections/reuters21578/", "reuters21578.tar.gz", FileSystems.getDefault.getPath(CORPORA_ROOT, REUTERS))
  )

  /**
   * Downloads `url + file` into `path` and extracts the archive in place.
   *
   * @param url  base URL, expected to end with a slash
   * @param file archive file name (.tar.gz/.tgz or .zip)
   * @param path directory to download into; created if absent. If it exists
   *             but is not a directory, the process exits with status 1.
   */
  def downloadAndExtract(url: String, file: String, path: Path): Unit = {
    // Runs a system command and reports success/failure via its exit code.
    // NOTE(review): the child's stdout/stderr are never drained; a very
    // chatty tar/unzip could block on a full pipe buffer — consider
    // ProcessBuilder with inheritIO if that is ever observed.
    def execSysCmd(cmd: String): Unit = {
      println("Try to execute system command: %s".format(cmd))
      val p = Runtime.getRuntime.exec(cmd)
      if (p.waitFor() != 0) println("Execute failed.")
      else println("Success.")
    }

    // path to save archive
    val target = FileSystems.getDefault.getPath(path.toAbsolutePath.toString, file)
    if (!Files.exists(path)) {
      Files.createDirectories(path)
    } else if (!Files.isDirectory(path)) {
      println("[ERROR] File " + path.toAbsolutePath.toString + " already exists, but not a directory.")
      System.exit(1)
    } else {
      // Re-download: drop any previously fetched copy so Files.copy below
      // does not fail on an existing file.
      Files.deleteIfExists(target)
    }

    // download archive file from corpus's location
    val client = HttpClients.createDefault()
    try {
      val httpGet = new HttpGet(url + file)
      val response = client.execute(httpGet)
      try {
        val entity = response.getEntity
        // Reuse `target` instead of rebuilding the identical path (was
        // recomputed inline in the original).
        Files.copy(entity.getContent, target)
        EntityUtils.consume(entity)
        println("Successfully downloaded " + file)
        // extract archive
        val archive = new File(target.toAbsolutePath.toString)
        if (file.endsWith(".tgz") || file.endsWith(".tar.gz"))
          execSysCmd("tar xzf %s -C %s".format(archive, path.toAbsolutePath.toString))
        else if (file.endsWith(".zip"))
          execSysCmd("unzip -o %s -d %s".format(archive, path.toAbsolutePath.toString))
      } finally {
        response.close()
      }
    } finally {
      // FIX: the CloseableHttpClient itself was previously leaked — only the
      // response was closed. Release its connection manager as well.
      client.close()
    }
  }

  /**
   * download and extract Livedoor news corpus
   * http://www.rondhuit.com/download/ldcc-20140209.tar.gz
   */
  def downloadLdcc() = {
    val corpus = corpora(LDCC)
    downloadAndExtract(corpus._1, corpus._2, corpus._3)
  }

  /**
   * download and extract Brown corpus
   * https://ia600503.us.archive.org/21/items/BrownCorpus/brown.zip
   */
  def downloadBrown() = {
    val corpus = corpora(BROWN)
    downloadAndExtract(corpus._1, corpus._2, corpus._3)
  }

  /**
   * download and extract Reuters corpus
   * http://www.daviddlewis.com/resources/testcollections/reuters21578/reuters21578.tar.gz
   */
  def downloadReuters() = {
    val corpus = corpora(REUTERS)
    downloadAndExtract(corpus._1, corpus._2, corpus._3)
  }
}
| gazimahmud/nlp4l | src/main/scala/org/nlp4l/repl/Corpora.scala | Scala | apache-2.0 | 3,804 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and retrieves a sample of code snippets matching specific criteria, giving a quick overview of the dataset's contents without deeper analysis.