code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package com.greencatsoft.d3.selection
import scala.scalajs.js
import scala.scalajs.js.UndefOr
import org.scalajs.dom.Node
import org.scalajs.dom.html.Element
trait ContentEditor[A <: Node, B <: Selection[A, B]] extends js.Object {

  /** Returns the text content of the selected node(s), if any. */
  def text(): UndefOr[String] = js.native

  /** Sets the text content of the selection and returns it for chaining. */
  def text(content: String): B = js.native

  /**
   * Sets the text content per element via the given iterator callback and
   * returns the selection for chaining.
   *
   * Fix: the original declared this as `def text[B](...)`, shadowing the
   * trait's type parameter `B` and leaving the return type bound to a fresh,
   * unconstrained type parameter; the spurious method-level type parameter
   * has been removed so the return type is the selection type `B`.
   */
  def text(provider: ElementIterator[A, B]): B = js.native
}
trait HtmlContentEditor[A <: Element, B <: Selection[A, B]] extends ContentEditor[A, B] {

  /** Returns the inner HTML of the selected element(s), if any. */
  def html(): UndefOr[String] = js.native

  /** Sets the inner HTML of the selection and returns it for chaining. */
  def html(content: String): B = js.native

  /**
   * Sets the inner HTML per element via the given iterator callback.
   *
   * Fix: the original declared `def html[A](provider: ElementIterator[Element, A]): B`,
   * shadowing the trait's type parameter `A` with a fresh, unconstrained one.
   * The signature is now aligned with `ContentEditor.text(provider)`, typing
   * the iterator on this selection's element type `A` and selection type `B`.
   */
  def html(provider: ElementIterator[A, B]): B = js.native
} | dsugden/scalajs-d3 | src/main/scala/com/greencatsoft/d3/selection/ContentEditor.scala | Scala | apache-2.0 | 634 |
/*
* Copyright 2013 Maurício Linhares
*
* Maurício Linhares licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.github.mauricio.async.db.postgresql.encoders
import com.github.mauricio.async.db.postgresql.messages.backend.ServerMessage
import com.github.mauricio.async.db.util.{Log, ByteBufferUtils}
import com.github.mauricio.async.db.column.ColumnEncoderRegistry
import java.nio.charset.Charset
import io.netty.buffer.{Unpooled, ByteBuf}
import scala.collection.mutable.ArrayBuffer
object PreparedStatementEncoderHelper {
  // Shared logger for every class mixing in the PreparedStatementEncoderHelper trait.
  final val log = Log.get[PreparedStatementEncoderHelper]
}
/**
 * Mixin with the logic needed to run a prepared statement over PostgreSQL's
 * extended query protocol: it serializes the Bind, (optional) Describe,
 * Execute, Close and Sync frontend messages into one outgoing buffer.
 */
trait PreparedStatementEncoderHelper {

  import PreparedStatementEncoderHelper.log

  /**
   * Encodes the messages that bind `values` to the portal named by
   * `statementIdBytes` and execute it.
   *
   * @param statementIdBytes bytes of the statement name, reused as the portal name
   * @param query            original query text, used only for debug logging
   * @param values           positional parameters; null or None is sent as SQL NULL
   * @param encoder          registry that turns each value into its text form
   * @param charset          charset used to encode the parameter text
   * @param writeDescribe    if true, a Describe('P') message is appended after Bind
   * @return a composite buffer holding Bind [+ Describe], Execute, Sync and Close
   */
  def writeExecutePortal(
    statementIdBytes: Array[Byte],
    query : String,
    values: Seq[Any],
    encoder: ColumnEncoderRegistry,
    charset: Charset,
    writeDescribe: Boolean = false
  ): ByteBuf = {

    // ---- Bind message: portal name, statement name, parameter values ----
    val bindBuffer = Unpooled.buffer(1024)
    bindBuffer.writeByte(ServerMessage.Bind)
    bindBuffer.writeInt(0) // length placeholder; back-patched by ByteBufferUtils.writeLength below

    bindBuffer.writeBytes(statementIdBytes) // portal name (same bytes as the statement name)
    bindBuffer.writeByte(0)                 // C-string terminator
    bindBuffer.writeBytes(statementIdBytes) // prepared statement name
    bindBuffer.writeByte(0)

    bindBuffer.writeShort(0)             // zero parameter-format codes => all parameters in text format
    bindBuffer.writeShort(values.length) // number of parameter values that follow

    // Decoded values are only collected for the debug log line; stays null otherwise.
    val decodedValues = if ( log.isDebugEnabled ) {
      new ArrayBuffer[String](values.size)
    } else {
      null
    }

    for (value <- values) {
      if (value == null || value == None) {
        bindBuffer.writeInt(-1) // length -1 marks a SQL NULL parameter
        if (log.isDebugEnabled) {
          decodedValues += null
        }
      } else {
        val encodedValue = encoder.encode(value)
        if ( log.isDebugEnabled ) {
          decodedValues += encodedValue
        }
        // Parameters are sent as text, encoded with the connection charset.
        val content = encodedValue.getBytes(charset)
        bindBuffer.writeInt(content.length)
        bindBuffer.writeBytes( content )
      }
    }

    if (log.isDebugEnabled) {
      log.debug(s"Executing query - statement id (${statementIdBytes.mkString("-")}) - statement ($query) - encoded values (${decodedValues.mkString(", ")}) - original values (${values.mkString(", ")})")
    }

    bindBuffer.writeShort(0) // zero result-column-format codes => all results in text format

    // Back-patch the Bind message length field (bytes after the type byte).
    ByteBufferUtils.writeLength(bindBuffer)

    if ( writeDescribe ) {
      // Describe('P') asks the server for the portal's row description; it is
      // appended to the same buffer as the Bind message.
      val describeLength = 1 + 4 + 1 + statementIdBytes.length + 1
      val describeBuffer = bindBuffer
      describeBuffer.writeByte(ServerMessage.Describe)
      describeBuffer.writeInt(describeLength - 1) // length field excludes the type byte
      describeBuffer.writeByte('P')               // 'P' => describe a portal (not a statement)
      describeBuffer.writeBytes(statementIdBytes)
      describeBuffer.writeByte(0)
    }

    // ---- Execute message: run the portal with no row limit ----
    val executeLength = 1 + 4 + statementIdBytes.length + 1 + 4
    val executeBuffer = Unpooled.buffer(executeLength)
    executeBuffer.writeByte(ServerMessage.Execute)
    executeBuffer.writeInt(executeLength - 1)
    executeBuffer.writeBytes(statementIdBytes)
    executeBuffer.writeByte(0)
    executeBuffer.writeInt(0) // max rows 0 => fetch all rows

    // ---- Close('P') message: release the portal after execution ----
    val closeLength = 1 + 4 + 1 + statementIdBytes.length + 1
    val closeBuffer = Unpooled.buffer(closeLength)
    closeBuffer.writeByte(ServerMessage.CloseStatementOrPortal)
    closeBuffer.writeInt(closeLength - 1)
    closeBuffer.writeByte('P')
    closeBuffer.writeBytes(statementIdBytes)
    closeBuffer.writeByte(0)

    // ---- Sync message: tells the server to process the batch and reply ----
    val syncBuffer = Unpooled.buffer(5)
    syncBuffer.writeByte(ServerMessage.Sync)
    syncBuffer.writeInt(4)

    // NOTE(review): Close is deliberately placed after Sync in the outgoing
    // stream — presumably so the portal close is handled in the following
    // protocol cycle; confirm against the server's message handling.
    Unpooled.wrappedBuffer(bindBuffer, executeBuffer, syncBuffer, closeBuffer)
  }
}
| ilangostl/postgresql-async | postgresql-async/src/main/scala/com/github/mauricio/async/db/postgresql/encoders/PreparedStatementEncoderHelper.scala | Scala | apache-2.0 | 4,055 |
package xitrum.handler
import io.netty.channel.{ChannelHandler, ChannelInitializer, ChannelPipeline}
import io.netty.channel.ChannelHandler.Sharable
import io.netty.channel.socket.SocketChannel
import io.netty.handler.codec.http.{HttpRequestDecoder, HttpResponseEncoder}
import io.netty.handler.stream.ChunkedWriteHandler
import xitrum.Config
import xitrum.handler.inbound._
import xitrum.handler.outbound._
/**
* Sharable handlers are put here so that they can be easily picked up by apps
* that want to use custom pipeline. Those apps may only want a subset of
* default handlers.
*
* When an app uses its own dispatcher (not Xitrum's routing/dispatcher) and
* only needs Xitrum's fast static file serving, it may use only these handlers:
*
* Inbound:
* HttpRequestDecoder
* Request2Env
* PublicFileServer
* Its own dispatcher
*
* Outbound:
* HttpResponseEncoder
* ChunkedWriteHandler
* XSendFile
*/
object DefaultHttpChannelInitializer {
  // Sharable inbound handlers, created once and reused across channels
  lazy val baseUrlRemover    = new BaseUrlRemover
  lazy val basicAuth         = new BasicAuth
  lazy val publicFileServer  = new PublicFileServer
  lazy val webJarsServer     = new WebJarsServer
  lazy val uriParser         = new UriParser
  lazy val methodOverrider   = new MethodOverrider
  lazy val dispatcher        = new Dispatcher
  lazy val badClientSilencer = new BadClientSilencer

  // Sharable outbound handlers, created once and reused across channels
  lazy val setCORS           = new SetCORS
  lazy val OPTIONSResponse   = new OPTIONSResponse
  lazy val fixiOS6SafariPOST = new FixiOS6SafariPOST
  lazy val xSendFile         = new XSendFile
  lazy val xSendResource     = new XSendResource

  /**
   * Strips HTTP-only handlers from the pipeline once a connection upgrades to
   * WebSocket. Netty's handshaker itself rewires the codec handlers, roughly:
   *   pipeline.remove(classOf[HttpChunkAggregator])
   *   pipeline.replace(classOf[HttpRequestDecoder],  "wsdecoder", new WebSocket08FrameDecoder(true, this.allowExtensions))
   *   pipeline.replace(classOf[HttpResponseEncoder], "wsencoder", new WebSocket08FrameEncoder(false))
   * so only the xitrum-specific HTTP handlers are removed here.
   */
  def removeUnusedHandlersForWebSocket(pipeline: ChannelPipeline) {
    // BasicAuth is only ever added when configured, so only remove it then.
    val maybeBasicAuth: Seq[Class[_ <: ChannelHandler]] =
      if (Config.xitrum.basicAuth.isDefined) Seq(classOf[BasicAuth]) else Nil

    // Inbound handlers that only make sense for plain HTTP requests.
    // BadClientSilencer is deliberately kept: WebSocketEventDispatcher will be
    // added right before it, see WebSocketAction#acceptWebSocket.
    val inbound: Seq[Class[_ <: ChannelHandler]] =
      Seq(classOf[Request2Env], classOf[BaseUrlRemover]) ++
      maybeBasicAuth ++
      Seq(
        classOf[PublicFileServer],
        classOf[WebJarsServer],
        classOf[UriParser],
        classOf[MethodOverrider],
        classOf[Dispatcher]
      )

    // Outbound handlers that only make sense for plain HTTP responses.
    val outbound: Seq[Class[_ <: ChannelHandler]] = Seq(
      classOf[ChunkedWriteHandler],
      classOf[Env2Response],
      classOf[SetCORS],
      classOf[OPTIONSResponse],
      classOf[FixiOS6SafariPOST],
      classOf[XSendFile],
      classOf[XSendResource]
    )

    (inbound ++ outbound).foreach { klass => removeHandlerIfExists(pipeline, klass) }
  }

  /**
   * Removes the handler of the given class from the pipeline, doing nothing
   * when it is absent (ChannelPipeline#remove throws if the handler does not
   * exist in the pipeline).
   */
  def removeHandlerIfExists(pipeline: ChannelPipeline, klass: Class[_ <: ChannelHandler]) {
    Option(pipeline.get(klass)).foreach { handler => pipeline.remove(handler) }
  }
}
@Sharable
class DefaultHttpChannelInitializer extends ChannelInitializer[SocketChannel] {
  import DefaultHttpChannelInitializer._

  /**
   * Builds the default HTTP pipeline; override to customize.
   *
   * Inbound direction: first handler -> last handler
   * Outbound direction: last handler -> first handler
   */
  override def initChannel(ch: SocketChannel) {
    // Runs once for every accepted connection, so keep this cheap.
    val pipeline   = ch.pipeline
    val portConfig = Config.xitrum.port
    val reqConfig  = Config.xitrum.request

    // Inbound ---------------------------------------------------------------

    // Flash socket policy requests may share the HTTP port when so configured.
    if (portConfig.flashSocketPolicy.isDefined && portConfig.flashSocketPolicy == portConfig.http)
      pipeline.addLast(classOf[FlashSocketPolicyHandler].getName, new FlashSocketPolicyHandler)

    pipeline.addLast(classOf[HttpRequestDecoder].getName, new HttpRequestDecoder(
      reqConfig.maxInitialLineLength,
      reqConfig.maxHeaderSize,
      8192))
    pipeline.addLast(classOf[Request2Env].getName, new Request2Env)
    pipeline.addLast(classOf[BaseUrlRemover].getName, baseUrlRemover)
    if (Config.xitrum.basicAuth.isDefined)
      pipeline.addLast(classOf[BasicAuth].getName, basicAuth)
    pipeline.addLast(classOf[PublicFileServer].getName, publicFileServer)
    pipeline.addLast(classOf[WebJarsServer].getName, webJarsServer)
    pipeline.addLast(classOf[UriParser].getName, uriParser)
    pipeline.addLast(classOf[MethodOverrider].getName, methodOverrider)
    pipeline.addLast(classOf[Dispatcher].getName, dispatcher)
    pipeline.addLast(classOf[BadClientSilencer].getName, badClientSilencer)

    // Outbound --------------------------------------------------------------

    pipeline.addLast(classOf[HttpResponseEncoder].getName, new HttpResponseEncoder)
    // ChunkedWriteHandler is needed for writing ChunkedFile, at XSendFile
    pipeline.addLast(classOf[ChunkedWriteHandler].getName, new ChunkedWriteHandler)
    pipeline.addLast(classOf[Env2Response].getName, env2Response)
    pipeline.addLast(classOf[SetCORS].getName, setCORS)
    pipeline.addLast(classOf[OPTIONSResponse].getName, OPTIONSResponse)
    pipeline.addLast(classOf[FixiOS6SafariPOST].getName, fixiOS6SafariPOST)
    pipeline.addLast(classOf[XSendFile].getName, xSendFile)
    pipeline.addLast(classOf[XSendResource].getName, xSendResource)
  }
}
| caiiiycuk/xitrum | src/main/scala/xitrum/handler/DefaultHttpChannelInitializer.scala | Scala | mit | 6,025 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.api
import java.nio._
import kafka.utils._
import kafka.api.ApiUtils._
import kafka.cluster.Broker
import kafka.controller.LeaderIsrAndControllerEpoch
import kafka.network.{BoundedByteBufferSend, RequestChannel}
import kafka.common.ErrorMapping
import kafka.network.RequestChannel.Response
object LeaderAndIsr {
  // Initial epoch/version values used when a partition's leader and ISR state
  // is first created.
  val initialLeaderEpoch: Int = 0
  val initialZKVersion: Int = 0
}
/**
 * Mutable holder of a partition's current leader broker id, the leader epoch,
 * the in-sync replica set and the ZooKeeper version of this state.
 */
case class LeaderAndIsr(var leader: Int, var leaderEpoch: Int, var isr: List[Int], var zkVersion: Int) {
  def this(leader: Int, isr: List[Int]) = this(leader, LeaderAndIsr.initialLeaderEpoch, isr, LeaderAndIsr.initialZKVersion)

  /** JSON representation with the leader, epoch and comma-joined ISR. */
  override def toString(): String = {
    val fields = new collection.mutable.HashMap[String, String]
    fields += "leader" -> leader.toString
    fields += "leaderEpoch" -> leaderEpoch.toString
    fields += "ISR" -> isr.mkString(",")
    Utils.mapToJson(fields, valueInQuotes = true)
  }
}
object PartitionStateInfo {
  /**
   * Deserializes a PartitionStateInfo from the wire format produced by
   * PartitionStateInfo.writeTo. The read order must match writeTo exactly:
   * controllerEpoch, leader, leaderEpoch, ISR size + members, zkVersion,
   * replication factor + replica ids.
   */
  def readFrom(buffer: ByteBuffer): PartitionStateInfo = {
    val controllerEpoch = buffer.getInt
    val leader = buffer.getInt
    val leaderEpoch = buffer.getInt
    val isrSize = buffer.getInt
    val isr = for(i <- 0 until isrSize) yield buffer.getInt
    val zkVersion = buffer.getInt
    val replicationFactor = buffer.getInt
    val replicas = for(i <- 0 until replicationFactor) yield buffer.getInt
    PartitionStateInfo(LeaderIsrAndControllerEpoch(LeaderAndIsr(leader, leaderEpoch, isr.toList, zkVersion), controllerEpoch),
      replicas.toSet)
  }
}
/**
 * Per-partition leadership state shipped inside a LeaderAndIsrRequest: the
 * leader/ISR info with the electing controller's epoch, plus the full replica
 * assignment of the partition.
 *
 * @param leaderIsrAndControllerEpoch leader, ISR and the epoch of the controller that elected the leader
 * @param allReplicas                 broker ids of every assigned replica
 */
case class PartitionStateInfo(val leaderIsrAndControllerEpoch: LeaderIsrAndControllerEpoch,
                              val allReplicas: Set[Int]) {
  def replicationFactor = allReplicas.size

  /** Serializes this state; the field order must match PartitionStateInfo.readFrom. */
  def writeTo(buffer: ByteBuffer) {
    buffer.putInt(leaderIsrAndControllerEpoch.controllerEpoch)
    buffer.putInt(leaderIsrAndControllerEpoch.leaderAndIsr.leader)
    buffer.putInt(leaderIsrAndControllerEpoch.leaderAndIsr.leaderEpoch)
    buffer.putInt(leaderIsrAndControllerEpoch.leaderAndIsr.isr.size)
    leaderIsrAndControllerEpoch.leaderAndIsr.isr.foreach(buffer.putInt(_))
    buffer.putInt(leaderIsrAndControllerEpoch.leaderAndIsr.zkVersion)
    buffer.putInt(replicationFactor)
    allReplicas.foreach(buffer.putInt(_))
  }

  /** Number of bytes writeTo will produce for this instance. */
  def sizeInBytes(): Int = {
    val size =
      4 /* epoch of the controller that elected the leader */ +
      4 /* leader broker id */ +
      4 /* leader epoch */ +
      4 /* number of replicas in isr */ +
      4 * leaderIsrAndControllerEpoch.leaderAndIsr.isr.size /* replicas in isr */ +
      4 /* zk version */ +
      4 /* replication factor */ +
      allReplicas.size * 4
    size
  }

  /**
   * Human-readable form for logging.
   *
   * Fix: the original appended a closing ")" right after ReplicationFactor and
   * another after AllReplicas while opening only one "(", producing unbalanced
   * output like "(LeaderAndIsrInfo:...,ReplicationFactor:2),AllReplicas:0,1)".
   * The single closing parenthesis now comes once, at the end.
   */
  override def toString(): String = {
    val partitionStateInfo = new StringBuilder
    partitionStateInfo.append("(LeaderAndIsrInfo:" + leaderIsrAndControllerEpoch.toString)
    partitionStateInfo.append(",ReplicationFactor:" + replicationFactor)
    partitionStateInfo.append(",AllReplicas:" + allReplicas.mkString(",") + ")")
    partitionStateInfo.toString()
  }
}
object LeaderAndIsrRequest {
  // Wire-format version of the request this code can read/write.
  val CurrentVersion = 0.shortValue
  val IsInit: Boolean = true
  val NotInit: Boolean = false
  // Default acknowledgement timeout, in milliseconds.
  val DefaultAckTimeout: Int = 1000

  /**
   * Deserializes a LeaderAndIsrRequest from the wire format produced by
   * LeaderAndIsrRequest.writeTo; read order must match writeTo exactly.
   */
  def readFrom(buffer: ByteBuffer): LeaderAndIsrRequest = {
    val versionId = buffer.getShort
    val correlationId = buffer.getInt
    val clientId = readShortString(buffer)
    val controllerId = buffer.getInt
    val controllerEpoch = buffer.getInt
    // (topic, partition) -> partition state entries
    val partitionStateInfosCount = buffer.getInt
    val partitionStateInfos = new collection.mutable.HashMap[(String, Int), PartitionStateInfo]
    for(i <- 0 until partitionStateInfosCount){
      val topic = readShortString(buffer)
      val partition = buffer.getInt
      val partitionStateInfo = PartitionStateInfo.readFrom(buffer)
      partitionStateInfos.put((topic, partition), partitionStateInfo)
    }
    // Brokers that are currently alive and leading some partition
    val leadersCount = buffer.getInt
    var leaders = Set[Broker]()
    for (i <- 0 until leadersCount)
      leaders += Broker.readFrom(buffer)
    new LeaderAndIsrRequest(versionId, correlationId, clientId, controllerId, controllerEpoch, partitionStateInfos.toMap, leaders)
  }
}
/**
 * Controller-to-broker request that tells each broker which partitions it
 * leads or follows, carrying per-partition state and the set of alive leader
 * brokers.
 */
case class LeaderAndIsrRequest (versionId: Short,
                                override val correlationId: Int,
                                clientId: String,
                                controllerId: Int,
                                controllerEpoch: Int,
                                partitionStateInfos: Map[(String, Int), PartitionStateInfo],
                                aliveLeaders: Set[Broker])
  extends RequestOrResponse(Some(RequestKeys.LeaderAndIsrKey), correlationId) {

  // Convenience constructor that fills in the current wire-format version.
  def this(partitionStateInfos: Map[(String, Int), PartitionStateInfo], aliveLeaders: Set[Broker], controllerId: Int,
           controllerEpoch: Int, correlationId: Int, clientId: String) = {
    this(LeaderAndIsrRequest.CurrentVersion, correlationId, clientId,
      controllerId, controllerEpoch, partitionStateInfos, aliveLeaders)
  }

  /** Serializes this request; field order must match LeaderAndIsrRequest.readFrom. */
  def writeTo(buffer: ByteBuffer) {
    buffer.putShort(versionId)
    buffer.putInt(correlationId)
    writeShortString(buffer, clientId)
    buffer.putInt(controllerId)
    buffer.putInt(controllerEpoch)
    buffer.putInt(partitionStateInfos.size)
    for((key, value) <- partitionStateInfos){
      writeShortString(buffer, key._1) // topic
      buffer.putInt(key._2)            // partition
      value.writeTo(buffer)
    }
    buffer.putInt(aliveLeaders.size)
    aliveLeaders.foreach(_.writeTo(buffer))
  }

  /** Number of bytes writeTo will produce; must track writeTo exactly. */
  def sizeInBytes(): Int = {
    var size =
      2 /* version id */ +
      4 /* correlation id */ +
      (2 + clientId.length) /* client id */ +
      4 /* controller id */ +
      4 /* controller epoch */ +
      4 /* number of partitions */
    for((key, value) <- partitionStateInfos)
      size += (2 + key._1.length) /* topic */ + 4 /* partition */ + value.sizeInBytes /* partition state info */
    size += 4 /* number of leader brokers */
    for(broker <- aliveLeaders)
      size += broker.sizeInBytes /* broker info */
    size
  }

  /** Human-readable form for logging. */
  override def toString(): String = {
    val leaderAndIsrRequest = new StringBuilder
    leaderAndIsrRequest.append("Name:" + this.getClass.getSimpleName)
    leaderAndIsrRequest.append(";Version:" + versionId)
    leaderAndIsrRequest.append(";Controller:" + controllerId)
    leaderAndIsrRequest.append(";ControllerEpoch:" + controllerEpoch)
    leaderAndIsrRequest.append(";CorrelationId:" + correlationId)
    leaderAndIsrRequest.append(";ClientId:" + clientId)
    leaderAndIsrRequest.append(";PartitionState:" + partitionStateInfos.mkString(","))
    leaderAndIsrRequest.append(";Leaders:" + aliveLeaders.mkString(","))
    leaderAndIsrRequest.toString()
  }

  /**
   * On failure, replies with a response mapping every requested partition to
   * the error code derived from the thrown exception's class.
   */
  override def handleError(e: Throwable, requestChannel: RequestChannel, request: RequestChannel.Request): Unit = {
    val responseMap = partitionStateInfos.map {
      case (topicAndPartition, partitionAndState) => (topicAndPartition, ErrorMapping.codeFor(e.getClass.asInstanceOf[Class[Throwable]]))
    }
    val errorResponse = LeaderAndIsrResponse(correlationId, responseMap)
    requestChannel.sendResponse(new Response(request, new BoundedByteBufferSend(errorResponse)))
  }
} | kavink92/kafka-0.8.0-beta1-src | core/src/main/scala/kafka/api/LeaderAndIsrRequest.scala | Scala | apache-2.0 | 8,017 |
package breeze.stats.random
import breeze.stats.distributions.Rand
import breeze.linalg.DenseVector
/**
 * Generates a low-discrepancy (quasi-random) Halton sequence of
 * dim-dimensional vectors, using the first `dim` primes as bases, one base
 * per dimension.
 *
 * NOTE(review): draws mutate an internal counter, so instances are presumably
 * not safe to share across threads — confirm callers draw from one thread.
 *
 * @param dim dimensionality of each generated vector; must be positive
 * @author dlwh
 */
class HaltonSequence(dim: Int) extends Rand[DenseVector[Double]] {
  require(dim > 0, "dim must be positive!")

  // Index of the next point. Starts at 0 and is incremented before use, so the
  // first draw uses index 1 (index 0 would yield the all-zero vector).
  private var count = 0

  // The first `dim` primes: 2, 3, 5, ... via BigInteger.nextProbablePrime.
  val primes = Array.iterate(2L,dim) { last => new java.math.BigInteger(last.toString).nextProbablePrime().longValue() }

  def draw() = {
    count += 1
    val arr = primes.map { prime =>
      // Radical inverse of `count` in base `prime`: reflect the base-`prime`
      // digits of the index about the radix point.
      var h = 0.0
      var f = 1.0 // fix: was the deprecated trailing-dot literal `1.`
      var k : Long = count
      while(k > 0) {
        f /= prime
        h += (k % prime) * f
        k /= prime
      }
      h % 1.0
    }
    new DenseVector[Double](arr)
  }
} | tjhunter/scalanlp-core | learn/src/main/scala/breeze/stats/random/HaltonSequence.scala | Scala | apache-2.0 | 732 |
package per.harenp.Hedgehog
import java.io.File
import java.time._
object Ute
{
  // New York time zone handle and its offset/DST rules.
  val nyTimeZoneId = ZoneId.of("America/New_York")
  val nyTimeZoneRules = nyTimeZoneId.getRules

  /**
   * Converts seconds since the Unix epoch to the epoch day (days since
   * 1970-01-01).
   *
   * Fix: uses Math.floorDiv instead of plain integer division, which
   * truncates toward zero and therefore reported the wrong day for instants
   * before the epoch (e.g. -1 second, i.e. 1969-12-31T23:59:59Z, must be
   * day -1, not day 0). Non-negative inputs are unchanged.
   */
  def epochDay(epochSecs : Long) : Long = Math.floorDiv(epochSecs, 86400L) // 86400 seconds per day
}
| lasttruffulaseed/quant-spark | scala/per/harenp/Hedgehog/Ute.scala | Scala | apache-2.0 | 271 |
package sparklyr
import java.io.{DataInputStream, DataOutputStream}
import java.io.{File, FileOutputStream, IOException, FileWriter}
import java.net.{InetAddress, InetSocketAddress, NetworkInterface, ServerSocket, Socket}
import java.util.concurrent.atomic.AtomicBoolean
import java.util.concurrent.TimeUnit
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import scala.util.Try
/*
* The Backend class is launched from Spark through spark-submit with the following
* paramters: port, session and service.
*
* port: Defined the port the gateway should listen to.
* sessionid: An identifier to track each session and reuse sessions if needed.
* service: A flag to keep this service running until the client forces it to
* shut down by calling "terminateBackend" through the invoke interface.
* remote: A flag to enable the gateway and backend to accept remote connections.
*
* On launch, the Backend will open the gateway socket on the port specified by
* the shell parameter on launch.
*
* If the port is already in use, the Backend will attempt to use the existing
* service running in this port as a sparklyr gateway and register itself. Therefore,
* the gateway socket serves not only as an interface to connect to the current
* instance, but also as bridge to other sparklyr backend instances running in this
* machine. This mechanism is the replacement of the ports file which used to
* communicate ports information back to the sparklyr client, in this model, one
* and only one gateway runs and provides the mapping between sessionids and ports.
*
* While running, the Backend loops under a while(true) loop and blocks under the
* gateway socket accept() method waiting for clients to connect. Once a client
* connects, it launches a thread to process the client requests and blocks again.
*
* In the gateway socket, the thread listens for commands: GetPorts,
* RegisterInstance or UnregisterInstance.
*
* GetPorts provides a mapping to the gateway/backend ports. In a single-client/
* single-backend scenario, the sessionid from the current instance and the
* requested instance will match, a backend gets created and the backend port
* communicated back to the client. In a multiple-backend scenario, GetPort
* will look at the sessionid mapping table and return a redirect port if needed,
* this enables the system to run multiple backends all using the same gateway
* port but still support redirection to the correct sessionid backend. Finally,
* if the sessionis is not found, a delay is introduced in case an existing
* backend is launching an about to register.
*
* RegiterInstance provides a way to map sessionids to ports to other instances
* of sparklyr running in this machines. During launch, if the gateway port is
* already in use, the instance being launched will use this api to communicate
* to the main gateway the port in which this instance will listen to.
*/
/**
 * sparklyr backend: opens the gateway server socket, registers/unregisters
 * sessions with an already-running gateway when the port is taken, and serves
 * GetPorts / RegisterInstance / UnregisterInstance commands from clients.
 * See the file-level comment above for the full protocol description.
 */
class Backend() {

  // Lifecycle flags set by Shell before init(): keep running as a service,
  // accept remote connections, run as a worker, or run a batch R script.
  private[this] var isService: Boolean = false
  private[this] var isRemote: Boolean = false
  private[this] var isWorker: Boolean = false
  private[this] var isBatch: Boolean = false

  // Arguments forwarded to the batch Rscript invocation.
  private[this] var args: Array[String] = Array[String]()

  private[this] var hostContext: String = null
  private[this] val isRunning: AtomicBoolean = new AtomicBoolean(true)
  // True once this instance registered itself with another gateway.
  private[this] var isRegistered: Boolean = false
  // Port of the pre-existing gateway this instance registered with (if any).
  private[this] var gatewayPort: Int = 0
  private[this] var gatewayServerSocket: ServerSocket = null
  // Port this instance actually listens on (may differ from the requested one).
  private[this] var port: Int = 0
  private[this] var sessionId: Int = 0
  private[this] var connectionTimeout: Int = 60
  private[this] var batchFile: String = ""

  private[this] var sc: SparkContext = null
  // sessionId -> gateway port mapping for other registered instances.
  private[this] var sessionsMap: Map[Int, Int] = Map()

  // Loopback by default; replaced with 0.0.0.0 when isRemote.
  private[this] var inetAddress: InetAddress = InetAddress.getLoopbackAddress()

  private[this] var logger: Logger = new Logger("Session", 0);

  // Set once any client has connected; checked by the init monitor.
  private[this] var oneConnection: AtomicBoolean = new AtomicBoolean(false);

  private[this] var defaultTracker: Option[JVMObjectTracker] = None

  /** Installs a JVM object tracker to be shared by backend channels. */
  def setTracker(tracker: JVMObjectTracker): Unit = {
    defaultTracker = Option(tracker)
  }

  // Command ids understood on the gateway socket; ordinal values are the wire
  // protocol, so the declaration order must not change.
  object GatewayOperations extends Enumeration {
    val GetPorts, RegisterInstance, UnregisterInstance = Value
  }

  def getSparkContext(): SparkContext = {
    sc
  }

  def getPort(): Int = {
    port
  }

  def setSparkContext(nsc: SparkContext): Unit = {
    sc = nsc
  }

  def setArgs(argsParam: Array[String]): Unit = {
    args = argsParam
  }

  def setType(isServiceParam: Boolean,
              isRemoteParam: Boolean,
              isWorkerParam: Boolean,
              isBatchParam: Boolean) = {
    isService = isServiceParam
    isRemote = isRemoteParam
    isWorker = isWorkerParam
    isBatch = isBatchParam
  }

  def setHostContext(hostContextParam: String) = {
    hostContext = hostContextParam
  }

  /** init() overload without a batch file path. */
  def init(portParam: Int,
           sessionIdParam: Int,
           connectionTimeoutParam: Int): Unit = {
    init(portParam, sessionIdParam, connectionTimeoutParam, "")
  }

  /**
   * Opens the gateway server socket. If the requested port is free this
   * instance becomes the gateway; otherwise it binds the next free port and
   * registers its session with the gateway already on the requested port.
   * Workers always bind an ephemeral port. Exits the JVM on failure unless
   * running as a service.
   */
  def init(portParam: Int,
           sessionIdParam: Int,
           connectionTimeoutParam: Int,
           batchFilePath: String): Unit = {
    port = portParam
    sessionId = sessionIdParam
    connectionTimeout = connectionTimeoutParam
    batchFile = batchFilePath

    logger = new Logger("Session", sessionId)
    logger.log("is starting under " +
      InetAddress.getLoopbackAddress().getHostAddress +
      " port " + port)

    if (isRemote) {
      logger.log("is configuring for remote connections")

      // 0.0.0.0: listen on all interfaces for remote connections.
      val anyIpAddress = Array[Byte](0, 0, 0, 0)
      inetAddress = InetAddress.getByAddress(anyIpAddress)
    }

    try {
      if (isWorker)
      {
        // Workers bind an ephemeral port; nobody else needs to find them.
        gatewayServerSocket = new ServerSocket(0, 1, inetAddress)
        port = gatewayServerSocket.getLocalPort()
      }
      else if (Utils.portIsAvailable(port, inetAddress))
      {
        // This instance becomes the gateway on the requested port.
        logger.log("found port " + port + " is available")
        logger = new Logger("Gateway", sessionId)

        gatewayServerSocket = new ServerSocket(port, 100, inetAddress)
      }
      else
      {
        // A gateway already owns the port: bind the next free port and
        // register this session with it.
        logger.log("found port " + port + " is not available")
        logger = new Logger("Backend", sessionId)
        if (isWorker) logger = new Logger("Worker", sessionId)

        val newPort = Utils.nextPort(port, inetAddress)
        logger.log("found port " + newPort + " is available")

        gatewayServerSocket = new ServerSocket(newPort, 1, inetAddress)
        gatewayPort = port
        port = gatewayServerSocket.getLocalPort()

        val success = register(gatewayPort, sessionId, port)
        if (!success) {
          logger.logError("failed to register on gateway port " + gatewayPort)
          if (!isService) System.exit(1)
        }

        isRegistered = true
      }

      // No accept() timeout: block until a client connects.
      gatewayServerSocket.setSoTimeout(0)
    } catch {
      case e: IOException =>
        logger.logError("is shutting down from init() with exception ", e)
        if (!isService) System.exit(1)
    }

    // Delay load workers to retrieve ports from backend
    if (!isWorker) run()

    if (!isService) System.exit(0)
  }

  /**
   * Runs the configured batch R script on a background thread, after
   * rewriting it to point sparklyr at this instance's gateway. Exits the JVM
   * if the script cannot be found or fails to start.
   */
  def batch(): Unit = {
    new Thread("starting batch rscript thread") {
      override def run(): Unit = {
        try {
          logger.log("is starting batch rscript")
          val rscript = new Rscript(logger)

          val sparklyrGateway = "sparklyr://localhost:" + port.toString() + "/" + sessionId
          logger.log("will be using rscript gateway: " + sparklyrGateway)

          // Search order: working dir, scratch dir, then the explicit path.
          var sourceFile: File = new java.io.File("sparklyr-batch.R")
          if (!sourceFile.exists) {
            logger.log("tried to find source under working folder: " + (new File(".").getAbsolutePath()))
            logger.log("tried to find source under working files: " + (new File(".")).listFiles.mkString(","))

            sourceFile = new File(rscript.getScratchDir() + File.separator + "sparklyr-batch.R")
            if (!sourceFile.exists) {
              logger.log("tried to find source under scratch folder: " + rscript.getScratchDir().getAbsolutePath())
              logger.log("tried to find source under scratch files: " + rscript.getScratchDir().listFiles.mkString(","))

              sourceFile = new File(batchFile)
            }
          }

          // Prepend the gateway master option, then copy the script verbatim.
          val sourceLines = scala.io.Source.fromFile(sourceFile).getLines
          val modifiedFile: File = new File(rscript.getScratchDir() + File.separator + "sparklyr-batch-mod.R")
          val outStream: FileWriter = new FileWriter(modifiedFile)
          outStream.write("options(sparklyr.connect.master = \\"" + sparklyrGateway + "\\")")
          outStream.write("\\n\\n");
          for (line <- sourceLines) {
            outStream.write(line + "\\n")
          }
          outStream.flush()

          logger.log("wrote modified batch rscript: " + modifiedFile.getAbsolutePath())

          val customEnv: Map[String, String] = Map()
          val options: Map[String, String] = Map()

          rscript.init(
            args.toList,
            modifiedFile.getAbsolutePath(),
            customEnv,
            options
          )
        } catch {
          case e: java.lang.reflect.InvocationTargetException =>
            // Unwrap reflection failures to log the real cause.
            e.getCause() match {
              case cause: Exception => {
                logger.logError("failed to invoke batch rscript: ", cause)
                System.exit(1)
              }
              case _ => {
                logger.logError("failed to invoke batch rscript: ", e)
                System.exit(1)
              }
            }
          case e: Exception => {
            logger.logError("failed to run batch rscript: ", e)
            System.exit(1)
          }
        }
      }
    }.start()
  }

  /**
   * Main accept loop: optionally starts batch mode, arms the idle-connection
   * monitor, then repeatedly blocks in bind() until stopped.
   */
  def run(): Unit = {
    try {
      if (isBatch) {
        // spark context needs to be created for spark.files to be accessible
        org.apache.spark.SparkContext.getOrCreate()
        batch()
      }

      initMonitor()
      while (isRunning.get) {
        bind()
      }
    } catch {
      case e: java.net.SocketException =>
        logger.log("is shutting down with expected SocketException", e)
        if (!isService) System.exit(1)
      case e: IOException =>
        logger.logError("is shutting down from run() with exception ", e)
        if (!isService) System.exit(1)
    }
  }

  /**
   * Starts a watchdog thread that kills the process if no client connects
   * within connectionTimeout seconds (non-service mode only).
   */
  def initMonitor(): Unit = {
    new Thread("starting init monitor thread") {
      override def run(): Unit = {
        Thread.sleep(connectionTimeout * 1000)
        if (!oneConnection.get && !isService) {
          val hostAddress: String = try {
            " to " + InetAddress.getLocalHost.getHostAddress.toString + "/" + getPort()
          } catch {
            case e: java.net.UnknownHostException => "unknown host"
          }

          logger.log(
            "is terminating backend since no client has connected after " +
            connectionTimeout +
            " seconds" +
            hostAddress +
            "."
          )
          System.exit(1)
        }
      }
    }.start()
  }

  /**
   * Accepts one gateway connection and handles it on a new thread: GetPorts
   * creates a backend channel (or redirects to a registered session),
   * RegisterInstance/UnregisterInstance maintain the session map.
   */
  def bind(): Unit = {
    logger.log("is waiting for sparklyr client to connect to port " + port)

    try {
      val interface = NetworkInterface.getByInetAddress(gatewayServerSocket.getInetAddress)
      if (!interface.isUp) {
        logger.logError("Network interface of gateway server socket (" +
          interface.getName + ") is not up.\\n\\n'ifconfig " + interface.getName + " up' (or " +
          "'netsh interface set interface name=" + interface.getName + " admin=enabled' or " +
          "similar in Windows) must be run to bring up this network interface."
        )
        System.exit(1)
      }
    } catch {
      // log exception as warning only
      case e: Throwable =>
        logger.logWarning("Failed to get network interface of gateway server socket" + e.getMessage())
    }

    val gatewaySocket = gatewayServerSocket.accept()
    oneConnection.set(true)

    logger.log("accepted connection")
    val buf = new Array[Byte](1024)

    // wait for the end of stdin, then exit
    new Thread("wait for monitor to close") {
      setDaemon(true)
      override def run(): Unit = {
        try {
          val dis = new DataInputStream(gatewaySocket.getInputStream())
          val commandId = dis.readInt()

          logger.log("received command " + commandId)

          GatewayOperations(commandId) match {
            case GatewayOperations.GetPorts => {
              val requestedSessionId = dis.readInt()
              val startupTimeout = dis.readInt()

              val dos = new DataOutputStream(gatewaySocket.getOutputStream())

              if (requestedSessionId == sessionId)
              {
                // This session: spin up a backend channel and reply with its
                // port, then keep the socket open until the R process dies.
                logger.log("found requested session matches current session")
                logger.log("is creating backend and allocating system resources")

                val tracker = if (defaultTracker.isDefined) defaultTracker.get else new JVMObjectTracker();
                val serializer = new Serializer(tracker);
                val backendChannel = new BackendChannel(logger, terminate, serializer, tracker)
                backendChannel.setHostContext(hostContext)

                val backendPort: Int = backendChannel.init(isRemote, port, !isWorker)

                logger.log("created the backend")

                try {
                  // wait for the end of stdin, then exit
                  new Thread("run backend") {
                    setDaemon(true)
                    override def run(): Unit = {
                      try {
                        dos.writeInt(sessionId)
                        dos.writeInt(gatewaySocket.getLocalPort())
                        dos.writeInt(backendPort)

                        backendChannel.run()
                      }
                      catch {
                        case e: IOException =>
                          logger.logError("failed with exception ", e)

                        if (!isService) System.exit(1)

                        terminate()
                      }
                    }
                  }.start()

                  logger.log("is waiting for r process to end")

                  // wait for the end of socket, closed if R process die
                  gatewaySocket.getInputStream().read(buf)
                }
                finally {
                  backendChannel.close()

                  if (!isService) {
                    logger.log("is terminating backend")
                    gatewayServerSocket.close()
                    System.exit(0)
                  }

                  // workers should always terminate but without exceptions
                  if (isWorker) {
                    logger.log("is terminating backend")
                    isRunning.set(false)
                    gatewayServerSocket.close()
                  }
                }
              }
              else
              {
                // Another session: look up (and briefly wait for) its port in
                // the session map and redirect, or reply with zeros.
                logger.log("is searching for session " + requestedSessionId)

                var portForSession = sessionsMap.get(requestedSessionId)

                var sessionMapRetries: Int = startupTimeout * 10
                while (!portForSession.isDefined && sessionMapRetries > 0)
                {
                  portForSession = sessionsMap.get(requestedSessionId)

                  Thread.sleep(100)
                  sessionMapRetries = sessionMapRetries - 1
                }

                if (portForSession.isDefined)
                {
                  logger.log("found mapping for session " + requestedSessionId)

                  dos.writeInt(requestedSessionId)
                  dos.writeInt(portForSession.get)
                  dos.writeInt(0)
                }
                else
                {
                  logger.log("found no mapping for session " + requestedSessionId)

                  dos.writeInt(requestedSessionId)
                  dos.writeInt(0)
                  dos.writeInt(0)
                }
              }

              dos.close()
            }
            case GatewayOperations.RegisterInstance => {
              val registerSessionId = dis.readInt()
              val registerGatewayPort = dis.readInt()

              logger.log("received session " + registerSessionId + " registration request")
              sessionsMap += (registerSessionId -> registerGatewayPort)

              // Status 0 acknowledges the registration.
              val dos = new DataOutputStream(gatewaySocket.getOutputStream())
              dos.writeInt(0)
              dos.flush()
              dos.close()
            }
            case GatewayOperations.UnregisterInstance => {
              val unregisterSessionId = dis.readInt()

              logger.log("received session " + unregisterSessionId + " unregistration request")

              if (sessionsMap.contains(unregisterSessionId)) {
                logger.log("found session " + unregisterSessionId + " during unregistration request")
                sessionsMap -= unregisterSessionId
              }

              // Status 0 acknowledges the unregistration.
              val dos = new DataOutputStream(gatewaySocket.getOutputStream())
              dos.writeInt(0)
              dos.flush()
              dos.close()
            }
          }

          gatewaySocket.close()
        } catch {
          case e: IOException =>
            logger.logError("failed with exception ", e)

            if (!isService) System.exit(1)
        }
      }
    }.start()
  }

  /**
   * Registers this session's port with the gateway running on gatewayPort.
   * @return true when the gateway acknowledged with status 0
   */
  def register(gatewayPort: Int, sessionId: Int, port: Int): Boolean = {
    logger.log("is registering session in gateway")

    val s = new Socket(InetAddress.getLoopbackAddress(), gatewayPort)

    val dos = new DataOutputStream(s.getOutputStream())
    dos.writeInt(GatewayOperations.RegisterInstance.id)
    dos.writeInt(sessionId)
    dos.writeInt(port)

    logger.log("is waiting for registration in gateway")

    val dis = new DataInputStream(s.getInputStream())
    val status = dis.readInt()

    logger.log("finished registration in gateway with status " + status)
    s.close()
    status == 0
  }

  /**
   * Shuts this instance down: unregisters from the gateway when registered
   * and closes the server socket (unless running as a non-worker service).
   */
  def terminate() = {
    if (isRegistered && !isWorker) {
      val success = unregister(gatewayPort, sessionId)
      if (!success) {
        logger.logError("failed to unregister on gateway port " + gatewayPort)
        if (!isService) System.exit(1)
      }
    }

    if (!isService || isWorker) {
      isRunning.set(false)
      gatewayServerSocket.close()
    }
  }

  /**
   * Removes this session's mapping from the gateway on gatewayPort.
   * @return true when acknowledged with status 0; false on any failure
   */
  def unregister(gatewayPort: Int, sessionId: Int): Boolean = {
    try {
      logger.log("is unregistering session in gateway")

      val s = new Socket(InetAddress.getLoopbackAddress(), gatewayPort)

      val dos = new DataOutputStream(s.getOutputStream())
      dos.writeInt(GatewayOperations.UnregisterInstance.id)
      dos.writeInt(sessionId)

      logger.log("is waiting for unregistration in gateway")

      val dis = new DataInputStream(s.getInputStream())
      val status = dis.readInt()

      logger.log("finished unregistration in gateway with status " + status)
      s.close()
      status == 0
    } catch {
      case e: Exception =>
        logger.log("failed to unregister from gateway: " + e.toString)
        false
    }
  }
}
/**
 * Legacy entry point retained for backward compatibility with Databricks;
 * it simply delegates to [[Shell]].
 */
object Backend {
  def main(args: Array[String]): Unit = Shell.main(args)
}
| rstudio/sparklyr | java/spark-1.5.2/backend.scala | Scala | apache-2.0 | 19,331 |
package com.github.dunmatt.tujr
import alice.tuprolog.{Prolog, Theory}
import de.jreality.geometry.Primitives
import de.jreality.plugin.JRViewer
/**
 * Small demo wiring a tuProlog engine into a jReality viewer: displays an
 * icosahedron and exposes a Prolog console plugin backed by the engine.
 */
object Demo {
  def main (args: Array[String]): Unit = {
    // Prolog engine preloaded with the classic Socrates syllogism theory.
    val prologEngine = new Prolog
    prologEngine.setTheory(new Theory("man(socrates). mortal(X) :- man(X). "))

    // Viewer shows an icosahedron plus the basic UI and the Prolog console.
    val jrealityViewer = new JRViewer
    jrealityViewer.setContent(Primitives.icosahedron())
    jrealityViewer.addBasicUI
    jrealityViewer.registerPlugin(new PrologConsole(prologEngine))
    jrealityViewer.startup
  }
}
| dunmatt/tujr | src/com/github/dunmatt/tujr/Demo.scala | Scala | bsd-2-clause | 485 |
package com.twitter.finagle.util
import com.twitter.io.Buf
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import org.scalatest.prop.GeneratorDrivenPropertyChecks
@RunWith(classOf[JUnitRunner])
class BufWriterTest extends FunSuite with GeneratorDrivenPropertyChecks {
  import BufWriter.OverflowException

  /**
   * Expected big-endian encoding of the low `width` bytes of `value`;
   * replaces the four hand-written shift lists of the original tests.
   */
  private def bigEndianBytes(value: Long, width: Int): Array[Byte] =
    Array.tabulate(width)(i => ((value >> (8 * (width - 1 - i))) & 0xff).toByte)

  test("writeByte") (forAll { byte: Byte =>
    val writer = BufWriter.fixed(1)
    val result = writer.writeByte(byte).owned()
    // The fixed-size writer must refuse any write past capacity.
    intercept[OverflowException] { writer.writeByte(byte) }
    assert(result == Buf.ByteArray.Owned(Array(byte)))
  })

  test("writeShortBE") (forAll { short: Short =>
    val writer = BufWriter.fixed(2)
    val result = writer.writeShortBE(short).owned()
    intercept[OverflowException] { writer.writeByte(0xff) }
    assert(result == Buf.ByteArray.Owned(bigEndianBytes(short, 2)))
  })

  test("writeIntBE") (forAll { int: Int =>
    val writer = BufWriter.fixed(4)
    val result = writer.writeIntBE(int).owned()
    intercept[OverflowException] { writer.writeByte(0xff) }
    assert(result == Buf.ByteArray.Owned(bigEndianBytes(int, 4)))
  })

  test("writeLongBE") (forAll { long: Long =>
    val writer = BufWriter.fixed(8)
    val result = writer.writeLongBE(long).owned()
    intercept[OverflowException] { writer.writeByte(0xff) }
    assert(result == Buf.ByteArray.Owned(bigEndianBytes(long, 8)))
  })

  test("writeBytes") (forAll { bytes: Array[Byte] =>
    val writer = BufWriter.fixed(bytes.length)
    val result = writer.writeBytes(bytes).owned()
    intercept[OverflowException] { writer.writeByte(0xff) }
    assert(result == Buf.ByteArray.Owned(bytes))
  })
}
package scala.lms
package common
import java.io.PrintWriter
import scala.lms.util.OverloadHack
import scala.reflect.SourceContext
/**
 * Front-end DSL interface lifting Scala's `Ordering`-based comparison
 * operators (`<`, `<=`, `>`, `>=`, `equiv`, `max`, `min`, `compare`) onto
 * staged `Rep[T]` values. Implemented by [[OrderingOpsExp]].
 */
trait OrderingOps extends Base with Variables with BooleanOps with PrimitiveOps with OverloadHack {

  // workaround for infix not working with implicits in PrimitiveOps
  implicit def orderingToOrderingOps[T:Ordering:Typ](n: T) = new OrderingOpsCls(unit(n))
  implicit def repOrderingToOrderingOps[T:Ordering:Typ](n: Rep[T]) = new OrderingOpsCls(n)
  implicit def varOrderingToOrderingOps[T:Ordering:Typ](n: Var[T]) = new OrderingOpsCls(readVar(n))

  // Infix syntax wrapper. The first group of methods takes a staged operand;
  // the second group accepts any value convertible to Rep[T] (e.g. an
  // unstaged constant) via the implicit conversion `c`.
  class OrderingOpsCls[T:Ordering:Typ](lhs: Rep[T]){
    def < (rhs: Rep[T])(implicit pos: SourceContext) = ordering_lt(lhs, rhs)
    def <= (rhs: Rep[T])(implicit pos: SourceContext) = ordering_lteq(lhs, rhs)
    def > (rhs: Rep[T])(implicit pos: SourceContext) = ordering_gt(lhs, rhs)
    def >= (rhs: Rep[T])(implicit pos: SourceContext) = ordering_gteq(lhs, rhs)
    def equiv (rhs: Rep[T])(implicit pos: SourceContext) = ordering_equiv(lhs, rhs)
    def max (rhs: Rep[T])(implicit pos: SourceContext) = ordering_max(lhs, rhs)
    def min (rhs: Rep[T])(implicit pos: SourceContext) = ordering_min(lhs, rhs)
    def compare (rhs: Rep[T])(implicit pos: SourceContext) = ordering_compare(lhs, rhs)

    def < [B](rhs: B)(implicit c: B => Rep[T], pos: SourceContext) = ordering_lt(lhs, c(rhs))
    def <= [B](rhs: B)(implicit c: B => Rep[T], pos: SourceContext) = ordering_lteq(lhs, c(rhs))
    def > [B](rhs: B)(implicit c: B => Rep[T], pos: SourceContext) = ordering_gt(lhs, c(rhs))
    def >= [B](rhs: B)(implicit c: B => Rep[T], pos: SourceContext) = ordering_gteq(lhs, c(rhs))
    def equiv [B](rhs: B)(implicit c: B => Rep[T], pos: SourceContext) = ordering_equiv(lhs, c(rhs))
    def max [B](rhs: B)(implicit c: B => Rep[T], pos: SourceContext) = ordering_max(lhs, c(rhs))
    def min [B](rhs: B)(implicit c: B => Rep[T], pos: SourceContext) = ordering_min(lhs, c(rhs))
    def compare [B](rhs: B)(implicit c: B => Rep[T], pos: SourceContext) = ordering_compare(lhs, c(rhs))
  }

  // Abstract smart constructors; concrete IR nodes live in OrderingOpsExp.
  def ordering_lt [T:Ordering:Typ](lhs: Rep[T], rhs: Rep[T])(implicit pos: SourceContext): Rep[Boolean]
  def ordering_lteq [T:Ordering:Typ](lhs: Rep[T], rhs: Rep[T])(implicit pos: SourceContext): Rep[Boolean]
  def ordering_gt [T:Ordering:Typ](lhs: Rep[T], rhs: Rep[T])(implicit pos: SourceContext): Rep[Boolean]
  def ordering_gteq [T:Ordering:Typ](lhs: Rep[T], rhs: Rep[T])(implicit pos: SourceContext): Rep[Boolean]
  def ordering_equiv [T:Ordering:Typ](lhs: Rep[T], rhs: Rep[T])(implicit pos: SourceContext): Rep[Boolean]
  def ordering_max [T:Ordering:Typ](lhs: Rep[T], rhs: Rep[T])(implicit pos: SourceContext): Rep[T]
  def ordering_min [T:Ordering:Typ](lhs: Rep[T], rhs: Rep[T])(implicit pos: SourceContext): Rep[T]
  def ordering_compare [T:Ordering:Typ](lhs: Rep[T], rhs: Rep[T])(implicit pos: SourceContext): Rep[Int]
}
/**
 * IR node definitions and smart constructors for the ordering operations
 * declared in [[OrderingOps]], plus transformer support via `mirror`.
 */
trait OrderingOpsExp extends OrderingOps with VariablesExp {

  // Base class for ordering nodes. It captures the element type's Typ and
  // Ordering evidence (mev / aev) so that `mirror` can re-invoke the smart
  // constructors with the original implicit arguments.
  abstract class DefMN[T:Ordering:Typ,A] extends Def[A] {
    def mev = typ[T]
    def aev = implicitly[Ordering[T]]
  }

  case class OrderingLT [T:Ordering:Typ](lhs: Exp[T], rhs: Exp[T]) extends DefMN[T,Boolean]
  case class OrderingLTEQ [T:Ordering:Typ](lhs: Exp[T], rhs: Exp[T]) extends DefMN[T,Boolean]
  case class OrderingGT [T:Ordering:Typ](lhs: Exp[T], rhs: Exp[T]) extends DefMN[T,Boolean]
  case class OrderingGTEQ [T:Ordering:Typ](lhs: Exp[T], rhs: Exp[T]) extends DefMN[T,Boolean]
  case class OrderingEquiv [T:Ordering:Typ](lhs: Exp[T], rhs: Exp[T]) extends DefMN[T,Boolean]
  case class OrderingMax [T:Ordering:Typ](lhs: Exp[T], rhs: Exp[T]) extends DefMN[T,T]
  case class OrderingMin [T:Ordering:Typ](lhs: Exp[T], rhs: Exp[T]) extends DefMN[T,T]
  case class OrderingCompare [T:Ordering:Typ](lhs: Exp[T], rhs: Exp[T]) extends DefMN[T,Int]

  def ordering_lt [T:Ordering:Typ](lhs: Exp[T], rhs: Exp[T])(implicit pos: SourceContext): Rep[Boolean] = OrderingLT(lhs,rhs)
  def ordering_lteq [T:Ordering:Typ](lhs: Exp[T], rhs: Exp[T])(implicit pos: SourceContext): Rep[Boolean] = OrderingLTEQ(lhs,rhs)
  def ordering_gt [T:Ordering:Typ](lhs: Exp[T], rhs: Exp[T])(implicit pos: SourceContext): Rep[Boolean] = OrderingGT(lhs,rhs)
  def ordering_gteq [T:Ordering:Typ](lhs: Exp[T], rhs: Exp[T])(implicit pos: SourceContext): Rep[Boolean] = OrderingGTEQ(lhs,rhs)
  def ordering_equiv [T:Ordering:Typ](lhs: Exp[T], rhs: Exp[T])(implicit pos: SourceContext): Rep[Boolean] = OrderingEquiv(lhs,rhs)
  def ordering_max [T:Ordering:Typ](lhs: Exp[T], rhs: Exp[T])(implicit pos: SourceContext): Rep[T] = OrderingMax(lhs,rhs)
  def ordering_min [T:Ordering:Typ](lhs: Exp[T], rhs: Exp[T])(implicit pos: SourceContext): Rep[T] = OrderingMin(lhs,rhs)
  def ordering_compare[T:Ordering:Typ](lhs: Exp[T], rhs: Exp[T])(implicit pos: SourceContext): Rep[Int] = OrderingCompare(lhs,rhs)

  // Rebuilds each ordering node with operands rewritten by the transformer
  // `f`, reusing the stored evidence (aev/mev). Reflect cases preserve the
  // node's effect summary and dependencies. The asInstanceOf casts on
  // max/min are needed because those nodes return T rather than a fixed type.
  override def mirror[A:Typ](e: Def[A], f: Transformer)(implicit pos: SourceContext): Exp[A] = (e match {
    case e@OrderingLT(a,b) => ordering_lt(f(a),f(b))(e.aev,e.mev,pos)
    case e@OrderingLTEQ(a,b) => ordering_lteq(f(a),f(b))(e.aev,e.mev,pos)
    case e@OrderingGT(a,b) => ordering_gt(f(a),f(b))(e.aev,e.mev,pos)
    case e@OrderingGTEQ(a,b) => ordering_gteq(f(a),f(b))(e.aev,e.mev,pos)
    case e@OrderingEquiv(a,b) => ordering_equiv(f(a),f(b))(e.aev,e.mev,pos)
    case e@OrderingMax(a,b) => ordering_max(f(a),f(b))(e.aev.asInstanceOf[Ordering[A]],mtype(e.mev),pos)
    case e@OrderingMin(a,b) => ordering_min(f(a),f(b))(e.aev.asInstanceOf[Ordering[A]],mtype(e.mev),pos)
    case e@OrderingCompare(a,b) => ordering_compare(f(a),f(b))(e.aev,e.mev,pos)
    case Reflect(e@OrderingLT(a,b), u, es) => reflectMirrored(Reflect(OrderingLT(f(a),f(b))(e.aev,e.mev), mapOver(f,u), f(es)))(mtyp1[A], pos)
    case Reflect(e@OrderingLTEQ(a,b), u, es) => reflectMirrored(Reflect(OrderingLTEQ(f(a),f(b))(e.aev,e.mev), mapOver(f,u), f(es)))(mtyp1[A], pos)
    case Reflect(e@OrderingGT(a,b), u, es) => reflectMirrored(Reflect(OrderingGT(f(a),f(b))(e.aev,e.mev), mapOver(f,u), f(es)))(mtyp1[A], pos)
    case Reflect(e@OrderingGTEQ(a,b), u, es) => reflectMirrored(Reflect(OrderingGTEQ(f(a),f(b))(e.aev,e.mev), mapOver(f,u), f(es)))(mtyp1[A], pos)
    case Reflect(e@OrderingEquiv(a,b), u, es) => reflectMirrored(Reflect(OrderingEquiv(f(a),f(b))(e.aev,e.mev), mapOver(f,u), f(es)))(mtyp1[A], pos)
    case Reflect(e@OrderingMax(a,b), u, es) => reflectMirrored(Reflect(OrderingMax(f(a),f(b))(e.aev.asInstanceOf[Ordering[A]],mtype(e.mev)), mapOver(f,u), f(es)))(mtyp1[A], pos)
    case Reflect(e@OrderingMin(a,b), u, es) => reflectMirrored(Reflect(OrderingMin(f(a),f(b))(e.aev.asInstanceOf[Ordering[A]],mtype(e.mev)), mapOver(f,u), f(es)))(mtyp1[A], pos)
    case Reflect(e@OrderingCompare(a,b), u, es) => reflectMirrored(Reflect(OrderingCompare(f(a),f(b))(e.aev,e.mev), mapOver(f,u), f(es)))(mtyp1[A], pos)
    case _ => super.mirror(e, f)
  }).asInstanceOf[Exp[A]]
}
/**
 * Staging-time simplifications for ordering operations: a comparison of two
 * constants is folded immediately via the Ordering instance, and comparing a
 * staged expression against itself is reduced to its statically-known result
 * (valid because scala.math.Ordering is a total order).
 *
 * @author Alen Stojanov (astojanov@inf.ethz.ch)
 */
trait OrderingOpsExpOpt extends OrderingOpsExp {

  override def ordering_lt[T:Ordering:Typ](lhs: Exp[T], rhs: Exp[T])(implicit pos: SourceContext): Rep[Boolean] = (lhs, rhs) match {
    case (Const(a), Const(b)) => Const(implicitly[Ordering[T]].lt(a, b))
    case _ if lhs.equals(rhs) => Const(false) // x < x never holds
    case _ => super.ordering_lt(lhs, rhs)
  }

  override def ordering_lteq[T:Ordering:Typ](lhs: Exp[T], rhs: Exp[T])(implicit pos: SourceContext): Rep[Boolean] = (lhs, rhs) match {
    case (Const(a), Const(b)) => Const(implicitly[Ordering[T]].lteq(a, b))
    case _ if lhs.equals(rhs) => Const(true) // x <= x always holds
    case _ => super.ordering_lteq(lhs, rhs)
  }

  override def ordering_gt[T:Ordering:Typ](lhs: Exp[T], rhs: Exp[T])(implicit pos: SourceContext): Rep[Boolean] = (lhs, rhs) match {
    case (Const(a), Const(b)) => Const(implicitly[Ordering[T]].gt(a, b))
    case _ if lhs.equals(rhs) => Const(false) // x > x never holds
    case _ => super.ordering_gt(lhs, rhs)
  }

  override def ordering_gteq[T:Ordering:Typ](lhs: Exp[T], rhs: Exp[T])(implicit pos: SourceContext): Rep[Boolean] = (lhs, rhs) match {
    case (Const(a), Const(b)) => Const(implicitly[Ordering[T]].gteq(a, b))
    case _ if lhs.equals(rhs) => Const(true) // x >= x always holds
    case _ => super.ordering_gteq(lhs, rhs)
  }

  override def ordering_equiv[T:Ordering:Typ](lhs: Exp[T], rhs: Exp[T])(implicit pos: SourceContext): Rep[Boolean] = (lhs, rhs) match {
    case (Const(a), Const(b)) => Const(implicitly[Ordering[T]].equiv(a, b))
    case _ if lhs.equals(rhs) => Const(true) // x equiv x always holds
    case _ => super.ordering_equiv(lhs, rhs)
  }

  override def ordering_max[T:Ordering:Typ](lhs: Exp[T], rhs: Exp[T])(implicit pos: SourceContext): Rep[T] = (lhs, rhs) match {
    case (Const(a), Const(b)) => Const(implicitly[Ordering[T]].max(a, b))
    case _ if lhs.equals(rhs) => lhs // max(x, x) == x
    case _ => super.ordering_max(lhs, rhs)
  }

  override def ordering_min[T:Ordering:Typ](lhs: Exp[T], rhs: Exp[T])(implicit pos: SourceContext): Rep[T] = (lhs, rhs) match {
    case (Const(a), Const(b)) => Const(implicitly[Ordering[T]].min(a, b))
    case _ if lhs.equals(rhs) => lhs // min(x, x) == x
    case _ => super.ordering_min(lhs, rhs)
  }

  override def ordering_compare[T:Ordering:Typ](lhs: Exp[T], rhs: Exp[T])(implicit pos: SourceContext): Rep[Int] = (lhs, rhs) match {
    case (Const(a), Const(b)) => Const(implicitly[Ordering[T]].compare(a, b))
    case _ if lhs.equals(rhs) => Const[Int](0) // compare(x, x) == 0
    case _ => super.ordering_compare(lhs, rhs)
  }
}
/**
 * Scala code generation for ordering nodes. Comparison operators are emitted
 * as plain infix operators; compare uses java.lang wrappers for primitives.
 */
trait ScalaGenOrderingOps extends ScalaGenBase {
  val IR: OrderingOpsExp
  import IR._

  override def emitNode(sym: Sym[Any], rhs: Def[Any]) = rhs match {
    case OrderingLT(a,b) => emitValDef(sym, src"$a < $b")
    case OrderingLTEQ(a,b) => emitValDef(sym, src"$a <= $b")
    case OrderingGT(a,b) => emitValDef(sym, src"$a > $b")
    case OrderingGTEQ(a,b) => emitValDef(sym, src"$a >= $b")
    case OrderingEquiv(a,b) => emitValDef(sym, src"$a equiv $b")
    // "$a max $b" is wrong for Strings because it tries to use `StringLike.max(Ordering)`
    // can't compare with typ[String] without extending StringOps
    case c@OrderingMax(a,b) =>
      val rhs = if (c.mev.runtimeClass == classOf[String])
        src"scala.math.Ordering.String.max($a, $b)"
      else
        src"$a max $b"
      emitValDef(sym, rhs)
    case c@OrderingMin(a,b) =>
      val rhs = if (c.mev.runtimeClass == classOf[String])
        src"scala.math.Ordering.String.min($a, $b)"
      else
        src"$a min $b"
      emitValDef(sym, rhs)
    // For primitive element types, emit the dedicated java.lang comparator
    // (e.g. Integer.compare); anything else falls back to infix `compare`.
    case c@OrderingCompare(a,b) => c.mev match {
      case m if m == typ[Int] => emitValDef(sym, "java.lang.Integer.compare("+quote(a)+","+quote(b)+")")
      case m if m == typ[Long] => emitValDef(sym, "java.lang.Long.compare("+quote(a)+","+quote(b)+")")
      case m if m == typ[Double] => emitValDef(sym, "java.lang.Double.compare("+quote(a)+","+quote(b)+")")
      case m if m == typ[Float] => emitValDef(sym, "java.lang.Float.compare("+quote(a)+","+quote(b)+")")
      case m if m == typ[Boolean] => emitValDef(sym, "java.lang.Boolean.compare("+quote(a)+","+quote(b)+")")
      case m if m == typ[Byte] => emitValDef(sym, "java.lang.Byte.compare("+quote(a)+","+quote(b)+")")
      case m if m == typ[Char] => emitValDef(sym, "java.lang.Character.compare("+quote(a)+","+quote(b)+")")
      case m if m == typ[Short] => emitValDef(sym, "java.lang.Short.compare("+quote(a)+","+quote(b)+")")
      case _ => emitValDef(sym, quote(a) + " compare " + quote(b))
    }
    case _ => super.emitNode(sym, rhs)
  }
}
/**
 * Shared C-like code generation for ordering nodes (used by the C, CUDA and
 * OpenCL backends). max/min rely on MAX/MIN macros being in scope.
 */
trait CLikeGenOrderingOps extends CLikeGenBase {
  val IR: OrderingOpsExp
  import IR._

  // TODO: Add MIN/MAX macro needs to C-like header file
  override def emitNode(sym: Sym[Any], rhs: Def[Any]) = {
    rhs match {
      case OrderingLT(a,b) =>
        emitValDef(sym, src"$a < $b")
      case OrderingLTEQ(a,b) =>
        emitValDef(sym, src"$a <= $b")
      case OrderingGT(a,b) =>
        emitValDef(sym, src"$a > $b")
      case OrderingGTEQ(a,b) =>
        emitValDef(sym, src"$a >= $b")
      case OrderingEquiv(a,b) =>
        emitValDef(sym, src"$a == $b")
      case OrderingMax(a,b) =>
        //emitValDef(sym, quote(a) + ">" + quote(b) + "?" + quote(a) + ":" + quote(b))
        emitValDef(sym, src"MAX($a, $b)")
      case OrderingMin(a,b) =>
        //emitValDef(sym, quote(a) + "<" + quote(b) + "?" + quote(a) + ":" + quote(b))
        emitValDef(sym, src"MIN($a, $b)")
      case OrderingCompare(a,b) =>
        // Three-way comparison expressed as nested conditionals.
        emitValDef(sym, src"($a < $b) ? -1 : ($a == $b) ? 0 : 1")
      case _ => super.emitNode(sym, rhs)
    }
  }
}
// Target-specific generator traits; all C-like targets share one emitter.
trait CudaGenOrderingOps extends CudaGenBase with CLikeGenOrderingOps
trait OpenCLGenOrderingOps extends OpenCLGenBase with CLikeGenOrderingOps
trait CGenOrderingOps extends CGenBase with CLikeGenOrderingOps
| TiarkRompf/virtualization-lms-core | src/common/OrderingOps.scala | Scala | bsd-3-clause | 13,081 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.status.api.v1
import java.util.{Arrays, Date, List => JList}
import javax.ws.rs.{GET, Produces, QueryParam}
import javax.ws.rs.core.MediaType
import org.apache.spark.scheduler.{AccumulableInfo => InternalAccumulableInfo, StageInfo}
import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.jobs.UIData.{StageUIData, TaskUIData}
import org.apache.spark.ui.jobs.UIData.{InputMetricsUIData => InternalInputMetrics, OutputMetricsUIData => InternalOutputMetrics, ShuffleReadMetricsUIData => InternalShuffleReadMetrics, ShuffleWriteMetricsUIData => InternalShuffleWriteMetrics, TaskMetricsUIData => InternalTaskMetrics}
import org.apache.spark.util.Distribution
@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class AllStagesResource(ui: SparkUI) {

  /**
   * Lists summary data for all stages whose status matches the filter.
   * An empty `status` query parameter means "all statuses".
   */
  @GET
  def stageList(@QueryParam("status") statuses: JList[StageStatus]): Seq[StageData] = {
    val listener = ui.jobProgressListener
    val stageAndStatus = AllStagesResource.stagesAndStatus(ui)
    val adjStatuses = {
      if (statuses.isEmpty()) {
        Arrays.asList(StageStatus.values(): _*)
      } else {
        statuses
      }
    }
    for {
      (status, stageList) <- stageAndStatus
      stageInfo: StageInfo <- stageList if adjStatuses.contains(status)
      // The per-stage UI data lookup must hold the listener lock; stages
      // with no recorded UI data are silently dropped by this generator.
      stageUiData: StageUIData <- listener.synchronized {
        listener.stageIdToData.get((stageInfo.stageId, stageInfo.attemptId))
      }
    } yield {
      AllStagesResource.stageUiToStageData(status, stageInfo, stageUiData, includeDetails = false)
    }
  }
}
/**
 * Conversion helpers shared by the stage-related REST endpoints: they map
 * Spark's internal job-progress listener data structures onto the public
 * v1 API types.
 */
private[v1] object AllStagesResource {

  /**
   * Converts one stage's listener data into the public StageData shape.
   * Per-task and per-executor breakdowns are only materialized when
   * `includeDetails` is true.
   */
  def stageUiToStageData(
      status: StageStatus,
      stageInfo: StageInfo,
      stageUiData: StageUIData,
      includeDetails: Boolean): StageData = {
    // Unlaunched tasks report a non-positive launchTime; exclude them.
    val taskLaunchTimes = stageUiData.taskData.values.map(_.taskInfo.launchTime).filter(_ > 0)

    val firstTaskLaunchedTime: Option[Date] =
      if (taskLaunchTimes.nonEmpty) {
        Some(new Date(taskLaunchTimes.min))
      } else {
        None
      }

    val taskData = if (includeDetails) {
      Some(stageUiData.taskData.map { case (k, v) => k -> convertTaskData(v) } )
    } else {
      None
    }
    val executorSummary = if (includeDetails) {
      Some(stageUiData.executorSummary.map { case (k, summary) =>
        k -> new ExecutorStageSummary(
          taskTime = summary.taskTime,
          failedTasks = summary.failedTasks,
          succeededTasks = summary.succeededTasks,
          inputBytes = summary.inputBytes,
          outputBytes = summary.outputBytes,
          shuffleRead = summary.shuffleRead,
          shuffleWrite = summary.shuffleWrite,
          memoryBytesSpilled = summary.memoryBytesSpilled,
          diskBytesSpilled = summary.diskBytesSpilled
        )
      })
    } else {
      None
    }

    val accumulableInfo = stageUiData.accumulables.values.map { convertAccumulableInfo }.toSeq

    new StageData(
      status = status,
      stageId = stageInfo.stageId,
      attemptId = stageInfo.attemptId,
      numActiveTasks = stageUiData.numActiveTasks,
      numCompleteTasks = stageUiData.numCompleteTasks,
      numFailedTasks = stageUiData.numFailedTasks,
      executorRunTime = stageUiData.executorRunTime,
      executorCpuTime = stageUiData.executorCpuTime,
      submissionTime = stageInfo.submissionTime.map(new Date(_)),
      firstTaskLaunchedTime,
      completionTime = stageInfo.completionTime.map(new Date(_)),
      inputBytes = stageUiData.inputBytes,
      inputRecords = stageUiData.inputRecords,
      outputBytes = stageUiData.outputBytes,
      outputRecords = stageUiData.outputRecords,
      shuffleReadBytes = stageUiData.shuffleReadTotalBytes,
      shuffleReadRecords = stageUiData.shuffleReadRecords,
      shuffleWriteBytes = stageUiData.shuffleWriteBytes,
      shuffleWriteRecords = stageUiData.shuffleWriteRecords,
      memoryBytesSpilled = stageUiData.memoryBytesSpilled,
      diskBytesSpilled = stageUiData.diskBytesSpilled,
      schedulingPool = stageUiData.schedulingPool,
      name = stageInfo.name,
      details = stageInfo.details,
      accumulatorUpdates = accumulableInfo,
      tasks = taskData,
      executorSummary = executorSummary
    )
  }

  /** Snapshots all known stages from the listener, grouped by status. */
  def stagesAndStatus(ui: SparkUI): Seq[(StageStatus, Seq[StageInfo])] = {
    val listener = ui.jobProgressListener
    listener.synchronized {
      Seq(
        StageStatus.ACTIVE -> listener.activeStages.values.toSeq,
        StageStatus.COMPLETE -> listener.completedStages.reverse.toSeq,
        StageStatus.FAILED -> listener.failedStages.reverse.toSeq,
        StageStatus.PENDING -> listener.pendingStages.values.toSeq
      )
    }
  }

  /** Converts a single task's UI data into the public TaskData type. */
  def convertTaskData(uiData: TaskUIData): TaskData = {
    new TaskData(
      taskId = uiData.taskInfo.taskId,
      index = uiData.taskInfo.index,
      attempt = uiData.taskInfo.attemptNumber,
      launchTime = new Date(uiData.taskInfo.launchTime),
      duration = uiData.taskDuration,
      executorId = uiData.taskInfo.executorId,
      host = uiData.taskInfo.host,
      status = uiData.taskInfo.status,
      taskLocality = uiData.taskInfo.taskLocality.toString(),
      speculative = uiData.taskInfo.speculative,
      accumulatorUpdates = uiData.taskInfo.accumulables.map { convertAccumulableInfo },
      errorMessage = uiData.errorMessage,
      taskMetrics = uiData.metrics.map { convertUiTaskMetrics }
    )
  }

  /**
   * Computes per-metric quantile distributions over all tasks that reported
   * metrics (tasks without metrics are excluded by the flatMap).
   */
  def taskMetricDistributions(
      allTaskData: Iterable[TaskUIData],
      quantiles: Array[Double]): TaskMetricDistributions = {

    val rawMetrics = allTaskData.flatMap{_.metrics}.toSeq

    def metricQuantiles(f: InternalTaskMetrics => Double): IndexedSeq[Double] =
      Distribution(rawMetrics.map { d => f(d) }).get.getQuantiles(quantiles)

    // We need to do a lot of similar munging to nested metrics here. For each one,
    // we want (a) extract the values for nested metrics (b) make a distribution for each metric
    // (c) shove the distribution into the right field in our return type and (d) only return
    // a result if the option is defined for any of the tasks. MetricHelper is a little util
    // to make it a little easier to deal w/ all of the nested options. Mostly it lets us just
    // implement one "build" method, which just builds the quantiles for each field.

    val inputMetrics: InputMetricDistributions =
      new MetricHelper[InternalInputMetrics, InputMetricDistributions](rawMetrics, quantiles) {
        def getSubmetrics(raw: InternalTaskMetrics): InternalInputMetrics = raw.inputMetrics

        def build: InputMetricDistributions = new InputMetricDistributions(
          bytesRead = submetricQuantiles(_.bytesRead),
          recordsRead = submetricQuantiles(_.recordsRead)
        )
      }.build

    val outputMetrics: OutputMetricDistributions =
      new MetricHelper[InternalOutputMetrics, OutputMetricDistributions](rawMetrics, quantiles) {
        def getSubmetrics(raw: InternalTaskMetrics): InternalOutputMetrics = raw.outputMetrics

        def build: OutputMetricDistributions = new OutputMetricDistributions(
          bytesWritten = submetricQuantiles(_.bytesWritten),
          recordsWritten = submetricQuantiles(_.recordsWritten)
        )
      }.build

    val shuffleReadMetrics: ShuffleReadMetricDistributions =
      new MetricHelper[InternalShuffleReadMetrics, ShuffleReadMetricDistributions](rawMetrics,
        quantiles) {
        def getSubmetrics(raw: InternalTaskMetrics): InternalShuffleReadMetrics =
          raw.shuffleReadMetrics

        def build: ShuffleReadMetricDistributions = new ShuffleReadMetricDistributions(
          readBytes = submetricQuantiles(_.totalBytesRead),
          readRecords = submetricQuantiles(_.recordsRead),
          remoteBytesRead = submetricQuantiles(_.remoteBytesRead),
          remoteBytesReadToDisk = submetricQuantiles(_.remoteBytesReadToDisk),
          remoteBlocksFetched = submetricQuantiles(_.remoteBlocksFetched),
          localBlocksFetched = submetricQuantiles(_.localBlocksFetched),
          totalBlocksFetched = submetricQuantiles(_.totalBlocksFetched),
          fetchWaitTime = submetricQuantiles(_.fetchWaitTime)
        )
      }.build

    val shuffleWriteMetrics: ShuffleWriteMetricDistributions =
      new MetricHelper[InternalShuffleWriteMetrics, ShuffleWriteMetricDistributions](rawMetrics,
        quantiles) {
        def getSubmetrics(raw: InternalTaskMetrics): InternalShuffleWriteMetrics =
          raw.shuffleWriteMetrics

        def build: ShuffleWriteMetricDistributions = new ShuffleWriteMetricDistributions(
          writeBytes = submetricQuantiles(_.bytesWritten),
          writeRecords = submetricQuantiles(_.recordsWritten),
          writeTime = submetricQuantiles(_.writeTime)
        )
      }.build

    new TaskMetricDistributions(
      quantiles = quantiles,
      executorDeserializeTime = metricQuantiles(_.executorDeserializeTime),
      executorDeserializeCpuTime = metricQuantiles(_.executorDeserializeCpuTime),
      executorRunTime = metricQuantiles(_.executorRunTime),
      executorCpuTime = metricQuantiles(_.executorCpuTime),
      resultSize = metricQuantiles(_.resultSize),
      jvmGcTime = metricQuantiles(_.jvmGCTime),
      resultSerializationTime = metricQuantiles(_.resultSerializationTime),
      memoryBytesSpilled = metricQuantiles(_.memoryBytesSpilled),
      diskBytesSpilled = metricQuantiles(_.diskBytesSpilled),
      inputMetrics = inputMetrics,
      outputMetrics = outputMetrics,
      shuffleReadMetrics = shuffleReadMetrics,
      shuffleWriteMetrics = shuffleWriteMetrics
    )
  }

  /** Converts an internal accumulable; name/value fall back to null when absent. */
  def convertAccumulableInfo(acc: InternalAccumulableInfo): AccumulableInfo = {
    new AccumulableInfo(
      acc.id, acc.name.orNull, acc.update.map(_.toString), acc.value.map(_.toString).orNull)
  }

  /** Converts internal task metrics into the public TaskMetrics type. */
  def convertUiTaskMetrics(internal: InternalTaskMetrics): TaskMetrics = {
    new TaskMetrics(
      executorDeserializeTime = internal.executorDeserializeTime,
      executorDeserializeCpuTime = internal.executorDeserializeCpuTime,
      executorRunTime = internal.executorRunTime,
      executorCpuTime = internal.executorCpuTime,
      resultSize = internal.resultSize,
      jvmGcTime = internal.jvmGCTime,
      resultSerializationTime = internal.resultSerializationTime,
      memoryBytesSpilled = internal.memoryBytesSpilled,
      diskBytesSpilled = internal.diskBytesSpilled,
      inputMetrics = convertInputMetrics(internal.inputMetrics),
      outputMetrics = convertOutputMetrics(internal.outputMetrics),
      shuffleReadMetrics = convertShuffleReadMetrics(internal.shuffleReadMetrics),
      shuffleWriteMetrics = convertShuffleWriteMetrics(internal.shuffleWriteMetrics)
    )
  }

  def convertInputMetrics(internal: InternalInputMetrics): InputMetrics = {
    new InputMetrics(
      bytesRead = internal.bytesRead,
      recordsRead = internal.recordsRead
    )
  }

  def convertOutputMetrics(internal: InternalOutputMetrics): OutputMetrics = {
    new OutputMetrics(
      bytesWritten = internal.bytesWritten,
      recordsWritten = internal.recordsWritten
    )
  }

  def convertShuffleReadMetrics(internal: InternalShuffleReadMetrics): ShuffleReadMetrics = {
    new ShuffleReadMetrics(
      remoteBlocksFetched = internal.remoteBlocksFetched,
      localBlocksFetched = internal.localBlocksFetched,
      fetchWaitTime = internal.fetchWaitTime,
      remoteBytesRead = internal.remoteBytesRead,
      remoteBytesReadToDisk = internal.remoteBytesReadToDisk,
      localBytesRead = internal.localBytesRead,
      recordsRead = internal.recordsRead
    )
  }

  def convertShuffleWriteMetrics(internal: InternalShuffleWriteMetrics): ShuffleWriteMetrics = {
    new ShuffleWriteMetrics(
      bytesWritten = internal.bytesWritten,
      writeTime = internal.writeTime,
      recordsWritten = internal.recordsWritten
    )
  }
}
/**
 * Helper for getting quantile distributions out of one nested metric type.
 * Subclasses implement [[getSubmetrics]] to project the nested metrics and
 * [[build]] to assemble the per-field quantile result.
 */
private[v1] abstract class MetricHelper[I, O](
    rawMetrics: Seq[InternalTaskMetrics],
    quantiles: Array[Double]) {

  /** Projects the nested metrics object out of one task's metrics. */
  def getSubmetrics(raw: InternalTaskMetrics): I

  /** Assembles the final distributions object from submetricQuantiles calls. */
  def build: O

  // Nested metrics for every task, in the order of rawMetrics.
  val data: Seq[I] = rawMetrics.map(getSubmetrics)

  /** Applies `f` to every task's nested metrics and returns the quantiles. */
  def submetricQuantiles(f: I => Double): IndexedSeq[Double] =
    Distribution(data.map(f)).get.getQuantiles(quantiles)
}
| aokolnychyi/spark | core/src/main/scala/org/apache/spark/status/api/v1/AllStagesResource.scala | Scala | apache-2.0 | 13,137 |
package org.broadinstitute.clio.server.dataaccess
import java.util.UUID
import com.sksamuel.elastic4s.RefreshPolicy
import com.sksamuel.elastic4s.analyzers._
import com.sksamuel.elastic4s.bulk.BulkRequest
import com.sksamuel.elastic4s.cluster.ClusterHealthRequest
import com.sksamuel.elastic4s.delete.DeleteByIdRequest
import com.sksamuel.elastic4s.http.ElasticDsl._
import com.sksamuel.elastic4s.indexes.CreateIndexRequest
import com.sksamuel.elastic4s.searches.SearchRequest
import io.circe.Json
import io.circe.syntax._
import org.broadinstitute.clio.server.dataaccess.elasticsearch.ElasticsearchUtil.RequestException
import org.broadinstitute.clio.server.dataaccess.elasticsearch._
import org.broadinstitute.clio.transfer.model.{HealthStatus, ModelMockIndex, ModelMockKey}
import org.broadinstitute.clio.util.json.ModelAutoDerivation
import org.broadinstitute.clio.util.model.UpsertId
import org.scalatest._
import scala.concurrent.Future
class HttpElasticsearchDAOSpec
extends AbstractElasticsearchDAOSpec("HttpElasticsearchDAO")
with AsyncFlatSpecLike
with Matchers
with EitherValues
with ModelAutoDerivation
with OptionValues {
import ElasticsearchUtil.HttpClientOps
import com.sksamuel.elastic4s.circe._
behavior of "HttpElasticsearch"
it should "create an index and update the index field types" in {
val indexVersion1: ElasticsearchIndex[_] =
new ElasticsearchIndex[ModelMockIndex](
ModelMockIndex(),
"test_index_update_type",
ElasticsearchFieldMapper.NumericBooleanDateAndKeywordFields
)
val indexVersion2: ElasticsearchIndex[_] =
new ElasticsearchIndex[ModelMockIndex](
ModelMockIndex(commandName = "command_name"),
"test_index_update_type",
ElasticsearchFieldMapper.NumericBooleanDateAndKeywordFields
)
for {
_ <- httpElasticsearchDAO.createIndexType(indexVersion1)
existsVersion1 <- httpElasticsearchDAO.existsIndexType(indexVersion1)
_ = existsVersion1 should be(true)
existsVersion2 <- httpElasticsearchDAO.existsIndexType(indexVersion2)
_ = existsVersion2 should be(true)
_ <- httpElasticsearchDAO.updateFieldDefinitions(indexVersion1)
_ <- httpElasticsearchDAO.updateFieldDefinitions(indexVersion2)
} yield succeed
}
it should "fail to recreate an index twice when skipping check for existence" in {
val indexVersion1: ElasticsearchIndex[_] =
new ElasticsearchIndex[ModelMockIndex](
ModelMockIndex(),
"test_index_fail_recreate",
ElasticsearchFieldMapper.NumericBooleanDateAndKeywordFields
)
val indexVersion2: ElasticsearchIndex[_] =
new ElasticsearchIndex[ModelMockIndex](
ModelMockIndex(commandName = "command_name"),
"test_index_fail_recreate",
ElasticsearchFieldMapper.NumericBooleanDateAndKeywordFields
)
for {
_ <- httpElasticsearchDAO.createIndexType(indexVersion1)
exception <- recoverToExceptionIf[RequestException] {
httpElasticsearchDAO.createIndexType(indexVersion2)
}
_ = {
val error = exception.requestFailure.error
error.`type` should be("index_already_exists_exception")
error.reason should fullyMatch regex """index \[test_index_fail_recreate/.*\] already exists"""
}
} yield succeed
}
it should "fail to change the index field types" in {
val indexVersion1: ElasticsearchIndex[_] =
new ElasticsearchIndex[ModelMockIndex](
ModelMockIndex(),
"test_index_fail_change_types",
ElasticsearchFieldMapper.NumericBooleanDateAndKeywordFields
)
val indexVersion2: ElasticsearchIndex[_] =
new ElasticsearchIndex[ModelMockIndex](
ModelMockIndex(),
"test_index_fail_change_types",
ElasticsearchFieldMapper.StringsToTextFieldsWithSubKeywords
)
for {
_ <- httpElasticsearchDAO.createIndexType(indexVersion1)
_ <- httpElasticsearchDAO.updateFieldDefinitions(indexVersion1)
exception <- recoverToExceptionIf[RequestException] {
httpElasticsearchDAO.updateFieldDefinitions(indexVersion2)
}
_ = {
val error = exception.requestFailure.error
error.`type` should be("illegal_argument_exception")
error.reason should be(
"mapper [mock_string_array] of different type, current_type [keyword], merged_type [text]"
)
}
} yield succeed
}
  // Upserts four documents and checks that the DAO reports the last one
  // as the most recent (ordered by upsert id).
  it should "return the most recent document" in {
    import org.broadinstitute.clio.server.dataaccess.elasticsearch._
    val keyLong = 12345L
    val keyString = "key"
    // Random index name so repeated runs don't collide.
    val index = new ElasticsearchIndex[ModelMockIndex](
      ModelMockIndex(),
      "docs-" + UUID.randomUUID(),
      ElasticsearchFieldMapper.NumericBooleanDateAndKeywordFields
    )
    // Builds a document carrying a fresh upsert id merged with the mock key.
    def generateBookkeeping(s: String): Json =
      Map(
        ElasticsearchIndex.UpsertIdElasticsearchName -> UpsertId.nextId()
      ).asJson
        .deepMerge(ModelMockKey(keyLong, keyString + s).asJson)
    val documents =
      (1 to 4)
        .map("document-" + _)
        .map(generateBookkeeping)
    for {
      _ <- httpElasticsearchDAO.createOrUpdateIndex(index)
      // NOTE(review): updates run concurrently via Future.sequence; ordering is
      // presumably established by the upsert ids, not by insertion order — confirm.
      _ <- Future.sequence(
        documents.map(httpElasticsearchDAO.updateMetadata(_)(index))
      )
      document <- httpElasticsearchDAO.getMostRecentDocument(index)
    } yield document.value should be(documents.last)
  }
it should "not throw an exception if no documents exist" in {
import org.broadinstitute.clio.server.dataaccess.elasticsearch._
val index = new ElasticsearchIndex[ModelMockIndex](
ModelMockIndex(),
"docs-" + UUID.randomUUID(),
ElasticsearchFieldMapper.NumericBooleanDateAndKeywordFields
)
for {
_ <- httpElasticsearchDAO.createOrUpdateIndex(index)
res <- httpElasticsearchDAO.getMostRecentDocument(index)
} yield {
res should be(None)
}
}
  // Minimal document model for exercising index/search round-trips; `slogan`
  // is optional so both present and absent fields are covered.
  case class City(
    name: String,
    country: String,
    continent: String,
    status: String,
    slogan: Option[String]
  )
  // End-to-end smoke test of the raw elastic4s client: health check, index
  // creation, bulk indexing, scroll search, and delete-by-id.
  it should "perform various CRUD-like operations" in {
    val clusterHealthDefinition: ClusterHealthRequest =
      clusterHealth()
    val indexCreationDefinition: CreateIndexRequest =
      createIndex("places") replicas 0 mappings {
        mapping("cities") as (
          keywordField("id"),
          textField("name") boost 4,
          textField("content") analyzer StopAnalyzer
        )
      }
    // Index three cities; WAIT_UNTIL makes them visible to the search below.
    val populateDefinition: BulkRequest =
      bulk(
        /* Option 1: Fields syntax */
        indexInto("places" / "cities") id "uk" fields (
          "name" -> "London",
          "country" -> "United Kingdom",
          "continent" -> "Europe",
          "status" -> "Awesome"
        ),
        /* Option 2: Doc syntax */
        indexInto("places" / "cities") id "fr" doc
          City(
            name = "Paris",
            country = "France",
            continent = "Europe",
            status = "Awesome",
            slogan = Option("Liberté, égalité, fraternité")
          ),
        indexInto("places" / "cities") id "de" doc
          City(
            name = "Berlin",
            country = "Germany",
            continent = "Europe",
            status = "Awesome",
            slogan = None
          )
      ).refresh(RefreshPolicy.WAIT_UNTIL)
    // Both clauses must match, so only the London document qualifies.
    val searchDefinition: SearchRequest =
      searchWithType("places" / "cities") scroll "1m" size 10 query {
        boolQuery must (
          queryStringQuery(""""London"""").defaultField("name"),
          queryStringQuery(""""Europe"""").defaultField("continent")
        )
      }
    val deleteDefinition: DeleteByIdRequest =
      delete("uk") from "places" / "cities" refresh RefreshPolicy.WAIT_UNTIL
    lazy val httpClient = httpElasticsearchDAO.elasticClient
    for {
      health <- httpClient.executeAndUnpack(clusterHealthDefinition)
      _ = HealthStatus.withNameInsensitive(health.status) should be(HealthStatus.Green)
      indexCreation <- httpClient.executeAndUnpack(indexCreationDefinition)
      _ = indexCreation.acknowledged should be(true)
      populate <- httpClient.executeAndUnpack(populateDefinition)
      _ = populate.errors should be(false)
      search <- httpClient.executeAndUnpack(searchDefinition)
      _ = {
        search.hits.total should be(1)
        // Example using circe HitReader
        val city = search.to[City].head
        city.name should be("London")
        city.country should be("United Kingdom")
        city.continent should be("Europe")
        city.status should be("Awesome")
      }
      // Second delete targets an id that no longer exists, so its result differs.
      delete1 <- httpClient.executeAndUnpack(deleteDefinition)
      delete2 <- httpClient.executeAndUnpack(deleteDefinition)
    } yield {
      delete1.result should be("deleted")
      delete2.result shouldNot be("deleted")
    }
  }
}
| broadinstitute/clio | clio-server/src/test/scala/org/broadinstitute/clio/server/dataaccess/HttpElasticsearchDAOSpec.scala | Scala | bsd-3-clause | 8,843 |
package de.htwg.zeta.generatorControl.actors.worker
import de.htwg.zeta.generatorControl.actors.worker.MasterWorkerProtocol.CancelWork
import de.htwg.zeta.generatorControl.actors.worker.MasterWorkerProtocol.Work
import akka.actor.Actor
import akka.actor.ActorLogging
import akka.actor.Props
object DummyWorkerExecutor {
  /** Props for creating a [[DummyWorkerExecutor]] actor. */
  def props() = Props(new DummyWorkerExecutor())
}
/** Stub worker executor: acknowledges any [[Work]] with result 0 and any
  * [[CancelWork]] with result 1, without doing real work.
  */
class DummyWorkerExecutor() extends Actor with ActorLogging {
  def receive: Receive = {
    case _: Work =>
      sender() ! WorkComplete(0)
    case _: CancelWork =>
      sender() ! WorkComplete(1)
  }
}
| Zeta-Project/zeta | api/generatorControl/src/main/scala/de/htwg/zeta/generatorControl/actors/worker/DummyWorkerExecutor.scala | Scala | bsd-2-clause | 576 |
/* ------------------- sse-jmx ------------------- *\\
* Licensed under the Apache License, Version 2.0. *
* Author: Spiros Tzavellas *
\\* ----------------------------------------------- */
package com.tzavellas.sse.jmx.export
import org.junit.Test
import org.junit.Assert._
import javax.management.ObjectName
/** Unit tests for the JMX [[ObjectName]] derivation strategies in
  * [[ObjectNamingStrategies]]: full-class-name, simple-class-name with a fixed
  * domain, annotation-driven naming, and the combined default strategy.
  */
class ObjectNamingStrategiesTest {
  import ObjectNamingStrategies._
  @Test
  def the_name_has_the_package_as_domain_and_the_class_name_as_type_property(): Unit = {
    assertEquals(new ObjectName("java.lang:type=String"), useFullClassName(classOf[String]))
  }
  @Test
  def use_simple_class_name_and_specified_domain(): Unit = {
    val naming = useSimpleClassName("my-domain")
    assertEquals(new ObjectName("my-domain:type=String"), naming(classOf[String]))
  }
  // useAnnotation is a partial function: undefined for classes lacking the annotation.
  @Test
  def cannot_create_name_for_class_without_the_managedResource_annotation(): Unit = {
    assertFalse(useAnnotation.isDefinedAt(classOf[String]))
  }
  @Test
  def create_name_using_the_managedResource_annotation(): Unit = {
    assertEquals(new ObjectName("com.tzavellas:type=annotated"), useAnnotation(classOf[AnnotatedClass]))
  }
  @Test
  def cannot_create_name_if_annotation_has_no_value(): Unit = {
    assertFalse(useAnnotation.isDefinedAt(classOf[NoObjectNameSpecified]))
  }
  // The default strategy prefers the annotation's objectName when present...
  @Test
  def default_naming_should_use_the_object_name_in_annotation(): Unit = {
    assertEquals(new ObjectName("com.tzavellas:type=annotated"), default(classOf[AnnotatedClass]))
  }
  // ...and falls back to package-as-domain naming otherwise.
  @Test
  def default_naming_should_use_default_strategy_if_annotation_has_empty_value(): Unit = {
    assertEquals(new ObjectName("com.tzavellas.sse.jmx.export:type=NoObjectNameSpecified"), default(classOf[NoObjectNameSpecified]))
  }
  @Test
  def default_should_use_default_strategy_if_no_annotation_present(): Unit = {
    assertEquals(new ObjectName("java.lang:type=String"), default(classOf[String]))
  }
  // -- test classes ----------------------------------------------------------
  @ManagedResource(objectName="com.tzavellas:type=annotated")
  class AnnotatedClass
  @ManagedResource
  class NoObjectNameSpecified
}
| sptz45/sse-jmx | src/test/scala/com/tzavellas/sse/jmx/export/ObjectNamingStrategiesTest.scala | Scala | apache-2.0 | 2,142 |
package com.softwaremill.mqperf.mq
import java.util.{Properties, Map => JMap}
import com.softwaremill.mqperf.config.TestConfig
import com.typesafe.scalalogging.StrictLogging
import org.apache.kafka.clients.consumer.{ConsumerRecord, KafkaConsumer, OffsetAndMetadata, OffsetCommitCallback}
import org.apache.kafka.clients.producer._
import org.apache.kafka.common.TopicPartition
import scala.collection.JavaConversions._
import scala.concurrent.duration._
import scala.language.postfixOps
/** Kafka-backed implementation of [[Mq]] for the benchmark.
  *
  * Senders produce to a fixed topic and block on each batch's futures;
  * receivers poll with manual offset management, batching acks and committing
  * them asynchronously at the interval configured by `commitMs`.
  */
class KafkaMq(testConfig: TestConfig) extends Mq with StrictLogging {
  // assuming the topic already exists, with replication factor 3 and at least as many partitions as receiver threads
  import KafkaMq._
  private val GroupId = "mqperf-group"
  private val Topic = "mqperf"
  private val PollTimeoutMs = 500.millis.toMillis
  // Minimum interval between async offset commits, in nanoseconds.
  private val commitNs = testConfig.mqConfig.getLong("commitMs").millis.toNanos
  private def kafkaHosts = testConfig.brokerHosts.map(_ + ":9092").mkString(",")
  // A message is identified by its partition and offset, which is what ack() commits.
  override type MsgId = (TopicPartition, Long)
  override def createSender() = new MqSender {
    val producersProps = new Properties()
    producersProps.put("bootstrap.servers", kafkaHosts)
    producersProps.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    producersProps.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    producersProps.put("acks", testConfig.mqConfig.getString("acks"))
    val producer = new KafkaProducer[String, String](producersProps)
    // Sends the whole batch first, then blocks until every broker ack arrives.
    override def send(msgs: List[String]): Unit = {
      msgs
        .map(msg => producer.send(new ProducerRecord[String, String](Topic, null, msg)))
        .foreach(_.get())
    }
  }
  override def createReceiver() = new MqReceiver {
    // Offsets acked since the last commit; flushed by commitAsync().
    private var offsetsToCommit: CommitOffsets = KafkaMq.EmptyOffsetMap
    private var lastCommitTick = System.nanoTime()
    // will only be run for receivers
    private lazy val consumer = {
      val consumerProps = new Properties()
      consumerProps.put("bootstrap.servers", kafkaHosts)
      consumerProps.put("group.id", GroupId)
      consumerProps.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
      consumerProps.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
      // Manual commit: offsets are only advanced via ack() + commitAsync().
      consumerProps.put("enable.auto.commit", "false")
      consumerProps.put("auto.offset.reset", "earliest")
      val consumer = new KafkaConsumer[String, String](consumerProps)
      consumer.subscribe(List(Topic))
      consumer
    }
    private def timeToCommit(): Boolean = (System.nanoTime() - lastCommitTick) > commitNs
    // Fires an async commit of the accumulated offsets, then resets the batch.
    // NOTE(review): mapValues returns a lazy view on Scala 2.13+ — confirm the
    // Scala version before upgrading, or force it with .toMap.
    private def commitAsync(): Unit = {
      logger.debug("Commit tick")
      lastCommitTick = System.nanoTime()
      if (offsetsToCommit.nonEmpty) {
        logger.info(s"Committing offsets: $offsetsToCommit")
        consumer.commitAsync(offsetsToCommit.mapValues(new OffsetAndMetadata(_)), new OffsetCommitCallback {
          override def onComplete(offsets: JMap[TopicPartition, OffsetAndMetadata], exception: Exception): Unit = {
            if (exception != null) {
              logger.error("Commit failed", exception)
              throw exception
            }
            else
              logger.debug(s"Commit successful: $offsets")
          }
        })
        offsetsToCommit = KafkaMq.EmptyOffsetMap
      }
    }
    // Iterator over the records of the last poll; refilled when exhausted.
    private var it: java.util.Iterator[ConsumerRecord[String, String]] = _
    // Returns up to maxMsgCount records. Note: messages are prepended, so the
    // returned list is in reverse consumption order.
    override def receive(maxMsgCount: Int): List[(MsgId, String)] = {
      var msgs: List[(MsgId, String)] = Nil
      if (timeToCommit())
        commitAsync()
      if (it == null || !it.hasNext)
        it = consumer.poll(PollTimeoutMs).iterator()
      while (msgs.size < maxMsgCount && it.hasNext) {
        val msg = it.next()
        val msgId = (new TopicPartition(msg.topic(), msg.partition()), msg.offset())
        msgs = (msgId, msg.value()) :: msgs
      }
      msgs
    }
    // Records offset+1 per partition (Kafka commits the *next* offset to read);
    // the actual commit happens later in commitAsync().
    override def ack(ids: List[MsgId]): Unit = {
      val commitRequest = ids.foldLeft(KafkaMq.EmptyOffsetMap) {
        case (offsetMap, (topicPartition, offset)) => offsetMap + (topicPartition -> (offset + 1))
      }
      offsetsToCommit = offsetsToCommit ++ commitRequest
    }
    override def close(): Unit = {
      consumer.close()
      super.close()
    }
  }
}
object KafkaMq {
  // Per-partition offsets waiting to be committed to Kafka.
  type CommitOffsets = Map[TopicPartition, Long]
  val EmptyOffsetMap = Map.empty[TopicPartition, Long]
}
| adamw/mqperf | src/main/scala/com/softwaremill/mqperf/mq/KafkaMq.scala | Scala | apache-2.0 | 4,447 |
package models.batch.job
/**
* Created by basso on 07/04/15.
*
* MineOp job for SVM algorithm
*/
case class MnSVM (
    ds_train: String, // training vector file
    ds_test: String, // testing vector file
    max_iter: Int, // default to 100 (impl at user end)
    id: String = " " // job id, assigned later via setId
  ) extends MineOp {
  // Returns a copy of this job with the given id (jobs are immutable).
  def setId(nid: String) = this.copy(id = nid)
  // Human-readable log entry describing this job's parameters.
  def logWrite = jobPrintFormat(id, "Support Vector Machine", Map(
    "training_dataSet" -> ds_train,
    "testing" -> ds_test,
    "iterations" -> max_iter.toString))
}
| ChetanBhasin/Veracious | app/models/batch/job/MnSVM.scala | Scala | apache-2.0 | 528 |
package suiryc.scala.io
import java.nio.file.{Path, Paths}
/** Path-string helpers built on `java.nio.file`. */
object PathsEx {

  /** Resolves a path string to a [[Path]], expanding a leading "~" to the
   *  current user's home directory.
   *
   *  "~" alone resolves to the home directory itself; "~/rest" resolves
   *  "rest" against it. (The previous `substring(2)` implementation threw
   *  `StringIndexOutOfBoundsException` on a bare "~".)
   *
   *  @param path a plain path, or one starting with "~"
   */
  def get(path: String): Path =
    if (path.startsWith("~")) {
      // Drop the tilde and any separators directly following it.
      val rest = path.drop(1).dropWhile(_ == '/')
      // Equivalent to RichFile.userHome.toPath without the extra dependency.
      val home = Paths.get(System.getProperty("user.home"))
      if (rest.isEmpty) home
      else home.resolve(rest)
    }
    else Paths.get(path)

  /** Returns the last path segment (the part after the final '/'), or the
   *  name itself when it contains no separator. Returns "" for inputs such
   *  as "/" where splitting yields no segments (previously threw).
   */
  def filename(name: String): String = {
    val parts = name.split("/")
    if (parts.isEmpty) "" else parts.last
  }

  /** Returns the filename without its extension (text before the last '.').
   *
   *  A name without a dot, or with only a leading dot (e.g. ".hidden"), is
   *  returned unchanged; the previous implementation collapsed both to "".
   */
  def atomicName(name: String): String = {
    /* keep filename */
    val base = filename(name)
    /* without extension */
    val dot = base.lastIndexOf('.')
    if (dot > 0) base.substring(0, dot) else base
  }

  /** Returns the filename of the given path without its extension. */
  def atomicName(path: Path): String =
    atomicName(path.getFileName.toString)

  /** Returns the extension (text after the last '.') of the filename portion,
   *  or "" when there is none. Dots in directory components are ignored
   *  (previously "dir.v2/file" yielded "v2/file").
   */
  def extension(name: String): String = {
    val base = filename(name)
    val dot = base.lastIndexOf('.')
    if (dot >= 0) base.substring(dot + 1) else ""
  }

  /** Returns the extension of the given path's filename, or "". */
  def extension(path: Path): String =
    extension(path.getFileName.toString)
}
| swhgoon/suiryc-scala | core/src/main/scala/suiryc/scala/io/PathsEx.scala | Scala | gpl-3.0 | 805 |
object Solution {

  /** Computes (a * b^c + d) mod p.
   *
   *  The whole expression is reduced as a BigInt before narrowing to Long,
   *  because a * b^c + d can far exceed Long range even though p = 10^12
   *  keeps the final result well within it.
   */
  def nonMersennePrimeRSD(a: Int, b: Int, c: Int, d: Int, p: Long): Long = {
    (a * BigInt(b).modPow(c, p) + d).mod(p).toLong
  }

  /** Reads t, then t lines of "a b c d", and prints the sum of all
   *  a * b^c + d values modulo 10^12, zero-padded to 12 digits.
   */
  def main(args: Array[String]): Unit = {
    // scala.Predef.readInt/readLine were deprecated in 2.11 and removed in
    // 2.12; scala.io.StdIn is the supported replacement.
    import scala.io.StdIn
    val t = StdIn.readInt()
    val p = 1000000000000L // 10^12
    var sum = 0L
    for (_ <- 1 to t) {
      val Array(a, b, c, d) = StdIn.readLine().split(" ").map(_.toInt)
      sum = (sum + nonMersennePrimeRSD(a, b, c, d, p)) % p
    }
    println("%012d".format(sum))
  }
}
| advancedxy/hackerrank | project-euler/problem-97/LargeNonMersennePrime.scala | Scala | mit | 494 |
package docs.home.actor
import com.lightbend.lagom.docs.ServiceSupport
import scala.concurrent.duration._
import akka.actor.ActorSystem
import akka.testkit.ImplicitSender
import akka.testkit.TestKit
import com.typesafe.config.ConfigFactory
import org.scalactic.ConversionCheckedTripleEquals
import org.scalatest.BeforeAndAfterAll
import org.scalatest.Matchers
import org.scalatest.WordSpecLike
import akka.cluster.Cluster
import java.util.concurrent.TimeUnit
object ActorServiceSpec {
  // Cluster-enabled config: port 0 picks a free port so multiple
  // ActorSystems can run side by side on the same host.
  def config = ConfigFactory.parseString("""
    akka.actor.provider = akka.cluster.ClusterActorRefProvider
    akka.remote.netty.tcp.port = 0
    akka.remote.netty.tcp.hostname = 127.0.0.1
    """)
}
/** Integration test: forms a three-node cluster (this TestKit system plus two
  * worker-role systems hosting Worker actors) and verifies that a service can
  * dispatch work to the cluster, e.g. via consistent-hashing routing.
  */
class ActorServiceSpec extends TestKit(ActorSystem("ActorServiceSpec", ActorServiceSpec.config))
  with ServiceSupport
  with BeforeAndAfterAll with ConversionCheckedTripleEquals
  with ImplicitSender {
  val workerRoleConfig = ConfigFactory.parseString("akka.cluster.roles = [worker-node]")
  val node2 = ActorSystem("ActorServiceSpec", workerRoleConfig.withFallback(system.settings.config))
  val node3 = ActorSystem("ActorServiceSpec", workerRoleConfig.withFallback(system.settings.config))
  override def beforeAll {
    // The TestKit system seeds the cluster; the worker nodes join it.
    Cluster(system).join(Cluster(system).selfAddress)
    Cluster(node2).join(Cluster(system).selfAddress)
    Cluster(node3).join(Cluster(system).selfAddress)
    node2.actorOf(Worker.props(), "worker");
    node3.actorOf(Worker.props(), "worker");
    // Block until all three members are visible before any test runs.
    within(15.seconds) {
      awaitAssert {
        Cluster(system).state.members.size should ===(3)
      }
    }
  }
  override def afterAll {
    shutdown()
    shutdown(node2)
    shutdown(node3)
  }
  "Integration with actors" must {
    "work with for example clustered consistent hashing" in withServiceInstance[WorkerService](
      new WorkerServiceImpl(system)).apply { app =>
      client => {
        val job = Job.of("123", "compute", "abc")
        // might take a while until cluster is formed and router knows about the nodes
        within(15.seconds) {
          awaitAssert {
            client.doWork().invoke(job).toCompletableFuture.get(3, TimeUnit.SECONDS) should ===(JobAccepted.of("123"))
          }
        }
      }
    }
  }
}
| edouardKaiser/lagom | docs/src/test/scala/docs/home/actor/ActorServiceSpec.scala | Scala | apache-2.0 | 2,245 |
/*§
===========================================================================
Chronos
===========================================================================
Copyright (C) 2015-2016 Gianluca Costa
===========================================================================
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===========================================================================
*/
package info.gianlucacosta.chronos.interpreter.exceptions
/** Signals a failure raised while a program is being executed by the interpreter.
  *
  * @param message description of the failure
  * @param cause   underlying exception, or null when there is none
  */
class ExecutionException(message: String, cause: Throwable)
  extends RuntimeException(message, cause) {

  /** Convenience constructor for failures without an underlying cause. */
  def this(message: String) = this(message, null)
}
| giancosta86/Chronos | src/main/scala/info/gianlucacosta/chronos/interpreter/exceptions/ExecutionException.scala | Scala | apache-2.0 | 1,129 |
/*
* Copyright 2015 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.unicomplex.initfail
import org.squbs.lifecycle.{GracefulStop, GracefulStopHelper}
import org.squbs.unicomplex.Unicomplex._
import scala.util.Try
import org.squbs.unicomplex.Initialized
import akka.actor.{Actor, ActorLogging}
/** Test actor that deliberately fails its initialization: the Try in `init`
  * captures the thrown exception, so the parent receives an Initialized
  * message carrying a failed report.
  */
class InitFailActor extends Actor with ActorLogging with GracefulStopHelper {
  // do initialization
  def init: InitReport = {
    log.info("initializing")
    Try {
      // do some tasks
      throw new Exception("Init failed")
    }
  }
  // Report the (failed) init result to the supervising parent at construction.
  context.parent ! Initialized(init)
  def receive = {
    case GracefulStop => defaultLeafActorStop
    // Echo anything else back to the sender.
    case other => sender ! other
  }
}
| keshin/squbs | squbs-unicomplex/src/test/scala/org/squbs/unicomplex/initfail/InitFailActor.scala | Scala | apache-2.0 | 1,228 |
/*
* Copyright 2015 Heiko Seeberger
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.heikoseeberger.akkahttpninny
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.marshalling.Marshal
import akka.http.scaladsl.model.{ HttpRequest, RequestEntity }
import akka.http.scaladsl.server.Directives
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.scaladsl.Source
import scala.concurrent.duration._
import scala.io.StdIn
/** Demo server for the ninny JSON (un)marshalling support: binds on
  * 127.0.0.1:8000, echoes a posted Foo at "/", and streams Foo values
  * under "/stream" (including re-consuming its own stream via "/stream/remote").
  */
object ExampleApp {
  private final case class Foo(bar: String)
  def main(args: Array[String]): Unit = {
    implicit val system = ActorSystem()
    Http().newServerAt("127.0.0.1", 8000).bindFlow(route)
    // Keep the server alive until the user presses ENTER, then shut down.
    StdIn.readLine("Hit ENTER to exit")
    system.terminate()
  }
  private def route(implicit sys: ActorSystem) = {
    import Directives._
    import NinnySupport._
    import io.github.kag0.ninny.Auto._
    // POST / : unmarshal a single Foo from JSON and echo it back.
    pathSingleSlash {
      post {
        entity(as[Foo]) { foo =>
          complete {
            foo
          }
        }
      }
    } ~ pathPrefix("stream") {
      // POST /stream : echo the incoming Foo stream, throttled to 1 element / 2s.
      post {
        entity(as[SourceOf[Foo]]) { fooSource: SourceOf[Foo] =>
          import sys._
          Marshal(Source.single(Foo("a"))).to[RequestEntity]
          complete(fooSource.throttle(1, 2.seconds))
        }
      } ~ get {
        // GET /stream : emit six Foo values, one per second.
        pathEndOrSingleSlash {
          complete(
            Source(0 to 5)
              .throttle(1, 1.seconds)
              .map(i => Foo(s"bar-$i"))
          )
        } ~ pathPrefix("remote") {
          // GET /stream/remote : fetch /stream over HTTP and re-stream it,
          // demonstrating unmarshalling of a streamed response.
          onSuccess(Http().singleRequest(HttpRequest(uri = "http://localhost:8000/stream"))) {
            response => complete(Unmarshal(response).to[SourceOf[Foo]])
          }
        }
      }
    }
  }
}
| hseeberger/akka-http-json | akka-http-ninny/src/test/scala/de/heikoseeberger/akkahttpninny/ExampleApp.scala | Scala | apache-2.0 | 2,236 |
package org.bitcoins.spvnode.networking.sync
import java.net.InetSocketAddress
import akka.actor.{Actor, ActorRef, ActorRefFactory, PoisonPill, Props}
import akka.event.LoggingReceive
import org.bitcoins.core.config.{MainNet, NetworkParameters, RegTest, TestNet3}
import org.bitcoins.core.crypto.DoubleSha256Digest
import org.bitcoins.core.protocol.blockchain.BlockHeader
import org.bitcoins.core.util.BitcoinSLogger
import org.bitcoins.spvnode.constant.{Constants, DbConfig}
import org.bitcoins.spvnode.messages.{GetHeadersMessage, HeadersMessage}
import org.bitcoins.spvnode.messages.data.GetHeadersMessage
import org.bitcoins.spvnode.models.BlockHeaderDAO
import org.bitcoins.spvnode.networking.PeerMessageHandler
import org.bitcoins.spvnode.networking.sync.BlockHeaderSyncActor.{CheckHeaderResult, GetHeaders, StartAtLastSavedHeader}
import org.bitcoins.spvnode.store.BlockHeaderStore
import org.bitcoins.spvnode.util.BitcoinSpvNodeUtil
import slick.driver.PostgresDriver.api._
import scala.annotation.tailrec
/**
* Created by chris on 9/5/16.
*
*
*/
/** Actor that synchronizes block headers from a network peer into persistent
  * storage, batching [[GetHeadersMessage]] requests and validating each batch
  * before saving it via [[BlockHeaderDAO]]. Implemented as a small state
  * machine using context.become; each behavior below is one state.
  */
trait BlockHeaderSyncActor extends Actor {
  private val logger = BitcoinSLogger.logger
  /** This is the maximum amount of headers the bitcoin protocol will transmit
    * in one request
    * [[https://bitcoin.org/en/developer-reference#getheaders]]
    *
    * @return
    */
  private def maxHeaders = 2000
  def networkParameters: NetworkParameters
  def dbConfig: DbConfig
  /** Helper function to provide a fresh instance of a [[BlockHeaderDAO]] actor */
  private def blockHeaderDAO: ActorRef = BlockHeaderDAO(context, dbConfig)
  /** Helper function to connect to a new peer on the network */
  private def peerMessageHandler: ActorRef = {
    val seed = new InetSocketAddress(networkParameters.dnsSeeds(1), networkParameters.port)
    PeerMessageHandler(context,seed)
  }
  // Initial state: dispatch on the first sync request received.
  def receive = LoggingReceive {
    case startHeader: BlockHeaderSyncActor.StartHeaders =>
      val p = peerMessageHandler
      val lastHeader = startHeader.headers.last
      if (lastHeader == Constants.chainParams.genesisBlock.blockHeader) {
        //TODO: Think this causes a bug in our test because the table is being dropped
        //while the message is still being processed
        //seed the database with the genesis header
        logger.info("Switching to awaitGenesisHeaderCreateReply from receive")
        context.become(awaitGenesisHeaderCreateReply(p,Nil))
        blockHeaderDAO ! BlockHeaderDAO.Create(lastHeader)
      } else {
        logger.info("Switching to blockHeaderSync from receive")
        context.become(blockHeaderSync(p,lastHeader))
        self.forward(startHeader)
      }
    case getHeaders: GetHeaders =>
      val getHeadersMessage = GetHeadersMessage(Seq(getHeaders.startHeader), getHeaders.stopHeader)
      val p = peerMessageHandler
      p ! getHeadersMessage
      logger.info("Switching to awaitGetHeaders from receive")
      context.become(awaitGetHeaders)
    case StartAtLastSavedHeader =>
      blockHeaderDAO ! BlockHeaderDAO.LastSavedHeader
      logger.info("Switching to awaitLastSavedHeader from receive")
      context.become(awaitLastSavedHeader)
  }
  /** Main block header sync context, lastHeader is used to make sure the batch of block headers we see
    * connects to the last batch of block headers we saw (thus forming a blockchain)
    * @param peerMessageHandler
    * @param lastHeader
    * @return
    */
  def blockHeaderSync(peerMessageHandler: ActorRef, lastHeader: BlockHeader): Receive = LoggingReceive {
    case startHeader: BlockHeaderSyncActor.StartHeaders =>
      val getHeadersMsg = GetHeadersMessage(startHeader.headers.map(_.hash))
      peerMessageHandler ! getHeadersMsg
    case headersMsg: HeadersMessage =>
      val headers = headersMsg.headers
      logger.debug("Headers before all of the checking: " + headers.size)
      logger.info("Switching to awaitCheckHeaders from blockHeaderSync")
      // discardOld = false: awaitCheckHeaders unbecome()s back to this state.
      context.become(awaitCheckHeaders(Some(lastHeader),headers),discardOld = false)
      val b = blockHeaderDAO
      b ! BlockHeaderDAO.MaxHeight
    case checkHeaderResult: CheckHeaderResult =>
      logger.debug("Received check header result inside of blockHeaderSync")
      if (checkHeaderResult.error.isDefined) {
        logger.error("We had an error syncing our blockchain: " +checkHeaderResult.error.get)
        context.parent ! checkHeaderResult.error.get
        self ! PoisonPill
      } else handleValidHeaders(checkHeaderResult.headers,peerMessageHandler)
  }
  /** This behavior is responsible for calling the [[checkHeaders]] function, after evaluating
    * if the headers are valid, reverts to the context the actor previously held and sends it the
    * result of checking the headers
    *
    * The only message this context expects is the [[BlockHeaderDAO]] to send it the current
    * max height of the blockchain that it has stored right now
    * @param lastHeader
    * @param headers
    * @return
    */
  def awaitCheckHeaders(lastHeader: Option[BlockHeader], headers: Seq[BlockHeader]) = LoggingReceive {
    case maxHeight: BlockHeaderDAO.MaxHeightReply =>
      val result = BlockHeaderSyncActor.checkHeaders(lastHeader,headers,maxHeight.height,networkParameters)
      context.unbecome()
      sender ! PoisonPill
      self ! result
  }
  /** Actor context that specifically deals with the [[BlockHeaderSyncActor.GetHeaders]] message */
  def awaitGetHeaders: Receive = LoggingReceive {
    case headersMsg: HeadersMessage =>
      val headers = headersMsg.headers
      logger.info("Switching to awaitCheckHeaders from awaitGetHeaders")
      context.become(awaitCheckHeaders(None, headers), discardOld = false)
      blockHeaderDAO ! BlockHeaderDAO.MaxHeight
    case checkHeaderResult: CheckHeaderResult =>
      // Forward either the validation error or the checked headers to the parent.
      context.parent ! checkHeaderResult.error.getOrElse(BlockHeaderSyncActor.GetHeadersReply(checkHeaderResult.headers))
  }
  /** Awaits for our [[BlockHeaderDAO]] to send us the last saved header it has
    * if we do not have a last saved header, it will use the genesis block's header
    * on the network we are currently on as the last saved header */
  def awaitLastSavedHeader: Receive = {
    case lastSavedHeader: BlockHeaderDAO.LastSavedHeaderReply =>
      if (lastSavedHeader.headers.size <= 1) {
        //means we have either zero or one last saved header, if it is zero we can sync from genesis block, if one start there
        val header = lastSavedHeader.headers.headOption.getOrElse(Constants.chainParams.genesisBlock.blockHeader)
        val p = peerMessageHandler
        logger.info("Switching to blockHeaderSync from awaitLastSavedHeader")
        context.become(blockHeaderSync(p,header))
        self ! BlockHeaderSyncActor.StartHeaders(Seq(header))
        context.parent ! BlockHeaderSyncActor.StartAtLastSavedHeaderReply(header)
      } else {
        //TODO: Need to write a test case for this inside of BlockHeaderSyncActorTest
        //means we have two (or more) competing chains, therefore we need to try and sync with both of them
        lastSavedHeader.headers.map { header =>
          val syncActor = BlockHeaderSyncActor(context,dbConfig,networkParameters)
          syncActor ! BlockHeaderSyncActor.StartHeaders(Seq(header))
          context.parent ! BlockHeaderSyncActor.StartAtLastSavedHeaderReply(header)
        }
      }
      sender ! PoisonPill
  }
  /** Stores the valid headers in our database, sends our actor a message to start syncing from the last
    * header we received if necessary
    *
    * @param headers
    * @param peerMessageHandler
    */
  def handleValidHeaders(headers: Seq[BlockHeader], peerMessageHandler: ActorRef) = {
    logger.debug("Headers size to be inserted: " + headers.size)
    val createAllMsg = BlockHeaderDAO.CreateAll(headers)
    val b = blockHeaderDAO
    logger.info("Switching to awaitCreatedAllReply from handleValidHeaders")
    context.become(awaitCreatedAllReply(peerMessageHandler,headers))
    b ! createAllMsg
  }
  /** Waits for our [[BlockHeaderDAO]] to reply with all the headers it created in persistent storage
    * @param networkHeaders the headers we received from our peer on the network. This may be a different
    *                       size than what is returned from [[BlockHeaderDAO.CreateAllReply]]. This can
    *                       be the case if our database fails to insert some of the headers.
    * */
  def awaitCreatedAllReply(peerMessageHandler: ActorRef, networkHeaders: Seq[BlockHeader]): Receive = LoggingReceive {
    case createdHeaders: BlockHeaderDAO.CreateAllReply =>
      val headers = createdHeaders.headers
      val lastHeader = createdHeaders.headers.last
      if (headers.size == maxHeaders || headers.size != networkHeaders.size) {
        //this means we either stored all the headers in the database and need to start from the last header or
        //we failed to store all the headers (due to some error in BlockHeaderDAO) and we need to start from the last
        //header we successfully stored in the database
        logger.debug("Starting next sync with this block header: " + lastHeader.hash)
        //means we need to send another GetHeaders message with the last header in this message as our starting point
        val startHeader = BlockHeaderSyncActor.StartHeaders(Seq(lastHeader))
        //need to reset the last header hash we saw on the network
        logger.info("Switching to blockHeaderSync from awaitCreatedAllReply")
        context.become(blockHeaderSync(peerMessageHandler,lastHeader))
        self ! startHeader
      } else {
        //else we we are synced up on the network, send the parent the last header we have seen
        context.parent ! BlockHeaderSyncActor.SuccessfulSyncReply(lastHeader)
        self ! PoisonPill
      }
      sender ! PoisonPill
  }
  /** This behavior is used to seed the database,
    * we cannot do anything until the genesis header is created in persistent storage.
    * Messages arriving meanwhile are queued and replayed once the header exists. */
  def awaitGenesisHeaderCreateReply(peerMessageHandler: ActorRef, queuedMsgs: Seq[Any]) : Receive = {
    case createReply: BlockHeaderDAO.CreateReply =>
      logger.info("Switching to blockHeaderSync from awaitGenesisHeaderCreateReply")
      context.become(blockHeaderSync(peerMessageHandler, createReply.blockHeader))
      self ! BlockHeaderSyncActor.StartHeaders(Seq(createReply.blockHeader))
      sender ! PoisonPill
      //now send all queued messages
      queuedMsgs.map(msg => self ! msg)
    case msg =>
      context.become(awaitGenesisHeaderCreateReply(peerMessageHandler, queuedMsgs ++ Seq(msg)))
  }
}
/** Companion: actor factory, the protocol messages exchanged with
  * [[BlockHeaderSyncActor]], and the pure header-validation logic. */
object BlockHeaderSyncActor extends BitcoinSLogger {
  private case class BlockHeaderSyncActorImpl(dbConfig: DbConfig, networkParameters: NetworkParameters) extends BlockHeaderSyncActor
  def apply(context: ActorRefFactory, dbConfig: DbConfig, networkParameters: NetworkParameters): ActorRef = {
    context.actorOf(props(dbConfig, networkParameters),
      BitcoinSpvNodeUtil.createActorName(BlockHeaderSyncActor.getClass))
  }
  def props(dbConfig: DbConfig, networkParameters: NetworkParameters): Props = {
    Props(classOf[BlockHeaderSyncActorImpl], dbConfig, networkParameters)
  }
  sealed trait BlockHeaderSyncMessage
  sealed trait BlockHeaderSyncMessageRequest
  sealed trait BlockHeaderSyncMessageReply
  /** Indicates a set of headers to query our peer on the network to start our sync process */
  case class StartHeaders(headers: Seq[BlockHeader]) extends BlockHeaderSyncMessageRequest
  /** Retrieves the set of headers from a node on the network, this does NOT store them */
  case class GetHeaders(startHeader: DoubleSha256Digest, stopHeader: DoubleSha256Digest) extends BlockHeaderSyncMessageRequest
  case class GetHeadersReply(headers: Seq[BlockHeader]) extends BlockHeaderSyncMessageReply
  /** Starts syncing our blockchain at the last header we have seen, if we haven't see any it starts at the genesis block */
  case object StartAtLastSavedHeader extends BlockHeaderSyncMessageRequest
  /** Reply for [[StartAtLastSavedHeader]] */
  case class StartAtLastSavedHeaderReply(header: BlockHeader) extends BlockHeaderSyncMessageReply
  /** Indicates that we have successfully synced our blockchain, the [[lastHeader]] represents the header at the max height on the chain */
  case class SuccessfulSyncReply(lastHeader: BlockHeader) extends BlockHeaderSyncMessageReply
  /** Indicates an error happened during the sync of our blockchain */
  sealed trait BlockHeaderSyncError extends BlockHeaderSyncMessageReply
  /** Indicates that our block headers do not properly reference one another
    *
    * @param previousBlockHash indicates the last valid block that connected to a header
    * @param blockHash indicates the first block hash that did NOT connect to the previous valid chain
    * */
  case class BlockHeadersDoNotConnect(previousBlockHash: DoubleSha256Digest, blockHash: DoubleSha256Digest) extends BlockHeaderSyncError
  /** Indicates that our node saw a difficulty adjustment on the network when there should not have been one between the
    * two given [[BlockHeader]]s */
  case class BlockHeaderDifficultyFailure(previousBlockHeader: BlockHeader, blockHeader: BlockHeader) extends BlockHeaderSyncError
  //INTERNAL MESSAGES FOR BlockHeaderSyncActor
  case class CheckHeaderResult(error: Option[BlockHeaderSyncError], headers: Seq[BlockHeader]) extends BlockHeaderSyncMessage
  /** Checks that the given block headers all connect to each other
    * If the headers do not connect, it returns the two block header hashes that do not connect
    * @param startingHeader header we are starting our header check from, this header is not checked
    *                       if this is not defined we just start from the first header in blockHeaders
    * @param blockHeaders the set of headers we are checking the validity of
    * @param maxHeight the height of the blockchain before checking the block headers
    * */
  def checkHeaders(startingHeader: Option[BlockHeader], blockHeaders: Seq[BlockHeader],
                   maxHeight: Long, networkParameters: NetworkParameters): CheckHeaderResult = {
    @tailrec
    def loop(previousBlockHeader: BlockHeader, remainingBlockHeaders: Seq[BlockHeader]): CheckHeaderResult = {
      if (remainingBlockHeaders.isEmpty) CheckHeaderResult(None,blockHeaders)
      else {
        val header = remainingBlockHeaders.head
        // Each header must reference the hash of its predecessor.
        if (header.previousBlockHash != previousBlockHeader.hash) {
          val error = BlockHeaderSyncActor.BlockHeadersDoNotConnect(previousBlockHeader.hash, header.hash)
          CheckHeaderResult(Some(error),blockHeaders)
        } else if (header.nBits == previousBlockHeader.nBits) {
          loop(header, remainingBlockHeaders.tail)
        } else {
          // Difficulty changed between consecutive headers: only legal on
          // mainnet at a difficulty-adjustment boundary.
          networkParameters match {
            case MainNet =>
              val blockHeaderHeight = (blockHeaders.size - remainingBlockHeaders.tail.size) + maxHeight
              logger.debug("Block header height: " + blockHeaderHeight)
              if ((blockHeaderHeight % MainNet.difficultyChangeThreshold) == 0) loop(remainingBlockHeaders.head, remainingBlockHeaders.tail)
              else {
                val error = BlockHeaderSyncActor.BlockHeaderDifficultyFailure(previousBlockHeader,remainingBlockHeaders.head)
                CheckHeaderResult(Some(error),blockHeaders)
              }
            case RegTest | TestNet3 =>
              //currently we are just ignoring checking difficulty on testnet and regtest as they vary wildly
              loop(remainingBlockHeaders.head, remainingBlockHeaders.tail)
          }
        }
      }
    }
    val result = if (startingHeader.isDefined) loop(startingHeader.get,blockHeaders)
    else loop(blockHeaders.head, blockHeaders.tail)
    result
  }
}
| Christewart/bitcoin-s-spv-node | src/main/scala/org/bitcoins/spvnode/networking/sync/BlockHeaderSyncActor.scala | Scala | mit | 15,803 |
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author Aravind Kalimurthy, John Miller
* @version 1.3
* @date Thu Sep 24 04:28:31 EDT 2015
* @see LICENSE (MIT style license file).
*
*/
package scalation.util
import scala.collection.mutable.MutableList
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `MultiSet` class provides an implementation for Union, Intersection and
 *  Subset for the generic `MultiSet` data type that extends `MutableList`.
 *  All operations respect element multiplicities, e.g., {1, 1, 2} is not a
 *  subset of {1, 2} because '1' occurs twice in the former.
 */
class MultiSet [T]
      extends MutableList [T]
{
    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Determine whether 'this' `MultiSet` is a subset of 'b' (NOT the reverse,
     *  despite earlier documentation): 'this.diff (b)' removes one occurrence
     *  from 'this' per occurrence in 'b', so emptiness means every element of
     *  'this' occurs in 'b' at least as many times.
     *  @param b  the other `MultiSet`
     */
    def subsetOf (b: MultiSet [T]): Boolean = super.diff (b).isEmpty

    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Determine whether 'this' `MultiSet` is a subset of 'b' (operator form of
     *  'subsetOf').
     *  @param b  the other `MultiSet`
     */
    def ⊆ (b: MultiSet [T]): Boolean = super.diff (b).isEmpty

    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Return a new `MultiSet` that is the union of 'this' and 'b'
     *  (multiset sum: multiplicities add).
     *  @param b  the other `MultiSet`
     */
    def union (b: MultiSet [T]): MultiSet [T] = MultiSet (super.union (b))

    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Return a new `MultiSet` that is the intersection of 'this' and 'b'
     *  (multiplicity of each element is the minimum of the two sides).
     *  @param b  the other `MultiSet`
     */
    def intersect (b: MultiSet [T]): MultiSet [T] = MultiSet (super.intersect (b))

    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Return a new `MutableList` that is the union of 'this' and 'b'.
     *  More efficient since it does not create a new `MultiSet`.
     *  @param b  the other `MultiSet`
     */
    def ⋃ (b: MultiSet [T]): MutableList [T] = super.union (b)

    //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Return a new `MutableList` that is the intersection of 'this' and 'b'.
     *  More efficient since it does not create a new `MultiSet`.
     *  @param b  the other `MultiSet`
     */
    def ⋂ (b: MultiSet [T]): MutableList [T] = super.intersect (b)

} // MultiSet class
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `MultiSet` object is the companion object for the `MultiSet` class,
 *  supplying factory methods that build multisets from varargs or lists.
 */
object MultiSet
{
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Create a `MultiSet` from the argument list of elements 'es'.
     *  @param es  one or more elements for the `MultiSet`
     */
    def apply [T] (es: T*): MultiSet [T] =
    {
        val ms = new MultiSet [T] ()
        es.foreach (ms += _)                      // add each element in order
        ms
    } // apply

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Create a `MultiSet` from a `MutableList`.
     *  @param ml  the mutable list of elements for the `MultiSet`
     */
    def apply [T] (ml: MutableList [T]): MultiSet [T] = apply (ml :_*)

} // MultiSet object
import MultiSet._
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `MultiSetTest` object is used to test the `MultiSet` class: subset,
 *  union and intersection over both Int and String element multisets.
 *  > run-main scalation.util.MultiSetTest
 */
object MultiSetTest extends App
{
    // NOTE(review): 'banner' is presumably provided by the scalation.util package object -- confirm
    banner ("Test MultiSet with Integer elements")
    val l1 = MultiSet (1, 2)
    val l2 = MultiSet (1, 1, 2, 3)
    val l3 = MultiSet (1, 1, 2)
    val v4 = MutableList (1, 2, 3)
    println ("l1 = " + l1)
    println ("l2 = " + l2)
    println ("l3 = " + l3)
    println ("lv = " + MultiSet (v4))
    // subset respects multiplicities; ⊆ binds tighter than +, so no parentheses are needed
    println ("l1 ⊆ l2 = " + l1 ⊆ l2)
    println ("l2 ⊆ l3 = " + l2 ⊆ l3)
    println ("l1 ⊆ l3 = " + l1 ⊆ l3)
    println ("l3 ⋃ l2 = " + l3 ⋃ l2)
    println ("l3 ⋂ l2 = " + l3 ⋂ l2)
    println ("l1 ⋂ l2 = " + l1 ⋂ l2)
    println ("l1 union l2 = " + (l1 union l2))
    println ("l1 intersect l2 = " + (l1 intersect l2))
    banner ("Test MultiSet with String elements")
    val l4 = MultiSet ("a", "b")
    val l5 = MultiSet ("a", "b", "c")
    val l6 = MultiSet ("a", "a", "b")
    println ("l4 = " + l4)
    println ("l5 = " + l5)
    println ("l6 = " + l6)
    println ("l4 ⊆ l5 = " + l4 ⊆ l5)
    println ("l6 ⊆ l5 = " + l6 ⊆ l5)
    println ("l6 ⋃ l5 = " + l6 ⋃ l5)
    println ("l6 ⋂ l5 = " + l6 ⋂ l5)
} // MultiSetTest object
| scalation/fda | scalation_1.3/scalation_mathstat/src/main/scala/scalation/util/MultiSet.scala | Scala | mit | 4,631 |
import sbt._
import sbt.Keys._
import play.Project._
/** sbt build definition for the Play application (legacy play.Project / Build style). */
object ApplicationBuild extends Build {
  // Project id, also used as the artifact name.
  val appName = "ebean"
  // SNAPSHOT version: not a released artifact.
  val appVersion = "0.1.0-SNAPSHOT"
  // Managed dependencies: JDBC support, Play filters and the Java Ebean ORM plugin.
  val appDependencies = Seq(jdbc, filters, javaEbean)
  // Root Play project; no extra settings are applied.
  val main = play.Project(appName, appVersion, appDependencies).settings()
} | KrzysztofKowalski/cat-dog-ebean-man | project/Build.scala | Scala | gpl-2.0 | 284 |
package com.eevolution.context.dictionary.infrastructure.service
import java.util.UUID
import akka.NotUsed
import com.eevolution.context.dictionary.domain._
import com.eevolution.context.dictionary.domain.model.RecordAccess
import com.eevolution.utils.PaginatedSequence
import com.lightbend.lagom.scaladsl.api.{Service, ServiceCall}
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: eduardo.moreno@e-evolution.com, http://www.e-evolution.com , http://github.com/e-Evolution
* Created by eduardo.moreno@e-evolution.com , www.e-evolution.com
*/
/**
  * Record Access Service: Lagom service descriptor exposing CRUD-style read
  * endpoints over [[RecordAccess]] dictionary entities.
  */
trait RecordAccessService extends Service with api.service.RecordAccessService {
  /** All [[RecordAccess]] entries. */
  override def getAll() : ServiceCall[NotUsed, List[RecordAccess]]
  /** A single [[RecordAccess]] looked up by its integer id. */
  override def getById(id: Int): ServiceCall[NotUsed, RecordAccess]
  /** A single [[RecordAccess]] looked up by its UUID. */
  override def getByUUID(uuid :UUID): ServiceCall[NotUsed, RecordAccess]
  /** Paginated listing; both page number and page size are optional query parameters. */
  override def getAllByPage(pageNo: Option[Int], pageSize: Option[Int]): ServiceCall[NotUsed, PaginatedSequence[RecordAccess]]
  def descriptor = {
    import Service._
    named("recordAccess").withCalls(
      pathCall("/api/v1_0_0/recordAccess/all", getAll _) ,
      // NOTE(review): the ":id" and ":uuid" patterns below share the same path shape
      // ("/api/v1_0_0/recordAccess/<segment>"), so requests cannot be routed to the
      // second one distinctly -- confirm whether separate path prefixes were intended.
      pathCall("/api/v1_0_0/recordAccess/:id", getById _),
      pathCall("/api/v1_0_0/recordAccess/:uuid", getByUUID _) ,
      pathCall("/api/v1_0_0/recordAccess?pageNo&pageSize", getAllByPage _)
    )
  }
}
| adempiere/ADReactiveSystem | dictionary-impl/src/main/scala/com/eevolution/context/dictionary/infrastructure/service/RecordAccessService.scala | Scala | gpl-3.0 | 2,056 |
package edu.mit.csail.sdg.ormolu.rel.ops
import edu.mit.csail.sdg.ormolu.rel.Relation
/**
 * The Transpose (~) of a binary relation. ~r takes the mirror image of r,
 * forming a new relation by reversing the order of atoms in each tuple.
 */
case class Transpose(relation: Relation) extends Relation {
  require(relation.arity == 2,
    "Transpose can only be used with a binary relation, r has arity " + relation.arity)

  /** Transposing swaps the two columns, so the arity is unchanged. */
  override def arity: Int = relation.arity

  /** The projection is simply the underlying projection in reverse order. */
  override def projection = relation.projection.reverse

  /** Filter, tables and query are delegated unchanged to the wrapped relation. */
  override def filter = relation.filter

  override def tables = relation.tables

  override def query = querySpec

  /** Render as "~" prefixed to the underlying relation. */
  override def toString: String = s"~$relation"
} | dlreeves/ormolu | src/edu/mit/csail/sdg/ormolu/rel/ops/Transpose.scala | Scala | mit | 706 |
/*
* Seldon -- open source prediction engine
* =======================================
* Copyright 2011-2015 Seldon Technologies Ltd and Rummble Ltd (http://www.seldon.io/)
*
**********************************************************************************************
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************************************
*/
package io.seldon.spark.features
import org.apache.log4j.Level
import org.apache.log4j.Logger
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.rdd._
import java.io.FileOutputStream
import java.io.ObjectOutputStream
import io.seldon.spark.SparkUtils
import java.io.File
import io.seldon.spark.rdd.DataSourceMode
import io.seldon.spark.rdd.DataSourceMode._
import io.seldon.spark.rdd.FileUtils
import org.apache.spark.sql.SparkSession
/** Configuration for the [[Word2VecJob]] Spark job. Defaults may be overridden
 *  first from zookeeper (see Word2VecJob.updateConf) and then from CLI args.
 */
case class Word2VecConfig (
    client : String = "",                    // client name; used as db and folder suffix
    inputPath : String = "/seldon-models",   // path prefix for input data
    outputPath : String = "/seldon-models",  // path prefix for model output
    startDay : Int = 1,                      // first day (unix day) of the input window
    days : Int = 1,                          // number of days of data to read
    awsKey : String = "",                    // AWS access key for s3n:// paths
    awsSecret : String = "",                 // AWS secret for s3n:// paths
    local : Boolean = false,                 // run with a local Spark master
    zkHosts : String = "",                   // comma-separated zookeeper hosts
    activate : Boolean = false,              // publish the model location to zookeeper when done
    minWordCount : Int = 50,                 // minimum token frequency to be included
    vectorSize : Int = 30)                   // embedding dimension
/** Trains a word2vec model over per-session item sequences and writes it out in the
 *  semantic-vectors text format, optionally publishing the output location to zookeeper.
 */
class Word2VecJob(private val sc : SparkContext,config : Word2VecConfig) {

  /** Publishes the model `location` to zookeeper at /all_clients/&lt;client&gt;/word2vec so the
   *  Seldon Server can pick it up. Prints a warning and does nothing if the zookeeper
   *  connection cannot be established.
   *  @param location output path of the trained model
   */
  def activate(location : String)
  {
    import io.seldon.spark.zookeeper.ZkCuratorHandler
    import org.apache.curator.utils.EnsurePath
    val curator = new ZkCuratorHandler(config.zkHosts)
    if(curator.getCurator.getZookeeperClient.blockUntilConnectedOrTimedOut())
    {
      val zkPath = "/all_clients/"+config.client+"/word2vec"
      val ensurePath = new EnsurePath(zkPath)
      ensurePath.ensure(curator.getCurator.getZookeeperClient)
      curator.getCurator.setData().forPath(zkPath,location.getBytes())
    }
    else
      println("Failed to get zookeeper! Can't activate model")
  }

  /** Converts (word, vector) pairs to the semantic-vectors text format: a single header line
   *  "-vectortype REAL -dimension &lt;dimension&gt;" followed by one "word|s1|s2|..." line per
   *  vector, coalesced to one partition so the header stays on the first line.
   *  @param vectors   (token, embedding) pairs
   *  @param dimension size of each embedding vector
   */
  def convertToSemVecFormat(vectors : org.apache.spark.rdd.RDD[(String,Array[Float])],dimension : Int) =
  {
    val vecString = vectors.coalesce(1, true).map{v =>
      // strip commas from the token so they cannot corrupt the delimited output
      val key = v._1.replaceAll(",", "")
      // "key|score1|score2|..." -- mkString replaces the previous mutable StringBuilder loop
      (key +: v._2.map(_.toString)).mkString("|")
      }
    val header: RDD[String] = sc.parallelize(Array("-vectortype REAL -dimension "+dimension.toString()))
    // union keeps partition order, so the header partition precedes all vector lines
    val vectorStr = header.union(vecString).coalesce(1, true)
    vectorStr
  }

  /** Loads the session-item token sequences for the configured day window, fits word2vec,
   *  writes docvectors.txt plus an empty termvectors.txt, and optionally activates the model.
   */
  def run()
  {
    val glob = config.inputPath + "/" + config.client+"/sessionitems/"+SparkUtils.getS3UnixGlob(config.startDay,config.days)+"/*"
    println("loading from "+glob)
    val input = sc.textFile(glob).map(line => line.split(" ").toSeq)
    val word2vec = new SeldonWord2Vec()
    word2vec.setMinCount(config.minWordCount)
    word2vec.setVectorSize(config.vectorSize)
    val model = word2vec.fit(input)
    val vectors = model.getVectors
    val rdd = sc.parallelize(vectors.toSeq, 200)
    val outPath = config.outputPath + "/" + config.client + "/word2vec/"+config.startDay
    val mode = DataSourceMode.fromString(config.outputPath)
    val vectorsAsString = convertToSemVecFormat(rdd,config.vectorSize)
    FileUtils.outputModelToFile(vectorsAsString, outPath, mode, "docvectors.txt")
    // termvectors.txt is written with only the format header (no term vectors are produced)
    val emptyTermFile: RDD[String] = sc.parallelize(Array("-vectortype REAL -dimension "+config.vectorSize.toString()))
    FileUtils.outputModelToFile(emptyTermFile, outPath, mode, "termvectors.txt")
    if (config.activate)
      activate(outPath)
  }
}
object Word2VecJob
{
  /** Overlays the given configuration with any JSON stored in zookeeper at
   *  /all_clients/&lt;client&gt;/offline/word2vec. Zookeeper values win over the values in
   *  `config`; if no zkHosts are set or the path is missing, `config` is returned as-is.
   */
  def updateConf(config : Word2VecConfig) =
  {
    import io.seldon.spark.zookeeper.ZkCuratorHandler
    var c = config.copy()
    if (config.zkHosts.nonEmpty)
    {
      val curator = new ZkCuratorHandler(config.zkHosts)
      val path = "/all_clients/"+config.client+"/offline/word2vec"
      if (curator.getCurator.checkExists().forPath(path) != null)
      {
        val bytes = curator.getCurator.getData().forPath(path)
        val j = new String(bytes,"UTF-8")
        println("Confguration from zookeeper -> "+j)
        import org.json4s._
        import org.json4s.jackson.JsonMethods._
        implicit val formats = DefaultFormats
        val json = parse(j)
        import org.json4s.JsonDSL._
        import org.json4s.jackson.Serialization.write
        type DslConversion = Word2VecConfig => JValue
        val existingConf = write(c) // turn existing conf into json
        val existingParsed = parse(existingConf) // parse it back into json4s internal format
        val combined = existingParsed merge json // merge with zookeeper value (zookeeper wins on conflicts)
        c = combined.extract[Word2VecConfig] // extract case class from merged json
        c
      }
      else
      {
        println("Warning: using default configuaration - path["+path+"] not found!");
        c
      }
    }
    else
    {
      println("Warning: using default configuration - no zkHost!");
      c
    }
  }
  /** Entry point. Args are parsed twice on purpose: the first pass captures zkHosts so
   *  zookeeper config can be merged in, the second re-applies the command line so CLI
   *  flags override zookeeper values. The scopt parser mutates `c` via side effects.
   */
  def main(args: Array[String])
  {
    Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
    Logger.getLogger("org.eclipse.jetty.server").setLevel(Level.OFF)
    var c = new Word2VecConfig()
    val parser = new scopt.OptionParser[Unit]("Word2Vec") {
    head("Word2Vec", "1.0")
       opt[Unit]('l', "local") foreach { x => c = c.copy(local = true) } text("local mode - use local Master")
        opt[String]('c', "client") required() valueName("<client>") foreach { x => c = c.copy(client = x) } text("client name (will be used as db and folder suffix)")
        opt[String]('i', "inputPath") valueName("path url") foreach { x => c = c.copy(inputPath = x) } text("path prefix for input")
        opt[String]('o', "outputPath") valueName("path url") foreach { x => c = c.copy(outputPath = x) } text("path prefix for output")
        opt[Int]('r', "days") foreach { x =>c = c.copy(days = x) } text("number of days in past to get foreachs for")
        opt[Int]("startDay") foreach { x =>c = c.copy(startDay = x) } text("start day in unix time")
        opt[String]('a', "awskey") valueName("aws access key") foreach { x => c = c.copy(awsKey = x) } text("aws key")
        opt[String]('s', "awssecret") valueName("aws secret") foreach { x => c = c.copy(awsSecret = x) } text("aws secret")
        opt[String]('z', "zookeeper") valueName("zookeeper hosts") foreach { x => c = c.copy(zkHosts = x) } text("zookeeper hosts (comma separated)")
        opt[Unit]("activate") foreach { x => c = c.copy(activate = true) } text("activate the model in the Seldon Server")
        opt[Int]("minWordCount") foreach { x => c = c.copy(minWordCount = x) } text("min count for a token to be included")
        opt[Int]("vectorSize") foreach { x => c =c.copy(vectorSize = x) } text("vector size")
    }
    if (parser.parse(args)) // Parse to check and get zookeeper if there
    {
      c = updateConf(c) // update from zookeeper args
      parser.parse(args) // overrride with args that were on command line
      val conf = new SparkConf().setAppName("Word2Vec")
      if (c.local)
        conf.setMaster("local")
        .set("spark.akka.frameSize", "300")
        .set("spark.driver.memory", "15g")
        .set("spark.executor.memory", "15g")
        .set("spark.driver.maxResultSize", "10g")
      val spark = SparkSession.builder()
        .config(conf)
        .getOrCreate()
      val sc = spark.sparkContext //new SparkContext(conf)
      try
      {
        // wire up s3n:// support; credentials only set when both key and secret are present
        sc.hadoopConfiguration.set("fs.s3.impl", "org.apache.hadoop.fs.s3native.NativeS3FileSystem")
        if (c.awsKey.nonEmpty && c.awsSecret.nonEmpty)
        {
          sc.hadoopConfiguration.set("fs.s3n.awsAccessKeyId", c.awsKey)
          sc.hadoopConfiguration.set("fs.s3n.awsSecretAccessKey", c.awsSecret)
        }
        println(c)
        val w2v = new Word2VecJob(sc,c)
        w2v.run()
      }
      finally
      {
        // always release the Spark context, even when the job throws
        println("Shutting down job")
        sc.stop()
      }
    }
    else
    {
      // bad arguments: scopt has already printed usage; nothing more to do
    }
  }
} | smrjan/seldon-server | offline-jobs/spark/src/main/scala/io/seldon/spark/features/Word2VecJob.scala | Scala | apache-2.0 | 8,632 |
import _root_.io.gatling.core.scenario.Simulation
import ch.qos.logback.classic.{Level, LoggerContext}
import io.gatling.core.Predef._
import io.gatling.http.Predef._
import org.slf4j.LoggerFactory
import scala.concurrent.duration._
/**
 * Performance test for the LtiOutcomeUrl entity.
 *
 * Scenario: verify security is on (401), authenticate as admin, then repeatedly
 * list, create, read and delete LtiOutcomeUrl resources through the REST API.
 */
class LtiOutcomeUrlGatlingTest extends Simulation {
    val context: LoggerContext = LoggerFactory.getILoggerFactory.asInstanceOf[LoggerContext]
    // Log all HTTP requests
    //context.getLogger("io.gatling.http").setLevel(Level.valueOf("TRACE"))
    // Log failed HTTP requests
    //context.getLogger("io.gatling.http").setLevel(Level.valueOf("DEBUG"))
    // Target host; override with -DbaseURL=...
    val baseURL = Option(System.getProperty("baseURL")) getOrElse """http://localhost:8080"""
    val httpConf = http
        .baseUrl(baseURL)
        .inferHtmlResources()
        .acceptHeader("*/*")
        .acceptEncodingHeader("gzip, deflate")
        .acceptLanguageHeader("fr,fr-fr;q=0.8,en-us;q=0.5,en;q=0.3")
        .connectionHeader("keep-alive")
        .userAgentHeader("Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:33.0) Gecko/20100101 Firefox/33.0")
        .silentResources // Silence all resources like css or css so they don't clutter the results
    val headers_http = Map(
        "Accept" -> """application/json"""
    )
    val headers_http_authentication = Map(
        "Content-Type" -> """application/json""",
        "Accept" -> """application/json"""
    )
    // the JWT saved during authentication is injected via the ${access_token} session attribute
    val headers_http_authenticated = Map(
        "Accept" -> """application/json""",
        "Authorization" -> "${access_token}"
    )
    val scn = scenario("Test the LtiOutcomeUrl entity")
        .exec(http("First unauthenticated request")
        .get("/api/account")
        .headers(headers_http)
        .check(status.is(401))
        ).exitHereIfFailed
        .pause(10)
        .exec(http("Authentication")
        .post("/api/authenticate")
        .headers(headers_http_authentication)
        .body(StringBody("""{"username":"admin", "password":"admin"}""")).asJson
        // save the returned JWT for all subsequent authenticated requests
        .check(header("Authorization").saveAs("access_token"))).exitHereIfFailed
        .pause(2)
        .exec(http("Authenticated request")
        .get("/api/account")
        .headers(headers_http_authenticated)
        .check(status.is(200)))
        .pause(10)
        .repeat(2) {
            exec(http("Get all ltiOutcomeUrls")
            .get("/api/lti-outcome-urls")
            .headers(headers_http_authenticated)
            .check(status.is(200)))
            .pause(10 seconds, 20 seconds)
            .exec(http("Create new ltiOutcomeUrl")
            .post("/api/lti-outcome-urls")
            .headers(headers_http_authenticated)
            .body(StringBody("""{
                "id":null
                , "url":"SAMPLE_TEXT"
                , "sourcedId":"SAMPLE_TEXT"
                }""")).asJson
            .check(status.is(201))
            // remember the Location header so the created entity can be read and deleted
            .check(headerRegex("Location", "(.*)").saveAs("new_ltiOutcomeUrl_url"))).exitHereIfFailed
            .pause(10)
            .repeat(5) {
                exec(http("Get created ltiOutcomeUrl")
                .get("${new_ltiOutcomeUrl_url}")
                .headers(headers_http_authenticated))
                .pause(10)
            }
            .exec(http("Delete created ltiOutcomeUrl")
            .delete("${new_ltiOutcomeUrl_url}")
            .headers(headers_http_authenticated))
            .pause(10)
        }
    val users = scenario("Users").exec(scn)
    setUp(
        // -Dusers / -Dramp control the load shape (defaults: 100 users over 1 minute)
        users.inject(rampUsers(Integer.getInteger("users", 100)) during(Integer.getInteger("ramp", 1) minutes))
    ).protocols(httpConf)
}
| ls1intum/ArTEMiS | src/test/gatling/user-files/simulations/LtiOutcomeUrlGatlingTest.scala | Scala | mit | 3,617 |
package scalaxy.streams
private[streams] trait BuilderSinks extends StreamComponents {
  val global: scala.reflect.api.Universe
  import global._
  // Base class for sinks that accumulate stream elements into a collection
  // builder and finish with builder.result().
  trait BuilderSink extends StreamSink
  {
    // A sink introduces no user lambdas of its own.
    override def lambdaCount = 0
    // Whether the generated code should call builder.sizeHint when the stream's
    // output size is statically known.
    def usesSizeHint: Boolean
    // Builds the tree instantiating the concrete collection builder for this sink.
    def createBuilder(inputVars: TuploidValue[Tree], typed: Tree => Tree): Tree
    override def emit(input: StreamInput, outputNeeds: OutputNeeds, nextOps: OpsAndOutputNeeds): StreamOutput =
    {
      import input._
      requireSinkInput(input, outputNeeds, nextOps)
      val builder = fresh("builder")
      require(input.vars.alias.nonEmpty, s"input.vars = $input.vars")
      // println("input.vars.alias.get = " + input.vars.alias.get + ": " + input.vars.tpe)
      // Only emit a sizeHint statement when the output size is known up front.
      val sizeHintOpt = input.outputSize.map(s => q"$builder.sizeHint($s)")
      // Typecheck the whole snippet at once, then destructure the resulting Block
      // into its four statements so each can be routed to a different StreamOutput
      // slot below. A dummy statement stands in for the size hint when absent, so
      // the Block always has exactly the shape this pattern match expects.
      val Block(List(
        builderDef,
        sizeHint,
        builderAdd), result) = typed(q"""
        private[this] val $builder = ${createBuilder(input.vars, typed)};
        ${sizeHintOpt.getOrElse(dummyStatement(fresh))};
        $builder += ${input.vars.alias.get};
        $builder.result()
      """)
      StreamOutput(
        prelude = List(builderDef),
        beforeBody = input.outputSize.filter(_ => usesSizeHint).map(_ => sizeHint).toList,
        body = List(builderAdd),
        ending = List(result))
    }
  }
}
| nativelibs4java/scalaxy-streams | src/main/scala/streams/sinks/BuilderSinks.scala | Scala | bsd-3-clause | 1,391 |
/*
* Distributed as part of Scalala, a linear algebra library.
*
* Copyright (C) 2008- Daniel Ramage
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110 USA
*/
package scalala;
package generic;
package collection;
import scalala.collection.sparse.SparseArray;
import scalala.scalar.Scalar;
import scalala.tensor.Tensor1;
import scalala.tensor.dense.DenseVectorCol;
import scalala.tensor.sparse.SparseVectorCol;
/**
 * View something as a Tensor1.
 *
 * Type class: an instance witnesses that values of type `From` can be viewed
 * as a rank-1 tensor with keys of type `K` and values of type `V`.
 *
 * @author dramage
 */
trait CanViewAsTensor1[-From,K,V] {
  /** Returns the Tensor1 view of `from`. */
  def apply(from : From) : Tensor1[K,V];
}
object CanViewAsTensor1 {
  //
  // View arrays
  //
  /** Views an Array[V] as a dense column vector keyed by index. */
  class ArrayTensor1[V:ClassManifest:Scalar]
  extends CanViewAsTensor1[Array[V],Int,V] {
    def apply(from : Array[V]) = new DenseVectorCol[V](from);
  }
  /** Generic instance for any array element type with a Scalar. */
  implicit def mkArrayTensor1[V:ClassManifest:Scalar] =
    new ArrayTensor1[V]();
  // Pre-instantiated implicits for the common primitive element types.
  implicit object ArrayI extends ArrayTensor1[Int];
  implicit object ArrayS extends ArrayTensor1[Short];
  implicit object ArrayL extends ArrayTensor1[Long];
  implicit object ArrayF extends ArrayTensor1[Float];
  implicit object ArrayD extends ArrayTensor1[Double];
  implicit object ArrayB extends ArrayTensor1[Boolean];
  //
  // View sparse arrays
  //
  /** Views a SparseArray[V] as a sparse column vector keyed by index. */
  class SparseArrayTensor1[V:ClassManifest:Scalar]
  extends CanViewAsTensor1[SparseArray[V],Int,V] {
    def apply(from : SparseArray[V]) = new SparseVectorCol[V](from);
  }
  /** Generic instance for any sparse-array element type with a Scalar. */
  implicit def mkSparseArrayTensor1[V:ClassManifest:Scalar] =
    new SparseArrayTensor1[V]();
  // Pre-instantiated implicits for the common primitive element types.
  implicit object SparseArrayI extends SparseArrayTensor1[Int];
  implicit object SparseArrayS extends SparseArrayTensor1[Short];
  implicit object SparseArrayL extends SparseArrayTensor1[Long];
  implicit object SparseArrayF extends SparseArrayTensor1[Float];
  implicit object SparseArrayD extends SparseArrayTensor1[Double];
  implicit object SparseArrayB extends SparseArrayTensor1[Boolean];
  //
  // View pre-constructed Tensor1 instances
  //
  /** A Tensor1 is trivially viewable as itself (identity instance). */
  class Tensor1Tensor1[K,V:Scalar]
  extends CanViewAsTensor1[Tensor1[K,V],K,V] {
    def apply(from : Tensor1[K,V]) = from;
  }
  implicit def mkTensor1Tensor1[K,V:Scalar] =
    new Tensor1Tensor1[K,V]();
}
| scalala/Scalala | src/main/scala/scalala/generic/collection/CanViewAsTensor1.scala | Scala | lgpl-2.1 | 2,829 |
import scala.util.matching.Regex
import scala.util.matching.Regex.MatchIterator
// IDE type-inference fixture: the /*start*/.../*end*/ markers delimit the expression whose
// inferred type is compared against the expected-type comment at the end of the file.
def foo() {
  val x: Regex.MatchIterator = ("a".r.findAllIn("blabla"))
  /*start*/x/*end*/
}
//Regex.MatchIterator | ilinum/intellij-scala | testdata/typeInference/bugs2/SCL2265B.scala | Scala | apache-2.0 | 195 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.spark.impl
import org.apache.ignite.cache.query.SqlFieldsQuery
import org.apache.ignite.spark.IgniteDataFrameSettings._
import QueryUtils.{compileCreateTable, compileDropTable, compileInsert}
import org.apache.ignite.internal.IgniteEx
import org.apache.ignite.internal.processors.query.QueryTypeDescriptorImpl
import org.apache.ignite.internal.processors.query.QueryUtils.DFLT_SCHEMA
import org.apache.ignite.spark.IgniteContext
import org.apache.ignite.{Ignite, IgniteException}
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.{DataFrame, Row}
/**
* Helper class for executing DDL queries.
*/
/**
 * Helper class for executing DDL queries.
 */
private[apache] object QueryHelper {
    /**
      * Drops provided table.
      *
      * @param tableName Table name.
      * @param ignite Ignite.
      */
    def dropTable(tableName: String, ignite: Ignite): Unit = {
        // DDL is executed through Ignite's internal SQL query processor
        val qryProcessor = ignite.asInstanceOf[IgniteEx].context().query()
        val qry = compileDropTable(tableName)
        qryProcessor.querySqlFields(new SqlFieldsQuery(qry), true).getAll
    }
    /**
      * Creates table.
      *
      * @param schema Schema.
      * @param tblName Table name.
      * @param primaryKeyFields Primary key fields.
      * @param createTblOpts Ignite specific options (appended as the WITH clause).
      * @param ignite Ignite.
      */
    def createTable(schema: StructType, tblName: String, primaryKeyFields: Seq[String], createTblOpts: Option[String],
        ignite: Ignite): Unit = {
        val qryProcessor = ignite.asInstanceOf[IgniteEx].context().query()
        val qry = compileCreateTable(schema, tblName, primaryKeyFields, createTblOpts)
        qryProcessor.querySqlFields(new SqlFieldsQuery(qry), true).getAll
    }
    /**
      * Ensures all options are specified correctly to create table based on provided `schema`.
      * Throws IgniteException when the table name is missing, the schema is not the default
      * one, or the primary key option is absent/empty/not part of the DataFrame schema.
      *
      * @param schema Schema of new table.
      * @param params Parameters.
      */
    // NOTE(review): the `ctx` parameter is currently unused here -- confirm whether it can be dropped.
    def ensureCreateTableOptions(schema: StructType, params: Map[String, String], ctx: IgniteContext): Unit = {
        if (!params.contains(OPTION_TABLE) && !params.contains("path"))
            throw new IgniteException("'table' must be specified.")
        if (params.contains(OPTION_SCHEMA) && !params(OPTION_SCHEMA).equalsIgnoreCase(DFLT_SCHEMA)) {
            throw new IgniteException("Creating new tables in schema " + params(OPTION_SCHEMA) + " is not valid, tables"
                + " must only be created in " + DFLT_SCHEMA)
        }
        params.get(OPTION_CREATE_TABLE_PRIMARY_KEY_FIELDS)
            .map(_.split(','))
            .getOrElse(throw new IgniteException("Can't create table! Primary key fields has to be specified."))
            .map(_.trim)
            .foreach { pkField ⇒
                if (pkField == "")
                    throw new IgniteException("PK field can't be empty.")
                if (!schema.exists(_.name.equalsIgnoreCase(pkField)))
                    throw new IgniteException(s"'$pkField' doesn't exists in DataFrame schema.")
            }
    }
    /**
      * Saves data to the table.
      *
      * @param data Data.
      * @param tblName Table name.
      * @param schemaName Optional schema name.
      * @param ctx Ignite context.
      * @param streamerAllowOverwrite Flag enabling overwriting existing values in cache.
      * @param streamerFlushFrequency Insert query streamer automatic flush frequency.
      * @param streamerPerNodeBufferSize Insert query streamer size of per node query buffer.
      * @param streamerPerNodeParallelOperations Insert query streamer maximum number of parallel operations for a single node.
      *
      * @see [[org.apache.ignite.IgniteDataStreamer]]
      * @see [[org.apache.ignite.IgniteDataStreamer#allowOverwrite(boolean)]]
      * @see [[org.apache.ignite.IgniteDataStreamer#autoFlushFrequency(long)]]
      * @see [[org.apache.ignite.IgniteDataStreamer#perNodeBufferSize(int)]]
      * @see [[org.apache.ignite.IgniteDataStreamer#perNodeParallelOperations(int)]]
      */
    def saveTable(data: DataFrame,
        tblName: String,
        schemaName: Option[String],
        ctx: IgniteContext,
        streamerAllowOverwrite: Option[Boolean],
        streamerFlushFrequency: Option[Long],
        streamerPerNodeBufferSize: Option[Int],
        streamerPerNodeParallelOperations: Option[Int]
    ): Unit = {
        val insertQry = compileInsert(tblName, data.schema)
        // one streamer per RDD partition; the insert statement is compiled once on the driver
        data.rdd.foreachPartition(iterator =>
            savePartition(iterator,
                insertQry,
                tblName,
                schemaName,
                ctx,
                streamerAllowOverwrite,
                streamerFlushFrequency,
                streamerPerNodeBufferSize,
                streamerPerNodeParallelOperations
            ))
    }
    /**
      * Saves partition data to the Ignite table.
      *
      * @param iterator Data iterator.
      * @param insertQry Insert query.
      * @param tblName Table name.
      * @param schemaName Optional schema name.
      * @param ctx Ignite context.
      * @param streamerAllowOverwrite Flag enabling overwriting existing values in cache.
      * @param streamerFlushFrequency Insert query streamer automatic flush frequency.
      * @param streamerPerNodeBufferSize Insert query streamer size of per node query buffer.
      * @param streamerPerNodeParallelOperations Insert query streamer maximum number of parallel operations for a single node.
      *
      * @see [[org.apache.ignite.IgniteDataStreamer]]
      * @see [[org.apache.ignite.IgniteDataStreamer#allowOverwrite(boolean)]]
      * @see [[org.apache.ignite.IgniteDataStreamer#autoFlushFrequency(long)]]
      * @see [[org.apache.ignite.IgniteDataStreamer#perNodeBufferSize(int)]]
      * @see [[org.apache.ignite.IgniteDataStreamer#perNodeParallelOperations(int)]]
      */
    private def savePartition(iterator: Iterator[Row],
        insertQry: String,
        tblName: String,
        schemaName: Option[String],
        ctx: IgniteContext,
        streamerAllowOverwrite: Option[Boolean],
        streamerFlushFrequency: Option[Long],
        streamerPerNodeBufferSize: Option[Int],
        streamerPerNodeParallelOperations: Option[Int]
    ): Unit = {
        val tblInfo = sqlTableInfo(ctx.ignite(), tblName, schemaName).get.asInstanceOf[QueryTypeDescriptorImpl]
        val streamer = ctx.ignite().dataStreamer(tblInfo.cacheName)
        // streamer tuning knobs are only applied when explicitly provided
        streamerAllowOverwrite.foreach(v ⇒ streamer.allowOverwrite(v))
        streamerFlushFrequency.foreach(v ⇒ streamer.autoFlushFrequency(v))
        streamerPerNodeBufferSize.foreach(v ⇒ streamer.perNodeBufferSize(v))
        streamerPerNodeParallelOperations.foreach(v ⇒ streamer.perNodeParallelOperations(v))
        try {
            val qryProcessor = ctx.ignite().asInstanceOf[IgniteEx].context().query()
            iterator.foreach { row ⇒
                val schema = row.schema
                // bind arguments in the row's schema order, matching the compiled insert statement
                val args = schema.map { f ⇒
                    row.get(row.fieldIndex(f.name)).asInstanceOf[Object]
                }
                qryProcessor.streamUpdateQuery(tblInfo.cacheName,
                    tblInfo.schemaName, streamer, insertQry, args.toArray)
            }
        }
        finally {
            // close() also flushes any buffered updates
            streamer.close()
        }
    }
}
| shroman/ignite | modules/spark/src/main/scala/org/apache/ignite/spark/impl/QueryHelper.scala | Scala | apache-2.0 | 8,090 |
package io.continuum.bokeh
package examples
import scalajs.js
import org.scalajs.jquery.jQuery
// Scala.js Bokeh example: renders Anscombe's quartet — four x/y data sets with
// near-identical summary statistics but very different shapes — each plotted
// against the same regression line y = 0.5*x + 3.
object Anscombe extends js.JSApp {
    def main(): Unit = {
        // Raw data rows; each inner List holds one observation across all four
        // sets as (xi, yi, xii, yii, xiii, yiii, xiv, yiv). The transpose turns
        // them into 8 columns, one per series.
        val anscombe_quartet = List(
            List(10.0, 8.04, 10.0, 9.14, 10.0, 7.46, 8.0, 6.58),
            List( 8.0, 6.95, 8.0, 8.14, 8.0, 6.77, 8.0, 5.76),
            List(13.0, 7.58, 13.0, 8.74, 13.0, 12.74, 8.0, 7.71),
            List( 9.0, 8.81, 9.0, 8.77, 9.0, 7.11, 8.0, 8.84),
            List(11.0, 8.33, 11.0, 9.26, 11.0, 7.81, 8.0, 8.47),
            List(14.0, 9.96, 14.0, 8.10, 14.0, 8.84, 8.0, 7.04),
            List( 6.0, 7.24, 6.0, 6.13, 6.0, 6.08, 8.0, 5.25),
            List( 4.0, 4.26, 4.0, 3.10, 4.0, 5.39, 19.0, 12.50),
            List(12.0, 10.84, 12.0, 9.13, 12.0, 8.15, 8.0, 5.56),
            List( 7.0, 4.82, 7.0, 7.26, 7.0, 6.42, 8.0, 7.91),
            List( 5.0, 5.68, 5.0, 4.74, 5.0, 5.73, 8.0, 6.89)).transpose
        // Column data source holding the scatter points for all four sets;
        // make_plot selects a pair of columns by symbol name.
        object circles_source extends ColumnDataSource {
            val xi = column(anscombe_quartet(0))
            val yi = column(anscombe_quartet(1))
            val xii = column(anscombe_quartet(2))
            val yii = column(anscombe_quartet(3))
            val xiii = column(anscombe_quartet(4))
            val yiii = column(anscombe_quartet(5))
            val xiv = column(anscombe_quartet(6))
            val yiv = column(anscombe_quartet(7))
        }
        // `length` evenly spaced values over [a, b], endpoints included.
        def linspace(a: Double, b: Double, length: Int = 100): Seq[Double] = {
            val increment = (b - a) / (length - 1)
            (0 until length).map(i => a + increment*i)
        }
        // Shared regression line y = 0.5*x + 3 sampled across the x range.
        object lines_source extends ColumnDataSource {
            val x = column(linspace(-0.5, 20.5, 10))
            val y = column(x.value.map(v => v*0.5 + 3.0))
        }
        import lines_source.{x,y}
        // All four subplots share the same x/y ranges for visual comparability.
        val xdr = new Range1d().start(-0.5).end(20.5)
        val ydr = new Range1d().start(-0.5).end(20.5)
        // Builds one subplot: axes, grids, the shared regression line, and the
        // scatter points taken from the circles_source columns named by
        // xname/yname.
        def make_plot(title: String, xname: Symbol, yname: Symbol) = {
            val plot = new Plot()
                .x_range(xdr)
                .y_range(ydr)
                .title(title)
                .width(400)
                .height(400)
                .border_fill_color(Color.White)
                .background_fill_color("#e9e0db")
            val xaxis = new LinearAxis().plot(plot).axis_line_color()
            val yaxis = new LinearAxis().plot(plot).axis_line_color()
            // Prepend the axes to the plot's below/left axis lists.
            plot.below <<= (xaxis :: _)
            plot.left <<= (yaxis :: _)
            val xgrid = new Grid().plot(plot).axis(xaxis).dimension(0)
            val ygrid = new Grid().plot(plot).axis(yaxis).dimension(1)
            val line_renderer = new GlyphRenderer()
                .data_source(lines_source)
                .glyph(Line().x(x).y(y).line_color("#666699").line_width(2))
            val circle_renderer = new GlyphRenderer()
                .data_source(circles_source)
                .glyph(Circle().x(xname).y(yname).size(12).fill_color("#cc6633").line_color("#cc6633").fill_alpha(50%%))
            plot.renderers := List(xaxis, yaxis, xgrid, ygrid, line_renderer, circle_renderer)
            plot
        }
        val I = make_plot("I", 'xi, 'yi)
        val II = make_plot("II", 'xii, 'yii)
        val III = make_plot("III", 'xiii, 'yiii)
        val IV = make_plot("IV", 'xiv, 'yiv)
        // Arrange the four plots in a 2x2 grid and inject the rendered
        // document into the #anscombe element on the page.
        val children = List(List(I, II), List(III, IV))
        val grid = new GridPlot().children(children).width(800)
        val frag = new Document(grid).autoload()
        jQuery("#anscombe").html(frag.toString)
    }
}
| bokeh/bokeh-scala | anscombe/src/main/scala/Anscombe.scala | Scala | mit | 3,602 |
package com.github.mdr.mash.view.printer
import com.github.mdr.mash.ns.os.{ PermissionsClass, PermissionsSectionClass }
import com.github.mdr.mash.runtime.MashObject
/** Renders mash permission objects in the conventional Unix `rwxrwxrwx` form. */
object PermissionsPrinter {

  /** Renders a single permission section (owner, group or others) as an
    * `rwx` triplet, using `-` for each flag that is not set. */
  def permissionsSectionString(section: MashObject): String = {
    val wrapper = PermissionsSectionClass.Wrapper(section)
    val flags = Seq(
      wrapper.canRead -> "r",
      wrapper.canWrite -> "w",
      wrapper.canExecute -> "x")
    flags.map { case (enabled, letter) => if (enabled) letter else "-" }.mkString
  }

  /** Renders a full permissions object as owner, group and others sections
    * concatenated, e.g. `rwxr-xr--`. */
  def permissionsString(perms: MashObject): String = {
    val wrapper = PermissionsClass.Wrapper(perms)
    Seq(wrapper.owner, wrapper.group, wrapper.others)
      .map(permissionsSectionString)
      .mkString
  }
}
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.diagrams
// Pairs a source-position anchor with the value that was recorded at that position.
private[diagrams] case class AnchorValue(anchor: Int, value: Any)
/**
 * A trait that represents an expression recorded by <code>DiagrammedExprMacro</code>. It carries:
 *
 * <ul>
 * <li>the expression's value</li>
 * <li>an anchor that records the position of this expression</li>
 * <li>anchor values of this expression (including sub-expressions)</li>
 * </ul>
 *
 * <code>DiagrammedExpr</code> is used by code generated from <code>DiagrammedAssertionsMacro</code>; it needs to be public
 * so that the generated code can be compiled. ScalaTest users should never need to use <code>DiagrammedExpr</code>
 * directly.
 */
trait DiagrammedExpr[+T] {
  val anchor: Int
  def anchorValues: List[AnchorValue]
  def value: T

  // Collapses repeated anchors, keeping only the last value recorded for each.
  protected[diagrams] def eliminateDuplicates(anchorValues: List[AnchorValue]): List[AnchorValue] =
    anchorValues.groupBy(_.anchor).valuesIterator.map(_.last).toList
}
/**
 * <code>DiagrammedExpr</code> companion object that provides factory methods to create different sub types of <code>DiagrammedExpr</code>
 *
 * <code>DiagrammedExpr</code> is used by code generated from <code>DiagrammedAssertionsMacro</code>, it needs to be public
 * so that the generated code can be compiled. It is expected that ScalaTest users would never need to use <code>DiagrammedExpr</code>
 * directly.
 */
object DiagrammedExpr {

  /**
   * Create simple <code>DiagrammedExpr</code> that wraps expressions that are not <code>Select</code>, <code>Apply</code> or <code>TypeApply</code>.
   *
   * @param expression the expression value
   * @param anchor the anchor of the expression
   * @return a simple <code>DiagrammedExpr</code>
   */
  def simpleExpr[T](expression: T, anchor: Int): DiagrammedExpr[T] = new DiagrammedSimpleExpr(expression, anchor)

  /**
   * Create apply <code>DiagrammedExpr</code> that wraps <code>Apply</code> or <code>TypeApply</code> expression.
   *
   * @param qualifier the qualifier of the <code>Apply</code> or <code>TypeApply</code> expression
   * @param args the arguments of the <code>Apply</code> or <code>TypeApply</code> expression
   * @param value the expression value
   * @param anchor the anchor of the expression
   * @return an apply <code>DiagrammedExpr</code>
   */
  def applyExpr[T](qualifier: DiagrammedExpr[_], args: List[DiagrammedExpr[_]], value: T, anchor: Int): DiagrammedExpr[T] =
    new DiagrammedApplyExpr(qualifier, args, value, anchor)

  /**
   * Create select <code>DiagrammedExpr</code> that wraps <code>Select</code> expression.
   *
   * @param qualifier the qualifier of the <code>Select</code> expression
   * @param value the expression value
   * @param anchor the anchor of the expression
   * @return a select <code>DiagrammedExpr</code>
   */
  def selectExpr[T](qualifier: DiagrammedExpr[_], value: T, anchor: Int): DiagrammedExpr[T] =
    new DiagrammedSelectExpr(qualifier, value, anchor)
}
}
// Leaf expression: contributes exactly one anchor/value pair (its own).
private[diagrams] class DiagrammedSimpleExpr[T](val value: T, val anchor: Int) extends DiagrammedExpr[T] {
  def anchorValues = List(AnchorValue(anchor, value))
}
// Wraps an Apply/TypeApply: records qualifier anchors first, then this node's
// own anchor/value, then the argument anchors (negative argument anchors are dropped).
private[diagrams] class DiagrammedApplyExpr[T](qualifier: DiagrammedExpr[_], args: List[DiagrammedExpr[_]], val value: T, val anchor: Int) extends DiagrammedExpr[T] {

  def anchorValues = {
    val fromQualifier = eliminateDuplicates(qualifier.anchorValues)
    val fromArgs = args.flatMap(arg => eliminateDuplicates(arg.anchorValues)).filter(_.anchor >= 0)
    fromQualifier ++ (AnchorValue(anchor, value) :: fromArgs)
  }
}
// Wraps a Select: records the qualifier's anchors followed by this node's own anchor/value.
private[diagrams] class DiagrammedSelectExpr[T](qualifier: DiagrammedExpr[_], val value: T, val anchor: Int) extends DiagrammedExpr[T] {
  def anchorValues = {
    val quantifierAnchorValues = eliminateDuplicates(qualifier.anchorValues)

    quantifierAnchorValues.toList ::: List(AnchorValue(anchor, value))
  }
}
package com.eharmony.aloha.models.reg
import com.eharmony.aloha.models.reg.json.RegressionModelJson
import spray.json.pimpString
import com.eharmony.aloha.io.StringReadable
import org.junit.runners.BlockJUnit4ClassRunner
import org.junit.runner.RunWith
import org.junit.Test
import org.junit.Assert._
import com.eharmony.aloha.util.{Logging, Timing}
@RunWith(classOf[BlockJUnit4ClassRunner])
class BigModelParseTest extends RegressionModelJson with Timing with Logging {

  /** The purpose of this test is that we can parse a fairly large model JSON to an abstract syntax tree of data used
    * to construct the regression model.  This JSON has
    *
    - 184,846 file lines
    - 94 features
    - 874 first order weights
    - 30,598 second order weights
    - 341 spline knots.
    *
    * and comes from an actual model with the information stripped out.
    */
  @Test def testBigJsonParsedToAstForRegModel() {
    // time(...) returns the wrapped result together with the elapsed seconds.
    val ((s, data), t) = time(getBigZippedData("/com/eharmony/aloha/models/reg/semi_cleaned_big_model.json.gz"))
    // NOTE(review): a wall-clock bound makes this test sensitive to slow CI hosts.
    assertTrue(s"Should take less than 10 seconds to parse, took $t", t < 10)
    assertEquals("file lines", 184846, scala.io.Source.fromString(s).getLines().size)
    assertEquals("Features", 94, data.features.size)
    assertEquals("First order weights", 874, data.weights.size)
    assertEquals("Higher order weights", 30598, data.higherOrderFeatures.map(_.size).getOrElse(0))
    assertEquals("spline size", 341, data.spline.map(_.knots.size).getOrElse(0))
    debug("file lines: 184846, features: 94, first order weights: 874, higher order weights: 30598, spline size: 341")
  }

  // Reads the gzipped classpath resource into a string and parses it into the
  // regression-model AST; returns both so the test can assert on each.
  private[this] def getBigZippedData(resourcePath: String) = {
    val s = StringReadable.gz.fromResource(resourcePath)
    (s, s.parseJson.convertTo[RegData])
  }
}
| eHarmony/aloha | aloha-core/src/test/scala/com/eharmony/aloha/models/reg/BigModelParseTest.scala | Scala | mit | 1,872 |
package eventstore
package core
package cluster
import java.net.InetSocketAddress
import java.time.ZonedDateTime
// Description of a single EventStore cluster member as reported by the
// cluster gossip endpoint.
@SerialVersionUID(1L)
final case class MemberInfo(
  instanceId: Uuid,
  timestamp: ZonedDateTime,
  state: NodeState,
  isAlive: Boolean,
  internalTcp: InetSocketAddress,
  externalTcp: InetSocketAddress,
  internalSecureTcp: InetSocketAddress,
  externalSecureTcp: InetSocketAddress,
  internalHttp: InetSocketAddress,
  externalHttp: InetSocketAddress,
  lastCommitPosition: Long,
  writerCheckpoint: Long,
  chaserCheckpoint: Long,
  epochPosition: Long,
  epochNumber: Int,
  epochId: Uuid,
  nodePriority: Int
) extends Ordered[MemberInfo] {

  // Members are ordered by their node state only.
  def compare(that: MemberInfo) = this.state compare that.state

  // Two infos describe the same member iff they share an instance id.
  def like(other: MemberInfo): Boolean = this.instanceId == other.instanceId
}
package scaldi.util
object JvmTestUtil {

  // It is not pretty at all, but if you know better way to get JVM shutdown hook count, I would be happy to use it :)
  /** Current number of registered JVM shutdown hooks, read reflectively from
    * the JDK-internal `java.lang.ApplicationShutdownHooks.hooks` map.
    * NOTE(review): relies on a private JDK field; on JDK 9+ with strong module
    * encapsulation `setAccessible` may throw — confirm on target JVM versions.
    */
  def shutdownHookCount: Int = {
    val hooksField = Class.forName("java.lang.ApplicationShutdownHooks").getDeclaredField("hooks")
    if (!hooksField.isAccessible) hooksField.setAccessible(true)
    hooksField.get(null).asInstanceOf[java.util.Map[_, _]].size
  }
}
| scaldi/scaldi | src/test/scala/scaldi/util/JvmTestUtil.scala | Scala | apache-2.0 | 431 |
package com.codahale.jerkson.deser
import language.higherKinds
import com.fasterxml.jackson.core.{ JsonToken, JsonParser }
import com.fasterxml.jackson.databind.JavaType
import com.fasterxml.jackson.databind.{ JsonDeserializer, DeserializationContext }
import collection.generic.GenericCompanion
import com.fasterxml.jackson.databind.deser.ResolvableDeserializer
/** Jackson deserializer that reads a JSON array into any Scala collection
  * built through its `GenericCompanion` (List, Vector, Set, ...). */
class SeqDeserializer[+CC[X] <: Traversable[X]](companion: GenericCompanion[CC],
                                                elementType: JavaType)
  extends JsonDeserializer[Object] with ResolvableDeserializer {

  // Assigned once in resolve(), when the context can look up the element deserializer.
  var elementDeserializer: JsonDeserializer[Object] = _

  def deserialize(jp: JsonParser, ctxt: DeserializationContext): CC[Object] = {
    if (jp.getCurrentToken != JsonToken.START_ARRAY) {
      throw ctxt.mappingException(elementType.getRawClass)
    }
    val elements = companion.newBuilder[Object]
    while (jp.nextToken() != JsonToken.END_ARRAY) {
      elements += elementDeserializer.deserialize(jp, ctxt)
    }
    elements.result()
  }

  def resolve(ctxt: DeserializationContext) {
    elementDeserializer = ctxt.findRootValueDeserializer(elementType)
  }

  // Safe to cache: behavior depends only on the (companion, elementType) pair.
  override def isCachable = true
}
| mDialog/jerkson | src/main/scala/com/codahale/jerkson/deser/SeqDeserializer.scala | Scala | mit | 1,146 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.index.index.id
import org.geotools.factory.Hints
import org.locationtech.geomesa.index.api.GeoMesaFeatureIndex
import org.locationtech.geomesa.index.geotools.GeoMesaDataStoreFactory.GeoMesaDataStoreConfig
import org.locationtech.geomesa.index.index.IndexKeySpace
import org.locationtech.geomesa.index.index.IndexKeySpace._
import org.locationtech.geomesa.index.strategies.IdFilterStrategy
import org.locationtech.geomesa.index.utils.Explainer
import org.locationtech.geomesa.utils.index.ByteArrays
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import org.opengis.filter.Filter
// Singleton instance of the feature-ID key space; rows are ordered by raw bytes.
object IdIndexKeySpace extends IndexKeySpace[Set[Array[Byte]], Array[Byte]]()(ByteRange.ByteOrdering)
    with IdIndexKeySpace
/**
  * Key space for the ID index: index rows are keyed directly on the serialized
  * feature ID, and queries are resolved to exact single-row ranges.
  */
trait IdIndexKeySpace extends IndexKeySpace[Set[Array[Byte]], Array[Byte]] {

  // Every feature type has an ID, so this index always applies.
  override def supports(sft: SimpleFeatureType): Boolean = true

  // note: technically this doesn't match the index key, but it's only
  // used for extracting the feature ID so it works out
  override def indexKeyByteLength: Int = 0

  // Index key is simply the serialized feature ID.
  override def toIndexKey(sft: SimpleFeatureType, lenient: Boolean): SimpleFeature => Seq[Array[Byte]] =
    toBytesKey(GeoMesaFeatureIndex.idToBytes(sft))

  override def toIndexKeyBytes(sft: SimpleFeatureType, lenient: Boolean): ToIndexKeyBytes = getIdAsBytes

  // Extracts the set of serialized feature IDs matched by the filter.
  override def getIndexValues(sft: SimpleFeatureType,
                              filter: Filter,
                              explain: Explainer): Set[Array[Byte]] = {
    // Multiple sets of IDs in a ID Filter are ORs. ANDs of these call for the intersection to be taken.
    // intersect together all groups of ID Filters, producing a set of IDs
    val identifiers = IdFilterStrategy.intersectIdFilters(filter)
    explain(s"Extracted ID filter: ${identifiers.mkString(", ")}")
    val serializer = GeoMesaFeatureIndex.idToBytes(sft)
    identifiers.map(serializer.apply)
  }

  // Each ID becomes an exact single-row scan.
  override def getRanges(values: Set[Array[Byte]]): Iterator[ScanRange[Array[Byte]]] =
    values.iterator.map(SingleRowRange.apply)

  // Expands each single-row range with every table prefix (e.g. shard/tier prefixes).
  override def getRangeBytes(ranges: Iterator[ScanRange[Array[Byte]]],
                             prefixes: Seq[Array[Byte]],
                             tier: Boolean): Iterator[ByteRange] = {
    if (prefixes.isEmpty) {
      ranges.map {
        case SingleRowRange(row) => SingleRowByteRange(row)
        case r => throw new IllegalArgumentException(s"Unexpected range type $r")
      }
    } else {
      ranges.flatMap {
        case SingleRowRange(row) => prefixes.map(p => SingleRowByteRange(ByteArrays.concat(p, row)))
        case r => throw new IllegalArgumentException(s"Unexpected range type $r")
      }
    }
  }

  // ID ranges are exact matches, so no secondary filtering is ever needed.
  override def useFullFilter(values: Option[Set[Array[Byte]]],
                             config: Option[GeoMesaDataStoreConfig],
                             hints: Hints): Boolean = false

  private def toBytesKey(toBytes: (String) => Array[Byte])(feature: SimpleFeature): Seq[Array[Byte]] =
    Seq(toBytes(feature.getID))

  // Concatenates the key prefixes with the suffix into one row key.
  private def getIdAsBytes(prefix: Seq[Array[Byte]], feature: SimpleFeature, suffix: Array[Byte]): Seq[Array[Byte]] = {
    // note: suffix contains feature ID, so we don't need to add anything else
    val length = prefix.map(_.length).sum + suffix.length
    val result = Array.ofDim[Byte](length)
    var i = 0
    prefix.foreach { p =>
      System.arraycopy(p, 0, result, i, p.length)
      i += p.length
    }
    System.arraycopy(suffix, 0, result, i, suffix.length)
    Seq(result)
  }
}
| jahhulbert-ccri/geomesa | geomesa-index-api/src/main/scala/org/locationtech/geomesa/index/index/id/IdIndexKeySpace.scala | Scala | apache-2.0 | 3,980 |
/**
* Copyright (C) 2018 Pants project contributors (see CONTRIBUTORS.md).
* Licensed under the Apache License, Version 2.0 (see LICENSE).
*/
package org.pantsbuild.zinc.bootstrapper
import org.pantsbuild.zinc.scalautil.ScalaUtils
import sbt.internal.util.ConsoleLogger
/** Command-line entry point: builds the zinc compiler interface (bridge)
  * from the arguments parsed by [[Cli.CliParser]]. */
object Main {
  def main(args: Array[String]): Unit = {
    Cli.CliParser.parse(args, Configuration()) match {
      case None =>
        // Argument parsing failed; the parser has already reported the error.
        System.exit(1)
      case Some(cliArgs) =>
        val scalaInstance = ScalaUtils.scalaInstance(
          cliArgs.scalaCompiler,
          Seq(cliArgs.scalaReflect),
          cliArgs.scalaLibrary)
        // As per https://github.com/pantsbuild/pants/issues/6160, force sbt's
        // log format so zinc can run without $PATH (as needed in remoting).
        System.setProperty("sbt.log.format", "true")
        val logger = ConsoleLogger.apply()
        BootstrapperUtils.compilerInterface(
          cliArgs.outputPath,
          cliArgs.compilerBridgeSource,
          cliArgs.compilerInterface,
          scalaInstance,
          logger)
    }
  }
}
| tdyas/pants | src/scala/org/pantsbuild/zinc/bootstrapper/Main.scala | Scala | apache-2.0 | 1,179 |
/*
* Originally adapted from shapeless-contrib scalaz
* https://github.com/typelevel/shapeless-contrib/blob/v0.4/scalaz/main/scala/traverse.scala
*
*/
package cats.sequence
import shapeless._
import shapeless.ops.hlist._
// Applies the polymorphic function P to every element of the HList L and
// sequences the mapped results into a single value (traverse = map + sequence).
sealed trait Traverser[L <: HList, P] extends Serializable {
  type Out
  def apply(in: L): Out
}
object Traverser {
  // Aux pattern: exposes the dependent Out type as a type parameter.
  type Aux[L <: HList, P, Out0] = Traverser[L, P] { type Out = Out0 }

  // A Traverser is derived from a Mapper (apply P to each element, yielding S)
  // followed by a Sequencer (flip the HList of effects into one effect).
  implicit def mkTraverser[L <: HList, P, S <: HList](
    implicit
      mapper: Mapper.Aux[P, L, S],
      sequencer: Sequencer[S]
  ): Aux[L, P, sequencer.Out] =
    new Traverser[L, P] {
      type Out = sequencer.Out
      def apply(in: L): Out = sequencer(mapper(in))
    }
}
// Function-style API: traverse(hlist)(poly).
trait TraverseFunctions {
  def traverse[L <: HList](in: L)(f: Poly)
                          (implicit traverser: Traverser[L, f.type]): traverser.Out = traverser(in)
}
// Syntax-style API: enriches any HList with a `traverse` method.
// Note: dropped the stray `extends {` early-initializer block — early
// initializers are deprecated in Scala 2.13 and removed in Scala 3; the
// empty block had no effect.
trait TraverseOps {
  implicit class withTraverse[L <: HList](self: L) {
    def traverse(f: Poly)
                (implicit traverser: Traverser[L, f.type]): traverser.Out = traverser(self)
  }
}
| milessabin/kittens | core/src/main/scala/cats/sequence/traverse.scala | Scala | apache-2.0 | 1,045 |
package com.toscaruntime.cli.util
/**
* Copied from http://stackoverflow.com/questions/7539831/scala-draw-table-to-console
*/
object TabulatorUtil {

  /** Renders `table` as an ASCII grid. The first row is treated as a header
    * (separated from the body by an extra rule); an empty table yields "". */
  def format(table: Seq[Seq[Any]]) = table match {
    case Seq() => ""
    case _ =>
      // Column width = widest cell in the column plus two padding spaces.
      val cellWidths = table.map(_.map(cell => if (cell == null) 0 else cell.toString.length))
      val columnWidths = cellWidths.transpose.map(_.max + 2)
      val renderedRows = table.map(formatRow(_, columnWidths))
      formatRows(rowSeparator(columnWidths), renderedRows)
  }

  /** Joins the header row, body rows and horizontal rules into the final string. */
  def formatRows(rowSeparator: String, rows: Seq[String]): String =
    (List(rowSeparator, rows.head, rowSeparator) ++ rows.tail ++ List(rowSeparator)).mkString("\\n")

  /** Renders one row, left-justifying each cell to its column width. */
  def formatRow(row: Seq[Any], colSizes: Seq[Int]) =
    row.zip(colSizes)
      .map { case (item, size) => if (size == 0) "" else ("%-" + size + "s").format(item) }
      .mkString("|", "|", "|")

  /** Horizontal rule matching the column widths, e.g. "+---+----+". */
  def rowSeparator(colSizes: Seq[Int]) =
    colSizes.map("-" * _).mkString("+", "+", "+")
}
| vuminhkh/tosca-runtime | cli/src/main/scala/com/toscaruntime/cli/util/TabulatorUtil.scala | Scala | mit | 1,060 |
package cz.vl.bp
import breeze.linalg.DenseMatrix
import cats.implicits._
import com.fasterxml.jackson.annotation.JsonTypeInfo
object Op {
  // Dense matrix alias used throughout the expression graph.
  type Mat = DenseMatrix[Double]
  // Current scalar value of each trainable parameter.
  type ParamContext = Map[Param, Double]

  // Named scalar parameter of the graph.
  case class Param(name: String)

  // Everything needed for one forward/backward pass: parameter values,
  // placeholder bindings, and the gradient-descent step size.
  case class BpContext(params: ParamContext, inputs: Map[Placeholder, Mat], stepSize: Double)
}
import cz.vl.bp.Op._
@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, include = JsonTypeInfo.As.PROPERTY)
trait Op {
  // Evaluates this node using the parameter/input values from the context.
  def forward(implicit context: BpContext): Mat
  // Back-propagates the gradient `dir` through this node and returns the
  // updated parameter values it produces.
  def backward(dir: Mat)(implicit context: BpContext): ParamContext

  // Operator DSL for building the expression graph.
  def *(p: Param): Op = Scale(this, p)
  def *(that: Op): Op = Dot(this, that)
  def +(that: Op): Op = Plus(this, that)
  def -(that: Op): Op = Minus(this, that)

  // Identity hash (AnyRef.hashCode), cached and excluded from serialization.
  @transient override lazy val hashCode: Int = super.hashCode
}
// Constant matrix node: contributes no parameter gradients.
case class Const(mat: Mat) extends Op {
  def forward(implicit context: BpContext): Mat =
    mat

  def backward(dir: Mat)(implicit context: BpContext): ParamContext =
    Map.empty
}
// Named input node: its value is supplied per-pass via context.inputs.
case class Placeholder(name: String) extends Op {
  def forward(implicit context: BpContext): Mat =
    context.inputs(this)

  def backward(dir: Mat)(implicit context: BpContext): ParamContext =
    Map.empty
}
// Convenience constructor for the loss between a constant target and an expression.
object L2Distance {
  def apply(expected: Mat, actual: Op): FrobeniusNorm = FrobeniusNorm(actual - Const(expected))
}
// Loss node over the wrapped expression.
case class FrobeniusNorm(op: Op) extends Op {
  // Returns a 1x1 matrix holding the sum of squared entries times 2.
  // NOTE(review): despite the name this is 2*||op||_F^2 (no sqrt) — presumably
  // an intentional loss scaling; confirm against the training code.
  def forward(implicit context: BpContext): Mat = {
    val dist = op.forward.data.map(math.pow(_, 2)).sum * 2
    DenseMatrix.fill(1, 1)(dist)
  }

  def backward(dir: Mat)(implicit context: BpContext): ParamContext =
    op.backward(dir)

  // Starts back-propagation from this loss node, seeding the gradient
  // direction with the transposed forward value of the wrapped expression.
  def optimize(implicit context: BpContext): ParamContext = {
    val dir = op.forward.t
    this.backward(dir)
  }
}
// Multiplies the operand by the scalar value of parameter p.
case class Scale(m: Op, p: Param) extends Op {
  def forward(implicit context: BpContext): Mat =
    m.forward * value

  def backward(dir: Mat)(implicit context: BpContext): ParamContext = {
    // Gradient-descent step for p; dir * m.forward must reduce to a 1x1 matrix.
    val diff = context.stepSize * (dir * m.forward)
    assert(diff.size == 1)
    // Gradient w.r.t. the operand scales the incoming direction by the parameter.
    val optOther = m.backward(dir * value)
    val optP = value - diff.data(0)
    optOther + (p -> optP)
  }

  // Current scalar value of p, looked up in the context.
  private def value(implicit context: BpContext) = context.params(p)
}
// Element-wise sum of two sub-expressions.
case class Plus(l: Op, r: Op) extends Op {
  def forward(implicit context: BpContext): Mat =
    l.forward + r.forward

  // The gradient flows unchanged into both operands; their parameter updates
  // are merged with the cats `|+|` combine.
  def backward(dir: Mat)(implicit context: BpContext): ParamContext =
    l.backward(dir) |+| r.backward(dir)
}
// Element-wise difference of two sub-expressions.
case class Minus(l: Op, r: Op) extends Op {
  def forward(implicit context: BpContext): Mat =
    l.forward - r.forward

  def backward(dir: Mat)(implicit context: BpContext): ParamContext = {
    val lOpt = l.backward(dir)
    // The right operand receives the negated gradient direction.
    val rOpt = r.backward(-dir)
    lOpt |+| rOpt
  }
}
// Matrix product of the transposed left operand with the right operand.
case class Dot(l: Op, r: Op) extends Op {
  def forward(implicit context: BpContext): Mat =
    l.forward.t * r.forward

  def backward(dir: Mat)(implicit context: BpContext): ParamContext = {
    // Each operand's gradient is the incoming direction times the other
    // operand's transposed forward value.
    val lOpt = l.backward(dir * r.forward.t)
    val rOpt = r.backward(dir * l.forward.t)
    lOpt |+| rOpt
  }
}
| letalvoj/matrix-flow | src/main/scala/cz/vl/bp/Operations.scala | Scala | mit | 3,069 |
package com.ringcentral.gatling.mongo.action
import com.ringcentral.gatling.mongo.command.MongoCountCommand
import com.ringcentral.gatling.mongo.response.MongoCountResponse
import io.gatling.commons.stats.KO
import io.gatling.commons.util.TimeHelper.nowMillis
import io.gatling.commons.validation._
import io.gatling.core.action.Action
import io.gatling.core.config.GatlingConfiguration
import io.gatling.core.session.{Expression, Session, _}
import io.gatling.core.stats.StatsEngine
import reactivemongo.api.DefaultDB
import reactivemongo.play.json.collection.JSONCollection
//TODO remove global context everywhere
import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Failure, Success}
/**
 * Gatling action that executes a MongoDB `count` command and feeds the result
 * (or the failure) back into the stats engine before passing the session on.
 */
class MongoCountAction(command: MongoCountCommand, database: DefaultDB, val statsEngine: StatsEngine, configuration: GatlingConfiguration, val next: Action) extends MongoAction(database) {

  override def name: String = genName("Mongo count command")

  override def commandName: Expression[String] = command.commandName

  override def executeCommand(commandName: String, session: Session): Validation[Unit] =
    // Resolve all session expressions first; any failure short-circuits the Validation.
    for {
      collectionName <- command.collection(session)
      selectorDocument <- resolveOptionalExpression(command.selector, session)
      hint <- resolveOptionalExpression(command.hint, session)
      // The selector is optional: parse it to a JsObject only when present.
      selector <- selectorDocument match {
        case Some(d) => string2JsObject(d).map(Some.apply)
        case None => NoneSuccess
      }
    } yield {
      // Timestamp taken before the async call so latency includes the round trip.
      val sent = nowMillis
      val collection: JSONCollection = database.collection[JSONCollection](collectionName)
      collection.count(selector, command.limit, command.skip, hint).onComplete {
        case Success(result) => processResult(session, sent, nowMillis, command.checks, MongoCountResponse(result), next, commandName)
        case Failure(err) => executeNext(session, sent, nowMillis, KO, next, commandName, Some(err.getMessage))
      }
    }
}
// IntelliJ Scala plugin type-inference testdata. The /*start*/.../*end*/
// markers delimit the expression whose inferred type is compared against the
// expected type in the trailing comment line of this file. Do not reformat.
class Unapply {
  class B {
    def _1 = "text"
    def _2 = Seq(1, 2, 3)
  }

  class A {
    val isEmpty: Boolean = false
    def get: B = new B
  }

  // Extractor returning Option[B]; B exposes _1/_2, so `case Z(s, l)` is
  // presumably resolved via product-style extraction, binding l: Seq[Int].
  object Z {
    def unapply(s: String): Option[B] = None
  }

  "text" match {
    case Z(s, l) =>
      /*start*/l/*end*/
  }
}
//Seq[Int] | ilinum/intellij-scala | testdata/typeInference/newExtractors/Unapply.scala | Scala | apache-2.0 | 291 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.apollo.stomp
import _root_.org.apache.activemq.apollo.broker._
import Stomp._
import BufferConversions._
import _root_.scala.collection.JavaConversions._
import java.io.{DataOutput, IOException}
import org.fusesource.hawtdispatch.transport._
import _root_.org.fusesource.hawtbuf._
import org.apache.activemq.apollo.util._
import org.apache.activemq.apollo.broker.store.{DirectBuffer, MessageRecord}
import java.lang.ThreadLocal
import java.util.ArrayList
import collection.mutable.{ListBuffer, HashMap}
import org.fusesource.hawtdispatch.util.BufferPools
/**
 * Companion helpers for converting STOMP frames to/from store message records
 * and for decoding a complete frame out of a buffer.
 */
object StompCodec extends Log {

  // Maximum accepted length (bytes) of the frame action line (e.g. "SEND").
  var max_command_length = 20

  /**
   * Serializes a STOMP frame message into a store MessageRecord: action line,
   * headers, blank line, then the body (unless the body lives in a zero-copy
   * direct buffer, in which case only the envelope is written).
   */
  def encode(message: StompFrameMessage):MessageRecord = {
    val frame = message.frame

    val rc = new MessageRecord
    rc.codec = PROTOCOL
    if( frame.content.isInstanceOf[ZeroCopyContent] ) {
      rc.direct_buffer = frame.content.asInstanceOf[ZeroCopyContent].zero_copy_buffer
    }

    // When the body is zero-copy, size the buffer for the envelope only.
    def buffer_size = if (rc.direct_buffer!=null) { frame.size - (rc.direct_buffer.size - 1) } else { frame.size }
    val os = new ByteArrayOutputStream(buffer_size)
    frame.action.writeTo(os)
    os.write(NEWLINE)

    // Write any updated headers first...
    if( !frame.updated_headers.isEmpty ) {
      for( (key, value) <- frame.updated_headers ) {
        key.writeTo(os)
        os.write(COLON)
        value.writeTo(os)
        os.write(NEWLINE)
      }
    }

    // we can optimize a little if the headers and content are in the same buffer..
    if( frame.are_headers_in_content_buffer && frame.contiguous ) {
      // Headers and body are one contiguous slice of the original buffer, so
      // copy that whole region in a single write.
      val offset = frame.headers.head._1.offset;
      val buffer1 = frame.headers.head._1;
      val buffer2 = frame.content.asInstanceOf[BufferContent].content;
      val length = (buffer2.offset-buffer1.offset)+buffer2.length
      os.write( buffer1.data, offset, length)
    } else {
      for( (key, value) <- frame.headers ) {
        key.writeTo(os)
        os.write(COLON)
        value.writeTo(os)
        os.write(NEWLINE)
      }
      os.write(NEWLINE)
      if ( rc.direct_buffer==null ) {
        frame.content.writeTo(os)
      }
    }
    rc.buffer = os.toBuffer
    rc
  }

  /** Reconstructs a STOMP frame message from a stored record. */
  def decode(message: MessageRecord):StompFrameMessage = {
    new StompFrameMessage(decode_frame(message.buffer, message.direct_buffer, false))
  }

  /**
   * Parses one STOMP frame out of `buffer` (action line, header lines, body).
   * With end_check=true the body length is taken from the content-length
   * header, or up to the NUL terminator otherwise; with end_check=false the
   * remainder of the buffer is the body (stored frames are already sized).
   */
  def decode_frame(buffer: Buffer, direct_buffer:DirectBuffer=null, end_check:Boolean=true):StompFrame = {

    // Consumes and returns the next line (up to '\\n'), advancing the buffer.
    def read_line = {
      val pos = buffer.indexOf('\\n'.toByte)
      if( pos<0 ) {
        throw new IOException("expected a new line")
      } else {
        val rc = buffer.slice(0, pos).ascii
        buffer.offset += (pos+1)
        buffer.length -= (pos+1)
        rc
      }
    }

    val action = read_line
    val headers = new HeaderMapBuffer()

    // Parse "name:value" header lines until the blank line before the body.
    var contentLength:AsciiBuffer = null
    var line = read_line
    while( line.length() > 0 ) {
      try {
        val seperatorIndex = line.indexOf(COLON)
        if( seperatorIndex<0 ) {
          throw new IOException("Header line missing seperator.")
        }
        var name = line.slice(0, seperatorIndex)
        var value = line.slice(seperatorIndex + 1, line.length)
        headers.add((name, value))
        // Only the first content-length header is honored.
        if (end_check && contentLength==null && name == CONTENT_LENGTH ) {
          contentLength = value
        }
      } catch {
        case e:Exception=>
          throw new IOException("Unable to parse header line [" + Log.escape(line) + "]")
      }
      line = read_line
    }

    if ( end_check ) {
      // Clamp the buffer to the body: declared content-length when available,
      // otherwise everything up to the NUL frame terminator.
      buffer.length = if (contentLength != null) {
        val length = try {
          contentLength.toString.toInt
        } catch {
          case e: NumberFormatException =>
            throw new IOException("Specified content-length is not a valid integer")
        }
        if( length > buffer.length ) {
          throw new IOException("Frame did not contain enough bytes to satisfy the content-length")
        }
        length
      } else {
        val pos = buffer.indexOf(0.toByte)
        if( pos < 0 ) {
          throw new IOException("Frame is not null terminated")
        }
        pos
      }
    }

    // The body either lives in the parsed buffer or in the zero-copy buffer.
    if( direct_buffer==null ) {
      new StompFrame(action, headers.toList, BufferContent(buffer), true)
    } else {
      new StompFrame(action, headers.toList, ZeroCopyContent(direct_buffer), true)
    }
  }
}
class StompCodec extends AbstractProtocolCodec {
  this.bufferPools = Broker.buffer_pools

  // Parser limits guarding against oversized/malicious frames; -1 disables a limit
  // where the decode code checks for it.
  var max_header_length: Int = 1024 * 10
  var max_headers: Int = 1000
  var max_data_length: Int = 1024 * 1024 * 100
  // trim: strip whitespace around header names/values; trim_cr: strip a single
  // trailing '\r' from lines (used by the decoder when trim is off).
  var trim = true
  var trim_cr = false
  // Writes an outbound value: raw buffers are copied through verbatim,
  // frames are serialized via the frame encoder below.
  protected def encode(command: AnyRef) = command match {
    case buffer:Buffer=> buffer.writeTo(nextWriteBuffer.asInstanceOf[DataOutput])
    case frame:StompFrame=> encode(frame, nextWriteBuffer);
  }
  /**
   * Serializes a frame onto the wire: action line, headers, blank line, body,
   * and the NUL frame terminator (END_OF_FRAME_BUFFER).
   */
  def encode(frame:StompFrame, os:DataOutput) = {
    frame.action.writeTo(os)
    os.write(NEWLINE)

    // Write any updated headers first...
    if( !frame.updated_headers.isEmpty ) {
      for( (key, value) <- frame.updated_headers ) {
        key.writeTo(os)
        os.write(COLON)
        value.writeTo(os)
        os.write(NEWLINE)
      }
    }

    // we can optimize a little if the headers and content are in the same buffer..
    if( frame.are_headers_in_content_buffer && frame.contiguous) {
      // Headers and body share one contiguous source buffer: emit that whole
      // region with a single write, then the terminator.
      val offset = frame.headers.head._1.offset;
      val buffer1 = frame.headers.head._1;
      val buffer2 = frame.content.asInstanceOf[BufferContent].content;
      val length = (buffer2.offset-buffer1.offset)+buffer2.length
      os.write( buffer1.data, offset, length)
      END_OF_FRAME_BUFFER.writeTo(os)
    } else {
      for( (key, value) <- frame.headers ) {
        key.writeTo(os)
        os.write(COLON)
        value.writeTo(os)
        os.write(NEWLINE)
      }
      os.write(NEWLINE)
      frame.content match {
//        case x:ZeroCopyContent=>
//          assert(next_write_direct==null)
//          next_write_direct = x.zero_copy_buffer
        case x:BufferContent=>
          x.content.writeTo(os)
          END_OF_FRAME_BUFFER.writeTo(os)
        case _=>
          END_OF_FRAME_BUFFER.writeTo(os)
      }
    }
  }
  import StompCodec._

  // Decoding state machine entry point: start by reading the action line.
  protected def initialDecodeAction = read_action

  // Reads the frame action (command) line; empty lines are skipped as
  // keep-alive heartbeats. On success, hands off to the header parser.
  private final val read_action: AbstractProtocolCodec.Action = new AbstractProtocolCodec.Action {
    def apply: AnyRef = {
      var line = readUntil(NEWLINE, max_command_length, "The maximum command length was exceeded")
      if (line != null) {
        var action = line.moveTail(-1)  // drop the trailing '\n'
        var contiguous = true
        if (trim) {
          action = action.trim
        } else if( trim_cr && action.length > 0 && action.get(action.length-1)=='\\r'.toByte ) {
          // Dropping the '\r' means the parsed slices are no longer contiguous.
          action.moveTail(-1)
          contiguous = false
        }
        if (action.length > 0) {
          nextDecodeAction = read_headers(action.ascii, contiguous)
          return nextDecodeAction();
        }
      }
      return null
    }
  }
  // Parses "name:value" header lines until the blank line that precedes the
  // body, then transitions to the binary (content-length known) or text
  // (NUL-terminated) body reader.
  private def read_headers(command: AsciiBuffer, c:Boolean): AbstractProtocolCodec.Action = new AbstractProtocolCodec.Action {
    var contentLength:AsciiBuffer = _
    val headers = new ListBuffer[(AsciiBuffer, AsciiBuffer)]()
    var contiguous = c;
    def apply: AnyRef = {
      var line = readUntil(NEWLINE, max_header_length, "The maximum header length was exceeded")
      if (line != null) {
        // Strip off the \\n
        line.moveTail(-1)
        // 1.0 and 1.2 spec trims off the \\r
        if ( (trim || trim_cr) && line.length > 0 && line.get(line.length-1)=='\\r'.toByte ) {
          contiguous = false
          line.moveTail(-1)
        }
        if (line.length > 0) {
          if (max_headers != -1 && headers.size > max_headers) {
            throw new IOException("The maximum number of headers was exceeded")
          }
          try {
            var seperatorIndex: Int = line.indexOf(COLON)
            if (seperatorIndex < 0) {
              throw new IOException("Header line missing seperator [" + Log.escape(line.ascii) + "]")
            }
            var name: Buffer = line.slice(0, seperatorIndex)
            if (trim) {
              name = name.trim
            }
            var value: Buffer = line.slice(seperatorIndex + 1, line.length)
            if (trim) {
              value = value.trim
            }
            var entry = (name.ascii, value.ascii)
            // Only the first content-length header is honored.
            if (contentLength==null && entry._1 == CONTENT_LENGTH) {
              contentLength = entry._2
            }
            headers.add(entry)
          } catch {
            case e: Exception => {
              // NOTE(review): "parser" looks like a typo for "parse" in this message.
              throw new IOException("Unable to parser header line [" + Log.escape(line.ascii) + "]")
            }
          }
        } else {
          // Blank line: headers done, pick the body reader.
          val h = headers.toList
          if (contentLength != null) {
            var length = try {
              contentLength.toString.toInt
            } catch {
              case e: NumberFormatException =>
                throw new IOException("Specified content-length is not a valid integer")
            }
            if (max_data_length != -1 && length > max_data_length) {
              throw new IOException("The maximum data length was exceeded")
            }
            nextDecodeAction = read_binary_body(command, h, length, contiguous)
          } else {
            nextDecodeAction = read_text_body(command, h, contiguous)
          }
          return nextDecodeAction.apply()
        }
      }
      return null
    }
  }
private def read_binary_body(command: AsciiBuffer, headers:HeaderMap, contentLength: Int, contiguous:Boolean): AbstractProtocolCodec.Action = {
return new AbstractProtocolCodec.Action {
def apply: AnyRef = {
var content = readBytes(contentLength + 1)
if (content != null) {
if (content.get(contentLength) != 0) {
throw new IOException("Expected null termintor after " + contentLength + " content bytes")
}
nextDecodeAction = read_action
content.moveTail(-1)
val body = if( content.length() == 0) NilContent else BufferContent(content)
return new StompFrame(command, headers, body, contiguous)
}
else {
return null
}
}
}
}
private def read_text_body(command: AsciiBuffer, headers:HeaderMap, contiguous:Boolean): AbstractProtocolCodec.Action = {
return new AbstractProtocolCodec.Action {
def apply: AnyRef = {
var content: Buffer = readUntil(0.asInstanceOf[Byte])
if (content != null) {
nextDecodeAction = read_action
content.moveTail(-1)
val body = if( content.length() == 0) NilContent else BufferContent(content)
return new StompFrame(command, headers, body, contiguous)
}
else {
return null
}
}
}
}
}
 | chirino/activemq-apollo | apollo-stomp/src/main/scala/org/apache/activemq/apollo/stomp/StompCodec.scala | Scala | apache-2.0 | 11,651 |
package ore.db
import scala.language.implicitConversions
import java.time.OffsetDateTime
/**
  * A value that may not have been initialized yet (e.g. a field that is only
  * assigned once the object has been stored by the database layer).
  */
sealed trait DbInitialized[+A] {
  /** The wrapped value; implementations may error when uninitialized. */
  def value: A
  /** The wrapped value when initialized, `None` otherwise. */
  def unsafeToOption: Option[A]
  // Render the value when present, otherwise a fixed placeholder.
  override def toString: String =
    unsafeToOption.fold("DbInitialized.Uninitialized")(_.toString)
}
/** A possibly-uninitialized database id for an object of type `A`. */
sealed trait ObjId[+A] extends DbInitialized[DbRef[A]] {
  // Two ids are equal when their underlying references are equal, regardless
  // of the entity type parameter.
  override def equals(other: Any): Boolean = other match {
    case that: ObjId[_] => this.value == that.value
    case _              => false
  }
  // Hash derived from the single underlying reference (31-based accumulation
  // over the one field, matching equals above).
  override def hashCode(): Int =
    Seq(value).foldLeft(0)((acc, field) => 31 * acc + field.hashCode())
}
object ObjId {
  /** Lets an `ObjId[A]` be used wherever a raw `DbRef[A]` is expected. */
  implicit def unwrapObjId[A](objId: ObjId[A]): DbRef[A] = objId.value
  /** Placeholder id for objects not yet persisted; accessing it is an error. */
  class UnsafeUninitialized[A] extends ObjId[A] {
    override def value: Nothing = sys.error("Tried to access uninitialized ObjId. This should be impossible")
    override def unsafeToOption: Option[Nothing] = None
  }
  // Concrete id backed by an actual database reference.
  private class RealObjId[A](val value: DbRef[A]) extends ObjId[A] {
    override def unsafeToOption: Option[DbRef[A]] = Some(value)
  }
  /** Wraps a known database reference. */
  def apply[A](id: DbRef[A]): ObjId[A] = new RealObjId(id)
  /** Wraps an optional reference, falling back to the uninitialized placeholder. */
  def unsafeFromOption[A](option: Option[DbRef[A]]): ObjId[A] =
    option.fold[ObjId[A]](new UnsafeUninitialized)(ObjId(_))
}
/** A possibly-uninitialized timestamp (e.g. a database-assigned creation time). */
sealed trait ObjOffsetDateTime extends DbInitialized[OffsetDateTime] {
  // Equality is based solely on the wrapped timestamp.
  override def equals(other: Any): Boolean = other match {
    case that: ObjOffsetDateTime => this.value == that.value
    case _                       => false
  }
  // Hash derived from the single wrapped timestamp (31-based accumulation,
  // matching equals above).
  override def hashCode(): Int =
    Seq(value).foldLeft(0)((acc, field) => 31 * acc + field.hashCode())
}
object ObjOffsetDateTime {
  /** Lets an `ObjOffsetDateTime` be used wherever a raw `OffsetDateTime` is expected. */
  implicit def unwrapObjTimestamp(objTimestamp: ObjOffsetDateTime): OffsetDateTime = objTimestamp.value
  /** Placeholder for timestamps not yet assigned; accessing it is an error. */
  object UnsafeUninitialized extends ObjOffsetDateTime {
    override def value: Nothing = sys.error("Tried to access uninitialized ObjTimestamp. This should be impossible")
    override def unsafeToOption: Option[Nothing] = None
  }
  // Concrete wrapper backed by an actual timestamp.
  private class RealObjOffsetDateTime(val value: OffsetDateTime) extends ObjOffsetDateTime {
    override def unsafeToOption: Option[OffsetDateTime] = Some(value)
  }
  /** Wraps a known timestamp. */
  def apply(timestamp: OffsetDateTime): ObjOffsetDateTime = new RealObjOffsetDateTime(timestamp)
  /** Wraps an optional timestamp, falling back to the uninitialized placeholder. */
  def unsafeFromOption(option: Option[OffsetDateTime]): ObjOffsetDateTime =
    option.fold[ObjOffsetDateTime](UnsafeUninitialized)(apply)
}
| SpongePowered/Ore | db/src/main/scala/ore/db/dbObjects.scala | Scala | mit | 2,547 |
package com.insweat.hssd.lib.essence.thypes
import com.insweat.hssd.lib.essence.TraitThypeLike
import com.insweat.hssd.lib.essence.Thype
import com.insweat.hssd.lib.essence.SchemaLike
import com.insweat.hssd.lib.essence.ComplexThypeLike
import com.insweat.hssd.lib.essence.Element
import com.insweat.hssd.lib.essence.SimpleThypeLike
import scala.collection.immutable.HashMap
/**
 * A complex thype that is also a trait-like thype; its caption defaults to the
 * thype's name when no explicit caption is provided.
 */
class TraitThype(
    sch: SchemaLike,
    override val name: String,
    _caption: Option[String],
    override val description: String,
    override val attributes: HashMap[String, String],
    elems: Element*)
  extends ComplexThype(sch, name, description, attributes, elems: _*)
  with TraitThypeLike {
    // Fall back to the thype's name when no explicit caption was given.
    override val caption = _caption.getOrElse(name)
}
| insweat/hssd | com.insweat.hssd.lib/src/com/insweat/hssd/lib/essence/thypes/TraitThype.scala | Scala | lgpl-3.0 | 792 |
/*
* Copyright (C) 2014 - 2016 Softwaremill <http://softwaremill.com>
* Copyright (C) 2016 - 2019 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.kafka.scaladsl
import akka.actor.ActorSystem
import akka.event.{Logging, LoggingAdapter}
import akka.kafka.tests.scaladsl.LogCapturing
import akka.kafka.{ConnectionCheckerSettings, ConsumerSettings, KafkaConnectionFailed, KafkaPorts, Subscriptions}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, Sink}
import akka.stream.testkit.scaladsl.TestSink
import kafka.server.KafkaConfig
import net.manub.embeddedkafka.{EmbeddedKafka, EmbeddedKafkaConfig}
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.common.serialization.StringDeserializer
import org.scalatest.{Matchers, WordSpecLike}
import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContextExecutor}
// Integration test: verifies that the consumer's connection checker fails the
// stream (and completes `control.isShutdown`) when the Kafka broker is
// unreachable from the start, or goes away mid-stream.
class ConnectionCheckerSpec extends WordSpecLike with Matchers with LogCapturing {
  implicit val system: ActorSystem = ActorSystem("KafkaConnectionCheckerSpec")
  implicit val ec: ExecutionContextExecutor = system.dispatcher
  implicit val log: LoggingAdapter = Logging(system, this.getClass)
  implicit val mat: ActorMaterializer = ActorMaterializer()
  // Embedded broker configuration; a single offsets partition and a short
  // rebalance delay keep the test fast.
  implicit val kafkaConfig: EmbeddedKafkaConfig = EmbeddedKafkaConfig(
    kafkaPort = KafkaPorts.KafkaConnectionCheckerTest,
    customBrokerProperties = Map(
      KafkaConfig.OffsetsTopicPartitionsProp -> "1",
      KafkaConfig.AutoCreateTopicsEnableProp -> "true",
      KafkaConfig.GroupInitialRebalanceDelayMsProp -> "200"
    )
  )
  // Connection checker: 1 retry, 100ms base interval, backoff factor 2.
  val retryInterval: FiniteDuration = 100.millis
  val connectionCheckerConfig: ConnectionCheckerSettings = ConnectionCheckerSettings(1, retryInterval, 2d)
  val settings: ConsumerSettings[String, String] =
    ConsumerSettings(system, new StringDeserializer, new StringDeserializer)
      .withBootstrapServers(s"localhost:${KafkaPorts.KafkaConnectionCheckerTest}")
      .withConnectionChecker(connectionCheckerConfig)
      .withGroupId("KafkaConnectionCheckerSpec")
      .withMetadataRequestTimeout(1.seconds)
      .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
  // Upper bound on how long we wait for the checker to notice the broker is gone.
  val failingDetectionTime: FiniteDuration = 10.seconds
  val topic = "superAwesomeTopic"
  "PlainSource" must {
    "fail stream and control.isShutdown when kafka down" in {
      // No broker is started here: the source must fail by itself.
      val (control, futDone) =
        Consumer.plainSource(settings, Subscriptions.topics(topic)).toMat(Sink.ignore)(Keep.both).run
      Await.ready(control.isShutdown.zip(futDone), failingDetectionTime)
    }
    "fail stream and control.isShutdown when kafka down and not recover during max retries exceeded" in {
      val embeddedK = EmbeddedKafka.start()
      val (control, probe) =
        Consumer.plainSource(settings, Subscriptions.topics(topic)).toMat(TestSink.probe)(Keep.both).run
      // First prove the stream works while the broker is up...
      val msg = "hello"
      EmbeddedKafka.publishStringMessageToKafka(topic, msg)
      probe.ensureSubscription().requestNext().value() shouldBe msg
      // ...then stop the broker and expect the checker to shut the stream down.
      embeddedK.stop(true)
      Await.ready(control.isShutdown, failingDetectionTime)
      probe.request(1).expectError().getClass shouldBe classOf[KafkaConnectionFailed]
    }
  }
}
| softwaremill/reactive-kafka | tests/src/test/scala/akka/kafka/scaladsl/ConnectionCheckerSpec.scala | Scala | apache-2.0 | 3,195 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.batch.sql
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.typeutils.RowTypeInfo
import org.apache.flink.table.api.internal.TableEnvironmentInternal
import org.apache.flink.table.api.{DataTypes, TableSchema, Types, ValidationException}
import org.apache.flink.table.planner.expressions.utils.Func1
import org.apache.flink.table.planner.utils._
import org.apache.flink.table.runtime.types.TypeInfoDataTypeConverter
import org.apache.flink.types.Row
import org.junit.{Before, Test}
// Plan tests for batch queries over legacy TableSource implementations:
// bounded-ness validation, projection push-down (flat and nested), filter
// push-down (full, partial, none), and partition pruning.
class LegacyTableSourceTest extends TableTestBase {
  private val util = batchTestUtil()
  // Shared three-column schema (a INT, b BIGINT, c STRING) used by most tests.
  private val tableSchema = TableSchema.builder().fields(
    Array("a", "b", "c"),
    Array(DataTypes.INT(), DataTypes.BIGINT(), DataTypes.STRING())).build()
  @Before
  def setup(): Unit = {
    // Registers the three sources the tests rely on: a projectable source, a
    // filterable source, and a partitioned source.
    util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSourceInternal(
      "ProjectableTable", new TestLegacyProjectableTableSource(
        true,
        tableSchema,
        new RowTypeInfo(
          tableSchema.getFieldDataTypes.map(TypeInfoDataTypeConverter.fromDataTypeToTypeInfo),
          tableSchema.getFieldNames),
        Seq.empty[Row])
    )
    TestLegacyFilterableTableSource.createTemporaryTable(
      util.tableEnv,
      TestLegacyFilterableTableSource.defaultSchema,
      "FilterableTable",
      isBounded = true)
    TestPartitionableSourceFactory.createTemporaryTable(util.tableEnv, "PartitionableTable", true)
  }
  @Test
  def testBoundedStreamTableSource(): Unit = {
    TestTableSource.createTemporaryTable(util.tableEnv, isBounded = true, tableSchema, "MyTable")
    util.verifyExecPlan("SELECT * FROM MyTable")
  }
  @Test
  def testUnboundedStreamTableSource(): Unit = {
    // Unbounded sources must be rejected in batch mode.
    TestTableSource.createTemporaryTable(util.tableEnv, isBounded = false, tableSchema, "MyTable")
    thrown.expect(classOf[ValidationException])
    thrown.expectMessage("Cannot query on an unbounded source in batch mode")
    util.verifyExecPlan("SELECT * FROM MyTable")
  }
  @Test
  def testSimpleProject(): Unit = {
    util.verifyExecPlan("SELECT a, c FROM ProjectableTable")
  }
  @Test
  def testProjectWithoutInputRef(): Unit = {
    util.verifyExecPlan("SELECT COUNT(1) FROM ProjectableTable")
  }
  @Test
  def testNestedProject(): Unit = {
    // Builds a source with two levels of row nesting to exercise nested
    // projection push-down.
    val nested1 = new RowTypeInfo(
      Array(Types.STRING, Types.INT).asInstanceOf[Array[TypeInformation[_]]],
      Array("name", "value")
    )
    val nested2 = new RowTypeInfo(
      Array(Types.INT, Types.BOOLEAN).asInstanceOf[Array[TypeInformation[_]]],
      Array("num", "flag")
    )
    val deepNested = new RowTypeInfo(
      Array(nested1, nested2).asInstanceOf[Array[TypeInformation[_]]],
      Array("nested1", "nested2")
    )
    val tableSchema = new TableSchema(
      Array("id", "deepNested", "nested", "name"),
      Array(Types.INT, deepNested, nested1, Types.STRING))
    val returnType = new RowTypeInfo(
      Array(Types.INT, deepNested, nested1, Types.STRING).asInstanceOf[Array[TypeInformation[_]]],
      Array("id", "deepNested", "nested", "name"))
    util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSourceInternal(
      "T",
      new TestNestedProjectableTableSource(true, tableSchema, returnType, Seq()))
    val sqlQuery =
      """
        |SELECT id,
        |    deepNested.nested1.name AS nestedName,
        |    nested.`value` AS nestedValue,
        |    deepNested.nested2.flag AS nestedFlag,
        |    deepNested.nested2.num AS nestedNum
        |FROM T
      """.stripMargin
    util.verifyExecPlan(sqlQuery)
  }
  @Test
  def testFilterCanPushDown(): Unit = {
    util.verifyExecPlan("SELECT * FROM FilterableTable WHERE amount > 2")
  }
  @Test
  def testFilterCannotPushDown(): Unit = {
    // TestFilterableTableSource only accept predicates with `amount`
    util.verifyExecPlan("SELECT * FROM FilterableTable WHERE price > 10")
  }
  @Test
  def testFilterPartialPushDown(): Unit = {
    // Only the `amount` predicate can be pushed down; `price` stays in the plan.
    util.verifyExecPlan("SELECT * FROM FilterableTable WHERE amount > 2 AND price > 10")
  }
  @Test
  def testFilterFullyPushDown(): Unit = {
    util.verifyExecPlan("SELECT * FROM FilterableTable WHERE amount > 2 AND amount < 10")
  }
  @Test
  def testFilterCannotPushDown2(): Unit = {
    // Disjunctions mixing pushable and non-pushable fields cannot be pushed down.
    util.verifyExecPlan("SELECT * FROM FilterableTable WHERE amount > 2 OR price > 10")
  }
  @Test
  def testFilterCannotPushDown3(): Unit = {
    util.verifyExecPlan("SELECT * FROM FilterableTable WHERE amount > 2 OR amount < 10")
  }
  @Test
  def testFilterPushDownUnconvertedExpression(): Unit = {
    val sqlQuery =
      """
        |SELECT * FROM FilterableTable WHERE
        |    amount > 2 AND id < 100 AND CAST(amount AS BIGINT) > 10
      """.stripMargin
    util.verifyExecPlan(sqlQuery)
  }
  @Test
  def testFilterPushDownWithUdf(): Unit = {
    // UDF calls are not pushable; the rest of the conjunction still is.
    util.addFunction("myUdf", Func1)
    util.verifyExecPlan("SELECT * FROM FilterableTable WHERE amount > 2 AND myUdf(amount) < 32")
  }
  @Test
  def testPartitionTableSource(): Unit = {
    util.verifyExecPlan(
      "SELECT * FROM PartitionableTable WHERE part2 > 1 and id > 2 AND part1 = 'A' ")
  }
  @Test
  def testPartitionTableSourceWithUdf(): Unit = {
    util.addFunction("MyUdf", Func1)
    util.verifyExecPlan("SELECT * FROM PartitionableTable WHERE id > 2 AND MyUdf(part2) < 3")
  }
  @Test
  def testTimeLiteralExpressionPushDown(): Unit = {
    // Exercises push-down of DATE/TIME/TIMESTAMP literal comparisons.
    val schema = TableSchema.builder()
      .field("id", DataTypes.INT)
      .field("dv", DataTypes.DATE)
      .field("tv", DataTypes.TIME)
      .field("tsv", DataTypes.TIMESTAMP(3))
      .build()
    val row = new Row(4)
    row.setField(0, 1)
    row.setField(1, DateTimeTestUtil.localDate("2017-01-23"))
    row.setField(2, DateTimeTestUtil.localTime("14:23:02"))
    row.setField(3, DateTimeTestUtil.localDateTime("2017-01-24 12:45:01.234"))
    TestLegacyFilterableTableSource.createTemporaryTable(
      util.tableEnv,
      schema,
      "FilterableTable1",
      isBounded = true,
      List(row),
      List("dv", "tv", "tsv"))
    val sqlQuery =
      s"""
         |SELECT id FROM FilterableTable1 WHERE
         |  tv > TIME '14:25:02' AND
         |  dv > DATE '2017-02-03' AND
         |  tsv > TIMESTAMP '2017-02-03 14:25:02.000'
      """.stripMargin
    util.verifyExecPlan(sqlQuery)
  }
}
| aljoscha/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/batch/sql/LegacyTableSourceTest.scala | Scala | apache-2.0 | 7,125 |
package vault
import scala.language.experimental.macros
import scalaz._, Scalaz._
/**
 * A row decoder: given a starting column index and a row, produces the next
 * column index together with the decoded value (None for SQL NULL).
 */
case class FromDb[+A](private val run: (Int, Row) => DbValue[(Int, Option[A])]) {
  /** Transforms the decoded value with a pure function. */
  def map[B](f: A => B): FromDb[B] =
    flatMap(a => FromDb.value(f(a)))
  /** Sequences a dependent decoder after this one; NULLs short-circuit. */
  def flatMap[B](f: A => FromDb[B]): FromDb[B] =
    FromDb { (index, row) =>
      run(index, row).fold(DbValue.fail, {
        case (next, None)    => DbValue.ok((next, none))
        case (next, Some(a)) => f(a).run(next, row)
      })
    }
  /** Runs the decoder against a row starting at column 1; NULL becomes a dbnull error. */
  def perform(r: Row): DbValue[A] =
    run(1, r).fold(DbValue.fail, {
      case (n, None)    => DbValue.dbnull[A](n - 1)
      case (_, Some(a)) => DbValue.ok(a)
    })
}
object FromDb extends GeneratedFromDb {
  /** Summons the implicit FromDb instance for A. */
  def of[A: FromDb] =
    implicitly[FromDb[A]]
  /** Decodes an A from the row, starting at column 1. */
  def perform[A: FromDb](r: Row): DbValue[A] =
    of[A].perform(r)
  // Runs A's decoder at column n, yielding the next column index and the
  // possibly-NULL decoded value.
  private def run[A: FromDb](n: Int, r: Row): DbValue[(Int, Option[A])] =
    of[A].run(n, r)
  /** A decoder that consumes no columns and always yields `a`. */
  def value[A](a: A): FromDb[A] =
    FromDb((n, _) => DbValue.ok[(Int, Option[A])]((n, Some(a))))
  // Lifts a single-column read: advances the column index by one.
  private def fromDb[A](run: (Int, Row) => DbValue[Option[A]]) =
    FromDb((n, r) => run(n, r).map(v => (n + 1, v)))
  // Single-column read expressed over the row's cell at the current index.
  private def fromDbCell[A](run: Cell => DbValue[Option[A]]) =
    fromDb((n, r) => run(r.toCell(n)))
  // A SQL NULL decodes to None instead of failing the whole decode.
  implicit def OptionFromDb[A: FromDb]: FromDb[Option[A]] =
    FromDb((n, r) => run[A](n, r) map {
      case (nn, None) => (nn, Some(None))
      case (nn, Some(a)) => (nn, Some(Some(a)))
    })
  // Primitive single-cell decoders.
  implicit def ByteFromDb: FromDb[Byte] =
    fromDbCell(_.byte)
  implicit def ShortFromDb: FromDb[Short] =
    fromDbCell(_.short)
  implicit def IntFromDb: FromDb[Int] =
    fromDbCell(_.int)
  implicit def LongFromDb: FromDb[Long] =
    fromDbCell(_.long)
  implicit def FloatFromDb: FromDb[Float] =
    fromDbCell(_.float)
  implicit def DoubleFromDb: FromDb[Double] =
    fromDbCell(_.double)
  implicit def StringFromDb: FromDb[String] =
    fromDbCell(_.string)
  implicit def BooleanFromDb: FromDb[Boolean] =
    fromDbCell(_.boolean)
  implicit def BigDecimalFromDb: FromDb[BigDecimal] =
    fromDbCell(_.bigdecimal)
  implicit def DateFromDb: FromDb[java.sql.Date] =
    fromDbCell(_.date)
  implicit def TimeFromDb: FromDb[java.sql.Time] =
    fromDbCell(_.time)
  implicit def TimestampFromDb: FromDb[java.sql.Timestamp] =
    fromDbCell(_.timestamp)
  // Monad instance so decoders compose with scalaz combinators.
  implicit def FromDbMonad: Monad[FromDb] = new Monad[FromDb] {
    def point[A](a: => A) = value(a)
    def bind[A, B](m: FromDb[A])(f: A => FromDb[B]) = m flatMap f
  }
  import shapeless._
  // Derives a FromDb for a product type (case class) from its fields.
  def derive[A](implicit ev: ProductTypeClass[FromDb]): FromDb[A] =
    macro GenericMacros.deriveProductInstance[FromDb, A]
  object auto {
    // Implicit (automatic) variant of `derive`.
    implicit def AutoFromDb[A](implicit ev: ProductTypeClass[FromDb]): FromDb[A] =
      macro GenericMacros.deriveProductInstance[FromDb, A]
  }
  // Shapeless type-class machinery: decodes products field by field.
  implicit def FromDbTypeClass: ProductTypeClass[FromDb] =
    new ProductTypeClass[FromDb] {
      def product[H, T <: HList](h: FromDb[H], t: FromDb[T]): FromDb[H :: T] =
        (h |@| t)(_ :: _)
      def emptyProduct: FromDb[HNil] =
        HNil.point[FromDb]
      def project[F, G](instance: => FromDb[G], to: F => G, from: G => F): FromDb[F] =
        instance.map(from)
    }
}
| markhibberd/vault | src/main/scala/vault/FromDb.scala | Scala | bsd-3-clause | 3,135 |
/*
* Copyright (c) 2017 Magomed Abdurakhmanov, Hypertino
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*
*/
package com.hypertino.facade.filter.model
import com.hypertino.binders.value.Obj
import com.hypertino.facade.filter.parser.{ExpressionEvaluator, ExpressionEvaluatorContext}
import com.hypertino.facade.metrics.MetricKeys
import com.hypertino.facade.model._
import com.hypertino.facade.raml.RamlAnnotation
import com.hypertino.hyperbus.model.{DynamicRequest, DynamicResponse}
import monix.eval.Task
import monix.execution.Scheduler
import scala.util.{Failure, Success, Try}
/**
 * Wraps a request filter so it only runs when the annotation's `if` predicate
 * evaluates to true; without a predicate the filter always runs.
 */
case class ConditionalRequestFilterProxy(annotation: RamlAnnotation, filter: RequestFilter,
                                         protected val expressionEvaluator: ExpressionEvaluator) extends RequestFilter {
  // Metric key exists only when the annotation actually carries a predicate.
  val timer = annotation.predicate.map(p ⇒ MetricKeys.specificFilter("if-/" + p.source))
  override def apply(requestContext: RequestContext)
                    (implicit scheduler: Scheduler): Task[RequestContext] = {
    annotation.predicate.fold(filter.apply(requestContext)) { p ⇒
      Try(filter.evaluatePredicate(ExpressionEvaluatorContext(requestContext, Obj.empty), p)) match {
        case Success(passes) ⇒
          // A false predicate skips the wrapped filter and passes the context through.
          if (passes) filter.apply(requestContext) else Task.now(requestContext)
        case Failure(ex) ⇒
          Task.raiseError(ex)
      }
    }
  }
}
/**
 * Wraps a response filter so it only runs when the annotation's `if` predicate
 * evaluates to true; without a predicate the filter always runs.
 */
case class ConditionalResponseFilterProxy(annotation: RamlAnnotation, filter: ResponseFilter,
                                          protected val expressionEvaluator: ExpressionEvaluator) extends ResponseFilter {
  // Metric key exists only when the annotation actually carries a predicate.
  val timer = annotation.predicate.map(p ⇒ MetricKeys.specificFilter("if-/" + p.source))
  override def apply(requestContext: RequestContext, response: DynamicResponse)
                    (implicit scheduler: Scheduler): Task[DynamicResponse] = {
    annotation.predicate.fold(filter.apply(requestContext, response)) { p ⇒
      Try(filter.evaluatePredicate(ExpressionEvaluatorContext(requestContext, Obj.empty), p)) match {
        case Success(passes) ⇒
          // A false predicate skips the wrapped filter and passes the response through.
          if (passes) filter.apply(requestContext, response) else Task.now(response)
        case Failure(ex) ⇒
          Task.raiseError(ex)
      }
    }
  }
}
/**
 * Wraps an event filter so it only runs when the annotation's `if` predicate
 * evaluates to true; without a predicate the filter always runs.
 */
case class ConditionalEventFilterProxy(annotation: RamlAnnotation, filter: EventFilter,
                                       protected val expressionEvaluator: ExpressionEvaluator) extends EventFilter {
  // Metric key exists only when the annotation actually carries a predicate.
  val timer = annotation.predicate.map(p ⇒ MetricKeys.specificFilter("if-/" + p.source))
  override def apply(requestContext: RequestContext, event: DynamicRequest)
                    (implicit scheduler: Scheduler): Task[DynamicRequest] = {
    annotation.predicate.fold(filter.apply(requestContext, event)) { p ⇒
      Try(filter.evaluatePredicate(ExpressionEvaluatorContext(requestContext, Obj.empty), p)) match {
        case Success(passes) ⇒
          // A false predicate skips the wrapped filter and passes the event through.
          if (passes) filter.apply(requestContext, event) else Task.now(event)
        case Failure(ex) ⇒
          Task.raiseError(ex)
      }
    }
  }
}
| hypertino/hyperfacade | src/main/scala/com/hypertino/facade/filter/model/ConditionalFilterProxy.scala | Scala | mpl-2.0 | 3,459 |
package scalapb.compiler
import scalapb.compiler.FunctionalPrinter.PrinterEndo
/** Helper for ascribing a lambda to the [[FunctionalPrinter.PrinterEndo]] type. */
object PrinterEndo {
  def apply(endo: FunctionalPrinter.PrinterEndo): FunctionalPrinter.PrinterEndo = endo
}

object FunctionalPrinter {
  /** A printer transformation: consumes a printer state and yields the next one. */
  type PrinterEndo = FunctionalPrinter => FunctionalPrinter
}

/**
 * Immutable accumulator of output lines with an indentation level.
 *
 * Every operation returns a new printer; `content` holds the rendered lines
 * and `indentLevel` the current nesting depth (two spaces per level).
 */
case class FunctionalPrinter(content: Vector[String] = Vector.empty, indentLevel: Int = 0) {
  val INDENT_SIZE = 2

  /** Increases the indent level by one. */
  def indent: FunctionalPrinter = indent(1)

  /** Increases the indent level by `n`. */
  def indent(n: Int): FunctionalPrinter = copy(indentLevel = indentLevel + n)

  /** Decreases the indent level by one. */
  def outdent: FunctionalPrinter = outdent(1)

  /** Decreases the indent level by `n`; outdenting below zero is an error. */
  def outdent(n: Int): FunctionalPrinter = {
    assert(indentLevel >= n)
    copy(indentLevel = indentLevel - n)
  }

  /** Appends the given strings as lines, prefixing each with the current indent. */
  def add(s: String*): FunctionalPrinter = {
    val prefix = " " * (indentLevel * INDENT_SIZE)
    val expanded = s.flatMap(_.split("\\n", -1))
    copy(content = content ++ expanded.map(prefix + _))
  }

  /** Appends a sequence of lines (non-vararg variant of [[add]]). */
  def seq(s: Seq[String]): FunctionalPrinter = add(s: _*)

  /** Appends the given lines one indent level deeper. */
  def addIndented(s: String*): FunctionalPrinter = indent.seq(s).outdent

  /** Applies `f` one indent level deeper, then restores the indent. */
  def indented(f: FunctionalPrinter.PrinterEndo): FunctionalPrinter = f(indent).outdent

  /** Appends an empty line. */
  def newline: FunctionalPrinter = add("")

  /** Strips the margin from `s` and appends the resulting lines. */
  def addStringMargin(s: String): FunctionalPrinter =
    add(s.stripMargin)

  /** Appends the lines, putting `delimiter` after every line except the last. */
  def addWithDelimiter(delimiter: String)(s: Seq[String]): FunctionalPrinter = {
    val last = s.length - 1
    add(s.zipWithIndex.map { case (line, i) =>
      if (i < last) line + delimiter else line
    }: _*)
  }

  /** Appends all groups, with `delimiter` after the last line of every group but the last. */
  def addGroupsWithDelimiter(delimiter: String)(groups: Seq[Seq[String]]): FunctionalPrinter = {
    val lastGroup = groups.length - 1
    val lines = groups.zipWithIndex.flatMap { case (group, groupIndex) =>
      val lastLine = group.length - 1
      group.zipWithIndex.map { case (line, lineIndex) =>
        if (groupIndex < lastGroup && lineIndex == lastLine) line + delimiter else line
      }
    }
    add(lines: _*)
  }

  /** Threads this printer through the given transformations, left to right. */
  def call(f: FunctionalPrinter.PrinterEndo*): FunctionalPrinter =
    f.foldLeft(this)((printer, endo) => endo(printer))

  /** Applies `func` only when `cond` holds; otherwise returns this printer unchanged. */
  def when(cond: => Boolean)(func: FunctionalPrinter => FunctionalPrinter): FunctionalPrinter =
    if (cond) func(this) else this

  /** Folds the printer over `objects`, applying `f` for each element. */
  def print[M](
      objects: Iterable[M]
  )(f: (FunctionalPrinter, M) => FunctionalPrinter): FunctionalPrinter =
    objects.foldLeft(this)(f)

  /** Renders the accumulated lines as a single string. */
  def result() =
    content.mkString("\\n")

  override def toString = s"FunctionalPrinter(lines=${content.length}, indentLevel=$indentLevel)"
}
| trueaccord/ScalaPB | compiler-plugin/src/main/scala/scalapb/compiler/FunctionalPrinter.scala | Scala | apache-2.0 | 2,542 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.scala.dsl
import builder.RouteBuilder
import org.junit.Test
/**
* Test case for Splitter
*/
class SplitterTokenizeTest extends ScalaTestSupport {
  @Test
  def testSplitterTokenize() {
    // The comma-separated body must be split into one message per token.
    val mock = getMockEndpoint("mock:b")
    mock.expectedBodiesReceived("Claus", "James", "Willem")
    val data: String = "Claus,James,Willem"
    template.sendBody("direct:b", data)
    assertMockEndpointsSatisfied()
  }
  val builder =
    //START SNIPPET: e1
    new RouteBuilder {
      "direct:b" ==> {
        // Split the incoming body on commas and send every token to both endpoints.
        split(tokenize(",")) {
          to("log:b")
          to("mock:b")
        }
      }
    }
    //END SNIPPET: e1
}
| nikvaessen/camel | components/camel-scala/src/test/scala/org/apache/camel/scala/dsl/SplitterTokenizeTest.scala | Scala | apache-2.0 | 1,465 |
package com.sksamuel.elastic4s.requests.searches.sort
import com.sksamuel.elastic4s.requests.searches.queries.Query
import com.sksamuel.elastic4s.ext.OptionImplicits._
/**
 * Sort definition over a single document field, with the usual Elasticsearch
 * options (missing values, unmapped types, sort mode, nested context, order).
 * Every setter returns a new immutable copy.
 */
case class FieldSort(field: String,
                     missing: Option[Any] = None,
                     unmappedType: Option[String] = None,
                     @deprecated("use nested", "7.8.2")
                     nestedFilter: Option[Query] = None,
                     @deprecated("use nested", "7.8.2")
                     nestedPath: Option[String] = None,
                     sortMode: Option[SortMode] = None,
                     order: SortOrder = SortOrder.Asc,
                     numericType: Option[String] = None,
                     nested: Option[NestedSort] = None) extends Sort {

  /** Sets the value used for documents that are missing the sort field. */
  def missing(missing: AnyRef): FieldSort = copy(missing = Some(missing))

  /** Sets the type to assume for indexes where the field is not mapped. */
  def unmappedType(`type`: String): FieldSort = copy(unmappedType = Some(`type`))

  // String-based variants parse the mode name and delegate.
  def mode(mode: String): FieldSort = sortMode(mode)
  def mode(mode: SortMode): FieldSort = sortMode(mode)

  def sortMode(mode: String): FieldSort = sortMode(SortMode.valueOf(mode.toUpperCase))
  def sortMode(mode: SortMode): FieldSort = copy(sortMode = Some(mode))

  @deprecated("use nested", "7.8.2")
  def nestedPath(path: String): FieldSort = copy(nestedPath = Some(path), nested = None)

  @deprecated("use nested", "7.8.2")
  def nestedFilter(query: Query): FieldSort = copy(nestedFilter = Some(query), nested = None)

  // Setting the new-style nested sort clears the two deprecated nested fields.
  def nested(nested: NestedSort): FieldSort = copy(nested = Some(nested), nestedPath = None, nestedFilter = None)

  def numericType(numericType: String): FieldSort = copy(numericType = Some(numericType))

  def order(order: SortOrder): FieldSort = copy(order = order)
  def sortOrder(order: SortOrder): FieldSort = copy(order = order)

  def desc(): FieldSort = order(SortOrder.Desc)
  def asc(): FieldSort = order(SortOrder.Asc)
}
| sksamuel/elastic4s | elastic4s-domain/src/main/scala/com/sksamuel/elastic4s/requests/searches/sort/FieldSort.scala | Scala | apache-2.0 | 1,945 |
// Compiler test fixture (tests/pending/neg): base trait providing a public foo.
trait A {
  def foo = 4
}
object B extends A {
private[this] def foo = 0
}
 | lampepfl/dotty | tests/pending/neg/i7749.scala | Scala | apache-2.0 | 80 |
package org.vitrivr.adampro.shared.cache
import org.apache.spark.sql.DataFrame
import org.vitrivr.adampro.data.entity.Entity
import org.vitrivr.adampro.data.entity.Entity.EntityName
import org.vitrivr.adampro.data.index.Index
import org.vitrivr.adampro.data.index.Index.IndexName
import org.vitrivr.adampro.process.SharedComponentContext
import org.vitrivr.adampro.shared.catalog.LogManager
import org.vitrivr.adampro.utils.Logging
import scala.util.Try
/**
* ADAMpro
*
* Ivan Giangreco
* July 2017
*/
class CacheManager()(@transient implicit val ac: SharedComponentContext) extends Serializable with Logging {
  // Separate LRU caches for entities, indexes and query results; sizes and
  // expiration times come from the ADAMpro configuration.
  private val entityLRUCache = new LRUCache[EntityName, Entity](ac.config.maximumCacheSizeEntity, ac.config.expireAfterAccessEntity)
  private val indexLRUCache = new LRUCache[IndexName, Index](ac.config.maximumCacheSizeIndex, ac.config.expireAfterAccessIndex)
  private val queryLRUCache = new LRUCache[String, DataFrame](ac.config.maximumCacheSizeQueryResults, ac.config.expireAfterAccessQueryResults)
  // entity
  /** Returns true if an entity with the given name is currently cached. */
  def containsEntity(entityname: EntityName): Boolean = entityLRUCache.contains(entityname)
  /** Caches the given entity under its name. */
  def put(entityname: EntityName, entity: Entity): Unit = entityLRUCache.put(entityname, entity)
  /** Looks up an entity by name; Failure when the entry is unavailable. */
  def getEntity(entityname: EntityName): Try[Entity] = entityLRUCache.get(entityname)
  /** Removes the named entity from the cache. */
  def invalidateEntity(entityname: EntityName): Unit = entityLRUCache.invalidate(entityname)
  /** Clears the entire entity cache. */
  def emptyEntity(): Unit = entityLRUCache.empty()
  // index
  /** Returns true if an index with the given name is currently cached. */
  def containsIndex(indexname: IndexName): Boolean = indexLRUCache.contains(indexname)
  /** Caches the given index under its name. */
  def put(name: IndexName, index: Index): Unit = indexLRUCache.put(name, index)
  /** Looks up an index by name; Failure when the entry is unavailable. */
  def getIndex(indexname: IndexName): Try[Index] = indexLRUCache.get(indexname)
  /** Removes the named index from the cache. */
  def invalidateIndex(indexname: IndexName): Unit = indexLRUCache.invalidate(indexname)
  /** Clears the entire index cache. */
  def emptyIndex(): Unit = indexLRUCache.empty()
  // query
  /** Looks up a cached query result by query id. */
  def getQuery(id: String): Try[DataFrame] = queryLRUCache.get(id)
  /** Caches a query result under the given query id. */
  def put(id: String, df: DataFrame): Unit = queryLRUCache.put(id, df)
}
object CacheManager {
/**
* Create cache manager and fill it
* @return
*/
def build()(implicit ac: SharedComponentContext): CacheManager = new CacheManager()(ac)
}
 | dbisUnibas/ADAMpro | src/main/scala/org/vitrivr/adampro/shared/cache/CacheManager.scala | Scala | mit | 2,200 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import java.io._
import java.util.concurrent.atomic.AtomicInteger
import java.util.zip.GZIPOutputStream
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{BlockLocation, FileStatus, Path, RawLocalFileSystem}
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.SparkException
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.catalog.BucketSpec
import org.apache.spark.sql.catalyst.expressions.{Expression, ExpressionSet, PredicateHelper}
import org.apache.spark.sql.catalyst.util
import org.apache.spark.sql.execution.{DataSourceScanExec, SparkPlan}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.sources._
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types.{IntegerType, StructType}
import org.apache.spark.util.Utils
class FileSourceStrategySuite extends QueryTest with SharedSQLContext with PredicateHelper {
import testImplicits._
  // Force a single default partition so the file-layout assertions are deterministic.
  protected override def sparkConf = super.sparkConf.set("spark.default.parallelism", "1")
  test("unpartitioned table, single partition") {
    // Ten 1-byte files all fit into one file-scan partition under the defaults.
    val table =
      createTable(
        files = Seq(
          "file1" -> 1,
          "file2" -> 1,
          "file3" -> 1,
          "file4" -> 1,
          "file5" -> 1,
          "file6" -> 1,
          "file7" -> 1,
          "file8" -> 1,
          "file9" -> 1,
          "file10" -> 1))
    checkScan(table.select('c1)) { partitions =>
      // 10 one byte files should fit in a single partition with 10 files.
      assert(partitions.size == 1, "when checking partitions")
      assert(partitions.head.files.size == 10, "when checking partition 1")
      // 1 byte files are too small to split so we should read the whole thing.
      assert(partitions.head.files.head.start == 0)
      assert(partitions.head.files.head.length == 1)
    }
    checkPartitionSchema(StructType(Nil))
    checkDataSchema(StructType(Nil).add("c1", IntegerType))
  }
test("unpartitioned table, multiple partitions") {
val table =
createTable(
files = Seq(
"file1" -> 5,
"file2" -> 5,
"file3" -> 5))
withSQLConf(SQLConf.FILES_MAX_PARTITION_BYTES.key -> "11",
SQLConf.FILES_OPEN_COST_IN_BYTES.key -> "1") {
checkScan(table.select('c1)) { partitions =>
// 5 byte files should be laid out [(5, 5), (5)]
assert(partitions.size == 2, "when checking partitions")
assert(partitions(0).files.size == 2, "when checking partition 1")
assert(partitions(1).files.size == 1, "when checking partition 2")
// 5 byte files are too small to split so we should read the whole thing.
assert(partitions.head.files.head.start == 0)
assert(partitions.head.files.head.length == 5)
}
checkPartitionSchema(StructType(Nil))
checkDataSchema(StructType(Nil).add("c1", IntegerType))
}
}
test("Unpartitioned table, large file that gets split") {
val table =
createTable(
files = Seq(
"file1" -> 15,
"file2" -> 3))
withSQLConf(SQLConf.FILES_MAX_PARTITION_BYTES.key -> "10",
SQLConf.FILES_OPEN_COST_IN_BYTES.key -> "1") {
checkScan(table.select('c1)) { partitions =>
// Files should be laid out [(0-10), (10-15, 4)]
assert(partitions.size == 2, "when checking partitions")
assert(partitions(0).files.size == 1, "when checking partition 1")
assert(partitions(1).files.size == 2, "when checking partition 2")
// Start by reading 10 bytes of the first file
assert(partitions.head.files.head.start == 0)
assert(partitions.head.files.head.length == 10)
// Second partition reads the remaining 5
assert(partitions(1).files.head.start == 10)
assert(partitions(1).files.head.length == 5)
}
checkPartitionSchema(StructType(Nil))
checkDataSchema(StructType(Nil).add("c1", IntegerType))
}
}
test("Unpartitioned table, many files that get split") {
val table =
createTable(
files = Seq(
"file1" -> 2,
"file2" -> 2,
"file3" -> 1,
"file4" -> 1,
"file5" -> 1,
"file6" -> 1))
withSQLConf(SQLConf.FILES_MAX_PARTITION_BYTES.key -> "4",
SQLConf.FILES_OPEN_COST_IN_BYTES.key -> "1") {
checkScan(table.select('c1)) { partitions =>
// Files should be laid out [(file1), (file2, file3), (file4, file5), (file6)]
assert(partitions.size == 4, "when checking partitions")
assert(partitions(0).files.size == 1, "when checking partition 1")
assert(partitions(1).files.size == 2, "when checking partition 2")
assert(partitions(2).files.size == 2, "when checking partition 3")
assert(partitions(3).files.size == 1, "when checking partition 4")
// First partition reads (file1)
assert(partitions(0).files(0).start == 0)
assert(partitions(0).files(0).length == 2)
// Second partition reads (file2, file3)
assert(partitions(1).files(0).start == 0)
assert(partitions(1).files(0).length == 2)
assert(partitions(1).files(1).start == 0)
assert(partitions(1).files(1).length == 1)
// Third partition reads (file4, file5)
assert(partitions(2).files(0).start == 0)
assert(partitions(2).files(0).length == 1)
assert(partitions(2).files(1).start == 0)
assert(partitions(2).files(1).length == 1)
// Final partition reads (file6)
assert(partitions(3).files(0).start == 0)
assert(partitions(3).files(0).length == 1)
}
checkPartitionSchema(StructType(Nil))
checkDataSchema(StructType(Nil).add("c1", IntegerType))
}
}
test("partitioned table") {
val table =
createTable(
files = Seq(
"p1=1/file1" -> 10,
"p1=2/file2" -> 10))
// Only one file should be read.
checkScan(table.where("p1 = 1")) { partitions =>
assert(partitions.size == 1, "when checking partitions")
assert(partitions.head.files.size == 1, "when files in partition 1")
}
// We don't need to reevaluate filters that are only on partitions.
checkDataFilters(Set.empty)
// Only one file should be read.
checkScan(table.where("p1 = 1 AND c1 = 1 AND (p1 + c1) = 2")) { partitions =>
assert(partitions.size == 1, "when checking partitions")
assert(partitions.head.files.size == 1, "when checking files in partition 1")
assert(partitions.head.files.head.partitionValues.getInt(0) == 1,
"when checking partition values")
}
// Only the filters that do not contain the partition column should be pushed down
checkDataFilters(Set(IsNotNull("c1"), EqualTo("c1", 1)))
}
test("partitioned table - case insensitive") {
withSQLConf("spark.sql.caseSensitive" -> "false") {
val table =
createTable(
files = Seq(
"p1=1/file1" -> 10,
"p1=2/file2" -> 10))
// Only one file should be read.
checkScan(table.where("P1 = 1")) { partitions =>
assert(partitions.size == 1, "when checking partitions")
assert(partitions.head.files.size == 1, "when files in partition 1")
}
// We don't need to reevaluate filters that are only on partitions.
checkDataFilters(Set.empty)
// Only one file should be read.
checkScan(table.where("P1 = 1 AND C1 = 1 AND (P1 + C1) = 2")) { partitions =>
assert(partitions.size == 1, "when checking partitions")
assert(partitions.head.files.size == 1, "when checking files in partition 1")
assert(partitions.head.files.head.partitionValues.getInt(0) == 1,
"when checking partition values")
}
// Only the filters that do not contain the partition column should be pushed down
checkDataFilters(Set(IsNotNull("c1"), EqualTo("c1", 1)))
}
}
test("partitioned table - after scan filters") {
val table =
createTable(
files = Seq(
"p1=1/file1" -> 10,
"p1=2/file2" -> 10))
val df1 = table.where("p1 = 1 AND (p1 + c1) = 2 AND c1 = 1")
// Filter on data only are advisory so we have to reevaluate.
assert(getPhysicalFilters(df1) contains resolve(df1, "c1 = 1"))
// Don't reevaluate partition only filters.
assert(!(getPhysicalFilters(df1) contains resolve(df1, "p1 = 1")))
val df2 = table.where("(p1 + c2) = 2 AND c1 = 1")
// Filter on data only are advisory so we have to reevaluate.
assert(getPhysicalFilters(df2) contains resolve(df2, "c1 = 1"))
// Need to evaluate filters that are not pushed down.
assert(getPhysicalFilters(df2) contains resolve(df2, "(p1 + c2) = 2"))
}
test("bucketed table") {
val table =
createTable(
files = Seq(
"p1=1/file1_0000" -> 1,
"p1=1/file2_0000" -> 1,
"p1=1/file3_0002" -> 1,
"p1=2/file4_0002" -> 1,
"p1=2/file5_0000" -> 1,
"p1=2/file6_0000" -> 1,
"p1=2/file7_0000" -> 1),
buckets = 3)
// No partition pruning
checkScan(table) { partitions =>
assert(partitions.size == 3)
assert(partitions(0).files.size == 5)
assert(partitions(1).files.size == 0)
assert(partitions(2).files.size == 2)
}
// With partition pruning
checkScan(table.where("p1=2")) { partitions =>
assert(partitions.size == 3)
assert(partitions(0).files.size == 3)
assert(partitions(1).files.size == 0)
assert(partitions(2).files.size == 1)
}
}
test("Locality support for FileScanRDD") {
val partition = FilePartition(0, Seq(
PartitionedFile(InternalRow.empty, "fakePath0", 0, 10, Array("host0", "host1")),
PartitionedFile(InternalRow.empty, "fakePath0", 10, 20, Array("host1", "host2")),
PartitionedFile(InternalRow.empty, "fakePath1", 0, 5, Array("host3")),
PartitionedFile(InternalRow.empty, "fakePath2", 0, 5, Array("host4"))
))
val fakeRDD = new FileScanRDD(
spark,
(file: PartitionedFile) => Iterator.empty,
Seq(partition)
)
assertResult(Set("host0", "host1", "host2")) {
fakeRDD.preferredLocations(partition).toSet
}
}
test("Locality support for FileScanRDD - one file per partition") {
withSQLConf(
SQLConf.FILES_MAX_PARTITION_BYTES.key -> "10",
"fs.file.impl" -> classOf[LocalityTestFileSystem].getName,
"fs.file.impl.disable.cache" -> "true") {
val table =
createTable(files = Seq(
"file1" -> 10,
"file2" -> 10
))
checkScan(table) { partitions =>
val Seq(p1, p2) = partitions
assert(p1.files.length == 1)
assert(p1.files.flatMap(_.locations).length == 1)
assert(p2.files.length == 1)
assert(p2.files.flatMap(_.locations).length == 1)
val fileScanRDD = getFileScanRDD(table)
assert(partitions.flatMap(fileScanRDD.preferredLocations).length == 2)
}
}
}
test("Locality support for FileScanRDD - large file") {
withSQLConf(
SQLConf.FILES_MAX_PARTITION_BYTES.key -> "10",
SQLConf.FILES_OPEN_COST_IN_BYTES.key -> "0",
"fs.file.impl" -> classOf[LocalityTestFileSystem].getName,
"fs.file.impl.disable.cache" -> "true") {
val table =
createTable(files = Seq(
"file1" -> 15,
"file2" -> 5
))
checkScan(table) { partitions =>
val Seq(p1, p2) = partitions
assert(p1.files.length == 1)
assert(p1.files.flatMap(_.locations).length == 1)
assert(p2.files.length == 2)
assert(p2.files.flatMap(_.locations).length == 2)
val fileScanRDD = getFileScanRDD(table)
assert(partitions.flatMap(fileScanRDD.preferredLocations).length == 3)
}
}
}
test("SPARK-15654 do not split non-splittable files") {
// Check if a non-splittable file is not assigned into partitions
Seq("gz", "snappy", "lz4").foreach { suffix =>
val table = createTable(
files = Seq(s"file1.${suffix}" -> 3, s"file2.${suffix}" -> 1, s"file3.${suffix}" -> 1)
)
withSQLConf(
SQLConf.FILES_MAX_PARTITION_BYTES.key -> "2",
SQLConf.FILES_OPEN_COST_IN_BYTES.key -> "0") {
checkScan(table.select('c1)) { partitions =>
assert(partitions.size == 2)
assert(partitions(0).files.size == 1)
assert(partitions(1).files.size == 2)
}
}
}
// Check if a splittable compressed file is assigned into multiple partitions
Seq("bz2").foreach { suffix =>
val table = createTable(
files = Seq(s"file1.${suffix}" -> 3, s"file2.${suffix}" -> 1, s"file3.${suffix}" -> 1)
)
withSQLConf(
SQLConf.FILES_MAX_PARTITION_BYTES.key -> "2",
SQLConf.FILES_OPEN_COST_IN_BYTES.key -> "0") {
checkScan(table.select('c1)) { partitions =>
assert(partitions.size == 3)
assert(partitions(0).files.size == 1)
assert(partitions(1).files.size == 2)
assert(partitions(2).files.size == 1)
}
}
}
}
test("SPARK-14959: Do not call getFileBlockLocations on directories") {
// Setting PARALLEL_PARTITION_DISCOVERY_THRESHOLD to 2. So we will first
// list file statues at driver side and then for the level of p2, we will list
// file statues in parallel.
withSQLConf(
"fs.file.impl" -> classOf[MockDistributedFileSystem].getName,
SQLConf.PARALLEL_PARTITION_DISCOVERY_THRESHOLD.key -> "2") {
withTempPath { path =>
val tempDir = path.getCanonicalPath
Seq("p1=1/p2=2/p3=3/file1", "p1=1/p2=3/p3=3/file1").foreach { fileName =>
val file = new File(tempDir, fileName)
assert(file.getParentFile.exists() || file.getParentFile.mkdirs())
util.stringToFile(file, fileName)
}
val fileCatalog = new InMemoryFileIndex(
sparkSession = spark,
rootPathsSpecified = Seq(new Path(tempDir)),
parameters = Map.empty[String, String],
userSpecifiedSchema = None)
// This should not fail.
fileCatalog.listLeafFiles(Seq(new Path(tempDir)))
// Also have an integration test.
checkAnswer(
spark.read.text(tempDir).select("p1", "p2", "p3", "value"),
Row(1, 2, 3, "p1=1/p2=2/p3=3/file1") :: Row(1, 3, 3, "p1=1/p2=3/p3=3/file1") :: Nil)
}
}
}
test("[SPARK-16818] partition pruned file scans implement sameResult correctly") {
withTempPath { path =>
val tempDir = path.getCanonicalPath
spark.range(100)
.selectExpr("id", "id as b")
.write
.partitionBy("id")
.parquet(tempDir)
val df = spark.read.parquet(tempDir)
def getPlan(df: DataFrame): SparkPlan = {
df.queryExecution.executedPlan
}
assert(getPlan(df.where("id = 2")).sameResult(getPlan(df.where("id = 2"))))
assert(!getPlan(df.where("id = 2")).sameResult(getPlan(df.where("id = 3"))))
}
}
test("[SPARK-16818] exchange reuse respects differences in partition pruning") {
spark.conf.set("spark.sql.exchange.reuse", true)
withTempPath { path =>
val tempDir = path.getCanonicalPath
spark.range(10)
.selectExpr("id % 2 as a", "id % 3 as b", "id as c")
.write
.partitionBy("a")
.parquet(tempDir)
val df = spark.read.parquet(tempDir)
val df1 = df.where("a = 0").groupBy("b").agg("c" -> "sum")
val df2 = df.where("a = 1").groupBy("b").agg("c" -> "sum")
checkAnswer(df1.join(df2, "b"), Row(0, 6, 12) :: Row(1, 4, 8) :: Row(2, 10, 5) :: Nil)
}
}
test("spark.files.ignoreCorruptFiles should work in SQL") {
val inputFile = File.createTempFile("input-", ".gz")
try {
// Create a corrupt gzip file
val byteOutput = new ByteArrayOutputStream()
val gzip = new GZIPOutputStream(byteOutput)
try {
gzip.write(Array[Byte](1, 2, 3, 4))
} finally {
gzip.close()
}
val bytes = byteOutput.toByteArray
val o = new FileOutputStream(inputFile)
try {
// It's corrupt since we only write half of bytes into the file.
o.write(bytes.take(bytes.length / 2))
} finally {
o.close()
}
withSQLConf(SQLConf.IGNORE_CORRUPT_FILES.key -> "false") {
val e = intercept[SparkException] {
spark.read.text(inputFile.toURI.toString).collect()
}
assert(e.getCause.isInstanceOf[EOFException])
assert(e.getCause.getMessage === "Unexpected end of input stream")
}
withSQLConf(SQLConf.IGNORE_CORRUPT_FILES.key -> "true") {
assert(spark.read.text(inputFile.toURI.toString).collect().isEmpty)
}
} finally {
inputFile.delete()
}
}
test("[SPARK-18753] keep pushed-down null literal as a filter in Spark-side post-filter") {
val ds = Seq(Tuple1(Some(true)), Tuple1(None), Tuple1(Some(false))).toDS()
withTempPath { p =>
val path = p.getAbsolutePath
ds.write.parquet(path)
val readBack = spark.read.parquet(path).filter($"_1" === "true")
val filtered = ds.filter($"_1" === "true").toDF()
checkAnswer(readBack, filtered)
}
}
// Helpers for checking the arguments passed to the FileFormat.
protected val checkPartitionSchema =
checkArgument("partition schema", _.partitionSchema, _: StructType)
protected val checkDataSchema =
checkArgument("data schema", _.dataSchema, _: StructType)
protected val checkDataFilters =
checkArgument("data filters", _.filters.toSet, _: Set[Filter])
/** Helper for building checks on the arguments passed to the reader. */
protected def checkArgument[T](name: String, arg: LastArguments.type => T, expected: T): Unit = {
if (arg(LastArguments) != expected) {
fail(
s"""
|Wrong $name
|expected: $expected
|actual: ${arg(LastArguments)}
""".stripMargin)
}
}
/** Returns a resolved expression for `str` in the context of `df`. */
def resolve(df: DataFrame, str: String): Expression = {
df.select(expr(str)).queryExecution.analyzed.expressions.head.children.head
}
/** Returns a set with all the filters present in the physical plan. */
def getPhysicalFilters(df: DataFrame): ExpressionSet = {
ExpressionSet(
df.queryExecution.executedPlan.collect {
case execution.FilterExec(f, _) => splitConjunctivePredicates(f)
}.flatten)
}
/** Plans the query and calls the provided validation function with the planned partitioning. */
def checkScan(df: DataFrame)(func: Seq[FilePartition] => Unit): Unit = {
func(getFileScanRDD(df).filePartitions)
}
/**
* Constructs a new table given a list of file names and sizes expressed in bytes. The table
* is written out in a temporary directory and any nested directories in the files names
* are automatically created.
*
* When `buckets` is > 0 the returned [[DataFrame]] will have metadata specifying that number of
* buckets. However, it is the responsibility of the caller to assign files to each bucket
* by appending the bucket id to the file names.
*/
def createTable(
files: Seq[(String, Int)],
buckets: Int = 0): DataFrame = {
val tempDir = Utils.createTempDir()
files.foreach {
case (name, size) =>
val file = new File(tempDir, name)
assert(file.getParentFile.exists() || file.getParentFile.mkdirs())
util.stringToFile(file, "*" * size)
}
val df = spark.read
.format(classOf[TestFileFormat].getName)
.load(tempDir.getCanonicalPath)
if (buckets > 0) {
val bucketed = df.queryExecution.analyzed transform {
case l @ LogicalRelation(r: HadoopFsRelation, _, _, _) =>
l.copy(relation =
r.copy(bucketSpec =
Some(BucketSpec(numBuckets = buckets, "c1" :: Nil, Nil)))(r.sparkSession))
}
Dataset.ofRows(spark, bucketed)
} else {
df
}
}
def getFileScanRDD(df: DataFrame): FileScanRDD = {
df.queryExecution.executedPlan.collect {
case scan: DataSourceScanExec if scan.inputRDDs().head.isInstanceOf[FileScanRDD] =>
scan.inputRDDs().head.asInstanceOf[FileScanRDD]
}.headOption.getOrElse {
fail(s"No FileScan in query\\n${df.queryExecution}")
}
}
}
/** Holds the last arguments passed to [[TestFileFormat]]. */
object LastArguments {
// Schema of the partition columns most recently passed to TestFileFormat.buildReader.
var partitionSchema: StructType = _
// NOTE: buildReader stores its `requiredSchema` argument here, not `dataSchema`.
var dataSchema: StructType = _
// Pushed-down data filters from the most recent buildReader call.
var filters: Seq[Filter] = _
// Reader options from the most recent buildReader call.
var options: Map[String, String] = _
}
/** A test [[FileFormat]] that records the arguments passed to buildReader, and returns nothing. */
class TestFileFormat extends TextBasedFileFormat {
override def toString: String = "TestFileFormat"
/**
 * When possible, this method should return the schema of the given `files`. When the format
 * does not support inference, or no valid files are given should return None. In these cases
 * Spark will require that user specify the schema manually.
 */
override def inferSchema(
sparkSession: SparkSession,
options: Map[String, String],
files: Seq[FileStatus]): Option[StructType] =
// Fixed two-column schema regardless of file contents.
Some(
StructType(Nil)
.add("c1", IntegerType)
.add("c2", IntegerType))
/**
 * Prepares a write job and returns an [[OutputWriterFactory]]. Client side job preparation can
 * be put here. For example, user defined output committer can be configured here
 * by setting the output committer class in the conf of spark.sql.sources.outputCommitterClass.
 */
override def prepareWrite(
sparkSession: SparkSession,
job: Job,
options: Map[String, String],
dataSchema: StructType): OutputWriterFactory = {
// Writing is intentionally unsupported: this format is only used for read-path planning tests.
throw new NotImplementedError("JUST FOR TESTING")
}
/**
 * Records the arguments into [[LastArguments]] and returns a reader that produces no rows.
 * Note `requiredSchema` (not `dataSchema`) is stored into `LastArguments.dataSchema`.
 */
override def buildReader(
sparkSession: SparkSession,
dataSchema: StructType,
partitionSchema: StructType,
requiredSchema: StructType,
filters: Seq[Filter],
options: Map[String, String],
hadoopConf: Configuration): PartitionedFile => Iterator[InternalRow] = {
// Record the arguments so they can be checked in the test case.
LastArguments.partitionSchema = partitionSchema
LastArguments.dataSchema = requiredSchema
LastArguments.filters = filters
LastArguments.options = options
(file: PartitionedFile) => { Iterator.empty }
}
}
/**
 * A local file system that fabricates block locations: every call to
 * `getFileBlockLocations` reports a fresh, distinct host ("host0", "host1", ...),
 * so locality tests can tell individual lookups apart.
 */
class LocalityTestFileSystem extends RawLocalFileSystem {
  // Monotonically increasing counter; one distinct fake host per invocation.
  private val invocations = new AtomicInteger(0)

  override def getFileBlockLocations(
      file: FileStatus, start: Long, len: Long): Array[BlockLocation] = {
    require(!file.isDirectory, "The file path can not be a directory.")
    val hostIndex = invocations.getAndIncrement()
    Array(new BlockLocation(Array(s"host$hostIndex:50010"), Array(s"host$hostIndex"), 0, len))
  }
}
// This file system is for SPARK-14959 (DistributedFileSystem will throw an exception
// if we call getFileBlockLocations on a dir).
class MockDistributedFileSystem extends RawLocalFileSystem {
// Mirrors HDFS behavior by rejecting directories, then delegates to the local FS.
override def getFileBlockLocations(
file: FileStatus, start: Long, len: Long): Array[BlockLocation] = {
require(!file.isDirectory, "The file path can not be a directory.")
super.getFileBlockLocations(file, start, len)
}
}
| tejasapatil/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala | Scala | apache-2.0 | 24,305 |
package maker.task.compile
/** Marker trait for a build task phase. */
trait Phase

/** A compilation phase (source or test compilation). */
trait CompilePhase extends Phase {
  /** Stable machine-readable identifier, e.g. "source-compile". */
  def name : String
}

case object SourceCompilePhase extends CompilePhase {
  def name = "source-compile"
  override def toString = "Source compile"
}

case object TestCompilePhase extends CompilePhase {
  def name = "test-compile"
  override def toString = "Test compile"
}

object CompilePhase{
  /**
   * Resolves a [[CompilePhase]] from a string.
   *
   * Bug fix: previously only the human-readable `toString` forms ("Source compile",
   * "Test compile") were recognised, so passing a phase's own machine-readable
   * `name` ("source-compile", "test-compile") threw. Both forms are now accepted;
   * all previously-valid inputs resolve exactly as before.
   *
   * @throws RuntimeException if the string matches neither form of any phase
   */
  def apply(name : String) : CompilePhase = {
    if (name == SourceCompilePhase.toString || name == SourceCompilePhase.name)
      SourceCompilePhase
    else if (name == TestCompilePhase.toString || name == TestCompilePhase.name)
      TestCompilePhase
    else
      throw new RuntimeException("Unrecognised phase name " + name)
  }
}

/** Phase representing running the compiled code (not a compilation phase). */
case object Run extends Phase
| syl20bnr/maker | maker/src/maker/task/compile/Phases.scala | Scala | bsd-2-clause | 675 |
package com.alvin.niagara.dao
/**
* Created by alvinjin on 2017-03-30.
*/
import com.alvin.niagara.config.DBManager
import org.joda.time.{DateTime, DateTimeZone}
import slick.driver.MySQLDriver.api._
import java.sql.Timestamp
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.util.{Failure, Random, Success}
object RestaurantDAO extends DBManager {
// Row for the `restaurants` table: a restaurant handle plus the max id seen and
// its ingest timestamp. NOTE(review): exact semantics of `maxid` are not visible
// here — presumably the highest ingested record/tweet id; confirm against callers.
case class Restaurant(
//id: Option[Int] = None,
restaurantid: String,
//restaurantname: String,
maxid: Long,
ingesttime: Timestamp
)
//case class UserPayload(username: String, password: String, email: String)
// Slick table mapping. Note the `ingesttime` field maps to the DB column named
// "createdAt", and `restaurantid` is enforced unique by `unique_user_idx`.
class Restaurants(tag: Tag) extends Table[Restaurant](tag, "restaurants") {
//def id = column[Int]("id", O.SqlType("SERIAL"), O.AutoInc, O.PrimaryKey)
def restaurantid = column[String]("restaurantid", O.SqlType("VARCHAR(30)"))
//def restaurantname = column[String]("restaurantname", O.SqlType("VARCHAR(30)"))
def maxid = column[Long]("maxid")
def ingesttime = column[Timestamp]("createdAt", O.SqlType("timestamp not null default CURRENT_TIMESTAMP"))
def * = (restaurantid, maxid, ingesttime) <>(Restaurant.tupled, Restaurant.unapply)
def unique_user_idx = index("unique_user_id", restaurantid, unique = true)
}
lazy val restaurants = TableQuery[Restaurants]
// Creates the `restaurants` table schema. NOTE(review): name says "Users" but it
// creates the restaurants table — misleading, but renaming would break callers.
def createUsersTable = db.run(restaurants.schema.create)
/*
def insertRestaurant(restaurantid: String, maxid: Long, ingesttime: Timestamp): Future[Option[Restaurant]] = {
val dt = new DateTime(DateTimeZone.forID("America/Toronto"))
val restaurant = Restaurant(restaurantid = restaurantid,
maxid = maxid, ingesttime = new Timestamp(dt.getMillis))
val query = restaurants returning restaurants.map(_.restaurantid) into
((res, id) => restaurant.copy(id = id))
val action = query += restaurant
db.run(action.asTry).map { result =>
result match {
case Success(res) => Some(res)
case Failure(e: Exception) => None
}
}
}
*/
// Returns the stored `maxid` for the given restaurant handle, or None when the
// handle is unknown. NOTE(review): despite the name, this looks up restaurants
// (by handle), not users — kept for caller compatibility.
def queryUserById(handler: String): Future[Option[Long]] = {
val action = restaurants.filter(_.restaurantid === handler).map(_.maxid).result.headOption
/*db.run(action.asTry).map { result =>
result match {
case Success(res) => res
case Failure(e: Exception) => None
}
}
*/
db.run(action)
}
// Returns every restaurant handle in the table.
def queryAllHandlers(): Future[Seq[String]] = {
val action = restaurants.map(_.restaurantid).result
/*db.run(action.asTry).map { result =>
result match {
case Success(res) => res
case Failure(e: Exception) => None
}
}
*/
db.run(action)
}
}
| AlvinCJin/Niagara | src/main/scala/com/alvin/niagara/dao/RestaurantDAO.scala | Scala | apache-2.0 | 2,823 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.nodes.logical
import java.lang.reflect.Type
import java.util.{List => JList, Set => JSet}
import org.apache.calcite.plan.{Convention, RelOptCluster, RelTraitSet}
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.convert.ConverterRule
import org.apache.calcite.rel.core.{TableFunctionScan, TableScan}
import org.apache.calcite.rel.logical.{LogicalTableFunctionScan, LogicalTableScan}
import org.apache.calcite.rel.metadata.RelColumnMapping
import org.apache.calcite.rex.RexNode
import org.apache.flink.table.plan.nodes.FlinkConventions
import org.apache.flink.table.plan.schema.TableSourceTable
/**
 * Flink's logical-convention representation of a table function scan. A thin wrapper
 * around Calcite's [[TableFunctionScan]] that additionally mixes in [[FlinkLogicalRel]].
 */
class FlinkLogicalTableFunctionScan(
cluster: RelOptCluster,
traitSet: RelTraitSet,
inputs: JList[RelNode],
rexCall: RexNode,
elementType: Type,
rowType: RelDataType,
columnMappings: JSet[RelColumnMapping])
extends TableFunctionScan(
cluster,
traitSet,
inputs,
rexCall,
elementType,
rowType,
columnMappings)
with FlinkLogicalRel {
// Required by Calcite: produce a copy of this node with the given traits/inputs/call.
// The cluster is intentionally reused from this instance; all other fields come
// from the arguments.
override def copy(traitSet: RelTraitSet,
inputs: JList[RelNode],
rexCall: RexNode,
elementType: Type,
rowType: RelDataType,
columnMappings: JSet[RelColumnMapping]): TableFunctionScan = {
new FlinkLogicalTableFunctionScan(
cluster,
traitSet,
inputs,
rexCall,
elementType,
rowType,
columnMappings)
}
}
/**
 * Converter rule that turns a Calcite [[LogicalTableFunctionScan]] (NONE convention)
 * into a [[FlinkLogicalTableFunctionScan]] in Flink's logical convention.
 */
class FlinkLogicalTableFunctionScanConverter
  extends ConverterRule(
    classOf[LogicalTableFunctionScan],
    Convention.NONE,
    FlinkConventions.LOGICAL,
    "FlinkLogicalTableFunctionScanConverter") {

  def convert(rel: RelNode): RelNode = {
    val functionScan = rel.asInstanceOf[LogicalTableFunctionScan]
    // Same traits as the input node, with the convention swapped to Flink's logical one.
    val convertedTraits = rel.getTraitSet.replace(FlinkConventions.LOGICAL)
    new FlinkLogicalTableFunctionScan(
      rel.getCluster,
      convertedTraits,
      functionScan.getInputs,
      functionScan.getCall,
      functionScan.getElementType,
      functionScan.getRowType,
      functionScan.getColumnMappings
    )
  }
}
object FlinkLogicalTableFunctionScan {
// Singleton rule instance, registered with the planner's logical rule set.
val CONVERTER = new FlinkLogicalTableFunctionScanConverter
}
| WangTaoTheTonic/flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/logical/FlinkLogicalTableFunctionScan.scala | Scala | apache-2.0 | 2,971 |
/*
* OpenURP, Open University Resouce Planning
*
* Copyright (c) 2013-2014, OpenURP Software.
*
* OpenURP is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* OpenURP is distributed in the hope that it will be useful.
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Beangle. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openurp.edu.attendance.ws.web.app
import java.{ util => ju }
import java.sql.Date
import org.beangle.commons.lang.Dates.{ today, now, join }
import org.beangle.commons.logging.Logging
import org.beangle.data.jdbc.query.JdbcExecutor
import org.openurp.edu.attendance.ws.domain.ShardPolicy._
import org.openurp.edu.attendance.ws.impl.{ ActivityService, DeviceRegistry }
import org.openurp.edu.attendance.ws.web.util.Consts.{ DeviceId, SigninDate, SigninTime, Rule }
import org.openurp.edu.attendance.ws.domain.DateFormats.{ toCourseTime, toDateStr, toTimeStr }
import org.openurp.edu.attendance.ws.web.util.Params
import org.openurp.edu.attendance.ws.web.util.Render.render
import com.google.gson.{ JsonArray, JsonObject }
import javax.servlet.{ ServletRequest, ServletResponse }
import javax.servlet.http.HttpServlet
import java.sql.Time
import java.{util => ju}
import java.{util => ju}
/**
* 本次考勤的出勤明细
*
* @author chaostone
* @version 1.0, 2014/03/22
* @since 0.0.1
*/
class DetailServlet extends HttpServlet with Logging {
// Collaborators; injected externally (no setter visible here — presumably wired
// by the surrounding container; TODO confirm).
var deviceRegistry: DeviceRegistry = _
var activityService: ActivityService = _
var jdbcExecutor: JdbcExecutor = _
// Handles a detail request: validates params, resolves the device and the teaching
// activity for (room, date, time), queries per-student sign-in records from the
// date-sharded detail/activity tables, and renders the result as JSON.
override def service(req: ServletRequest, res: ServletResponse) {
// retcode 0 = success; -1 = parameter validation failure.
var retcode, devid = 0
var retmsg, classname = ""
val json = new JsonObject()
val array = new JsonArray()
// DeviceId is mandatory; SigninDate/SigninTime default to "now" below when absent.
val params = Params.require(DeviceId).optional(SigninDate, SigninTime).get(req, Rule)
if (!params.ok) {
retmsg = params.msg.values.mkString(";")
retcode = -1
} else {
devid = params(DeviceId)
// Unknown devices fall through silently: retcode stays 0 and the list stays empty.
deviceRegistry.get(devid) foreach { device =>
val signinDate: Date = params.get(SigninDate).getOrElse(today)
val signinTime: Time = params.get(SigninTime).getOrElse(new Time(now.getTime))
val activity = activityService.getActivity(device.room, join(signinDate, signinTime))
activity.foreach { l => classname = l.className }
// Table names come from the shard policy (detailTable/activityTable keyed by date),
// not from user input; user-supplied values are bound as parameters.
val datas = jdbcExecutor.query("select xs.xh,xs.xm,d.signin_at from " + detailTable(signinDate) + " d,xsxx_t xs," + activityTable(signinDate) + " aa where " +
" aa.room_id=? and aa.course_date = ?" +
" and ? between aa.attend_begin_time and aa.end_time and xs.id=d.std_id and aa.id=d.activity_id order by d.signin_at desc", device.room.id, signinDate, toCourseTime(signinTime))
datas foreach { data =>
val attendJson = new JsonObject()
attendJson.addProperty("stuempno", data(0).toString)
attendJson.addProperty("custname", data(1).toString)
val signinAt = data(2).asInstanceOf[ju.Date]
// A null signin_at means the student has no sign-in record yet; emit empty strings.
if (null != signinAt) {
attendJson.addProperty("signindate", toDateStr(signinAt))
attendJson.addProperty("signintime", toTimeStr(signinAt))
} else {
attendJson.addProperty("signindate", "")
attendJson.addProperty("signintime", "")
}
array.add(attendJson)
}
}
}
json.addProperty("retcode", retcode)
json.addProperty("retmsg", retmsg)
json.addProperty("devid", devid)
json.addProperty("classname", classname)
json.add("list", array)
render(res, json)
}
} | openurp/edu-core | attendance/ws/src/main/scala/org/openurp/edu/attendance/ws/web/app/DetailServlet.scala | Scala | gpl-3.0 | 3,943 |
package controllers
import javax.inject._
import actors.PigExecutorHandling
import com.google.inject.Inject
import controllers.forms.{PigQueryForm, PigQueryImportForm}
import models.Tables.PigQuery
import models.{DBEnums, DatabaseAccess, MetaDataAccess}
import play.api.i18n.{I18nSupport, MessagesApi}
import play.api.libs.concurrent.Execution.Implicits._
import play.api.mvc._
import prickle.{Pickle, Unpickle}
import qrygraph.shared.SharedMessages.PicklerImplicits
import qrygraph.shared.compilation.QueryCompiler
import qrygraph.shared.data.PigQueryGraph
import qrygraph.shared.parser.PigScriptParser
import services.PigExecution
import util.Actions._
import util.FutureEnhancements._
import util.HDFS
import scala.concurrent.Future
@Singleton
class Queries @Inject()(implicit val app: play.api.Application, val messagesApi: MessagesApi)
extends Controller with DatabaseAccess with I18nSupport with PigExecutorHandling with MetaDataAccess with PicklerImplicits {
import dbConfig.driver.api._
def indexGET() = AuthedAction.async { implicit request =>
runQuery(Tables.PigQueries.filter(_.creatorUserId === request.user.id)).map(q =>
Ok(views.html.queries(request.user, q))
)
}
// Renders the empty "create query" form.
def createGET = AuthedAction(app) { request =>
Ok(views.html.createQuery(request.user, PigQueryForm.form))
}
// Handles "create query" form submission: on success inserts a new, approved,
// empty query owned by the current user, re-syncs cron schedules, and redirects
// to the listing; on validation failure re-renders the form with errors.
def createPOST = AuthedAction.async { implicit request =>
PigQueryForm.form.bindFromRequest.fold(
// Form errors
formWithErrors => {
Future(BadRequest(views.html.createQuery(request.user, formWithErrors)))
},
// Correct form
createRequest => {
runInsert(Tables.PigQueries +=
PigQuery(newUUID(), createRequest.name, createRequest.description, None, None, undeployedChanges = false, request.user.id, DBEnums.AuthApproved, "waiting", createRequest.cronjob)
).mapAll { _ =>
// mapAll runs on success and failure alike — the redirect happens either way.
syncQuerySchedules()
Redirect(routes.Queries.indexGET())
}
}
)
}
// API variant of createPOST: creates the query and returns its new id as the
// response body instead of redirecting.
// NOTE(review): unlike createPOST this does not call syncQuerySchedules() after
// the insert — confirm whether that is intentional.
def createPOSTAPI = AuthedAction.async { implicit request =>
PigQueryForm.form.bindFromRequest.fold(
// Form errors
formWithErrors => {
Future(BadRequest(views.html.createQuery(request.user, formWithErrors)))
},
// Correct form
createRequest => {
val newObject = PigQuery(newUUID(), createRequest.name, createRequest.description, None, None, undeployedChanges = false, request.user.id, DBEnums.AuthApproved, "waiting", createRequest.cronjob)
runInsert(Tables.PigQueries += newObject)
.mapAll { _ => {
// Respond with the generated UUID so API clients can reference the new query.
Ok(newObject.id)
} }
}
)
}
// Renders the "import query from Pig script" form (admins only).
def importGET = AdminAction(app) { request =>
Ok(views.html.importQuery(request.user, PigQueryImportForm.form))
}
// Handles the import form (admins only): parses the submitted Pig script into a
// query graph, pickles it into the serialized-graph column, inserts the query,
// re-syncs cron schedules, and redirects to the listing.
def importPOST = AdminAction.async { implicit request =>
PigQueryImportForm.form.bindFromRequest.fold(
// Form errors
formWithErrors => {
Future(BadRequest(views.html.importQuery(request.user, formWithErrors)))
},
// Correct form
createRequest => {
// NOTE(review): a script that fails to parse presumably throws here rather than
// producing a form error — confirm PigScriptParser's failure behavior.
val parsedQuery = PigScriptParser.parsePigScript(createRequest.importString)
val serialized = Pickle.intoString(parsedQuery)
runInsert(Tables.PigQueries +=
PigQuery(newUUID(), createRequest.name, createRequest.description, Some(serialized), None, undeployedChanges = false, request.user.id, DBEnums.AuthApproved, "waiting", createRequest.cronjob)
).mapAll { _ =>
syncQuerySchedules()
Redirect(routes.Queries.indexGET())
}
}
)
}
  /** Renders the edit form for query `id`, pre-filled from the stored row
    * (the ReadQueryFromId action loads the row and enforces access).
    */
  def editGET(id: String) = (AuthedAction andThen ReadQueryFromId(app, id)) { request =>
    val pigQueryForm = PigQueryForm(request.pigQueriesRow.name, request.pigQueriesRow.description, request.pigQueriesRow.cronjob)
    Ok(views.html.editQuery(request.user, id, PigQueryForm.form.fill(pigQueryForm)))
  }
def editPOST(id: String) = (AuthedAction andThen ReadQueryFromId(app, id)).async { implicit request =>
PigQueryForm.form.bindFromRequest.fold(
// Form errors
formWithErrors => {
Future(BadRequest(views.html.createQuery(request.user, formWithErrors)))
},
// Correct form
editRequest => {
val newQuery = request.pigQueriesRow.copy(name = editRequest.name, description = editRequest.description, cronjob = editRequest.cronjob)
runInsert(Tables.PigQueries.insertOrUpdate(newQuery)).mapAll { _ =>
syncQuerySchedules()
Redirect(routes.Queries.indexGET())
}
}
)
}
  /** Deletes query `id`, but only when the authenticated user is its
    * creator (the delete is filtered on creatorUserId, so a non-creator's
    * request deletes zero rows). Schedules are re-synced afterwards.
    */
  def deleteQuery(id: String) = AuthedAction.async { implicit request =>
    val future = db.run(Tables.PigQueries
      .filter(_.id === id)
      .filter(_.creatorUserId === request.user.id)
      .delete)
    future
      .map(i => {
        syncQuerySchedules()
        Redirect(routes.Queries.indexGET())
      })
      // NOTE(review): any database failure (not only authorization issues)
      // is reported as Unauthorized here — confirm this is intended.
      .recover { case f => Unauthorized(f.toString) }
  }
  /** Triggers an asynchronous execution of query `id` and immediately
    * redirects to the index; the execution result is not awaited.
    */
  def run(id: String) = AuthedAction(app) {
    // start actor to run the execution
    // NOTE(review): despite the comment above, this is fire-and-forget on a
    // Future, not an actor; failures inside executePig are silently dropped.
    Future {
      new PigExecution().executePig(id)
    }
    Redirect(routes.Queries.indexGET())
  }
  /** Reads the stored results of query `id` from HDFS and renders them.
    * NOTE(review): read failures are reported as Unauthorized, mirroring
    * the error handling used elsewhere in this controller — confirm intended.
    */
  def results(id: String) = AuthedAction(app).async { implicit request =>
    HDFS.readResults(globalSetting, id)
      .map(i => Ok(views.html.results(request.user, i)))
      .recover { case f => Unauthorized(f.toString) }
  }
  /** Opens the graphical query editor for query `id`, supplying the
    * available data sources and published components to the view.
    */
  def editor(id: String) = (AuthedAction andThen ReadQueryFromId(app, id)) { request =>
    Ok(views.html.editor(request.user, request.cookies, id, isComponent = false, loadDataSources(), loadPublishedComponents()))
  }
  /** Compiles the draft query graph of query `id` into Pig source text.
    * Returns 404 when no draft has been saved for the query yet.
    */
  def compile(id: String) = (AuthedAction andThen ReadQueryFromId(app, id)) { request =>
    request.pigQueriesRow.serializedDraftQuerie match {
      case Some(x) =>
        // NOTE(review): `.get` throws if the pickled draft cannot be
        // deserialized (e.g. after a format change) — confirm acceptable.
        def parsedQrygraphDraft = Unpickle[PigQueryGraph].fromString(x).get
        Ok(QueryCompiler.compile(parsedQrygraphDraft).mkString("\\n"))
      case None => NotFound("Query not available")
    }
  }
}
| Starofall/QryGraph | qrygraph/jvm/app/controllers/Queries.scala | Scala | mit | 5,900 |
/**
* Copyright (C) 2012 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.controller
import java.lang.{Boolean => JBoolean}
import java.net.URI
import org.orbeon.exception.OrbeonFormatter
import org.orbeon.oxf.externalcontext.ExternalContext
import org.orbeon.oxf.externalcontext.URLRewriter._
import org.orbeon.oxf.http.Headers._
import org.orbeon.oxf.http.HttpMethod.HttpMethodsWithRequestBody
import org.orbeon.oxf.http.{EmptyInputStream, HttpStatusCodeException, StreamedContent}
import org.orbeon.oxf.properties.PropertySet
import org.orbeon.oxf.util.CoreUtils._
import org.orbeon.oxf.util.PathUtils._
import org.orbeon.oxf.util._
import scala.jdk.CollectionConverters._
import scala.util.control.NonFatal
/** Authorizes incoming service requests, either with the shared Orbeon
  * application token or by delegating the decision to an external
  * authorizer service configured via `AuthorizerProperty`.
  */
object Authorizer extends Logging {
  import org.orbeon.oxf.controller.PageFlowControllerProcessor._
  // For now don't remember authorization, because simply remembering in the session is not enough if the authorization
  // depends on the request method, path, or headers.
  private val RememberAuthorization = false
  // Session attribute key under which a successful delegate authorization is remembered.
  private val AuthorizedKey = "org.orbeon.oxf.controller.service.authorized"
  // Whether the incoming request is authorized either with a token or via the delegate
  def authorized(
    ec          : ExternalContext)(implicit
    logger      : IndentedLogger,
    propertySet : PropertySet
  ): Boolean =
    authorizedWithToken(ec) || (
      if (RememberAuthorization)
        authorizeIfNeededAndRemember(ec)
      else
        authorizedWithDelegate(ec)
    )
  // Whether the incoming request is authorized with a token
  def authorizedWithToken(ec: ExternalContext): Boolean =
    authorizedWithToken(k => Option(ec.getRequest.getHeaderValuesMap.get(k)), k => ec.getWebAppContext.attributes.get(k))
  // Authorized when the request carries a token identical to the one stored
  // as a web app context attribute; a missing request token never matches.
  def authorizedWithToken(header: String => Option[Array[String]], attribute: String => Option[AnyRef]): Boolean = {
    val requestToken =
      header(OrbeonTokenLower).toList.flatten.headOption
    def applicationToken =
      attribute(OrbeonTokenLower) collect { case token: String => token }
    requestToken.isDefined && requestToken == applicationToken
  }
  // Check the session to see if the request is already authorized. If not, try to authorize, and remember the
  // authorization if successful. Return whether the request is authorized.
  private def authorizeIfNeededAndRemember(
    ec          : ExternalContext)(implicit
    logger      : IndentedLogger,
    propertySet : PropertySet
  ): Boolean = {
    val request = ec.getRequest
    // True when a previous delegate authorization was stored in the session.
    def alreadyAuthorized: Boolean =
      request.sessionOpt flatMap
        (_.getAttribute(AuthorizedKey)) collect
        { case value: JBoolean => value.booleanValue() } exists
        identity
    // Force-create a session and store the positive authorization flag.
    def rememberAuthorized(): Unit =
      Option(request.getSession(true)) foreach
        (_.setAttribute(AuthorizedKey, JBoolean.TRUE))
    if (! alreadyAuthorized) {
      val newlyAuthorized = authorizedWithDelegate(ec)
      if (newlyAuthorized)
        rememberAuthorized()
      newlyAuthorized
    } else
      true
  }
  // Authorize the given request with the given delegate service
  private def authorizedWithDelegate(
    ec          : ExternalContext)(implicit
    logger      : IndentedLogger,
    propertySet : PropertySet
  ): Boolean = {
    val request = ec.getRequest
    // Append the request path/query onto the delegate's base URI, merging
    // any query string already present on the base.
    def appendToURI(uri: URI, path: String, query: Option[String]) = {
      val newPath  = uri.getRawPath.dropTrailingSlash + path.prependSlash
      val newQuery = Option(uri.getRawQuery) ++ query mkString "&"
      new URI(uri.getScheme, uri.getRawUserInfo, uri.getHost, uri.getPort, newPath, if (newQuery.nonEmpty) newQuery else null, null)
    }
    // NOTE: If the authorizer base URL is an absolute path, it is rewritten against the host
    def delegateAbsoluteBaseURIOpt =
      propertySet.getStringOrURIAsStringOpt(AuthorizerProperty) map
        (p => new URI(URLRewriterUtils.rewriteServiceURL(request, p, REWRITE_MODE_ABSOLUTE_NO_CONTEXT)))
    delegateAbsoluteBaseURIOpt match {
      case Some(baseDelegateURI) =>
        // Forward method and headers but not the body
        // NOTE: There is a question of whether we need to forward cookies for authorization purposes. If we
        // do, there is the issue of the first incoming request which doesn't have incoming cookies. So at this
        // point, we just follow the header proxying method we use in other places and remove Cookie/Set-Cookie.
        val method = request.getMethod
        val newURL = appendToURI(baseDelegateURI, request.getRequestPath, Option(request.getQueryString))
        // Add remote address to help authorizer filter
        val allHeaders = {
          val proxiedHeaders =
            proxyAndCapitalizeHeaders(request.getHeaderValuesMap.asScala, request = true).toMap mapValues (_.toList)
          proxiedHeaders + (OrbeonRemoteAddress -> Option(request.getRemoteAddr).toList)
        }
        // For methods that normally carry a body, send an explicitly empty one.
        val content =
          HttpMethodsWithRequestBody(method) option
            StreamedContent(
              EmptyInputStream,
              Some("application/octet-stream"),
              Some(0L),
              None
            )
        debug("Delegating to authorizer", Seq("url" -> newURL.toString))
        val cxr =
          Connection.connectNow(
            method          = method,
            url             = newURL,
            credentials     = None,
            content         = content,
            headers         = allHeaders,
            loadState       = true,
            saveState       = true,
            logBody         = false)(
            logger          = logger,
            externalContext = ec
          )
        // TODO: state must be saved in session, not anywhere else; why is this configurable globally?
        // A 2xx response from the delegate means "authorized"; any HTTP error
        // status or connection failure means "not authorized".
        try
          ConnectionResult.withSuccessConnection(cxr, closeOnSuccess = true)(_ => true)
        catch {
          case HttpStatusCodeException(code, _, _) =>
            debug("Unauthorized", Seq("code" -> code.toString))
            false
          case NonFatal(t) =>
            error("Could not connect to authorizer", Seq("url" -> newURL.toString))
            error(OrbeonFormatter.format(t))
            false
        }
      case None =>
        // No authorizer
        debug("No authorizer configured")
        false
    }
  }
}
| orbeon/orbeon-forms | src/main/scala/org/orbeon/oxf/controller/Authorizer.scala | Scala | lgpl-2.1 | 6,906 |
package actors.client
import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import play.api.libs.json._
/**
 * Created by basso on 27/04/15.
 * Handles a client that attempts to connect without authorisation.
 * TODO: There is a loophole in the authorisation scheme; check controllers.Application
 */
class UnAuthClient(out: ActorRef) extends Actor with ActorLogging {

  // On startup, immediately tell the client it is unauthorised and stop
  // this actor (out is presumably the Play WebSocket out channel, so
  // stopping self should close the connection — TODO confirm).
  override def preStart() {
    out ! Json.obj(
      "error" -> "You are unauthorised to connect"
    )
    context stop self
  }

  // No messages are expected since preStart stops the actor; anything that
  // still arrives before the stop takes effect is only logged.
  def receive = {
    case msg => log.warning("Unauth client received message: "+ msg)
  }
}
object UnAuthClient {
  /** Props factory for [[UnAuthClient]].
    *
    * Uses the constructor-based `Props(new ...)` form (safe here because we
    * are in a plain object, not inside an actor), which is type-checked at
    * compile time, instead of the reflective `Props(classOf[...], args)`
    * variant that only fails at runtime on argument mismatches.
    */
  def props(out: ActorRef): Props = Props(new UnAuthClient(out))
}
| ChetanBhasin/Veracious | app/actors/client/UnAuthClient.scala | Scala | apache-2.0 | 664 |
/*
* Copyright (c) 2014, Brook 'redattack34' Heisler
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the ModularRayguns team nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.castlebravostudios.rayguns.items.chambers
import com.castlebravostudios.rayguns.api.items.ItemModule
import com.castlebravostudios.rayguns.entities.effects.HeatRayEffect
import com.castlebravostudios.rayguns.mod.ModularRayguns
import com.castlebravostudios.rayguns.items.emitters.Emitters
import com.castlebravostudios.rayguns.items.misc.Tier1EmptyChamber
/** Chamber module that fires [[HeatRayEffect]] shots at 1.5x base power,
  * built from the heat-ray emitter and the tier-1 empty chamber.
  */
object HeatRayChamber extends BaseChamber {

  val moduleKey = "HeatRayChamber"
  val powerModifier = 1.5
  val shotEffect = HeatRayEffect
  val nameSegmentKey = "rayguns.HeatRayChamber.segment"

  // Creates the (non-stackable) chamber item with its recipe ingredients,
  // texture and creative tab.
  def createItem() : ItemModule = new ItemChamber( this,
      Emitters.heatRayEmitter, Tier1EmptyChamber )
    .setUnlocalizedName("rayguns.HeatRayChamber")
    .setTextureName("rayguns:chamber_heat_ray")
    .setCreativeTab( ModularRayguns.raygunsTab )
    .setMaxStackSize(1)

  // Registers handlers for every supported fire mode of this chamber.
  def registerShotHandlers() : Unit = {
    registerSingleShotHandlers()
    registerScatterShotHandler()
    registerChargedShotHandler()
    registerPreciseShotHandler()
  }
}
| Redattack34/ModularRayguns | src/main/scala/com/castlebravostudios/rayguns/items/chambers/HeatRayChamber.scala | Scala | bsd-3-clause | 2,622 |
/** MACHINE-GENERATED FROM AVRO SCHEMA. DO NOT EDIT DIRECTLY */
package test
import scala.annotation.switch
/** Avro-generated record with a single int field.
  * This file is machine-generated; keep any change in sync with the
  * generator template.
  */
final case class Up(var value: Int) extends org.apache.avro.specific.SpecificRecordBase {
  def this() = this(0)
  def get(field$: Int): AnyRef = {
    (field$: @switch) match {
      case 0 => {
        value
      }.asInstanceOf[AnyRef]
      // An invalid field index must fail loudly: throw the exception rather
      // than returning the constructed exception object as the field value.
      case _ => throw new org.apache.avro.AvroRuntimeException("Bad index")
    }
  }
  def put(field$: Int, value: Any): Unit = {
    (field$: @switch) match {
      case 0 => this.value = {
        value
      }.asInstanceOf[Int]
      // An invalid field index must fail loudly instead of being ignored.
      case _ => throw new org.apache.avro.AvroRuntimeException("Bad index")
    }
    ()
  }
  def getSchema: org.apache.avro.Schema = Up.SCHEMA$
}
object Up {
  // Avro schema for Up, parsed once at class-load time.
  val SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\\"type\\":\\"record\\",\\"name\\":\\"Up\\",\\"namespace\\":\\"test\\",\\"fields\\":[{\\"name\\":\\"value\\",\\"type\\":\\"int\\"}]}")
}
/** Avro-generated record with a single int field.
  * This file is machine-generated; keep any change in sync with the
  * generator template.
  */
final case class Down(var value: Int) extends org.apache.avro.specific.SpecificRecordBase {
  def this() = this(0)
  def get(field$: Int): AnyRef = {
    (field$: @switch) match {
      case 0 => {
        value
      }.asInstanceOf[AnyRef]
      // An invalid field index must fail loudly: throw the exception rather
      // than returning the constructed exception object as the field value.
      case _ => throw new org.apache.avro.AvroRuntimeException("Bad index")
    }
  }
  def put(field$: Int, value: Any): Unit = {
    (field$: @switch) match {
      case 0 => this.value = {
        value
      }.asInstanceOf[Int]
      // An invalid field index must fail loudly instead of being ignored.
      case _ => throw new org.apache.avro.AvroRuntimeException("Bad index")
    }
    ()
  }
  def getSchema: org.apache.avro.Schema = Down.SCHEMA$
}
object Down {
  // Avro schema for Down, parsed once at class-load time.
  val SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\\"type\\":\\"record\\",\\"name\\":\\"Down\\",\\"namespace\\":\\"test\\",\\"fields\\":[{\\"name\\":\\"value\\",\\"type\\":\\"int\\"}]}")
}
package com.sksamuel.elastic4s.http.search.queries.term
import com.sksamuel.elastic4s.searches.queries.term.TermQueryDefinition
import org.elasticsearch.common.xcontent.{XContentBuilder, XContentFactory}
object TermQueryBodyFn {

  /** Builds the JSON body for an Elasticsearch `term` query from the given
    * definition, producing
    * `{ "term": { <field>: { ["boost",] ["_name",] "value" } } }`.
    */
  def apply(t: TermQueryDefinition): XContentBuilder = {
    val builder = XContentFactory.jsonBuilder()
    builder.startObject().startObject("term").startObject(t.field)
    // Optional attributes are emitted only when present on the definition.
    for (boost <- t.boost) builder.field("boost", boost.toString)
    for (name <- t.queryName) builder.field("_name", name)
    builder.field("value", t.value)
    builder.endObject().endObject().endObject()
  }
}
| FabienPennequin/elastic4s | elastic4s-http/src/main/scala/com/sksamuel/elastic4s/http/search/queries/term/TermQueryBodyFn.scala | Scala | apache-2.0 | 655 |
package wafna.udp.util
import java.nio.ByteBuffer
/**
* Encapsulates a buffer with position for successive writes.
*/
/**
 * Encapsulates a byte array with a running write position, supporting
 * successive big-endian writes of primitive values.
 *
 * The destination array is written in place; all `write*` methods return
 * the new position (index of the next byte to be written).
 */
class WriteBuffer(buffer: Array[Byte], at: Int) {

  // Index in `buffer` that the next write will go to.
  private var nth = at

  /** The current write position. */
  def position = nth

  /** Writes one byte and returns the new position. */
  def writeByte(i: Byte): Int = {
    buffer(nth) = i
    nth += 1
    nth
  }

  /** Writes an Int as one byte, failing fast on out-of-range values. */
  def writeByte(i: Int): Int = {
    if (i > Byte.MaxValue || i < Byte.MinValue) sys error s"Invalid byte: $i"
    writeByte(i.toByte)
  }

  /** Writes a Short as two big-endian bytes and returns the new position. */
  def writeShort(i: Short): Int = writeBytes(ByteBuffer.allocate(2).putShort(i))

  /** Writes an Int as a two-byte short, failing fast on out-of-range values. */
  def writeShort(i: Int): Int = {
    if (i > Short.MaxValue || i < Short.MinValue) sys error s"Invalid short: $i"
    writeShort(i.toShort)
  }

  /** Writes an Int as four big-endian bytes and returns the new position. */
  def writeInt(i: Int): Int = writeBytes(ByteBuffer.allocate(4).putInt(i))

  /** Writes all of the given bytes in order and returns the new position. */
  def writeBytes(bytes: Array[Byte]): Int = {
    for (byte <- bytes) writeByte(byte)
    nth
  }

  /** Writes the full backing array of the given buffer and returns the new
    * position. Note: requires an array-backed, non-read-only buffer. */
  def writeBytes(bb: ByteBuffer): Int = {
    // todo getting the array here is a bit inefficient.
    writeBytes(bb.array())
  }
}
| wafna/scud | util/src/main/scala/wafna/udp/util/WriteBuffer.scala | Scala | unlicense | 960 |
/**
* Copyright 2011-2016 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.charts.result.reader
import scala.collection.mutable
import io.gatling.BaseSpec
import io.gatling.charts.stats.LogFileReader
import io.gatling.core.ConfigKeys._
import io.gatling.core.config.{ GatlingConfiguration, GatlingPropertiesBuilder }
class LogFileReaderSpec extends BaseSpec {

  val props = new GatlingPropertiesBuilder
  props.resultsDirectory("src/test/resources")

  implicit val configuration = GatlingConfiguration.loadForTest(props.build)

  // FIXME re-enable with fresh and SIMPLE samples
  //  "When reading a single log file, LogFileReader" should {
  //
  //    val singleLogFileReader = new LogFileReader("run_single_node")
  //
  //    "be able to read a single file simulation" in {
  //      singleLogFileReader must not be null
  //    }
  //
  //    "find the two correct scenarios" in {
  //      singleLogFileReader.scenarioNames must beEqualTo(List("Scenario name", "Other Scenario Name"))
  //    }
  //
  //    "find the fifteen correct requests" in {
  //      val requestNames = List("Request request_1", "Request request_2", "Request request_3", "Request request_4", "Request request_5", "Request request_6", "Request request_7", "Request request_8", "Request request_9", "Request request_10")
  //      val otherRequestNames = List("Request other_request_1", "Request other_request_2", "Request other_request_3", "Request other_request_9", "Request other_request_10")
  //      singleLogFileReader.groupsAndRequests.collect { case (group, Some(request)) => RequestPath.path(request, group)} must haveTheSameElementsAs(requestNames ++ otherRequestNames)
  //    }
  //
  //    "have a correct run record" in {
  //      singleLogFileReader.runMessage must beEqualTo(RunMessage(parseTimestampString("20120607202804"), "run1", "interesting test run"))
  //    }
  //
  //  }
  //
  //  "When reading two log files coming from a multinode simulation, LogFileReader" should {
  //
  //    val multipleFilesDataReader = new LogFileReader("run_multiple_nodes")
  //
  //    "be able to read a multiple files simulation" in {
  //      multipleFilesDataReader must not be null
  //    }
  //
  //    "find the two correct scenarios" in {
  //      multipleFilesDataReader.scenarioNames must beEqualTo(List("Scenario name", "Other Scenario Name"))
  //    }
  //
  //    "find the fifteen correct requests" in {
  //      val requestNames = List("Request request_1", "Request request_2", "Request request_3", "Request request_4", "Request request_5", "Request request_6", "Request request_7", "Request request_8", "Request request_9", "Request request_10")
  //      val otherRequestNames = List("Request other_request_1", "Request other_request_2", "Request other_request_3", "Request other_request_9", "Request other_request_10")
  //      multipleFilesDataReader.groupsAndRequests.collect { case (group, Some(request)) => RequestPath.path(request, group)} must haveTheSameElementsAs(requestNames ++ otherRequestNames)
  //    }
  //
  //    //TODO - how to define correctly the runMessage method
  //    "have correct run records" in {
  //      multipleFilesDataReader.runMessage must not be null
  //    }
  //  }

  /** Builds a test GatlingConfiguration pointing at the test resources
    * directory, with the given charting property overrides applied.
    * Factors out the property-map boilerplate previously duplicated in
    * every test below.
    */
  private def configWith(overrides: (String, Any)*): GatlingConfiguration = {
    val testProps = mutable.Map.empty[String, Any]
    testProps ++= overrides
    testProps.put(core.directory.Simulations, "src/test/resources")
    testProps.put(core.directory.Results, "src/test/resources")
    GatlingConfiguration.loadForTest(testProps)
  }

  /** Per-band request counts for the known-stats log, computed under a
    * configuration with the given response-time band boundaries.
    */
  private def responseTimeRangeCounts(lowerBound: Int, higherBound: Int) = {
    implicit val configuration = configWith(
      charting.indicators.LowerBound -> lowerBound,
      charting.indicators.HigherBound -> higherBound
    )
    new LogFileReader("run_single_node_with_known_stats").numberOfRequestInResponseTimeRange(None, None).map(_._2)
  }

  val singleLogFileReader = new LogFileReader("run_single_node_with_known_stats")

  "When reading a single log file with known statistics, FileDataReader" should "return expected minResponseTime for correct request data" in {
    singleLogFileReader.requestGeneralStats().min shouldBe 2000
  }

  it should "return expected maxResponseTime for correct request data" in {
    singleLogFileReader.requestGeneralStats().max shouldBe 9000
  }

  it should "return expected responseTimeStandardDeviation for correct request data" in {
    val computedValue = singleLogFileReader.requestGeneralStats().stdDev
    val expectedValue = 2138
    // Only a small relative error is required, not an exact match.
    val error = (computedValue.toDouble - expectedValue) / expectedValue
    error shouldBe <=(0.06)
  }

  it should "return expected responseTimePercentile for the (0, 70) percentiles" in {
    implicit val configuration = configWith(
      charting.indicators.Percentile1 -> 0,
      charting.indicators.Percentile2 -> 70
    )
    val lowPercentilesLogFileReader = new LogFileReader("run_single_node_with_known_stats")
    lowPercentilesLogFileReader.requestGeneralStats().percentile(configuration.charting.indicators.percentile1) shouldBe 2000
    lowPercentilesLogFileReader.requestGeneralStats().percentile(configuration.charting.indicators.percentile2) shouldBe 5000
  }

  it should "return expected result for the (99, 100) percentiles" in {
    implicit val configuration = configWith(
      charting.indicators.Percentile1 -> 99,
      charting.indicators.Percentile2 -> 100
    )
    val highPercentilesLogFileReader = new LogFileReader("run_single_node_with_known_stats")
    highPercentilesLogFileReader.requestGeneralStats().percentile(configuration.charting.indicators.percentile1) shouldBe 8860
    highPercentilesLogFileReader.requestGeneralStats().percentile(configuration.charting.indicators.percentile2) shouldBe 9000
  }

  it should "indicate that all the request have their response time in between 0 and 100000" in {
    responseTimeRangeCounts(0, 100000) shouldBe List(0, 8, 0, 0)
  }

  it should "indicate that 1 request had a response time below 2500ms" in {
    responseTimeRangeCounts(2500, 5000).head shouldBe 1
  }

  it should "indicate that 5 request had a response time in between 2500ms and 5000ms" in {
    responseTimeRangeCounts(2500, 5000)(1) shouldBe 5
  }

  it should "indicate that 2 request had a response time above 5000ms" in {
    responseTimeRangeCounts(2500, 5000)(2) shouldBe 2
  }
}
| GabrielPlassard/gatling | gatling-charts/src/test/scala/io/gatling/charts/result/reader/LogFileReaderSpec.scala | Scala | apache-2.0 | 8,527 |
package com.box.castle.core.committer.states
import akka.actor.Actor
import com.box.castle.core.committer.CommitterActorBase
import com.box.castle.core.const
import com.box.castle.router.RouterRequestManager
import org.slf4s.Logging
import scala.concurrent.duration.FiniteDuration
/** Committer-actor state that waits a given delay and then rethrows the
  * failure that triggered the restart, letting the actor's supervision
  * strategy restart it.
  */
trait WaitingToRestart extends CommitterActorBase with CommitterActorStates {
  self: Actor with RouterRequestManager with Logging =>

  // Internal message delivered to self once the restart delay has elapsed.
  private case class RestartCommitterActor(t: Throwable)

  override def becomeWaitingToRestart(delay: FiniteDuration, t: Throwable): Unit = {
    log.info(s"$committerActorId restarting itself in $delay")
    context.become(waitingToRestart)
    // Re-deliver the original failure to ourselves after the delay.
    scheduleOnce(delay, RestartCommitterActor(t))
  }

  override def waitingToRestart: Receive = {
    case RestartCommitterActor(t) => {
      log.info(s"$committerActorId is restarting itself due to ${t.getMessage}")
      count(const.Metrics.RecoverableFailures)
      // Rethrowing escalates to the supervisor, which restarts this actor.
      throw t
    }
    case msg => receiveCommon(msg)
  }
}
| Box-Castle/core | src/main/scala/com/box/castle/core/committer/states/WaitingToRestart.scala | Scala | apache-2.0 | 991 |
/*
* Copyright 2016 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.box.stubs
import uk.gov.hmrc.ct._
import uk.gov.hmrc.ct.box.CtValue
import uk.gov.hmrc.ct.box.retriever.FilingAttributesBoxValueRetriever
/** Test stub for [[FilingAttributesBoxValueRetriever]]: every method is
  * left unimplemented (`???`), so invoking one throws
  * scala.NotImplementedError. Tests mix this in and override only the
  * methods they actually exercise.
  */
trait StubbedFilingAttributesBoxValueRetriever extends FilingAttributesBoxValueRetriever {

  override def retrieveProductName(): ProductName = ???

  override def retrieveCompanyType(): FilingCompanyType = ???

  override def retrieveAbbreviatedAccountsFiling(): AbbreviatedAccountsFiling = ???

  override def retrieveStatutoryAccountsFiling(): StatutoryAccountsFiling = ???

  override def retrieveMicroEntityFiling(): MicroEntityFiling = ???

  override def retrieveAbridgedFiling(): AbridgedFiling = ???

  override def retrieveHMRCFiling(): HMRCFiling = ???

  override def retrieveHMRCAmendment(): HMRCAmendment = ???

  override def retrieveCompaniesHouseFiling(): CompaniesHouseFiling = ???

  override def generateValues: Map[String, CtValue[_]] = ???

  override def retrieveUTR(): UTR = ???
}
| ahudspith-equalexperts/ct-calculations | src/test/scala/uk/gov/hmrc/ct/box/stubs/StubbedFilingAttributesBoxValueRetriever.scala | Scala | apache-2.0 | 1,572 |
package numbertheory
import org.salgo.numbertheory.GreatestCommonDivisor
import org.scalatest.{FunSuite, Matchers}
/** Tests for [[GreatestCommonDivisor]], exercising the Euclidean and the
  * binary implementations over the same argument-order, multi-step and
  * coprime cases.
  */
class GreatestCommonDivisorSpec extends FunSuite with Matchers {
  test("Euclid (first small)") { GreatestCommonDivisor.getByEuclid(20, 10) shouldEqual 10 }
  test("Euclid (second small)") { GreatestCommonDivisor.getByEuclid(10, 20) shouldEqual 10 }
  test("Euclid (multi level)") { GreatestCommonDivisor.getByEuclid(25, 10) shouldEqual 5 }
  test("Euclid (no divisor)") { GreatestCommonDivisor.getByEuclid(25, 13) shouldEqual 1 }
  test("GDC (first small)") { GreatestCommonDivisor.getByBinary(20, 10) shouldEqual 10 }
  test("GDC (second small)") { GreatestCommonDivisor.getByBinary(10, 20) shouldEqual 10 }
  test("GDC (multi level)") { GreatestCommonDivisor.getByBinary(25, 10) shouldEqual 5 }
  test("GDC (no divisor)") { GreatestCommonDivisor.getByBinary(25, 13) shouldEqual 1 }
}
| ascensio/salgo | tests/numbertheory/GreatestCommonDivisorSpec.scala | Scala | apache-2.0 | 905 |
// Copyright 2011-2012 James Michael Callahan
// See LICENSE-2.0 file for licensing information.
package org.scalagfx.math
import scala.util.Random
//--------------------------------------------------------------------------------------------------
// V E C 3 D
//--------------------------------------------------------------------------------------------------
//--------------------------------------------------------------------------------------------------
// Supported Subset of Operations:
//
// P + S -> P P - S -> P P * S -> P P / V -> P
// V + S -> V V - S -> V V * S -> V V / S -> V
//
// --- P - P -> V --- ---
// P + V -> P P - V -> P P * V -> P P / V -> P
// V + V -> V V - V -> V V * V -> V V / V -> V
//
// S = Scalar(Double), P = Position(Pos3d), V = Vector(Vec3d)
//--------------------------------------------------------------------------------------------------
/** Companion object for Vec3d. */
/** Companion object for Vec3d. */
object Vec3d
{
  //------------------------------------------------------------------------------------------------
  //   C R E A T I O N
  //------------------------------------------------------------------------------------------------

  /** Create a new vector from components. */
  def apply(x: Double, y: Double, z: Double) =
    new Vec3d(x, y, z)

  /** Create a new vector in which all components are the same scalar value. */
  def apply(s: Double) =
    new Vec3d(s, s, s)

  /** A zero length vector. */
  val zero: Vec3d =
    Vec3d(0.0)

  /** A vector with all components equal to (1.0). */
  val one: Vec3d =
    Vec3d(1.0)

  /** A vector with all components equal to (0.5). */
  val half: Vec3d =
    Vec3d(0.5)

  /** A unit length vector along the X-axis. */
  val unitX: Vec3d =
    Vec3d(1.0, 0.0, 0.0)

  /** A unit length vector along the Y-axis. */
  val unitY: Vec3d =
    Vec3d(0.0, 1.0, 0.0)

  /** A unit length vector along the Z-axis. */
  val unitZ: Vec3d =
    Vec3d(0.0, 0.0, 1.0)

  // NOTE: the previous `import scala.math.{cos,sin,Pi}` was removed, as
  // none of those names are used anywhere in this object.

  /** A random vector with components in the range [0.0, 1.0) */
  def random: Vec3d =
    Vec3d(scala.math.random, scala.math.random, scala.math.random)

  /** A random vector with components in the range [0.0, 1.0)
    *
    * @param gen The random number generator to use. */
  def random(gen: Random): Vec3d =
    Vec3d(gen.nextDouble, gen.nextDouble, gen.nextDouble)

  /** A random direction of unit length. */
  def randomDir: Vec3d = {
    // Generate a random point distributed evenly within the unit cube, try again if its outside
    // the unit sphere and then project the chosen point onto the sphere.
    def f(): Vec3d = {
      val v = random*2.0 - 1.0
      val len = v.length
      if((len > 0.0) && (len < 1.0)) v * (1.0 / len)
      else f
    }
    f
  }

  /** A random direction of unit length.
    *
    * @param gen The random number generator to use. */
  def randomDir(gen: Random): Vec3d = {
    // Generate a random point distributed evenly within the unit cube, try again if its outside
    // the unit sphere and then project the chosen point onto the sphere.
    def f(): Vec3d = {
      val v = random(gen)*2.0 - 1.0
      val len = v.length
      if((len > 0.0) && (len < 1.0)) v * (1.0 / len)
      else f
    }
    f
  }

  //------------------------------------------------------------------------------------------------
  //   C O M P A R I S O N
  //------------------------------------------------------------------------------------------------

  /** The component-wise comparison of whether two vectors are within a given epsilon. */
  def equiv(a: Vec3d, b: Vec3d, epsilon: Double): Boolean =
    a.equiv(b, epsilon)

  /** The component-wise comparison of whether two vectors are within a type specific
    * epsilon. */
  def equiv(a: Vec3d, b: Vec3d): Boolean =
    (a equiv b)

  /** The component-wise minimum of two vectors. */
  def min(a: Vec3d, b: Vec3d): Vec3d =
    compwise(a, b, scala.math.min(_, _))

  /** The component-wise maximum of two vectors. */
  def max(a: Vec3d, b: Vec3d): Vec3d =
    compwise(a, b, scala.math.max(_, _))

  //------------------------------------------------------------------------------------------------
  //   I N T E R P O L A T I O N
  //------------------------------------------------------------------------------------------------

  /** Linearly interpolate between two vectors. */
  def lerp(a: Vec3d, b: Vec3d, t: Double): Vec3d =
    compwise(a, b, Scalar.lerp(_, _, t))

  /** Smooth-step interpolate between two vectors. */
  def smoothlerp(a: Vec3d, b: Vec3d, t: Double): Vec3d =
    compwise(a, b, Scalar.smoothlerp(_, _, t))

  //------------------------------------------------------------------------------------------------
  //   U T I L I T Y
  //------------------------------------------------------------------------------------------------

  /** Create a vector who's components are generated by applying the given binary operator
    * to each of the corresponding components of the given two vectors. */
  def compwise(a: Vec3d, b: Vec3d, f: (Double, Double) => Double): Vec3d =
    Vec3d(f(a.x, b.x), f(a.y, b.y), f(a.z, b.z))
}
/** An immutable 3-dimensional vector of Double element type, representing a direction
  * with magnitude for computational geometry.
  *
  * Deliberately not a general purpose vector: only geometrically meaningful operations
  * are defined, so the type checker can reject accidental mixing of scalars, points and
  * vectors. */
class Vec3d(val x: Double, val y: Double, val z: Double) extends Vector3dLike
{
  type Self = Vec3d

  //--- Component ops ------------------------------------------------------------------

  /** This vector with its X component replaced by the given value. */
  def newX(v: Double): Vec3d = Vec3d(v, y, z)

  /** This vector with its Y component replaced by the given value. */
  def newY(v: Double): Vec3d = Vec3d(x, v, z)

  /** This vector with its Z component replaced by the given value. */
  def newZ(v: Double): Vec3d = Vec3d(x, y, v)

  /** This vector with the component at the given index (0 = X, 1 = Y, 2 = Z) replaced.
    * Throws IllegalArgumentException for any other index. */
  def newComp(i: Int, v: Double) =
    i match {
      case 0 => Vec3d(v, y, z)
      case 1 => Vec3d(x, v, z)
      case 2 => Vec3d(x, y, v)
      case _ => throw new IllegalArgumentException("Invalid index (" + i + ")!")
    }

  //--- Unary ops ----------------------------------------------------------------------

  /** A vector of identical magnitude but opposite direction. */
  def negated: Vec3d = map(c => -c)

  /** The squared length (magnitude) of this vector. */
  def lengthSq: Double = this dot this

  /** The length (magnitude) of this vector. */
  def length: Double = scala.math.sqrt(lengthSq)

  /** A unit-length vector pointing in the same direction. */
  def normalized: Self = this * (1.0 / length)

  //--- Operators ----------------------------------------------------------------------

  /** Add a scalar to every component. */
  def + (scalar: Double): Vec3d = Vec3d(x + scalar, y + scalar, z + scalar)

  /** Component-wise vector addition. */
  def + (that: Vec3d): Vec3d = Vec3d(x + that.x, y + that.y, z + that.z)

  /** Subtract a scalar from every component. */
  def - (scalar: Double): Vec3d = Vec3d(x - scalar, y - scalar, z - scalar)

  /** Component-wise vector subtraction. */
  def - (that: Vec3d): Vec3d = Vec3d(x - that.x, y - that.y, z - that.z)

  /** Multiply every component by a scalar. */
  def * (scalar: Double): Vec3d = Vec3d(x * scalar, y * scalar, z * scalar)

  /** Component-wise vector multiplication. */
  def * (that: Vec3d): Vec3d = Vec3d(x * that.x, y * that.y, z * that.z)

  /** Divide every component by a scalar. */
  def / (scalar: Double): Vec3d = Vec3d(x / scalar, y / scalar, z / scalar)

  /** Component-wise vector division. */
  def / (that: Vec3d): Vec3d = Vec3d(x / that.x, y / that.y, z / that.z)

  /** The dot-product of this and the given vector. */
  def dot(that: Vec3d): Double =
    x*that.x + y*that.y + z*that.z

  /** The cross-product of this and the given vector (right-handed). */
  def cross(that: Vec3d): Vec3d =
    Vec3d(y * that.z - z * that.y,
          z * that.x - x * that.z,
          x * that.y - y * that.x)

  //--- Comparison ---------------------------------------------------------------------

  /** Structural equality: true only for another Vec3d with identical components. */
  override def equals(that: Any): Boolean =
    that match {
      case v: Vec3d => (v canEqual this) && (x == v.x) && (y == v.y) && (z == v.z)
      case _        => false
    }

  /** Supports symmetric equals in the presence of subclassing. */
  def canEqual(that: Any): Boolean =
    that.isInstanceOf[Vec3d]

  /** Hash code consistent with equals, folding the three component hashes. */
  override def hashCode: Int = {
    val hx  = 41 + x.##
    val hxy = 43 * hx + y.##
    47 * hxy + z.##
  }

  //--- Utility ------------------------------------------------------------------------

  /** Whether the predicate holds for every pair of corresponding components of this and
    * the given vector (short-circuits on the first failure). */
  def forall(that: Vec3d)(p: (Double, Double) => Boolean): Boolean =
    p(x, that.x) && p(y, that.y) && p(z, that.z)

  /** Whether the predicate holds for at least one pair of corresponding components of
    * this and the given vector. */
  def forany(that: Vec3d)(p: (Double, Double) => Boolean): Boolean =
    p(x, that.x) || p(y, that.y) || p(z, that.z)

  /** A new vector obtained by applying the function to each component. */
  def map(f: (Double) => Double): Vec3d =
    Vec3d(f(x), f(y), f(z))

  //--- Conversion ---------------------------------------------------------------------

  /** Human-readable representation with components rounded to two decimals. */
  override def toString() =
    "Vec3d(%.2f, %.2f, %.2f)".format(x, y, z)
}
| JimCallahan/Graphics | src/org/scalagfx/math/Vec3d.scala | Scala | apache-2.0 | 12,410 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs102.boxes
import uk.gov.hmrc.ct.accounts.frs102.retriever.Frs102AccountsBoxRetriever
import uk.gov.hmrc.ct.box._
/** FRS 102 accounts box AC126C: disposals against the cost of motor vehicles within
  * tangible fixed assets.  The figure is optional; when present it must validate as a
  * non-negative money amount. */
case class AC126C(value: Option[Int]) extends CtBoxIdentifier(name = "Tangible assets - Motor vehicles - cost - disposals")
  with CtOptionalInteger
  with Input
  with ValidatableBox[Frs102AccountsBoxRetriever]
  with Validators
  with Debit {

  /** Accepts an absent value; otherwise requires a money value of at least zero. */
  override def validate(boxRetriever: Frs102AccountsBoxRetriever): Set[CtValidation] =
    collectErrors(validateMoney(value, min = 0))
}
| hmrc/ct-calculations | src/main/scala/uk/gov/hmrc/ct/accounts/frs102/boxes/AC126C.scala | Scala | apache-2.0 | 1,158 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600.v3
import uk.gov.hmrc.ct.box.{CtBoxIdentifier, CtOptionalInteger, Linked}
import uk.gov.hmrc.ct.computations.CP502
/** CT600 v3 box B205 ("Other income"): an optional whole-number figure. */
case class B205(value: Option[Int]) extends CtBoxIdentifier("Other income") with CtOptionalInteger

/** B205 is not entered directly; it mirrors the value of computations box CP502. */
object B205 extends Linked[CP502, B205] {
  override def apply(source: CP502): B205 = B205(source.value)
}
| pncampbell/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600/v3/B205.scala | Scala | apache-2.0 | 959 |
package com.github.jarlakxen.reactive.serial
import com.fazecast.jSerialComm.SerialPort
import scala.util.{ Try, Failure, Success }
import akka.util.ByteString
import java.io.IOException
/**
 * Thin Try-based wrapper around a jSerialComm [[SerialPort]].
 *
 * @author fviale
 */
class Port(port: SerialPort) {

  /** The operating-system name of the underlying port. */
  def systemName = port.getSystemPortName

  /**
   * Opens the port at the given baud rate with semi-blocking reads and blocking writes.
   * If the port is already open this succeeds immediately without changing the
   * currently configured baud rate.
   */
  def open(baudRate: Int): Try[_] = {
    if (isClose) {
      port.setBaudRate(baudRate)
      port.setComPortTimeouts(SerialPort.TIMEOUT_READ_SEMI_BLOCKING | SerialPort.TIMEOUT_WRITE_BLOCKING, 0, 0)
      if (port.openPort)
        Success(())
      else
        Failure(new IOException(s"Cannot open port '${port.getSystemPortName}'"))
    } else {
      Success(())
    }
  }

  /** Whether the underlying port is currently open. */
  def isOpen = port.isOpen

  /** Whether the underlying port is currently closed. */
  def isClose = !isOpen

  /**
   * Reads up to `buffer.length` bytes into `buffer`.
   * @return the number of bytes read, or Failure when the driver reports an error (-1)
   */
  def read(buffer: Array[Byte]): Try[Int] = {
    val bytes = port.readBytes(buffer, buffer.length)
    if (bytes == -1) {
      Failure(new IOException(s"There was an error reading the port '${port.getSystemPortName}'"))
    } else {
      Success(bytes)
    }
  }

  /**
   * Writes all bytes of `data` to the port.
   * @return the number of bytes written, or Failure when the driver reports an error (-1)
   */
  def write(data: ByteString): Try[Int] = {
    val bytes = port.writeBytes(data.toArray, data.length)
    if (bytes == -1) {
      Failure(new IOException(s"There was an error writing to the port '${port.getSystemPortName}'"))
    } else {
      Success(bytes)
    }
  }

  /** Closes the port, failing with an IOException when the driver refuses. */
  def close: Try[_] = {
    if (port.closePort())
      Success(())
    else
      // Bug fix: this message previously said "Cannot open port".
      Failure(new IOException(s"Cannot close port '${port.getSystemPortName}'"))
  }
} | Jarlakxen/reactive-serial | src/main/scala/com/github/jarlakxen/reactive/serial/Port.scala | Scala | apache-2.0 | 1,426 |
/*
* Copyright 2015 eleflow.com.br.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package eleflow.uberdata.data
import java.text.{DecimalFormatSymbols, DecimalFormat}
import java.util.Locale
import eleflow.uberdata.core.data.Dataset
import org.apache.spark.rdd.RDD
/**
 * Groups a training and a test [[Dataset]] together with an optional
 * (id, prediction) result RDD.
 *
 * Created by dirceu on 18/02/15.
 */
class ComposedDataset(train: Dataset, test: Dataset, result: Option[RDD[(Double, Double)]]) {

  /** Writes the result, if any, to `path` as a single "id,value" text file, formatting
    * values with up to 16 decimal places using the given locale's symbols.  Prints a
    * message instead when no result is present. */
  def exportResult(path: String, locale: Locale = Locale.ENGLISH) = {
    val symbols = new DecimalFormatSymbols(locale)
    val formatter =
      new DecimalFormat("###############.################", symbols)
    result match {
      case Some(res) =>
        res
          .coalesce(1)
          .map {
            case (id, value) =>
              s"${BigDecimal(id.toString).toString},${formatter.format(value)}"
          }
          .saveAsTextFile(path)
      case None =>
        println("No result to export")
    }
  }
}
| eleflow/uberdata | iuberdata_core/src/main/scala/eleflow/uberdata/data/ComposedDataset.scala | Scala | apache-2.0 | 1,432 |
package ohnosequences.bio4j.bundles
import ohnosequences.typesets._
import ohnosequences.statika._
import ohnosequences.statika.aws._
import ohnosequences.statika.ami._
import ohnosequences.awstools.regions._
/** AWS distribution bundle applying the NCBI taxonomy release and distribution members
  * on a 64-bit PV Amazon Linux AMI in the Ireland region.
  * NOTE(review): javaHeap = 6 — units presumably GB per the AMI helper; confirm. */
case object NCBITaxonomyApplicator extends AWSDistribution(
  NCBITaxonomyMetadata,
  amzn_ami_pv_64bit(Region.Ireland)(javaHeap = 6),
  members = NCBITaxonomyRelease :~: NCBITaxonomyDistribution :~: ∅
)
| bio4j/ncbi-taxonomy-module | src/main/scala/NCBITaxonomyApplicator.scala | Scala | agpl-3.0 | 417 |
package org.axonframework.scynapse.akka
import akka.actor._
import akka.pattern.pipe
import scala.concurrent.{Future, ExecutionContext}
import scala.collection.JavaConverters._
import org.axonframework.scynapse.commandhandling.PromisingCallback
import org.axonframework.commandhandling.CommandBus
import org.axonframework.commandhandling.{CommandMessage, GenericCommandMessage}
object CommandGatewayActor {
  // Free-form metadata attached to a dispatched command.
  type CommandMeta = Map[String, Any]

  /** Envelope for sending a command together with explicit metadata. */
  case class WithMeta(cmd: Any, meta: CommandMeta)

  /** Props factory for a gateway actor bound to the given Axon command bus. */
  def props(axonCommandBus: CommandBus) =
    Props(new CommandGatewayActor(axonCommandBus))
}
/**
 * Actor-based command gateway: forwards received messages onto an Axon
 * [[org.axonframework.commandhandling.CommandBus]] and replies to the sender with any
 * non-null handler result.
 *
 * @param axonCommandBus the Axon command bus commands are dispatched on
 */
class CommandGatewayActor(axonCommandBus: CommandBus) extends Actor with ActorLogging {
  import CommandGatewayActor._

  implicit val executor: ExecutionContext = ExecutionContext.Implicits.global

  /** Wraps `cmd` in an Axon CommandMessage carrying `meta` as its metadata. */
  def asCommandMessage(cmd: Any, meta: CommandMeta = Map.empty): CommandMessage[_] =
    GenericCommandMessage.asCommandMessage(cmd).withMetaData(meta.asJava)

  /** Dispatches on the bus and exposes the handler's outcome as a Future. */
  def dispatchMessage[T](cmd: CommandMessage[T]): Future[Any] = {
    val callback = new PromisingCallback[T, Any]
    axonCommandBus.dispatch(cmd, callback)
    callback.future
  }

  // Note: the generic `cmd` case must remain last, as it matches anything.
  def receive = {
    case cmdMessage: CommandMessage[_] =>
      val origin = sender() // capture before the async callback runs
      for (result <- dispatchMessage(cmdMessage) if result != null) {
        origin ! result
      }
    case WithMeta(cmd, meta) =>
      self forward asCommandMessage(cmd, meta)
    case cmd =>
      self forward asCommandMessage(cmd)
  }
}
| AxonFramework/Scynapse | scynapse-akka/src/main/scala/org/axonframework/scynapse/akka/CommandGatewayActor.scala | Scala | apache-2.0 | 1,589 |
package models
import utils.silhouette.IdentitySilhouette
import com.mohiva.play.silhouette.contrib.utils.BCryptPasswordHasher
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
/** A back-office user identity authenticated through Silhouette. */
case class Manager (
  id: Long,
  email: String,
  password: String,
  firstName: String,
  lastName: String,
  /*
    A manager can have one or more roles. Each role declares a level or area. The 'master' role has full access.
    Ex: ("master") -> full access to every point of the Admin Webpage.
    Ex: ("social") -> the manager works for the 'social' area.
    Ex: ("high", "sales") -> the manager has a 'high' access and works for the 'sales' area.
  */
  roles: Seq[String]
) extends IdentitySilhouette {
  // Silhouette identity key: the e-mail address.
  def key = email
  def fullName: String = firstName + " " + lastName
}

/** In-memory store of managers keyed by e-mail, pre-populated with sample accounts
  * (passwords BCrypt-hashed at class-load time).
  * NOTE(review): mutable shared state with no synchronization — fine for a demo,
  * replace with real persistence for production. */
object Manager {

  val managers = scala.collection.mutable.HashMap[String, Manager](
    "master@myweb.com" -> Manager(1L, "master@myweb.com", (new BCryptPasswordHasher()).hash("123123").password, "Eddard", "Stark", Seq("master")),
    "social@myweb.com" -> Manager(2L, "social@myweb.com", (new BCryptPasswordHasher()).hash("123123").password, "Margaery", "Tyrell", Seq("social")),
    "sales@myweb.com" -> Manager(3L, "sales@myweb.com", (new BCryptPasswordHasher()).hash("123123").password, "Petyr", "Baelish", Seq("sales")),
    "sales_high@myweb.com" -> Manager(4L, "sales_high@myweb.com", (new BCryptPasswordHasher()).hash("123123").password, "Tyrion", "Lannister", Seq("sales", "high"))
  )

  /** Looks up a manager by e-mail. */
  def findByEmail (email: String): Future[Option[Manager]] = Future.successful(managers.get(email))
  /** Applies `f` to the manager found for `email`, if any. */
  def findByEmailMap[A] (email: String)(f: Manager => A): Future[Option[A]] = findByEmail(email).map(_.map(f))
  /** Inserts or replaces the manager keyed by its e-mail. */
  def save (manager: Manager): Future[Manager] = {
    managers += (manager.email -> manager)
    Future.successful(manager)
  }
}
| vtapadia/crickit | modules/common/app/models/Manager.scala | Scala | apache-2.0 | 1,809 |
package com.sksamuel.elastic4s.fields
/** Holds the Elasticsearch type name used by constant_keyword field mappings. */
object ConstantKeywordField {
  val `type`: String = "constant_keyword"
}

/** An Elasticsearch field mapping of type "constant_keyword".
  * @param name  the field name
  * @param value the constant value configured for the field */
case class ConstantKeywordField(override val name: String, value: String) extends ElasticField {
  override def `type`: String = ConstantKeywordField.`type`
}
| sksamuel/elastic4s | elastic4s-domain/src/main/scala/com/sksamuel/elastic4s/fields/ConstantKeywordField.scala | Scala | apache-2.0 | 272 |
/** SComponent.scala
*
* Jim McBeath, June 16, 2008
*/
package net.jimmc.swing
import java.awt.event.ActionEvent
import java.awt.event.ActionListener
import java.net.URL
import javax.swing.event.ChangeEvent
import javax.swing.event.ChangeListener
import javax.swing.Icon
import javax.swing.ImageIcon
import javax.swing.JComponent
/** Mixin for Swing JComponents that wires resource-driven tool tips and icons, plus
  * exception-safe action/change listeners, against an owning SFrame. */
trait SComponent { this:JComponent =>

    /** Sets the tool tip text from resource `<resPrefix>.toolTip`, when defined. */
    def setupToolTip(frame:SFrame, resPrefix:String) {
        //Set up the tool tip text if defined in the resources
        val ttKey = resPrefix+".toolTip"
        frame.getResourceStringOption(ttKey).foreach(setToolTipText(_))
    }

    /** Sets the icon from resource `<resPrefix>.icon`, when defined.
      * NOTE(review): matching against a structural type only checks the erased type, so
      * the case matches any component; setIcon is then invoked reflectively and fails at
      * runtime if the concrete component lacks the method — confirm this trait is only
      * mixed into icon-capable components. */
    def setupIcon(frame:SFrame, resPrefix:String) {
        //Set up the icon if defined in the resources and we have the call
        type HasSetIconMethod = { def setIcon(icon:Icon) }
        this match {
            case c:HasSetIconMethod =>
                val iconKey = resPrefix+".icon"
                frame.getResourceStringOption(iconKey).foreach { iconName =>
                    c.setIcon(loadIcon(frame,iconName))
                }
        }
    }

    /** Registers `action` as an ActionListener (reflective structural call — see note on
      * setupIcon); exceptions raised by the action are routed to the frame. */
    def setupActionListener(frame:SFrame, action: => Unit) {
        //Wire in our action listener if we have such a thing
        type HasAddActionListenerMethod =
                { def addActionListener(a:ActionListener) }
        this match {
            case c:HasAddActionListenerMethod =>
                c.addActionListener(new ActionListener() {
                    override def actionPerformed(ev:ActionEvent) {
                        actionWithCatch(frame, action)
                                //perform the action and handle exceptions
                    }
                })
        }
    }

    /** Registers `action` as a ChangeListener (reflective structural call — see note on
      * setupIcon); exceptions raised by the action are routed to the frame. */
    def setupChangeListener(frame:SFrame, action: => Unit) {
        //Wire in our action listener if we have such a thing
        type HasAddChangeListenerMethod =
                { def addChangeListener(a:ChangeListener) }
        this match {
            case c:HasAddChangeListenerMethod =>
                c.addChangeListener(new ChangeListener() {
                    override def stateChanged(ev:ChangeEvent) {
                        actionWithCatch(frame, action)
                                //perform the action and handle exceptions
                    }
                })
        }
    }

    //Execute our action. If we get any exceptions, pass them to the
    //frame for handling.
    private def actionWithCatch(frame:SFrame, action: =>Unit) {
        try {
            action
        } catch {
            case ex:Exception => frame.handleUiException(ex)
        }
    }

    /** Loads an icon resource relative to the frame's class; returns null when the
      * resource is not found (callers pass the result straight to setIcon). */
    protected def loadIcon(frame:SFrame, iconName:String):Icon = {
        val cl = frame.getClass()
        val url:URL = cl.getResource(iconName)
        if (url==null)
            null
        else
            new ImageIcon(url)
    }
}
| jimmc/mimprint | src/net/jimmc/swing/SComponent.scala | Scala | gpl-2.0 | 2,810 |
package views.util.formdata
import models.User
import play.Play
import play.api.Logger
import play.api.data.Form
import play.api.data.Forms._
import scala.concurrent.duration._
import play.api.libs.concurrent.Execution.Implicits._
import scala.concurrent.Await
/** Raw credentials submitted through the sign-in form. */
case class LoginData(email: String, password: String)

object LoginData {
  // Password length bounds are read from the Play configuration at class-load time.
  val MIN_PASSWORD_LEN = Play.application().configuration().getInt("form.min_password_len")
  val MAX_PASSWORD_LEN = Play.application().configuration().getInt("form.max_password_len")

  val EMAIL_FIELD_NAME = "signim_email"
  val PASSWORD_FIELD_NAME = "signim_pswrd"

  // Sign-in form with a cross-field authentication check.
  // NOTE(review): the verifying block compares the stored password in plain text
  // (u.password == password), blocks the request thread via Await with a 2-second
  // timeout (a slow lookup raises TimeoutException rather than failing validation),
  // and the recover clause maps any lookup failure to a plain "invalid credentials"
  // validation error.
  val loginForm = Form(
    mapping(
      EMAIL_FIELD_NAME -> email,
      PASSWORD_FIELD_NAME -> nonEmptyText(MIN_PASSWORD_LEN, MAX_PASSWORD_LEN)
    )(LoginData.apply)(LoginData.unapply)
      verifying("Invalid user name or password", {fields => fields match {
      case LoginData(eMail, password) =>
        Logger.debug("in valid meth")
        val user = User.getByEmail(eMail)
        val isAuth = user.map({
          case Some(u) => {
            Logger.debug(u.toString)
            u.password == password
          }
          case None => {
            Logger.debug(s"non validate")
            false
          }
        }) recover {case e: Throwable =>
          Logger.debug("trowable ser!" + e)
          false}
        Await.result(isAuth, 2 second)
    }})
  )
} | Alykoff/kw-tst | app/views/util/formdata/LoginData.scala | Scala | mit | 1,386 |
package me.lsbengine.server
import scala.util.Properties._
/** Environment-driven configuration for the blog engine; each setting falls back to a
  * default via envOrElse.
  * NOTE: declaration order matters — blogOwner concatenates the two name vals declared
  * immediately above it. */
object BlogConfiguration {
  val adminPort: Int = envOrElse("ADMIN_PORT", "9090").toInt
  val appContext: String = envOrElse("APP_CONTEXT", "DEV")
  val blogMetaDescription: String = envOrElse("BLOG_META_DESCRIPTION", "My name is Jeremy Rabasco. I am a Computer Science major and I currently work at <JOB_HERE>.")
  val blogOwnerFirstName: String = envOrElse("BLOG_OWNER_FIRST_NAME", "Jeremy")
  val blogOwnerLastName: String = envOrElse("BLOG_OWNER_LAST_NAME", "Rabasco")
  // Derived from the two vals above; keep it declared after them.
  val blogOwner: String = blogOwnerFirstName + " " + blogOwnerLastName
  val blogOwnerGender: String = envOrElse("BLOG_OWNER_GENDER", "male")
  val blogOwnerPseudo: String = envOrElse("BLOG_OWNER_PSEUDO", "")
  val blogShortDesc: String = envOrElse("BLOG_SHORT_DESCRIPTION", "My Personal Blog")
  val contactAddress: String = envOrElse("CONTACT_ADDRESS", "rabasco.jeremy@gmail.com")
  val copyright: String = envOrElse("COPYRIGHT", "")
  val defaultPostsPerPage: Int = envOrElse("DEFAULT_POSTS_PER_PAGE", "10").toInt
  val disclaimer: String = envOrElse("DISCLAIMER", "My opinions do not necessarily represent those of my employer.")
  val gtagKey: String = envOrElse("GTAG_KEY", "")
  val hashIterations: Int = envOrElse("HASH_ITERATIONS", "300000").toInt
  val headerTitle: String = envOrElse("HEADER_TITLE", "LSBEngine")
  val hostName: String = envOrElse("SERVER_HOST", "localhost")
  val imagesLocation: String = envOrElse("IMAGES_LOCATION", "/home/jrabasco/images/")
  val mongoDBHost: String = envOrElse("MONGO_HOST", "localhost")
  val mongoDBName: String = envOrElse("MONGO_NAME", "lsbengine")
  val mongoDBPort: Int = envOrElse("MONGO_CUST_PORT", "27017").toInt
  val publicPort: Int = envOrElse("PUBLIC_PORT", "8080").toInt
  val repositoryLink: String = envOrElse("REPOSITORY_LINK", "https://github.com/jrabasco/LSBEngine")
  val siteUrl: String = envOrElse("SITE_URL", "local.lsbengine.me")
}
| jrabasco/LSBEngine | src/main/scala/me/lsbengine/server/BlogConfiguration.scala | Scala | mit | 1,940 |
package com.bot4s.telegram.methods
import ParseMode.ParseMode
import com.bot4s.telegram.models.{ Message, ReplyMarkup }
import com.bot4s.telegram.models.ChatId
/**
* Use this method to edit captions of messages sent by the bot or via the bot (for inline bots).
* On success, if edited message is sent by the bot, the edited Message is returned, otherwise True is returned.
*
* @param chatId Integer or String No Required if inline_message_id is not specified. Unique identifier for the target chat or username of the target channel (in the format @channelusername)
* @param messageId Integer No Required if inline_message_id is not specified. Unique identifier of the sent message
* @param inlineMessageId String No Required if chat_id and message_id are not specified. Identifier of the inline message
* @param caption String Optional New caption of the message
* @param parseMode String Optional Send Markdown or HTML, if you want Telegram apps to show bold, italic,
* fixed-width text or inline URLs in the media caption.
* @param replyMarkup InlineKeyboardMarkup Optional A JSON-serialized object for an inline keyboard.
*/
case class EditMessageCaption(
  chatId: Option[ChatId] = None,
  messageId: Option[Int] = None,
  inlineMessageId: Option[String] = None,
  caption: Option[String] = None,
  parseMode: Option[ParseMode] = None,
  replyMarkup: Option[ReplyMarkup] = None
) extends JsonRequest[Message Either Boolean] {

  // Exactly one addressing style must be supplied: either (chatId, messageId) for a
  // regular message, or inlineMessageId for an inline-mode message.  Both checks run at
  // construction time and raise IllegalArgumentException when violated; the first
  // failing require determines the message reported.
  if (inlineMessageId.isEmpty) {
    require(chatId.isDefined, "Required if inlineMessageId is not specified")
    require(messageId.isDefined, "Required if inlineMessageId is not specified")
  }

  if (chatId.isEmpty && messageId.isEmpty)
    require(inlineMessageId.isDefined, "Required if chatId and messageId are not specified")
}
| mukel/telegrambot4s | core/src/com/bot4s/telegram/methods/EditMessageCaption.scala | Scala | apache-2.0 | 1,850 |
/*
* ActionObjView.scala
* (Mellite)
*
* Copyright (c) 2012-2022 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU Affero General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* contact@sciss.de
*/
package de.sciss.mellite.impl.objview
import de.sciss.icons.raphael
import de.sciss.lucre.edit.UndoManager
import de.sciss.lucre.swing.Window
import de.sciss.lucre.synth.Txn
import de.sciss.lucre.{Obj, Source, Txn => LTxn}
import de.sciss.mellite.{CodeFrame, ObjListView, ObjView, UniverseHandler}
import de.sciss.proc.Action
import de.sciss.proc.Implicits._
import javax.swing.Icon
/** ObjView factory for proc Action objects: list presentation, creation, and the code
  * editor window opened when an action is viewed. */
object ActionObjView extends NoArgsListObjViewFactory {
  type E[~ <: LTxn[~]] = Action[~]
  val icon: Icon = ObjViewImpl.raphaelIcon(raphael.Shapes.Bolt)
  val prefix: String = "Action"
  def humanName: String = prefix
  def tpe: Obj.Type = Action
  def category: String = ObjView.categComposition

  /** Builds the list view for an existing Action object. */
  def mkListView[T <: Txn[T]](obj: Action[T])(implicit tx: T): ActionObjView[T] with ObjListView[T] = {
    val value = "" // ex.value
    new Impl(tx.newHandle(obj), value).initAttrs(obj)
  }

  /** Creates a single new Action, applying the configured name when non-empty. */
  def makeObj[T <: Txn[T]](config: Config[T])(implicit tx: T): List[Obj[T]] = {
    val name = config
    val obj = Action[T]()
    if (name.nonEmpty) obj.name = name
    obj :: Nil
  }

  // XXX TODO make private
  final class Impl[T <: Txn[T]](val objH: Source[T, Action[T]], var value: String)
    extends ActionObjView[T]
      with ObjListView[T]
      with ObjViewImpl.Impl[T]
      with ObjListViewImpl.StringRenderer {

    def factory: ObjView.Factory = ActionObjView

    // Actions are never edited in place from the list cell.
    override def tryEditListCell(value: Any)(implicit tx: T, undoManager: UndoManager[T]): Boolean = false

    def isListCellEditable: Boolean = false // never within the list view

    def isViewable: Boolean = true

    /** Opens (or focuses) the code editor frame for this action. */
    override def openView(parent: Option[Window[T]])(implicit tx: T,
                                                     handler: UniverseHandler[T]): Option[Window[T]] = {
      import de.sciss.mellite.Mellite.compiler
      val frame = CodeFrame.action(obj)
      // val frame = ActionEditorFrame(obj)
      Some(frame)
    }
  }
}
/** Common view type for Action objects; the view's representation is the Action itself. */
trait ActionObjView[T <: LTxn[T]] extends ObjView[T] {
  type Repr = Action[T]
} | Sciss/Mellite | app/src/main/scala/de/sciss/mellite/impl/objview/ActionObjView.scala | Scala | agpl-3.0 | 2,346 |
/*
* Copyright (c) 2013 Bridgewater Associates, LP
*
* Distributed under the terms of the Modified BSD License. The full license is in
* the file COPYING, distributed as part of this software.
*/
package notebook.kernel.pfork
import java.io.{EOFException, ObjectInputStream, ObjectOutputStream, File}
import java.net._
import java.util.concurrent.atomic.AtomicBoolean
import java.util.concurrent.Executors
import scala.collection.mutable
import scala.collection.JavaConversions._
import scala.collection.mutable.ListBuffer
import scala.concurrent._
import duration.Duration
import concurrent.ops
import com.sun.org.apache.xpath.internal.functions.FuncTrue
import org.apache.commons.exec._
import org.apache.log4j.PropertyConfigurator
import org.slf4j.LoggerFactory
import com.typesafe.config.{ConfigFactory, Config}
import play.api.{Play, Logger}
/** Contract implemented by classes hosted inside a forked child JVM. */
trait ForkableProcess {
  /**
   * Called in the remote VM. Can return any useful information to the server through the return
   * @param args arguments passed through from the parent's execute call
   * @return a string reported back to the parent as ProcessInfo.initReturn
   */
  def init(args: Seq[String]):String

  // Called after init; expected to block until the hosted process is finished.
  def waitForExit()
}
/**
 * I am so sick of this being a thing that gets implemented everywhere. Let's abstract.
 *
 * Forks a child JVM that runs [[ChildProcessMain]], which instantiates the given
 * [[ForkableProcess]] class. The child connects back over a loopback socket and reports
 * the result of its `init`, which is surfaced through the returned [[ProcessInfo]].
 */
class BetterFork[A <: ForkableProcess : reflect.ClassTag](config:Config, executionContext: ExecutionContext) {
  private implicit val ec = executionContext

  import BetterFork._

  // Concrete class instantiated inside the child VM.
  val processClass = implicitly[reflect.ClassTag[A]].runtimeClass

  def workingDirectory = new File(if (config.hasPath("wd")) config.getString("wd") else ".")
  /** Child max heap in bytes; defaults to this VM's max heap. */
  def heap: Long = if (config.hasPath("heap")) config.getBytes("heap") else defaultHeap
  def stack: Long = if (config.hasPath("stack")) config.getBytes("stack") else -1
  def permGen: Long = if (config.hasPath("permGen")) config.getBytes("permGen") else -1
  def reservedCodeCache: Long = if (config.hasPath("reservedCodeCache")) config.getBytes("reservedCodeCache") else -1
  def server: Boolean = true
  def debugPort: Option[Int] = if (config.hasPath("debug.port")) Some(config.getInt("debug.port")) else None
  def vmArgs:List[String] = if (config.hasPath("vmArgs")) config.getStringList("vmArgs").toList else Nil

  def classPath: IndexedSeq[String] = defaultClassPath
  def classPathString = classPath.mkString(File.pathSeparator)

  /** JVM options for the child, assembled from the settings above (negative sizes are skipped). */
  def jvmArgs = {
    val builder = IndexedSeq.newBuilder[String]

    def ifNonNeg(value: Long, prefix: String) {
      if (value >= 0) {
        builder += (prefix + value)
      }
    }

    ifNonNeg(heap, "-Xmx")
    ifNonNeg(stack, "-Xss")
    ifNonNeg(permGen, "-XX:MaxPermSize=")
    ifNonNeg(reservedCodeCache, "-XX:ReservedCodeCacheSize=")

    if (server) builder += "-server"

    debugPort.foreach { p =>
      builder ++= IndexedSeq("-Xdebug", "-Xrunjdwp:transport=dt_socket,server=y,suspend=n,address="+p)
    }
    builder ++= vmArgs
    builder.result()
  }

  implicit protected def int2SuffixOps(i: Int) = new SuffixOps(i)

  /** Convenience k/m/g byte-size suffixes for subclasses overriding the size settings. */
  protected final class SuffixOps(i: Int) {
    def k: Long = i.toLong << 10
    def m: Long = i.toLong << 20
    def g: Long = i.toLong << 30
  }

  /**
   * Spawns the child JVM and completes once it has connected back and reported the
   * result of its init. The returned ProcessInfo exposes a kill switch and a future
   * completed with the child's exit code (or failed if the child could not run).
   */
  def execute(args: String*): Future[ProcessInfo] = {
    /* DK: Bi-directional liveness can be detected via redirected System.in (child), System.out (parent), avoids need for socket... */
    val ss = new ServerSocket(0)
    val cmd = new CommandLine(javaHome + "/bin/java")
      .addArguments(jvmArgs.toArray)
      .addArgument(classOf[ChildProcessMain].getName)
      .addArgument(ss.getLocalPort.toString)
      .addArguments(args.toArray)

    Future {
      log.info("Spawning %s".format(cmd.toString))

      // use environment because classpaths can be longer here than as a command line arg
      val environment = System.getenv + ("CLASSPATH" -> classPathString)
      val exec = new KillableExecutor

      val completion = Promise[Int]

      exec.setWorkingDirectory(workingDirectory)

      try {
        exec.execute(cmd, environment, new ExecuteResultHandler {
          Logger.info(s"Spawning $cmd")
          Logger.info(s"With Env $environment")
          Logger.info(s"In working directory $workingDirectory")
          def onProcessFailed(e: ExecuteException) {
            e.printStackTrace
            // Fix: fail the completion future instead of leaving it pending forever.
            completion.tryFailure(e)
          }
          def onProcessComplete(exitValue: Int) {
            completion.trySuccess(exitValue)
          }
        })
        val socket = ss.accept()
        serverSockets += socket
        try {
          val ois = new ObjectInputStream(socket.getInputStream)
          val resp = ois.readObject().asInstanceOf[String]
          new ProcessInfo(() => exec.kill(), resp, completion.future)
        } catch {
          case ex:SocketException => throw new ExecuteException("Failed to start process %s".format(cmd), 1, ex)
          case ex:EOFException => throw new ExecuteException("Failed to start process %s".format(cmd), 1, ex)
        }
      } finally {
        // Fix: the listening socket only exists to accept the single child connection;
        // previously it was never closed and leaked on every call. Closing the
        // ServerSocket does not close the accepted child socket.
        ss.close()
      }
    }
  }
}
/** Handle on a spawned child process: a kill switch, the string returned by the child's
  * init, and a future completed with the process exit code. */
class ProcessInfo(killer: () => Unit, val initReturn: String, val completion: Future[Int]) {
  /** Forcibly terminates the child process. */
  def kill() { killer() }
}
object BetterFork {

  // Keeps server sockets around so they are not GC'd
  private val serverSockets = new ListBuffer[Socket]()

  // Walks the classloader chain instead of java.class.path — needed when running in
  // SBT/Play, where the real classpath lives in URLClassLoaders.
  def defaultClassPath: IndexedSeq[String] = {
    // Recursively collects URL entries up the classloader parent chain.
    def urls(cl:ClassLoader, acc:IndexedSeq[String]=IndexedSeq.empty):IndexedSeq[String] = {
      if (cl != null) {
        val us = if (!cl.isInstanceOf[URLClassLoader]) {
          //println(" ----- ")
          //println(cl.getClass.getSimpleName)
          acc
        } else {
          acc ++ (cl.asInstanceOf[URLClassLoader].getURLs map { u =>
            val f = new File(u.getFile)
            URLDecoder.decode(f.getAbsolutePath, "UTF8")
          })
        }
        urls(cl.getParent, us)
      } else {
        acc
      }
    }
    val loader = Play.current.classloader
    val gurls = urls(loader).distinct.filter(!_.contains("logback-classic"))//.filter(!_.contains("sbt/"))
    gurls
  }

  def defaultHeap = Runtime.getRuntime.maxMemory

  /* Override to expose ability to forcibly kill the process */
  private class KillableExecutor extends DefaultExecutor {
    val killed = new AtomicBoolean(false)
    // Watchdog destroys the process if kill() was requested before it even started.
    setWatchdog(new ExecuteWatchdog(ExecuteWatchdog.INFINITE_TIMEOUT) {
      override def start(p: Process) { if (killed.get()) p.destroy() }
    })
    def kill() {
      if (killed.compareAndSet(false, true))
        Option(getExecutorThread()) foreach(_.interrupt())
    }
  }

  private lazy val javaHome = System.getProperty("java.home")
  private lazy val log = LoggerFactory.getLogger(getClass())

  // Child-JVM entry point (invoked via ChildProcessMain): instantiates the hosted
  // ForkableProcess, reports init's result to the parent over the socket, then waits
  // until either the hosted process exits or the parent closes the connection.
  private[pfork] def main(args: Array[String]) {
    val className = args(0)
    val parentPort = args(1).toInt

    PropertyConfigurator.configure(getClass().getResource("/log4j.subprocess.properties"))

    log.info("Remote process starting")
    val socket = new Socket("127.0.0.1", parentPort)
    val remainingArgs = args.drop(2).toIndexedSeq

    val hostedClass = Class.forName(className).newInstance().asInstanceOf[ForkableProcess]
    val result = hostedClass.init(remainingArgs)

    val oos = new ObjectOutputStream(socket.getOutputStream)
    oos.writeObject(result)
    oos.flush()

    val executorService = Executors.newFixedThreadPool(10)
    implicit val ec = ExecutionContext.fromExecutorService(executorService)

    // A read() returning means the parent closed the connection (or died).
    val parentDone = Future { socket.getInputStream.read() }
    val localDone = Future{ hostedClass.waitForExit() }

    val done = Future.firstCompletedOf(Seq(parentDone, localDone))

    try {
      Await.result(done, Duration.Inf)
    } finally {
      log.warn("Parent process stopped; exiting.")
      sys.exit(0)
    }
  }
}
| bigdatagenomics/mango-notebook | modules/subprocess/src/main/scala/notebook/kernel/pfork/BetterFork.scala | Scala | apache-2.0 | 7,604 |
package solarsystemscalemodel.integrationtest
import java.io.File
import org.openqa.selenium.chrome.{ChromeDriverService, ChromeDriver}
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfter, Matchers, FlatSpec}
import org.scalatest.selenium.{WebBrowser}
/**
 * These are end-to-end integration tests which use selenium and chromedriver to launch a real browser and
 * check that modifying values in the page updates the rest of the page
 */
class IntegrationTest extends FlatSpec with Matchers with WebBrowser with BeforeAndAfter with BeforeAndAfterAll {

  // NOTE(review): hard-coded chromedriver location — these tests only run on machines
  // with chromedriver installed at this path.
  System.setProperty(ChromeDriverService.CHROME_DRIVER_EXE_PROPERTY, "/usr/lib/chromium-browser/chromedriver")
  val service = ChromeDriverService.createDefaultService()
  // The page under test is the local index.html, loaded via a file:// URL.
  val indexPath = "file://" + new File("index.html").getAbsolutePath
  implicit lazy val webDriver = new ChromeDriver(service)

  override def afterAll {
    service.stop()
  }

  before {
    go to (indexPath) //read index.html at the start of every test to get to a consistent state
  }

  "The initial page" should "have the correct title" in {
    pageTitle should be("Scale Solar System") //trivial test to confirm that index.html was read
  }

  "Modifying earth diameter" should "modify neptunes diameter" in {
    val neptuneBefore = textField("Neptune-diameter").value
    textField("Earth-diameter").value = "100"
    textField("Neptune-diameter").value should not be neptuneBefore //just want to confirm the value updates
  }

  "Switching to Imperial" should "change mm to inches" in {
    radioButtonGroup("measurementsystem").value = "Imperial"
    singleSel("Neptune-diameter-uom").value should be ("Inches")
  }

  "Changing Units" should "convert the value" in {
    singleSel("Earth-diameter-uom").value = "cm"
    textField("Earth-diameter").value = "100"
    singleSel("Earth-diameter-uom").value = "M"
    textField("Earth-diameter").value should be ("1.00")
  }

  "Entering an invalid number" should "leave other values unchanged, and keep the junk" in {
    val neptuneBefore = textField("Neptune-diameter").value
    textField("Earth-diameter").value = "textnotanumber"
    textField("Earth-diameter").value should be ("textnotanumber")
    textField("Neptune-diameter").value should be (neptuneBefore)
    // The invalid field is highlighted in red.
    val Red = "rgba(255, 0, 0, 1)"
    textField("Earth-diameter").underlying.getCssValue("background-color") should be (Red)
  }
} | thomasrynne/SolarSystemScaleModel | integration/src/test/scala/scaledsolarsystem/integrationtest/IntegrationTest.scala | Scala | mit | 2,393 |
package masterleague4s
package instances
import cats.implicits._
import cats.laws.discipline.FunctorTests
// Discipline-based law checking: verifies that Either[Int, ?] (right-biased)
// satisfies the functor laws supplied by cats-laws.
class EitherExampleSpec extends DisciplineSpec {
  def is = s2"""
  Either[Int, ?] forms a functor $e1
  """
  // The structural type lambda ({ type l[a] = Either[Int, a] })#l fixes the
  // left type parameter to Int (pre-kind-projector Scala 2 syntax).
  def e1 = checkAll("Either[Int, Int]", FunctorTests[({ type l[a] = Either[Int, a] })#l].functor[Int, Int, Int])
}
| martijnhoekstra/masterleague4s | src/test/scala/discipline/EitherExampleSpec.scala | Scala | gpl-3.0 | 359 |
package org.apache.mesos.chronos.scheduler.config
import java.net.InetSocketAddress
import org.rogach.scallop.ScallopConf
/**
 * Command-line configuration options for the Chronos scheduler, declared via
 * Scallop.
 *
 * NOTE(review): the original comment said these values "may be parsed from a
 * YAML file", but ScallopConf parses command-line arguments — confirm whether
 * a YAML bridge exists elsewhere.
 *
 * @author Florian Leibert (flo@leibert.de)
 */
trait SchedulerConfiguration extends ScallopConf {
  // --- Mesos master / identity -------------------------------------------
  lazy val master = opt[String]("master",
    descr = "The URL of the Mesos master",
    default = Some("local"),
    required = true,
    noshort = true)
  lazy val user = opt[String]("user",
    descr = "The chronos user to run the processes under",
    default = Some("root"))
  //TODO(FL): Be consistent and do everything in millis
  lazy val failoverTimeoutSeconds = opt[Int]("failover_timeout",
    descr = "The failover timeout in seconds for Mesos",
    default = Some(604800))
  lazy val scheduleHorizonSeconds = opt[Int]("schedule_horizon",
    descr = "The look-ahead time for scheduling tasks in seconds",
    default = Some(60))
  lazy val clusterName = opt[String]("cluster_name",
    descr = "The name of the cluster where Chronos is run",
    default = None)
  // --- ZooKeeper ----------------------------------------------------------
  lazy val zookeeperServersString = opt[String]("zk_hosts",
    descr = "The list of ZooKeeper servers for storing state",
    default = Some("localhost:2181"))
  lazy val hostname = opt[String]("hostname",
    descr = "The advertised hostname of this Chronos instance for network communication. This is used by other" +
      "Chronos instances and the Mesos master to communicate with this instance",
    default = Some(java.net.InetAddress.getLocalHost.getHostName))
  lazy val leaderMaxIdleTimeMs = opt[Int]("leader_max_idle_time",
    descr = "The look-ahead time for scheduling tasks in milliseconds",
    default = Some(5000))
  lazy val zooKeeperTimeout = opt[Long]("zk_timeout",
    descr = "The timeout for ZooKeeper in milliseconds",
    default = Some(10000L))
  lazy val zooKeeperPath = opt[String]("zk_path",
    descr = "Path in ZooKeeper for storing state",
    default = Some("/chronos/state"))
  // --- Mail notifications -------------------------------------------------
  lazy val mailServer = opt[String]("mail_server",
    descr = "Address of the mailserver in server:port format",
    default = None)
  lazy val mailUser = opt[String]("mail_user",
    descr = "Mail user (for auth)",
    default = None)
  lazy val mailPassword = opt[String]("mail_password",
    descr = "Mail password (for auth)",
    default = None)
  lazy val mailFrom = opt[String]("mail_from",
    descr = "Mail from field",
    default = None)
  lazy val mailSslOn = opt[Boolean]("mail_ssl",
    descr = "Mail SSL",
    default = Some(false))
  lazy val mailTemplatePath = opt[String]("mail_template_path",
    descr = "Path to a Mustache template file (optional)",
    default = None)
  lazy val mailAsHtml = opt[Boolean]("mail_as_html",
    descr = "Send e-mail notifications as HTML",
    default = Some(false)
  )
  // --- Other notification channels ---------------------------------------
  lazy val ravenDsn = opt[String]("raven_dsn",
    descr = "Raven DSN for connecting to a raven or sentry event service",
    default = None)
  lazy val slackWebhookUrl = opt[String]("slack_url",
    descr = "Webhook URL for posting to Slack",
    default = None)
  lazy val httpNotificationUrl = opt[String]("http_notification_url",
    descr = "Http URL for notifying failures",
    default = None)
  lazy val httpNotificationCredentials = opt[String]("http_notification_credentials",
    descr = "Http notification URL credentials in format username:password",
    default = None)
  // --- Failure handling ---------------------------------------------------
  lazy val failureRetryDelayMs = opt[Long]("failure_retry",
    descr = "Number of ms between retries",
    default = Some(60000))
  lazy val disableAfterFailures = opt[Long]("disable_after_failures",
    descr = "Disables a job after this many failures have occurred",
    default = Some(0))
  // --- Per-task Mesos resources ------------------------------------------
  lazy val mesosTaskMem = opt[Double]("mesos_task_mem",
    descr = "Amount of memory to request from Mesos for each task (MB)",
    default = Some(128.0))
  lazy val mesosTaskCpu = opt[Double]("mesos_task_cpu",
    descr = "Number of CPUs to request from Mesos for each task",
    default = Some(0.1))
  lazy val mesosTaskDisk = opt[Double]("mesos_task_disk",
    descr = "Amount of disk capacity to request from Mesos for each task (MB)",
    default = Some(256.0))
  lazy val mesosCheckpoint = opt[Boolean]("mesos_checkpoint",
    descr = "Enable checkpointing in Mesos",
    default = Some(true))
  lazy val mesosRole = opt[String]("mesos_role",
    descr = "The Mesos role to run tasks under",
    default = Some("*"))
  lazy val taskEpsilon = opt[Int]("task_epsilon",
    descr = "The default epsilon value for tasks, in seconds",
    default = Some(60))
  // Chronos version
  lazy val version =
    Option(classOf[SchedulerConfiguration].getPackage.getImplementationVersion).getOrElse("unknown")
  lazy val mesosFrameworkName = opt[String]("mesos_framework_name",
    descr = "The framework name",
    default = Some("chronos-" + version))
  lazy val webuiUrl = opt[String]("webui_url",
    descr = "The http(s) url of the web ui, defaulting to the advertised hostname",
    noshort = true,
    default = None)
  lazy val reconciliationInterval = opt[Int]("reconciliation_interval",
    descr = "Reconciliation interval in seconds",
    default = Some(600))
  lazy val mesosAuthenticationPrincipal = opt[String]("mesos_authentication_principal",
    descr = "Mesos Authentication Principal",
    noshort = true)
  lazy val mesosAuthenticationSecretFile = opt[String]("mesos_authentication_secret_file",
    descr = "Mesos Authentication Secret",
    noshort = true)
  // --- Offer handling -----------------------------------------------------
  lazy val reviveOffersForNewJobs = opt[Boolean]("revive_offers_for_new_jobs",
    descr = "Whether to call reviveOffers for new or changed jobs. (Default: do not use reviveOffers) ",
    default = Some(false))
  lazy val declineOfferDuration = opt[Long]("decline_offer_duration",
    descr = "(Default: Use mesos default of 5 seconds) " +
      "The duration (milliseconds) for which to decline offers by default",
    default = None)
  lazy val minReviveOffersInterval = opt[Long]("min_revive_offers_interval",
    descr = "Do not ask for all offers (also already seen ones) more often than this interval (ms). (Default: 5000)",
    default = Some(5000))
  /**
   * Parses the configured ZooKeeper server list ("host:port,host:port,...")
   * into socket addresses. Fails with IllegalArgumentException when an entry
   * is not in host:port form.
   */
  def zooKeeperHostAddresses: Seq[InetSocketAddress] =
    for (s <- zookeeperServers().split(",")) yield {
      val splits = s.split(":")
      require(splits.length == 2, "expected host:port for zk servers")
      new InetSocketAddress(splits(0), splits(1).toInt)
    }
  /**
   * Returns the raw ZooKeeper server list, stripping an optional "zk://"
   * scheme prefix and any trailing chroot path.
   */
  def zookeeperServers(): String = {
    if (zookeeperServersString().startsWith("zk://")) {
      return zookeeperServersString().replace("zk://", "").replaceAll("/.*", "")
    }
    zookeeperServersString()
  }
  // ZNode paths (relative to zk_path) used for persisted state and for
  // leader-election candidacy.
  def zooKeeperStatePath = "%s/state".format(zooKeeperPath())
  def zooKeeperCandidatePath = "%s/candidate".format(zooKeeperPath())
}
| Jimdo/chronos | src/main/scala/org/apache/mesos/chronos/scheduler/config/SchedulerConfiguration.scala | Scala | apache-2.0 | 6,739 |
package io.github.ptitjes.scott.utils
/**
 * A saturating step counter: advances one step at a time and never exceeds
 * `maxDepth`. Not thread-safe.
 *
 * @author Didier Villevalois
 * @param maxDepth the inclusive ceiling for the counter
 */
class DepthCounter(val maxDepth: Int) {
  // Invariant: starts at 0 and stops growing once it reaches maxDepth.
  private var depth = 0

  /** Sets the counter back to zero. */
  def reset(): Unit = depth = 0

  /** Advances the counter by one step, unless the ceiling is already reached. */
  def next(): Unit = depth = if (depth < maxDepth) depth + 1 else depth

  /** The number of steps taken so far (capped at `maxDepth`). */
  def current: Int = depth
}
| ptitjes/scott | scott-core/src/main/scala/io/github/ptitjes/scott/utils/DepthCounter.scala | Scala | gpl-3.0 | 331 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package scheduler.fsb
import config.WaitingScheduledJobConfig
import javax.inject.{ Inject, Singleton }
import play.api.{ Configuration, Logging }
import play.modules.reactivemongo.ReactiveMongoComponent
import scheduler.BasicJobConfig
import scheduler.clustering.SingleInstanceScheduledJob
import services.application.FsbService
import uk.gov.hmrc.http.HeaderCarrier
import scala.concurrent.{ ExecutionContext, Future }
// Guice-instantiated wiring for EvaluateFsbJob: injects the concrete
// FsbService, the ReactiveMongo component and the job's configuration.
@Singleton
class EvaluateFsbJobImpl @Inject() (val fsbService: FsbService,
                                    val mongoComponent: ReactiveMongoComponent,
                                    val config: EvaluateFsbJobConfig
                                   ) extends EvaluateFsbJob {
}
/**
 * Scheduled job that evaluates at most one FSB candidate per run: it polls
 * for the next candidate ready for evaluation, evaluates them when one is
 * found, and otherwise completes immediately.
 */
trait EvaluateFsbJob extends SingleInstanceScheduledJob[BasicJobConfig[WaitingScheduledJobConfig]] with Logging {
  val fsbService: FsbService

  def tryExecute()(implicit ec: ExecutionContext): Future[Unit] = {
    implicit val hc: HeaderCarrier = HeaderCarrier()
    logger.debug(s"EvaluateFsbJob starting")
    fsbService.nextFsbCandidateReadyForEvaluation.flatMap {
      case Some(appId) =>
        logger.debug(s"EvaluateFsbJob found a candidate - now evaluating...")
        fsbService.evaluateFsbCandidate(appId)
      case None =>
        logger.debug(s"EvaluateFsbJob no candidates found - going back to sleep...")
        Future.successful(())
    }
  }
}
// Binds this job's schedule settings from the "scheduling.evaluate-fsb-job"
// section of the application configuration.
@Singleton
class EvaluateFsbJobConfig @Inject() (config: Configuration) extends BasicJobConfig[WaitingScheduledJobConfig](
  config = config,
  configPrefix = "scheduling.evaluate-fsb-job",
  name = "EvaluateFsbJob"
)
| hmrc/fset-faststream | app/scheduler/fsb/EvaluateFsbJob.scala | Scala | apache-2.0 | 2,297 |
package com.github.mdr.mash.ns.git
import java.nio.file.Path
import com.github.mdr.mash.evaluator.EvaluatorException
import com.github.mdr.mash.os.linux.LinuxFileSystem
import org.eclipse.jgit.api.Git
import org.eclipse.jgit.lib.Repository
import org.eclipse.jgit.storage.file.FileRepositoryBuilder
object GitHelper {
  private val filesystem = LinuxFileSystem
  /** Runs `p` with a [[Git]] porcelain API bound to the repository containing the current working directory. */
  def withGit[T](p: Git ⇒ T): T = withRepository(repo ⇒ p(new Git(repo)))
  /**
   * Opens the repository containing the current working directory, runs `p`
   * against it, and always closes the repository afterwards.
   */
  def withRepository[T](p: Repository ⇒ T): T = {
    val repo = getRepository
    try
      p(repo)
    finally
      repo.close()
  }
  /** Returns true if `path` lies inside a git repository (searching parent directories). */
  def isRepository(path: Path): Boolean = {
    val builder = new FileRepositoryBuilder
    builder.readEnvironment()
    builder.findGitDir(path.toFile)
    builder.setMustExist(true)
    builder.getGitDir != null
  }
  // Locates and builds the repository for the current working directory, or
  // fails with an EvaluatorException when none is found.
  private def getRepository = {
    val builder = new FileRepositoryBuilder
    builder.readEnvironment()
    builder.findGitDir(filesystem.pwd.toFile)
    builder.setMustExist(true)
    if (builder.getGitDir == null)
      throw EvaluatorException("Not a git repository (or any of the parent directories)")
    else
      builder.build()
  }
} | mdr/mash | src/main/scala/com/github/mdr/mash/ns/git/GitHelper.scala | Scala | mit | 1,149 |
package uk.co.morleydev.zander.client.model.arg
/**
 * A validated project identifier.
 *
 * A valid project name is 1-20 characters long, is not the literal ".",
 * and contains only ASCII alphanumerics or the characters '-', '_' and '.'.
 *
 * @param value the raw project name
 * @throws IllegalArgumentException if the name is invalid
 */
class Project(val value : String) {
  require(value.size <= 20
    && value.size >= 1
    && value != "."
    && value.forall(c => ('A' to 'Z').contains(c)
    || ('a' to 'z').contains(c)
    || ('0' to '9').contains(c)
    || c == '-'
    || c == '_'
    || c == '.'), "Should be alpha numeric or contain -_. but was " + value)

  /** Two projects are equal exactly when their underlying values are equal. */
  override def equals(other : Any) : Boolean = {
    other match {
      case project: Project => value.equals(project.value)
      case _ => false
    }
  }

  // Fix: equals was overridden without hashCode, which broke the
  // equals/hashCode contract and made Project unusable as a key in
  // hash-based collections. Delegate to the underlying value.
  override def hashCode : Int = value.hashCode

  override def toString = value
}
| MorleyDev/zander.client | src/main/scala/uk/co/morleydev/zander/client/model/arg/Project.scala | Scala | mit | 591 |
package mesosphere.marathon.core.task.tracker.impl
import akka.actor.SupervisorStrategy.Escalate
import akka.actor._
import akka.event.LoggingReceive
import com.twitter.util.NonFatal
import mesosphere.marathon.core.appinfo.TaskCounts
import mesosphere.marathon.core.task.bus.TaskChangeObservables.TaskChanged
import mesosphere.marathon.core.task.{ TaskStateChange, TaskStateOp, Task }
import mesosphere.marathon.core.task.tracker.{ TaskTrackerUpdateStepProcessor, TaskTracker }
import mesosphere.marathon.core.task.tracker.impl.TaskTrackerActor.ForwardTaskOp
import mesosphere.marathon.metrics.Metrics
import mesosphere.marathon.metrics.Metrics.AtomicIntGauge
import mesosphere.marathon.state.{ PathId, Timestamp }
import org.slf4j.LoggerFactory
object TaskTrackerActor {
  /** Props for creating the actor; `taskUpdaterProps` builds the child updater from this actor's ref. */
  def props(
    metrics: ActorMetrics,
    taskLoader: TaskLoader,
    updateStepProcessor: TaskTrackerUpdateStepProcessor,
    taskUpdaterProps: ActorRef => Props): Props = {
    Props(new TaskTrackerActor(metrics, taskLoader, updateStepProcessor, taskUpdaterProps))
  }
  /** Query the current [[TaskTracker.AppTasks]] from the [[TaskTrackerActor]]. */
  private[impl] case object List
  /** Forward an update operation to the child [[TaskUpdateActor]]. */
  private[impl] case class ForwardTaskOp(deadline: Timestamp, taskId: Task.Id, taskStateOp: TaskStateOp)
  /** Describes where and what to send after an update event has been processed by the [[TaskTrackerActor]]. */
  private[impl] case class Ack(initiator: ActorRef, stateChange: TaskStateChange) {
    // Failures are wrapped in akka Status.Failure so the initiator's ask/future fails.
    def sendAck(): Unit = {
      val msg = stateChange match {
        case TaskStateChange.Failure(cause) => Status.Failure(cause)
        case _ => stateChange
      }
      initiator ! msg
    }
  }
  /** Inform the [[TaskTrackerActor]] of a task state change (after persistence). */
  private[impl] case class StateChanged(taskChanged: TaskChanged, ack: Ack)
  /** Gauges for staged/running task counts, kept in sync by the actor on every state change. */
  private[tracker] class ActorMetrics(metrics: Metrics) {
    val stagedCount = metrics.gauge("service.mesosphere.marathon.task.staged.count", new AtomicIntGauge)
    val runningCount = metrics.gauge("service.mesosphere.marathon.task.running.count", new AtomicIntGauge)
    def resetMetrics(): Unit = {
      stagedCount.setValue(0)
      runningCount.setValue(0)
    }
  }
}
/**
 * Holds the current in-memory version of all task state. It gets informed of task state changes
 * after they have been persisted.
 *
 * It also spawns the [[TaskUpdateActor]] as a child and forwards update operations to it.
 *
 * Lifecycle: starts in `initializing` (stashing messages) while tasks are
 * loaded from persistence, then switches to `withTasks` which carries the
 * current state in the actor behavior itself.
 */
private class TaskTrackerActor(
    metrics: TaskTrackerActor.ActorMetrics,
    taskLoader: TaskLoader,
    updateStepProcessor: TaskTrackerUpdateStepProcessor,
    taskUpdaterProps: ActorRef => Props) extends Actor with Stash {
  private[this] val log = LoggerFactory.getLogger(getClass)
  private[this] val updaterRef = context.actorOf(taskUpdaterProps(self), "updater")
  // Any failure in the child updater is escalated: in-memory task state may be
  // inconsistent with persistence, so the supervisor above restarts us.
  override val supervisorStrategy = OneForOneStrategy() { case _: Exception => Escalate }
  override def preStart(): Unit = {
    super.preStart()
    log.info(s"${getClass.getSimpleName} is starting. Task loading initiated.")
    metrics.resetMetrics()
    import akka.pattern.pipe
    import context.dispatcher
    // Load persisted tasks asynchronously; the result (or failure) arrives as
    // a message handled in `initializing`.
    taskLoader.loadTasks().pipeTo(self)
  }
  override def postStop(): Unit = {
    metrics.resetMetrics()
    super.postStop()
  }
  override def receive: Receive = initializing
  // Until loading completes, every other message is stashed and replayed once
  // we switch to `withTasks`.
  private[this] def initializing: Receive = LoggingReceive.withLabel("initializing") {
    case appTasks: TaskTracker.TasksByApp =>
      log.info("Task loading complete.")
      unstashAll()
      context.become(withTasks(appTasks, TaskCounts(appTasks.allTasks, healthStatuses = Map.empty)))
    case Status.Failure(cause) =>
      // escalate this failure
      throw new IllegalStateException("while loading tasks", cause)
    case stashMe: AnyRef =>
      stash()
  }
  private[this] def withTasks(appTasks: TaskTracker.TasksByApp, counts: TaskCounts): Receive = {
    // Replaces the behavior with one holding the updated task map and the
    // incrementally adjusted counts for a single task change.
    def becomeWithUpdatedApp(appId: PathId)(taskId: Task.Id, newTask: Option[Task]): Unit = {
      val updatedAppTasks = newTask match {
        case None => appTasks.updateApp(appId)(_.withoutTask(taskId))
        case Some(task) => appTasks.updateApp(appId)(_.withTask(task))
      }
      val updatedCounts = {
        val oldTask = appTasks.task(taskId)
        // we do ignore health counts
        val oldTaskCount = TaskCounts(oldTask, healthStatuses = Map.empty)
        val newTaskCount = TaskCounts(newTask, healthStatuses = Map.empty)
        counts + newTaskCount - oldTaskCount
      }
      context.become(withTasks(updatedAppTasks, updatedCounts))
    }
    // this is run on any state change
    metrics.stagedCount.setValue(counts.tasksStaged)
    metrics.runningCount.setValue(counts.tasksRunning)
    LoggingReceive.withLabel("withTasks") {
      case TaskTrackerActor.List =>
        sender() ! appTasks
      case ForwardTaskOp(deadline, taskId, taskStateOp) =>
        // Delegate the actual (persisting) update work to the child updater.
        val op = TaskOpProcessor.Operation(deadline, sender(), taskId, taskStateOp)
        updaterRef.forward(TaskUpdateActor.ProcessTaskOp(op))
      case msg @ TaskTrackerActor.StateChanged(change, ack) =>
        // First fold the (already persisted) change into in-memory state ...
        change.stateChange match {
          case TaskStateChange.Update(task, _) =>
            becomeWithUpdatedApp(task.runSpecId)(task.taskId, newTask = Some(task))
          case TaskStateChange.Expunge(task) =>
            becomeWithUpdatedApp(task.runSpecId)(task.taskId, newTask = None)
          case _: TaskStateChange.NoChange |
            _: TaskStateChange.Failure =>
          // ignore, no state change
        }
        // ... then run the post-update steps asynchronously and finally ack
        // both the original initiator (via `ack`) and the sender of this message.
        val originalSender = sender()
        import context.dispatcher
        updateStepProcessor.process(change).recover {
          case NonFatal(cause) =>
            // since we currently only use ContinueOnErrorSteps, we can simply ignore failures here
            //
            log.warn("updateStepProcessor.process failed: {}", cause)
        }.foreach { _ =>
          ack.sendAck()
          originalSender ! (())
        }
    }
  }
}
| yp-engineering/marathon | src/main/scala/mesosphere/marathon/core/task/tracker/impl/TaskTrackerActor.scala | Scala | apache-2.0 | 6,049 |
package com.verizon.bda.trapezium.dal.util.zookeeper
import org.apache.curator.RetryPolicy
import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.curator.utils.CloseableUtils
/**
 * Thin process-wide wrapper around a single Curator client.
 *
 * NOTE(review): `curatorFramework` is a mutable global initialised by
 * `apply(...)`; every other method will NPE if called before `apply`.
 * Not safe for concurrent re-initialisation.
 */
object ZooKeeperClient {
  var curatorFramework: CuratorFramework = _
  // NOTE(review): `retryCount` is accepted but never used — the retry policy
  // below is hard-coded to ExponentialBackoffRetry(1000, 10). Confirm intent.
  def apply(quorum: String, retryCount: Int = 3, connectionTimeoutInMillis: Int = 2 * 1000,
            sessionTimeoutInMillis: Int = 6000): Unit = {
    val retryPolicy = new ExponentialBackoffRetry(1000, 10)
    curatorFramework = init(quorum, connectionTimeoutInMillis, sessionTimeoutInMillis, retryPolicy)
    // curatorFramework.getZookeeperClient.getZooKeeper
  }
  // Creates the znode with no data; unlike setData this does NOT create
  // missing parents and fails if the node already exists.
  def create(znode: String): Unit = {
    curatorFramework.create().forPath(znode)
  }
  // Upsert: overwrites the data when the node exists, otherwise creates it
  // (including any missing parents).
  def setData(znode: String, data: Array[Byte]): Unit = {
    if (hasZnodePath(znode)) {
      curatorFramework.setData().forPath(znode, data)
    } else {
      curatorFramework.create.creatingParentsIfNeeded.forPath(znode, data)
    }
    // curatorFramework.setData().forPath(znode, data)
  }
  // NOTE(review): decodes with the platform default charset (the commented-out
  // version used explicit UTF-8) — may misbehave on non-UTF-8 platforms.
  def getData(znode: String): String = {
    require(znode != null && znode.nonEmpty, "Key cannot be null or empty")
    // val dat = curatorFramework.getData().forPath(znode)
    // new String(dat, "UTF-8")
    new String(curatorFramework.getData().forPath(znode))
  }
  /** Removes the znode and, recursively, all of its children. */
  def delete(znode: String): Unit = {
    curatorFramework.delete().deletingChildrenIfNeeded().forPath(znode)
  }
  /** True when the znode exists. */
  def hasZnodePath(znode: String): Boolean = {
    curatorFramework.checkExists().forPath(znode) != null
  }
  // NOTE(review): closing the framework already closes its underlying client;
  // the second close() on the (now closed) framework's client looks redundant
  // and may throw — confirm against Curator's lifecycle semantics.
  def close(): Unit = {
    CloseableUtils.closeQuietly(curatorFramework)
    curatorFramework.getZookeeperClient.close()
  }
  import org.apache.curator.framework.CuratorFramework
  import org.apache.zookeeper.CreateMode
  @throws[Exception]
  def createEphemeral(client: CuratorFramework, path: String, payload: Array[Byte]): Unit = {
    // this will create the given EPHEMERAL ZNode with the given data
    client.create.creatingParentsIfNeeded.withMode(CreateMode.EPHEMERAL).forPath(path, payload)
  }
  // Builds and starts a new Curator client; the caller owns closing it.
  def init(zkQuorum: String, connectionTimeoutInMillis: Int,
           sessionTimeoutInMillis: Int, retryPolicy: RetryPolicy):
  CuratorFramework = {
    val client = CuratorFrameworkFactory.builder()
      .connectString(zkQuorum)
      .retryPolicy(retryPolicy)
      .connectionTimeoutMs(connectionTimeoutInMillis)
      .sessionTimeoutMs(sessionTimeoutInMillis)
      .build()
    client.start()
    client
  }
} | Verizon/trapezium | dal/src/main/scala/com/verizon/bda/trapezium/dal/util/zookeeper/ZooKeeperClient.scala | Scala | apache-2.0 | 2,533 |
//
// Reversi - A reversi implementation for the Game Gardens platform
// http://github.com/threerings/game-gardens/blob/master/projects/games/sreversi/LICENSE
package com.samskivert.reversi
import java.awt.{BorderLayout, Color, Font, Polygon}
import java.awt.geom.Ellipse2D
import javax.swing.{BorderFactory, Icon, JButton, JLabel, JPanel}
import com.samskivert.swing.{Controller, GroupLayout, ShapeIcon, MultiLineLabel}
import com.threerings.crowd.client.PlacePanel
import com.threerings.parlor.turn.client.TurnDisplay
import com.threerings.util.MessageBundle
import com.threerings.toybox.client.{ToyBoxUI, ChatPanel}
import com.threerings.toybox.util.ToyBoxContext
/**
 * Contains the primary client interface for the game.
 */
class ReversiPanel (ctx :ToyBoxContext, ctrl :ReversiController) extends PlacePanel(ctrl)
{
  /** The board view. */
  var bview = new ReversiBoardView(ctx, ctrl)
  /** Called by the controller when the game has started. */
  def gameDidStart (revobj :ReversiObject) {
    // Small filled circles in each player's piece color mark the players in
    // the turn display.
    val lips = new Ellipse2D.Float(0, 0, 12, 12)
    _turnDisplay.setPlayerIcons(Array(
      new ShapeIcon(lips, colorForColor(revobj.getColor(0)), null),
      new ShapeIcon(lips, colorForColor(revobj.getColor(1)), null)))
  }
  /** Maps a game-logic piece color to the AWT color used to render it (red flags an unexpected value). */
  protected def colorForColor (color :Reversi.Color) = color match {
    case Reversi.Black => Color.black
    case Reversi.White => Color.white
    case _ => Color.red
  }
  /** Shows whose turn it is and, at game end, the winner/draw message. */
  protected val _turnDisplay = new TurnDisplay
  /* ctor */ {
    // this is used to look up localized strings
    val msgs = ctx.getMessageManager().getBundle("reversi")
    // give ourselves a wee bit of a border
    setBorder(BorderFactory.createEmptyBorder(5, 5, 5, 5))
    setLayout(new BorderLayout)
    // give ourself a soothing blue background
    setBackground(new Color(0x6699CC))
    // create a container that will hold our board view in its center
    val box = GroupLayout.makeHBox
    // create and add our board view
    box.add(bview)
    box.setOpaque(false)
    add(box, BorderLayout.CENTER)
    // create a side panel to hold our chat and other extra interfaces
    val sidePanel = GroupLayout.makeVStretchBox(5)
    sidePanel.setOpaque(false)
    // add a big fat label
    val vlabel = new MultiLineLabel(msgs.get("m.title"))
    vlabel.setFont(ToyBoxUI.fancyFont)
    sidePanel.add(vlabel, GroupLayout.FIXED)
    // add a standard turn display
    _turnDisplay.setOpaque(false)
    val triangle = new Polygon(Array(0, 12, 0), Array(0, 6, 12), 3)
    _turnDisplay.setTurnIcon(new ShapeIcon(triangle, Color.yellow, null))
    _turnDisplay.setWinnerText(ctx.xlate("reversi", "m.winner"))
    _turnDisplay.setDrawText(ctx.xlate("reversi", "m.draw"))
    sidePanel.add(_turnDisplay, GroupLayout.FIXED)
    // add a chat box
    sidePanel.add(new ChatPanel(ctx))
    // add a "back to lobby" button
    val back = Controller.createActionButton(msgs.get("m.back_to_lobby"), "backToLobby")
    sidePanel.add(back, GroupLayout.FIXED)
    // add our side panel to the main display
    add(sidePanel, BorderLayout.EAST)
  }
}
| house13/CardBox | projects/games/sreversi/src/main/scala/com/samskivert/reversi/ReversiPanel.scala | Scala | lgpl-2.1 | 3,060 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset down to a sample of code snippets that match specific criteria, giving a quick, surface-level overview of the dataset's contents.