code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gearpump.streaming.kafka.lib.source
import java.time.Instant
import java.util.Properties
import com.twitter.bijection.Injection
import kafka.common.TopicAndPartition
import org.apache.gearpump.streaming.kafka.KafkaSource
import org.apache.gearpump.streaming.kafka.lib.KafkaMessageDecoder
import org.apache.gearpump.streaming.kafka.lib.source.consumer.FetchThread.FetchThreadFactory
import org.apache.gearpump.streaming.kafka.lib.util.KafkaClient
import KafkaClient.KafkaClientFactory
import org.apache.gearpump.streaming.kafka.lib.source.consumer.{FetchThread, KafkaMessage}
import org.apache.gearpump.streaming.kafka.lib.source.grouper.PartitionGrouper
import org.apache.gearpump.streaming.kafka.lib.util.KafkaClient
import org.apache.gearpump.streaming.kafka.util.KafkaConfig
import org.apache.gearpump.streaming.kafka.util.KafkaConfig.KafkaConfigFactory
import org.apache.gearpump.streaming.task.TaskContext
import org.apache.gearpump.streaming.transaction.api._
import org.apache.gearpump.util.LogUtil
import org.apache.gearpump.Message
import org.apache.gearpump.Time.MilliSeconds
import org.slf4j.Logger
object AbstractKafkaSource {
  // Logger is keyed to the public KafkaSource class rather than this abstract
  // base — presumably intentional so log lines match the type users
  // instantiate; TODO confirm.
  private val LOG: Logger = LogUtil.getLogger(classOf[KafkaSource])
}
/**
* Contains implementation for Kafka source connectors, users should use
* [[org.apache.gearpump.streaming.kafka.KafkaSource]].
*
* This is a TimeReplayableSource which is able to replay messages from kafka given a start time.
*/
abstract class AbstractKafkaSource(
    topic: String,
    props: Properties,
    kafkaConfigFactory: KafkaConfigFactory,
    kafkaClientFactory: KafkaClientFactory,
    fetchThreadFactory: FetchThreadFactory)
  extends TimeReplayableSource {
  import org.apache.gearpump.streaming.kafka.lib.source.AbstractKafkaSource._

  /** Convenience constructor wiring in the default factory implementations. */
  def this(topic: String, properties: Properties) = {
    this(topic, properties, new KafkaConfigFactory, KafkaClient.factory, FetchThread.factory)
  }

  // Built lazily so construction stays cheap; the heavy Kafka objects are
  // created on first use (typically inside open()).
  private lazy val config: KafkaConfig = kafkaConfigFactory.getKafkaConfig(props)
  private lazy val kafkaClient: KafkaClient = kafkaClientFactory.getKafkaClient(config)
  private lazy val fetchThread: FetchThread = fetchThreadFactory.getFetchThread(config, kafkaClient)
  // Decoder class is user-configurable via MESSAGE_DECODER_CLASS_CONFIG.
  private lazy val messageDecoder = config.getConfiguredInstance(
    KafkaConfig.MESSAGE_DECODER_CLASS_CONFIG, classOf[KafkaMessageDecoder])

  // Source watermark; reset to the start time in open() and advanced by the
  // decoder's watermark on every message read.
  private var watermark: Instant = Instant.EPOCH
  // Optional factory injected via setCheckpointStore(); when absent, no
  // offsets are checkpointed or recovered.
  private var checkpointStoreFactory: Option[CheckpointStoreFactory] = None
  // One checkpoint store per assigned topic-partition; populated in open().
  private var checkpointStores: Map[TopicAndPartition, CheckpointStore] =
    Map.empty[TopicAndPartition, CheckpointStore]

  /** Registers the factory used to create per-partition checkpoint stores. */
  override def setCheckpointStore(checkpointStoreFactory: CheckpointStoreFactory): Unit = {
    this.checkpointStoreFactory = Some(checkpointStoreFactory)
  }

  /**
   * Determines this task's partition assignment (topic may be a
   * comma-separated list), sets up checkpoint stores for the assigned
   * partitions, then recovers offsets and starts the fetch thread.
   */
  override def open(context: TaskContext, startTime: Instant): Unit = {
    import context.{parallelism, taskId}
    LOG.info("KafkaSource opened at start time {}", startTime)
    this.watermark = startTime
    // -1 keeps trailing empty strings; each element is treated as a topic name.
    val topicList = topic.split(",", -1).toList
    val grouper = config.getConfiguredInstance(KafkaConfig.PARTITION_GROUPER_CLASS_CONFIG,
      classOf[PartitionGrouper])
    // The grouper picks the subset of partitions owned by this task index.
    val topicAndPartitions = grouper.group(parallelism, taskId.index,
      kafkaClient.getTopicAndPartitions(topicList))
    LOG.info("assigned partitions {}", s"Array(${topicAndPartitions.mkString(",")})")
    fetchThread.setTopicAndPartitions(topicAndPartitions)
    maybeSetupCheckpointStores(topicAndPartitions)
    // Also starts the (daemon) fetch thread — see maybeRecover.
    maybeRecover(startTime.toEpochMilli)
  }

  /**
   * Reads a record from incoming queue, decodes, filters and checkpoints offsets
   * before returns a Message. Message can be null if the incoming queue is empty.
   *
   * @return a [[org.apache.gearpump.Message]] or null
   */
  override def read(): Message = {
    fetchThread.poll.map(decodeMessageAndCheckpointOffset).orNull
  }

  /**
   * Closes the Kafka client and all checkpoint stores. The fetch thread is not
   * explicitly stopped; it is a daemon thread so it will not keep the JVM alive.
   */
  override def close(): Unit = {
    kafkaClient.close()
    checkpointStores.foreach(_._2.close())
    LOG.info("KafkaSource closed")
  }

  override def getWatermark: Instant = watermark

  /**
   * 1. Decodes raw bytes into Message with timestamp
   * 2. Filters message against start time
   * 3. Checkpoints (timestamp, kafka_offset)
   *
   * NOTE(review): no explicit filtering is visible in this method despite
   * step 2 above — presumably handled by the configured decoder; confirm.
   */
  private def decodeMessageAndCheckpointOffset(kafkaMsg: KafkaMessage): Message = {
    val msgAndWmk = messageDecoder.fromBytes(kafkaMsg.key.orNull, kafkaMsg.msg)
    LOG.debug("read message and watermark {}", msgAndWmk)
    val msg = msgAndWmk.message
    // Advance the source watermark to the decoder-provided one.
    this.watermark = msgAndWmk.watermark
    val time = msg.timestamp
    val offset = kafkaMsg.offset
    checkpointOffsets(kafkaMsg.topicAndPartition, time, offset)
    LOG.debug("checkpoint message state ({}, {})", time, offset)
    msg
  }

  // Persists (timestamp -> offset) for the partition, if a store exists for it.
  private def checkpointOffsets(tp: TopicAndPartition, time: Instant, offset: Long): Unit = {
    checkpointStores.get(tp).foreach(_.persist(time.toEpochMilli,
      Injection[Long, Array[Byte]](offset)))
  }

  // Creates one checkpoint store per assigned partition, but only when a
  // store factory has been registered via setCheckpointStore().
  private def maybeSetupCheckpointStores(tps: Array[TopicAndPartition]): Unit = {
    for {
      f <- checkpointStoreFactory
      tp <- tps
    } {
      val store = f.getCheckpointStore(KafkaConfig.getCheckpointStoreNameSuffix(tp))
      LOG.info("created checkpoint store for {}", tp)
      checkpointStores += tp -> store
    }
  }

  // Recovers per-partition start offsets for the given time (when checkpoints
  // exist) and then unconditionally starts the fetch thread.
  private def maybeRecover(startTime: MilliSeconds): Unit = {
    checkpointStores.foreach { case (tp, store) =>
      for {
        bytes <- store.recover(startTime)
        offset <- Injection.invert[Long, Array[Byte]](bytes).toOption
      } {
        LOG.info("recovered offset {} for {}", offset, tp)
        fetchThread.setStartOffset(tp, offset)
      }
    }
    // let JVM exit when other threads are closed
    fetchThread.setDaemon(true)
    fetchThread.start()
  }

  // Test/subclass hook for registering a pre-built checkpoint store.
  protected def addCheckpointStore(tp: TopicAndPartition, store: CheckpointStore): Unit = {
    checkpointStores += tp -> store
  }
}
| manuzhang/incubator-gearpump | external/kafka/src/main/scala/org/apache/gearpump/streaming/kafka/lib/source/AbstractKafkaSource.scala | Scala | apache-2.0 | 6,668 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.sysml.api.ml.serving
import java.io.File
import akka.http.scaladsl.server.StandardRoute
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.Http
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import org.apache.commons.cli.PosixParser
import com.typesafe.config.ConfigFactory
import scala.concurrent.duration._
import java.util.HashMap
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import spray.json._
import java.util.concurrent.atomic.LongAdder
import scala.concurrent.{Await, Future}
import scala.math.{max, pow}
import org.apache.sysml.runtime.matrix.data.{MatrixBlock, OutputInfo}
import org.apache.sysml.parser.DataExpression
import org.apache.sysml.runtime.io.IOUtilFunctions
import org.apache.sysml.api.jmlc.Connection
import org.apache.sysml.api.jmlc.PreparedScript
import org.apache.sysml.conf.ConfigurationManager
import org.apache.sysml.runtime.instructions.gpu.context.GPUContextPool
import org.apache.sysml.runtime.matrix.MatrixCharacteristics
import org.apache.sysml.runtime.util.DataConverter
import org.apache.commons.logging.Log
import org.apache.commons.logging.LogFactory
import scala.concurrent.ExecutionContext
// format: can be file, binary, csv, ijv, jpeg, ...
// Per-request timing/size metrics returned to clients; -1 means "not measured".
// Fields are vars because they are filled in incrementally along the pipeline.
case class RequestStatistics(var batchSize: Int = -1,
                             var execTime: Long = -1,
                             var execType: String = "",
                             var requestDeserializationTime: Long = -1,
                             var responseSerializationTime: Long = -1,
                             var modelAcquireTime: Long = -1,
                             var modelReleaseTime: Long = -1,
                             var batchingTime: Long = -1,
                             var unbatchingTime: Long = -1,
                             var queueWaitTime: Long = -1,
                             var queueSize: Int = -1,
                             var execMode: Int = 0,
                             var preprocWaitTime: Long = -1)
// JSON-facing request: dense row-major matrix data plus its dimensions.
case class PredictionRequestExternal(name: String, data: Array[Double], rows: Int, cols: Int)
// JSON-facing response: dense result data, dimensions and optional statistics.
case class PredictionResponseExternal(response: Array[Double], rows: Int, cols: Int, statistics: RequestStatistics)
// Payload for /register-model: DML script, weight location, latency objective,
// and (batchSize, memUse) samples used to fit a linear memory model.
case class AddModelRequest(name: String, dml: String, inputVarName: String,
                           outputVarName: String, weightsDir: String,
                           latencyObjective: String, batchSize: Array[Int], memUse: Array[Long])
// Registered model: prepared scripts keyed by executor ("CPU", "GPU0", ...),
// weight-file map, and coeffs = (intercept, slope) of the fitted memory model.
case class Model(name: String,
                 script: Map[String,PreparedScript],
                 inputVarName: String,
                 outputVarName: String,
                 latencyObjective: Duration,
                 weightFiles: Map[String, String],
                 coeffs: (Double, Double),
                 weightMem: Long)
// Internal request carrying the parsed MatrixBlock and arrival timestamp (ns).
case class PredictionRequest(data : MatrixBlock, modelName : String, requestSize : Int, receivedTime : Long)
// Internal response produced by the scheduler.
case class PredictionResponse(response: MatrixBlock, batchSize: Int, statistics: RequestStatistics)
// Matrix plus summary stats — presumably for caching/identification; only
// declared here, confirm usage elsewhere.
case class MatrixBlockContainer(numRows: Long, numCols: Long, nnz: Long, sum: Double, data: MatrixBlock)
/** Spray JSON (de)serializers for the external prediction request/response payloads. */
trait PredictionJsonProtocol extends SprayJsonSupport with DefaultJsonProtocol {
  implicit val RequestStatisticsFormat = jsonFormat13(RequestStatistics)
  implicit val predictionRequestExternalFormat = jsonFormat4(PredictionRequestExternal)
  implicit val predictionResponseExternalFormat = jsonFormat4(PredictionResponseExternal)
}
/** Spray JSON (de)serializer for /register-model requests. */
trait AddModelJsonProtocol extends SprayJsonSupport with DefaultJsonProtocol {
  // NOTE(review): name has a typo ("Requet"); left as-is because renaming a
  // public implicit member could break external resolution or call sites.
  implicit val AddModelRequetFormat = jsonFormat8(AddModelRequest)
}
// Empty marker class: exists so a commons-logging Log can be obtained from
// classOf[PredictionService] in the companion object.
class PredictionService {
}
/*
Usage:
 1. Compiling a fat jar with the maven assembly plugin in our standalone jar created a lot of issues.
 Hence, for the time being, we recommend downloading the jars using the script below:
SCALA_VERSION="2.11"
AKKA_HTTP_VERSION="10.1.3"
AKKA_VERSION="2.5.14"
PREFIX="http://central.maven.org/maven2/com/typesafe/akka/"
JARS=""
for PKG in actor stream protobuf
do
PKG_NAME="akka-"$PKG"_"$SCALA_VERSION
JAR_FILE=$PKG_NAME"-"$AKKA_VERSION".jar"
wget $PREFIX$PKG_NAME"/"$AKKA_VERSION"/"$JAR_FILE
JARS=$JARS$JAR_FILE":"
done
for PKG in http http-core parsing
do
PKG_NAME="akka-"$PKG"_"$SCALA_VERSION
JAR_FILE=$PKG_NAME"-"$AKKA_HTTP_VERSION".jar"
wget $PREFIX$PKG_NAME"/"$AKKA_HTTP_VERSION"/"$JAR_FILE
JARS=$JARS$JAR_FILE":"
done
wget http://central.maven.org/maven2/com/typesafe/config/1.3.3/config-1.3.3.jar
wget http://central.maven.org/maven2/com/typesafe/ssl-config-core_2.11/0.2.4/ssl-config-core_2.11-0.2.4.jar
wget http://central.maven.org/maven2/org/reactivestreams/reactive-streams/1.0.2/reactive-streams-1.0.2.jar
wget http://central.maven.org/maven2/org/scala-lang/scala-library/2.11.12/scala-library-2.11.12.jar
wget http://central.maven.org/maven2/org/scala-lang/scala-parser-combinators/2.11.0-M4/scala-parser-combinators-2.11.0-M4.jar
wget http://central.maven.org/maven2/commons-cli/commons-cli/1.4/commons-cli-1.4.jar
wget http://central.maven.org/maven2/com/typesafe/akka/akka-http-spray-json-experimental_2.11/2.4.11.2/akka-http-spray-json-experimental_2.11-2.4.11.2.jar
wget http://central.maven.org/maven2/io/spray/spray-json_2.11/1.3.2/spray-json_2.11-1.3.2.jar
JARS=$JARS"config-1.3.3.jar:ssl-config-core_2.11-0.2.4.jar:reactive-streams-1.0.2.jar:commons-cli-1.4.jar:scala-parser-combinators-2.11.0-M4.jar:scala-library-2.11.12.jar:akka-http-spray-json-experimental_2.11-2.4.11.2.jar:spray-json_2.11-1.3.2.jar"
echo "Include the following jars into the classpath: "$JARS
2. Copy SystemML.jar and systemml-1.2.0-SNAPSHOT-extra.jar into the directory where akka jars are placed
3. Start the server:
java -cp $JARS org.apache.sysml.api.ml.serving.PredictionService -port 9000 -admin_password admin
4. Check the health of the server:
curl -u admin -XGET localhost:9000/health
5. Perform prediction
curl -XPOST -H "Content-Type:application/json" -d '{ "inputs":"1,2,3", "format":"csv", "model":"test", "num_input":1 }' localhost:9000/predict
6. Shutdown the server:
curl -u admin -XGET localhost:9000/shutdown
*/
object PredictionService extends PredictionJsonProtocol with AddModelJsonProtocol {
  val __DEBUG__ = false
  val LOG = LogFactory.getLog(classOf[PredictionService].getName)
  // Disable akka-http idle timeouts: prediction requests may block for a long
  // time while waiting on the scheduler.
  val customConf = ConfigFactory.parseString("""
akka.http.server.idle-timeout=infinite
akka.http.client.idle-timeout=infinite
akka.http.host-connection-pool.idle-timeout=infinite
akka.http.host-connection-pool.client.idle-timeout=infinite
akka.http.server.max-connections=100000
""")
  val basicConf = ConfigFactory.load()
  val combined = customConf.withFallback(basicConf)
  implicit val system = ActorSystem("systemml-prediction-service", ConfigFactory.load(combined))
  implicit val materializer = ActorMaterializer()
  implicit val executionContext = ExecutionContext.global
  implicit val timeout = akka.util.Timeout(300.seconds)
  // user -> password map; the admin entry is populated from the command line in main()
  val userPassword = new HashMap[String, String]()
  var bindingFuture: Future[Http.ServerBinding] = null
  var scheduler: Scheduler = null
  // Single JMLC connection used for script compilation and matrix IO.
  val conn = new Connection()
  // NOTE(review): not referenced anywhere in this file — confirm it is used
  // elsewhere before removing (name also misspells "existent").
  var existantMatrixBlocks = Array[MatrixBlockContainer]()
def getCommandLineOptions(): org.apache.commons.cli.Options = {
val hostOption = new org.apache.commons.cli.Option("ip", true, "IP address")
val portOption = new org.apache.commons.cli.Option("port", true, "Port number")
val numRequestOption = new org.apache.commons.cli.Option("max_requests", true, "Maximum number of requests")
val timeoutOption = new org.apache.commons.cli.Option("timeout", true, "Timeout in milliseconds")
val passwdOption = new org.apache.commons.cli.Option("admin_password", true, "Admin password. Default: admin")
val helpOption = new org.apache.commons.cli.Option("help", false, "Show usage message")
val maxSizeOption = new org.apache.commons.cli.Option("max_bytes", true, "Maximum size of request in bytes")
val statisticsOption = new org.apache.commons.cli.Option("statistics", true, "Gather statistics on request execution")
val numCpuOption = new org.apache.commons.cli.Option("num_cpus", true, "How many CPUs should be allocated to the prediction service. Default nproc-1")
val gpusOption = new org.apache.commons.cli.Option("gpus", true, "GPUs available to this process. Default: 0")
val schedulerOption = new org.apache.commons.cli.Option("scheduler", true, "Scheduler implementation to use. Default: locality-aware")
// Only port is required option
portOption.setRequired(true)
return new org.apache.commons.cli.Options()
.addOption(hostOption).addOption(portOption).addOption(numRequestOption)
.addOption(passwdOption).addOption(timeoutOption).addOption(helpOption)
.addOption(maxSizeOption).addOption(statisticsOption).addOption(numCpuOption)
.addOption(gpusOption).addOption(schedulerOption)
}
def main(args: Array[String]): Unit = {
// Parse commandline variables:
val options = getCommandLineOptions
val line = new PosixParser().parse(getCommandLineOptions, args)
if (line.hasOption("help")) {
new org.apache.commons.cli.HelpFormatter().printHelp("systemml-prediction-service", options)
return
}
userPassword.put("admin", line.getOptionValue("admin_password", "admin"))
val currNumRequests = new LongAdder
val maxNumRequests = if (line.hasOption("max_requests"))
line.getOptionValue("max_requests").toLong else Long.MaxValue
val timeout = if (line.hasOption("timeout"))
Duration(line.getOptionValue("timeout").toLong, MILLISECONDS) else 300.seconds
val sizeDirective = if (line.hasOption("max_bytes"))
withSizeLimit(line.getOptionValue("max_bytes").toLong) else withoutSizeLimit
val numCores = if (line.hasOption("num_cpus"))
line.getOptionValue("num_cpus").toInt else Runtime.getRuntime.availableProcessors() - 1
val gpus = if (line.hasOption("gpus")) line.getOptionValue("gpus") else null
val schedulerType = line.getOptionValue("scheduler", "locality-aware")
// Initialize statistics counters
val numTimeouts = new LongAdder
val numFailures = new LongAdder
val totalTime = new LongAdder
val numCompletedPredictions = new LongAdder
// For now the models need to be loaded every time. TODO: pass the local to serialized models via commandline
var models = Map[String, Model]()
// TODO: Set the scheduler using factory
scheduler = SchedulerFactory.getScheduler(schedulerType)
val maxMemory = Runtime.getRuntime.maxMemory() // total memory is just what the JVM has currently allocated
LOG.info("Total memory allocated to server: " + maxMemory)
scheduler.start(numCores, maxMemory, gpus)
// Define unsecured routes: /predict and /health
val unsecuredRoutes = {
path("predict") {
withoutRequestTimeout {
post {
validate(currNumRequests.longValue() < maxNumRequests, "The prediction server received too many requests. Ignoring the current request.") {
entity(as[PredictionRequestExternal]) { request =>
validate(models.contains(request.name), "The model is not available.") {
try {
currNumRequests.increment()
val start = System.nanoTime()
val processedRequest = processPredictionRequest(request)
val deserializationTime = System.nanoTime() - start
val response = Await.result(
scheduler.enqueue(processedRequest, models(request.name)), timeout)
totalTime.add(System.nanoTime() - start)
numCompletedPredictions.increment()
complete(StatusCodes.OK, processPredictionResponse(response, "NOT IMPLEMENTED", deserializationTime))
} catch {
case e: scala.concurrent.TimeoutException => {
numTimeouts.increment()
complete(StatusCodes.RequestTimeout, "Timeout occured")
}
case e: Exception => {
numFailures.increment()
e.printStackTrace()
val msg = "Exception occured while executing the prediction request:"
complete(StatusCodes.InternalServerError, msg + e.getMessage)
}
} finally {
currNumRequests.decrement()
}
}
}
}
}
}
} ~ path("health") {
get {
val stats = "Number of requests (total/completed/timeout/failures):" + currNumRequests.longValue() + "/" + numCompletedPredictions.longValue() + "/"
numTimeouts.longValue() + "/" + numFailures.longValue() + ".\\n" +
"Average prediction time:" + ((totalTime.doubleValue() * 1e-6) / numCompletedPredictions.longValue()) + " ms.\\n"
complete(StatusCodes.OK, stats)
}
}
}
// For administration: This can be later extended for supporting multiple users.
val securedRoutes = {
authenticateBasicAsync(realm = "secure site", userAuthenticate) {
user =>
path("shutdown") {
get {
shutdownService(user, scheduler)
}
} ~
path("register-model") {
withoutRequestTimeout {
post {
entity(as[AddModelRequest]) { request =>
validate(!models.contains(request.name), "The model is already loaded") {
try {
val weightsInfo = processWeights(request.weightsDir)
val inputs = weightsInfo._1.keys.toArray ++ Array[String](request.inputVarName)
// compile for executor types
val scriptCpu = conn.prepareScript(
request.dml, inputs, Array[String](request.outputVarName))
var scripts = Map("CPU" -> scriptCpu)
if (gpus != null) {
GPUContextPool.AVAILABLE_GPUS = gpus
for (ix <- 0 until GPUContextPool.getAvailableCount) {
LOG.info("Compiling script for GPU: " + ix)
scripts += (s"GPU${ix}" -> conn.prepareScript(
request.dml, inputs, Array[String](request.outputVarName),
true, true, ix))
}
}
// b = cov(x,y) / var(x)
// a = mean(y) - b*mean(x)
val n = max(request.batchSize.length, 1).toDouble
val x = request.batchSize
val y = request.memUse
val mux = x.sum / n
val muy = y.sum / n
val vx = (1 / n) * x.map(v => pow(v - mux, 2.0)).sum
val b = ((1 / n) * (x.map(v => v - mux) zip y.map(v => v - muy)
).map(v => v._1 * v._2).sum) * (1 / vx)
val a = muy - b * mux
// now register the created model
val model = Model(request.name,
scripts,
request.inputVarName,
request.outputVarName,
Duration(request.latencyObjective),
weightsInfo._1, (a, b), weightsInfo._2)
models += (request.name -> model)
scheduler.addModel(model)
complete(StatusCodes.OK)
} catch {
case e: Exception => {
numFailures.increment()
e.printStackTrace()
complete(StatusCodes.InternalServerError,
"Exception occured while trying to add model:" + e.getMessage)
}
}
}
}
}
}
}
}
}
bindingFuture = Http().bindAndHandle(
sizeDirective { // Both secured and unsecured routes need to respect the size restriction
unsecuredRoutes ~ securedRoutes
},
line.getOptionValue("ip", "localhost"), line.getOptionValue("port").toInt)
println(s"Prediction Server online.")
while (true) Thread.sleep(100)
bindingFuture
.flatMap(_.unbind())
.onComplete(_ β system.terminate())
}
def processPredictionResponse(response : PredictionResponse,
format : String,
deserializationTime: Long) : PredictionResponseExternal = {
if (response != null) {
val start = System.nanoTime()
val dataArray = response.response.getDenseBlockValues
val rows = response.response.getNumRows
val cols = response.response.getNumColumns
val serializationTime = System.nanoTime() - start
if (response.statistics != null) {
response.statistics.requestDeserializationTime = deserializationTime
response.statistics.responseSerializationTime = serializationTime
}
PredictionResponseExternal(dataArray, rows, cols, response.statistics)
} else {
PredictionResponseExternal(null, -1, -1, null)
}
}
  /**
   * Scans a weight directory and registers every weight matrix with the
   * scheduler's model manager.
   *
   * Skips the generated "binary" subdirectory, metadata ("mtd"-suffixed)
   * files and already-converted "_bin.mtx" files.
   *
   * @return (weight name -> registered path, total in-memory size of all weights)
   */
  def processWeights(dirname: String) : (Map[String, String], Long) = {
    val dir = new File(dirname)
    if (!(dir.exists && dir.isDirectory))
      throw new Exception("Weight directory: " + dirname + " is invalid")
    // Each surviving file is registered via registerWeight, which returns
    // (actual path, in-memory size).
    val weightsWithSize = dir.listFiles().filter(
      x => !(x.isDirectory && (x.toString contains "binary"))).map(_.toString).filter(
      x => (x.slice(x.length-3, x.length) != "mtd") &&
      !(x contains "_bin.mtx")).
      map(x => getNameFromPath(x) -> registerWeight(x, dirname)).toMap
    val weightMap = weightsWithSize.map(x => x._1 -> x._2._1)
    val totalSize = weightsWithSize.map(x => x._2._2).sum
    (weightMap, totalSize)
  }
  // Derives a weight name from a file path: takes the basename then the part
  // before the first split match.
  // NOTE(review): the split pattern "\\\\." (regex: backslash + any char)
  // looks like it was meant to be "\\." (a literal dot) so that extensions
  // are stripped — verify against the original source's escaping.
  def getNameFromPath(path: String) : String = {
    path.split("/").last.split("\\\\.")(0)
  }
  /**
   * Converts a weight file to binary format if needed and hands the in-memory
   * MatrixBlock to the scheduler's model manager, keyed by its (possibly
   * converted) path.
   *
   * @return (registered path, in-memory size of the weight in bytes)
   */
  def registerWeight(path: String, dir: String) : (String, Long) = {
    val res = convertToBinaryIfNecessary(path, dir)
    scheduler.modelManager.putWeight(res._2, res._1)
    (res._2, res._1.getInMemorySize)
  }
  /**
   * Reads a weight matrix and, when it is not already in binary format,
   * writes a binary copy under <dir>/binary/ and returns that path instead.
   *
   * @return (loaded MatrixBlock, path of the binary representation)
   */
  def convertToBinaryIfNecessary(path: String, dir: String) : (MatrixBlock, String) = {
    var pathActual = path
    LOG.info("Reading weight: " + path)
    val data = conn.readMatrix(path)
    if (!isBinaryFormat(path)) {
      LOG.info("Converting weight to binary format")
      // NOTE(review): result is discarded — presumably called for a side
      // effect (e.g. recomputing nnz before the write below); confirm.
      data.getMatrixCharacteristics
      val binPath = dir + "/binary/" + getNameFromPath(path) + ".mtx"
      DataConverter.writeMatrixToHDFS(data, binPath,
        OutputInfo.BinaryBlockOutputInfo,
        new MatrixCharacteristics(data.getNumRows, data.getNumColumns, ConfigurationManager.getBlocksize,
          ConfigurationManager.getBlocksize, data.getNonZeros))
      pathActual = binPath
    }
    (data, pathActual)
  }
def isBinaryFormat(path: String) : Boolean = {
val mtdName = DataExpression.getMTDFileName(path)
val mtd = new DataExpression().readMetadataFile(mtdName, false)
if (mtd.containsKey("format")) mtd.getString("format") == "binary" else false
}
def processPredictionRequest(request : PredictionRequestExternal) : PredictionRequest = {
val mat = new MatrixBlock(request.rows, request.cols, false)
mat.init(request.data, request.rows, request.cols)
PredictionRequest(mat, request.name, request.rows, System.nanoTime())
}
def processMatrixInput(data : String, rows : Int, cols : Int, format : String) : MatrixBlock = {
val result = format match {
case "csv" => processTextInput(data, rows, cols, DataExpression.FORMAT_TYPE_VALUE_CSV)
case _ => throw new Exception("Only CSV Input currently supported")
}
result
}
def processTextInput(data : String, rows : Int, cols : Int, format : String) : MatrixBlock = {
val is = IOUtilFunctions.toInputStream(data)
conn.convertToMatrix(is, rows, cols, format)
}
def userAuthenticate(credentials: akka.http.scaladsl.server.directives.Credentials): Future[Option[String]] = {
credentials match {
case p@akka.http.scaladsl.server.directives.Credentials.Provided(id) =>
Future {
if (userPassword.containsKey(id) && p.verify(userPassword.get(id))) Some(id)
else None
}
case _ => Future.successful(None)
}
}
def shutdownService(user: String, scheduler: Scheduler): StandardRoute = {
if (user.equals("admin")) {
try {
Http().shutdownAllConnectionPools() andThen { case _ => bindingFuture.flatMap(_.unbind()).onComplete(_ β system.terminate()) }
scheduler.shutdown()
complete(StatusCodes.OK, "Shutting down the server.")
} finally {
new Thread(new Runnable {
def run() {
Thread.sleep(100) // wait for 100ms to send reply and then kill the prediction JVM so that we don't wait scala.io.StdIn.readLine()
System.exit(0)
}
}).start();
}
}
else {
complete(StatusCodes.BadRequest, "Only admin can shutdown the service.")
}
}
} | niketanpansare/systemml | src/main/scala/org/apache/sysml/api/ml/serving/PredictionService.scala | Scala | apache-2.0 | 25,096 |
package org.scalajs.testsuite.javalib.time
import java.time._
import java.time.temporal._
import org.junit.Test
import org.junit.Assert._
import org.scalajs.testsuite.utils.AssertThrows._
class LocalTimeTest extends TemporalTest[LocalTime] {
import DateTimeTestUtil._
import LocalTime._
import ChronoUnit._
import ChronoField._
  // Representative sample instants spanning the full LocalTime range.
  val samples = Seq(MAX, NOON, MIN)

  // LocalTime supports exactly the time-based units and fields.
  def isSupported(unit: ChronoUnit): Boolean = unit.isTimeBased
  def isSupported(field: ChronoField): Boolean = field.isTimeBased
  // Checks getLong for every time-based field at both extremes of the range:
  // MIN (00:00) and MAX (23:59:59.999999999).
  @Test def test_getLong(): Unit = {
    // MIN: all fields are zero except the "clock hour" fields, which wrap to 12/24.
    assertEquals(0L, MIN.getLong(NANO_OF_SECOND))
    assertEquals(0L, MIN.getLong(NANO_OF_DAY))
    assertEquals(0L, MIN.getLong(MICRO_OF_SECOND))
    assertEquals(0L, MIN.getLong(MICRO_OF_DAY))
    assertEquals(0L, MIN.getLong(MILLI_OF_SECOND))
    assertEquals(0L, MIN.getLong(MILLI_OF_DAY))
    assertEquals(0L, MIN.getLong(SECOND_OF_MINUTE))
    assertEquals(0L, MIN.getLong(SECOND_OF_DAY))
    assertEquals(0L, MIN.getLong(MINUTE_OF_HOUR))
    assertEquals(0L, MIN.getLong(MINUTE_OF_DAY))
    assertEquals(0L, MIN.getLong(HOUR_OF_AMPM))
    assertEquals(12L, MIN.getLong(CLOCK_HOUR_OF_AMPM))
    assertEquals(0L, MIN.getLong(HOUR_OF_DAY))
    assertEquals(24L, MIN.getLong(CLOCK_HOUR_OF_DAY))
    assertEquals(0L, MIN.getLong(AMPM_OF_DAY))
    // MAX: every field sits at its largest legal value.
    assertEquals(999999999L, MAX.getLong(NANO_OF_SECOND))
    assertEquals(86399999999999L, MAX.getLong(NANO_OF_DAY))
    assertEquals(999999L, MAX.getLong(MICRO_OF_SECOND))
    assertEquals(86399999999L, MAX.getLong(MICRO_OF_DAY))
    assertEquals(999L, MAX.getLong(MILLI_OF_SECOND))
    assertEquals(86399999L, MAX.getLong(MILLI_OF_DAY))
    assertEquals(59L, MAX.getLong(SECOND_OF_MINUTE))
    assertEquals(86399L, MAX.getLong(SECOND_OF_DAY))
    assertEquals(59L, MAX.getLong(MINUTE_OF_HOUR))
    assertEquals(1439L, MAX.getLong(MINUTE_OF_DAY))
    assertEquals(11L, MAX.getLong(HOUR_OF_AMPM))
    assertEquals(11L, MAX.getLong(CLOCK_HOUR_OF_AMPM))
    assertEquals(23L, MAX.getLong(HOUR_OF_DAY))
    assertEquals(23L, MAX.getLong(CLOCK_HOUR_OF_DAY))
    assertEquals(1L, MAX.getLong(AMPM_OF_DAY))
  }
@Test def test_getHour(): Unit = {
assertEquals(0, MIN.getHour)
assertEquals(12, NOON.getHour)
assertEquals(23, MAX.getHour)
}
@Test def test_getMinute(): Unit = {
assertEquals(0, MIN.getMinute)
assertEquals(30, of(0, 30).getMinute)
assertEquals(59, MAX.getMinute)
}
@Test def test_getSecond(): Unit = {
assertEquals(0, MIN.getSecond)
assertEquals(30, of(0, 0, 30).getSecond)
assertEquals(59, MAX.getSecond)
}
@Test def test_getNano(): Unit = {
assertEquals(0, MIN.getNano)
assertEquals(999999999, MAX.getNano)
}
  // Exercises `with(field, value)` for every time-based field: valid values
  // must match the equivalent withXxx/ofXxx construction, and out-of-range
  // values must throw DateTimeException.
  @Test def test_with(): Unit = {
    for (t <- samples) {
      // Valid values per field.
      for (n <- Seq(0, 999, 999999, 999999999))
        testDateTime(t.`with`(NANO_OF_SECOND, n))(t.withNano(n))
      for (n <- Seq(0L, 1000000000L, 86399999999999L))
        testDateTime(t.`with`(NANO_OF_DAY, n))(ofNanoOfDay(n))
      for (n <- Seq(0, 999, 999999))
        testDateTime(t.`with`(MICRO_OF_SECOND, n))(t.withNano(n * 1000))
      for (n <- Seq(0L, 1000000L, 86399999999L))
        testDateTime(t.`with`(MICRO_OF_DAY, n))(ofNanoOfDay(n * 1000))
      for (n <- Seq(0, 500, 999))
        testDateTime(t.`with`(MILLI_OF_SECOND, n))(t.withNano(n * 1000000))
      for (n <- Seq(0L, 1000L, 86399999L))
        testDateTime(t.`with`(MILLI_OF_DAY, n))(ofNanoOfDay(n * 1000000))
      for (n <- Seq(0, 30, 59))
        testDateTime(t.`with`(SECOND_OF_MINUTE, n))(t.withSecond(n))
      for (n <- Seq(0, 60, 86399))
        testDateTime(t.`with`(SECOND_OF_DAY, n))(ofSecondOfDay(n).withNano(t.getNano))
      for (n <- Seq(0, 30, 59))
        testDateTime(t.`with`(MINUTE_OF_HOUR, n))(t.withMinute(n))
      for (n <- Seq(0, 60, 1439)) {
        testDateTime(t.`with`(MINUTE_OF_DAY, n)) {
          ofSecondOfDay(n * 60).withSecond(t.getSecond).withNano(t.getNano)
        }
      }
      // AM/PM-relative hour fields keep the current half of the day.
      for (n <- Seq(0, 6, 11)) {
        val h = (t.getHour / 12) * 12 + n
        testDateTime(t.`with`(HOUR_OF_AMPM, n))(t.withHour(h))
      }
      for (n <- Seq(1, 6, 12)) {
        val h = (t.getHour / 12) * 12 + (n % 12)
        testDateTime(t.`with`(CLOCK_HOUR_OF_AMPM, n))(t.withHour(h))
      }
      for (n <- Seq(0, 12, 23))
        testDateTime(t.`with`(HOUR_OF_DAY, n))(t.withHour(n))
      for (n <- Seq(1, 12, 24))
        testDateTime(t.`with`(CLOCK_HOUR_OF_DAY, n))(t.withHour(n % 24))
      for (n <- Seq(0, 1)) {
        val h = t.getHour % 12 + n * 12
        testDateTime(t.`with`(AMPM_OF_DAY, n))(t.withHour(h))
      }
      // Out-of-range values per field must be rejected.
      for (n <- Seq(Long.MinValue, -1L, 1000000000L, Long.MaxValue))
        expectThrows(classOf[DateTimeException], t.`with`(NANO_OF_SECOND, n))
      for (n <- Seq(Long.MinValue, -1L, 86400000000000L, Long.MaxValue))
        expectThrows(classOf[DateTimeException], t.`with`(NANO_OF_DAY, n))
      for (n <- Seq(Long.MinValue, -1L, 1000000L, Long.MaxValue))
        expectThrows(classOf[DateTimeException], t.`with`(MICRO_OF_SECOND, n))
      for (n <- Seq(Long.MinValue, -1L, 86400000000L, Long.MaxValue))
        expectThrows(classOf[DateTimeException], t.`with`(MICRO_OF_DAY, n))
      for (n <- Seq(Long.MinValue, -1L, 1000L, Long.MaxValue))
        expectThrows(classOf[DateTimeException], t.`with`(MILLI_OF_SECOND, n))
      for (n <- Seq(Long.MinValue, -1L, 86400000L, Long.MaxValue))
        expectThrows(classOf[DateTimeException], t.`with`(MILLI_OF_DAY, n))
      for (n <- Seq(Long.MinValue, -1L, 60L, Long.MaxValue))
        expectThrows(classOf[DateTimeException], t.`with`(SECOND_OF_MINUTE, n))
      for (n <- Seq(Long.MinValue, -1L, 86400L, Long.MaxValue))
        expectThrows(classOf[DateTimeException], t.`with`(SECOND_OF_DAY, n))
      for (n <- Seq(Long.MinValue, -1L, 60L, Long.MaxValue))
        expectThrows(classOf[DateTimeException], t.`with`(MINUTE_OF_HOUR, n))
      for (n <- Seq(Long.MinValue, -1L, 1440L, Long.MaxValue))
        expectThrows(classOf[DateTimeException], t.`with`(MINUTE_OF_DAY, n))
      for (n <- Seq(Long.MinValue, -1L, 12L, Long.MaxValue))
        expectThrows(classOf[DateTimeException], t.`with`(HOUR_OF_AMPM, n))
      for (n <- Seq(Long.MinValue, 0L, 13L, Long.MaxValue))
        expectThrows(classOf[DateTimeException], t.`with`(CLOCK_HOUR_OF_AMPM, n))
      for (n <- Seq(Long.MinValue, -1L, 24L, Long.MaxValue))
        expectThrows(classOf[DateTimeException], t.`with`(HOUR_OF_DAY, n))
      for (n <- Seq(Long.MinValue, 0L, 25L, Long.MaxValue))
        expectThrows(classOf[DateTimeException], t.`with`(CLOCK_HOUR_OF_DAY, n))
      for (n <- Seq(Long.MinValue, -1L, 2L, Long.MaxValue))
        expectThrows(classOf[DateTimeException], t.`with`(AMPM_OF_DAY, n))
    }
  }
@Test def test_withHour(): Unit = {
testDateTime(MIN.withHour(0))(MIN)
testDateTime(MIN.withHour(12))(NOON)
testDateTime(MIN.withHour(23))(of(23, 0))
testDateTime(MAX.withHour(0))(of(0, 59, 59, 999999999))
testDateTime(MAX.withHour(23))(MAX)
for (t <- samples) {
expectThrows(classOf[DateTimeException], t.withHour(Int.MinValue))
expectThrows(classOf[DateTimeException], t.withHour(-1))
expectThrows(classOf[DateTimeException], t.withHour(24))
expectThrows(classOf[DateTimeException], t.withHour(Int.MaxValue))
}
}
@Test def test_withMinute(): Unit = {
testDateTime(MIN.withMinute(0))(MIN)
testDateTime(MIN.withMinute(30))(of(0, 30))
testDateTime(MIN.withMinute(59))(of(0, 59))
testDateTime(MAX.withMinute(0))(of(23, 0, 59, 999999999))
testDateTime(MAX.withMinute(59))(MAX)
for (t <- samples) {
expectThrows(classOf[DateTimeException], t.withMinute(Int.MinValue))
expectThrows(classOf[DateTimeException], t.withMinute(-1))
expectThrows(classOf[DateTimeException], t.withMinute(60))
expectThrows(classOf[DateTimeException], t.withMinute(Int.MaxValue))
}
}
@Test def test_withSecond(): Unit = {
testDateTime(MIN.withSecond(0))(MIN)
testDateTime(MIN.withSecond(30))(of(0, 0, 30))
testDateTime(MIN.withSecond(59))(of(0, 0, 59))
testDateTime(MAX.withSecond(0))(of(23, 59, 0, 999999999))
testDateTime(MAX.withSecond(59))(MAX)
for (t <- samples) {
expectThrows(classOf[DateTimeException], t.withSecond(Int.MinValue))
expectThrows(classOf[DateTimeException], t.withSecond(-1))
expectThrows(classOf[DateTimeException], t.withSecond(60))
expectThrows(classOf[DateTimeException], t.withSecond(Int.MaxValue))
}
}
@Test def test_withNano(): Unit = {
testDateTime(MIN.withNano(0))(MIN)
testDateTime(MIN.withNano(500000000))(of(0, 0, 0, 500000000))
testDateTime(MIN.withNano(999999999))(of(0, 0, 0, 999999999))
testDateTime(MAX.withNano(0))(of(23, 59, 59, 0))
testDateTime(MAX.withNano(999999999))(MAX)
for (t <- samples) {
expectThrows(classOf[DateTimeException], t.withNano(Int.MinValue))
expectThrows(classOf[DateTimeException], t.withNano(-1))
expectThrows(classOf[DateTimeException], t.withNano(1000000000))
expectThrows(classOf[DateTimeException], t.withNano(Int.MaxValue))
}
}
@Test def test_truncatedTo(): Unit = {
testDateTime(MIN.truncatedTo(NANOS))(MIN)
testDateTime(MAX.truncatedTo(NANOS))(MAX)
testDateTime(MIN.truncatedTo(MICROS))(MIN)
testDateTime(MAX.truncatedTo(MICROS))(of(23, 59, 59, 999999000))
testDateTime(MIN.truncatedTo(MILLIS))(MIN)
testDateTime(MAX.truncatedTo(MILLIS))(of(23, 59, 59, 999000000))
testDateTime(MIN.truncatedTo(SECONDS))(MIN)
testDateTime(MAX.truncatedTo(SECONDS))(of(23, 59, 59))
testDateTime(MIN.truncatedTo(MINUTES))(MIN)
testDateTime(MAX.truncatedTo(MINUTES))(of(23, 59))
testDateTime(MIN.truncatedTo(HOURS))(MIN)
testDateTime(MAX.truncatedTo(HOURS))(of(23, 0))
testDateTime(MIN.truncatedTo(HALF_DAYS))(MIN)
testDateTime(MAX.truncatedTo(HALF_DAYS))(of(12, 0))
testDateTime(MIN.truncatedTo(DAYS))(MIN)
testDateTime(MAX.truncatedTo(DAYS))(MIN)
val illegalUnits = dateBasedUnits.filter(_ != DAYS)
for {
t <- samples
u <- illegalUnits
} {
expectThrows(classOf[UnsupportedTemporalTypeException], t.truncatedTo(u))
}
}
@Test def test_plus(): Unit = {
val values = Seq(Long.MinValue, -1000000000L, -86400L, -3600L, -60L, -1L, 0L,
1L, 60L, 3600L, 86400L, 1000000000L, Long.MaxValue)
for {
t <- samples
n <- values
} {
testDateTime(t.plus(n, NANOS))(t.plusNanos(n))
testDateTime(t.plus(n, MICROS))(t.plusNanos((n % 86400000000L) * 1000))
testDateTime(t.plus(n, MILLIS))(t.plusNanos((n % 86400000) * 1000000))
testDateTime(t.plus(n, SECONDS))(t.plusSeconds(n))
testDateTime(t.plus(n, MINUTES))(t.plusMinutes(n))
testDateTime(t.plus(n, HOURS))(t.plusHours(n))
testDateTime(t.plus(n, HALF_DAYS))(t.plusHours((n % 2) * 12))
}
}
@Test def test_plusHours(): Unit = {
testDateTime(MIN.plusHours(Long.MinValue))(of(16, 0))
testDateTime(MIN.plusHours(-24))(MIN)
testDateTime(MIN.plusHours(-1))(of(23, 0))
testDateTime(MIN.plusHours(0))(MIN)
testDateTime(MIN.plusHours(1))(of(1, 0))
testDateTime(MIN.plusHours(24))(MIN)
testDateTime(MIN.plusHours(Long.MaxValue))(of(7, 0))
testDateTime(MAX.plusHours(Long.MinValue))(of(15, 59, 59, 999999999))
testDateTime(MAX.plusHours(-24))(MAX)
testDateTime(MAX.plusHours(-1))(of(22, 59, 59, 999999999))
testDateTime(MAX.plusHours(0))(MAX)
testDateTime(MAX.plusHours(1))(of(0, 59, 59, 999999999))
testDateTime(MAX.plusHours(24))(MAX)
testDateTime(MAX.plusHours(Long.MaxValue))(of(6, 59, 59, 999999999))
}
@Test def test_plusMinutes(): Unit = {
testDateTime(MIN.plusMinutes(Long.MinValue))(of(5, 52))
testDateTime(MIN.plusMinutes(-1440))(MIN)
testDateTime(MIN.plusMinutes(-60))(of(23, 0))
testDateTime(MIN.plusMinutes(-1))(of(23, 59))
testDateTime(MIN.plusMinutes(0))(MIN)
testDateTime(MIN.plusMinutes(1))(of(0, 1))
testDateTime(MIN.plusMinutes(60))(of(1, 0))
testDateTime(MIN.plusMinutes(1440))(MIN)
testDateTime(MIN.plusMinutes(Long.MaxValue))(of(18, 7))
testDateTime(MAX.plusMinutes(Long.MinValue))(of(5, 51, 59, 999999999))
testDateTime(MAX.plusMinutes(-1440))(MAX)
testDateTime(MAX.plusMinutes(-60))(of(22, 59, 59, 999999999))
testDateTime(MAX.plusMinutes(-1))(of(23, 58, 59, 999999999))
testDateTime(MAX.plusMinutes(0))(MAX)
testDateTime(MAX.plusMinutes(1))(of(0, 0, 59, 999999999))
testDateTime(MAX.plusMinutes(60))(of(0, 59, 59, 999999999))
testDateTime(MAX.plusMinutes(1440))(MAX)
testDateTime(MAX.plusMinutes(Long.MaxValue))(of(18, 6, 59, 999999999))
}
@Test def test_plusSeconds(): Unit = {
testDateTime(MIN.plusSeconds(Long.MinValue))(of(8, 29, 52))
testDateTime(MIN.plusSeconds(-86400))(MIN)
testDateTime(MIN.plusSeconds(-60))(of(23, 59))
testDateTime(MIN.plusSeconds(-1))(of(23, 59, 59))
testDateTime(MIN.plusSeconds(0))(MIN)
testDateTime(MIN.plusSeconds(1))(of(0, 0, 1))
testDateTime(MIN.plusSeconds(60))(of(0, 1))
testDateTime(MIN.plusSeconds(86400))(MIN)
testDateTime(MIN.plusSeconds(Long.MaxValue))(of(15, 30, 7))
testDateTime(MAX.plusSeconds(Long.MinValue))(of(8, 29, 51, 999999999))
testDateTime(MAX.plusSeconds(-86400))(MAX)
testDateTime(MAX.plusSeconds(-60))(of(23, 58, 59, 999999999))
testDateTime(MAX.plusSeconds(-1))(of(23, 59, 58, 999999999))
testDateTime(MAX.plusSeconds(0))(MAX)
testDateTime(MAX.plusSeconds(1))(of(0, 0, 0, 999999999))
testDateTime(MAX.plusSeconds(60))(of(0, 0, 59, 999999999))
testDateTime(MAX.plusSeconds(86400))(MAX)
testDateTime(MAX.plusSeconds(Long.MaxValue))(of(15, 30, 6, 999999999))
}
@Test def test_plusNanos(): Unit = {
testDateTime(MIN.plusNanos(Long.MinValue))(of(0, 12, 43, 145224192))
testDateTime(MIN.plusNanos(-86400000000000L))(MIN)
testDateTime(MIN.plusNanos(-1000000000))(of(23, 59, 59))
testDateTime(MIN.plusNanos(-1))(MAX)
testDateTime(MIN.plusNanos(0))(MIN)
testDateTime(MIN.plusNanos(1))(of(0, 0, 0, 1))
testDateTime(MIN.plusNanos(1000000000))(of(0, 0, 1))
testDateTime(MIN.plusNanos(86400000000000L))(MIN)
testDateTime(MIN.plusNanos(Long.MaxValue))(of(23, 47, 16, 854775807))
testDateTime(MAX.plusNanos(Long.MinValue))(of(0, 12, 43, 145224191))
testDateTime(MAX.plusNanos(-86400000000000L))(MAX)
testDateTime(MAX.plusNanos(-1000000000))(of(23, 59, 58, 999999999))
testDateTime(MAX.plusNanos(-1))(of(23, 59, 59, 999999998))
testDateTime(MAX.plusNanos(0))(MAX)
testDateTime(MAX.plusNanos(1))(MIN)
testDateTime(MAX.plusNanos(1000000000))(of(0, 0, 0, 999999999))
testDateTime(MAX.plusNanos(86400000000000L))(MAX)
testDateTime(MAX.plusNanos(Long.MaxValue))(of(23, 47, 16, 854775806))
}
@Test def test_minusHours(): Unit = {
testDateTime(MIN.minusHours(Long.MinValue))(of(8, 0))
testDateTime(MIN.minusHours(-24))(MIN)
testDateTime(MIN.minusHours(-1))(of(1, 0))
testDateTime(MIN.minusHours(0))(MIN)
testDateTime(MIN.minusHours(1))(of(23, 0))
testDateTime(MIN.minusHours(24))(MIN)
testDateTime(MIN.minusHours(Long.MaxValue))(of(17, 0))
testDateTime(MAX.minusHours(Long.MinValue))(of(7, 59, 59, 999999999))
testDateTime(MAX.minusHours(-24))(MAX)
testDateTime(MAX.minusHours(-1))(of(0, 59, 59, 999999999))
testDateTime(MAX.minusHours(0))(MAX)
testDateTime(MAX.minusHours(1))(of(22, 59, 59, 999999999))
testDateTime(MAX.minusHours(24))(MAX)
testDateTime(MAX.minusHours(Long.MaxValue))(of(16, 59, 59, 999999999))
}
@Test def test_minusMinutes(): Unit = {
testDateTime(MIN.minusMinutes(Long.MinValue))(of(18, 8))
testDateTime(MIN.minusMinutes(-1440))(MIN)
testDateTime(MIN.minusMinutes(-60))(of(1, 0))
testDateTime(MIN.minusMinutes(-1))(of(0, 1))
testDateTime(MIN.minusMinutes(0))(MIN)
testDateTime(MIN.minusMinutes(1))(of(23, 59))
testDateTime(MIN.minusMinutes(60))(of(23, 0))
testDateTime(MIN.minusMinutes(1440))(MIN)
testDateTime(MIN.minusMinutes(Long.MaxValue))(of(5, 53))
testDateTime(MAX.minusMinutes(Long.MinValue))(of(18, 7, 59, 999999999))
testDateTime(MAX.minusMinutes(-1440))(MAX)
testDateTime(MAX.minusMinutes(-60))(of(0, 59, 59, 999999999))
testDateTime(MAX.minusMinutes(-1))(of(0, 0, 59, 999999999))
testDateTime(MAX.minusMinutes(0))(MAX)
testDateTime(MAX.minusMinutes(1))(of(23, 58, 59, 999999999))
testDateTime(MAX.minusMinutes(60))(of(22, 59, 59, 999999999))
testDateTime(MAX.minusMinutes(1440))(MAX)
testDateTime(MAX.minusMinutes(Long.MaxValue))(of(5, 52, 59, 999999999))
}
@Test def test_minusSeconds(): Unit = {
testDateTime(MIN.minusSeconds(Long.MinValue))(of(15, 30, 8))
testDateTime(MIN.minusSeconds(-86400))(MIN)
testDateTime(MIN.minusSeconds(-60))(of(0, 1))
testDateTime(MIN.minusSeconds(-1))(of(0, 0, 1))
testDateTime(MIN.minusSeconds(0))(MIN)
testDateTime(MIN.minusSeconds(1))(of(23, 59, 59))
testDateTime(MIN.minusSeconds(60))(of(23, 59))
testDateTime(MIN.minusSeconds(86400))(MIN)
testDateTime(MIN.minusSeconds(Long.MaxValue))(of(8, 29, 53))
testDateTime(MAX.minusSeconds(Long.MinValue))(of(15, 30, 7, 999999999))
testDateTime(MAX.minusSeconds(-86400))(MAX)
testDateTime(MAX.minusSeconds(-60))(of(0, 0, 59, 999999999))
testDateTime(MAX.minusSeconds(-1))(of(0, 0, 0, 999999999))
testDateTime(MAX.minusSeconds(0))(MAX)
testDateTime(MAX.minusSeconds(1))(of(23, 59, 58, 999999999))
testDateTime(MAX.minusSeconds(60))(of(23, 58, 59, 999999999))
testDateTime(MAX.minusSeconds(86400))(MAX)
testDateTime(MAX.minusSeconds(Long.MaxValue))(of(8, 29, 52, 999999999))
}
@Test def test_minusNanos(): Unit = {
testDateTime(MIN.minusNanos(Long.MinValue))(of(23, 47, 16, 854775808))
testDateTime(MIN.minusNanos(-86400000000000L))(MIN)
testDateTime(MIN.minusNanos(-1000000000))(of(0, 0, 1))
testDateTime(MIN.minusNanos(-1))(of(0, 0, 0, 1))
testDateTime(MIN.minusNanos(0))(MIN)
testDateTime(MIN.minusNanos(1))(MAX)
testDateTime(MIN.minusNanos(1000000000))(of(23, 59, 59))
testDateTime(MIN.minusNanos(86400000000000L))(MIN)
testDateTime(MIN.minusNanos(Long.MaxValue))(of(0, 12, 43, 145224193))
testDateTime(MAX.minusNanos(Long.MinValue))(of(23, 47, 16, 854775807))
testDateTime(MAX.minusNanos(-86400000000000L))(MAX)
testDateTime(MAX.minusNanos(-1000000000))(of(0, 0, 0, 999999999))
testDateTime(MAX.minusNanos(-1))(MIN)
testDateTime(MAX.minusNanos(0))(MAX)
testDateTime(MAX.minusNanos(1))(of(23, 59, 59, 999999998))
testDateTime(MAX.minusNanos(1000000000))(of(23, 59, 58, 999999999))
testDateTime(MAX.minusNanos(86400000000000L))(MAX)
testDateTime(MAX.minusNanos(Long.MaxValue))(of(0, 12, 43, 145224192))
}
@Test def test_adjustInto(): Unit = {
for {
t1 <- samples
t2 <- samples
} {
testDateTime(t1.adjustInto(t2))(t1)
}
val ds = Seq(LocalDate.MIN, LocalDate.MAX)
for {
t <- samples
d <- ds
} {
expectThrows(classOf[DateTimeException], t.adjustInto(d))
}
}
@Test def test_until(): Unit = {
assertEquals(86399999999999L, MIN.until(MAX, NANOS))
assertEquals(86399999999L, MIN.until(MAX, MICROS))
assertEquals(86399999L, MIN.until(MAX, MILLIS))
assertEquals(86399L, MIN.until(MAX, SECONDS) )
assertEquals(1439L, MIN.until(MAX, MINUTES))
assertEquals(23L, MIN.until(MAX, HOURS))
assertEquals(1L, MIN.until(MAX, HALF_DAYS))
for (u <- timeBasedUnits) {
assertEquals(-MIN.until(MAX, u), MAX.until(MIN, u))
assertEquals(0L, MIN.until(MIN, u))
assertEquals(0L, MAX.until(MAX, u))
}
}
@Test def test_toSecondOfDay(): Unit = {
assertEquals(0, MIN.toSecondOfDay)
assertEquals(86399, MAX.toSecondOfDay)
}
@Test def test_toNanoOfDay(): Unit = {
assertEquals(0L, MIN.toNanoOfDay)
assertEquals(86399999999999L, MAX.toNanoOfDay)
}
@Test def test_compareTo(): Unit = {
assertEquals(0, MIN.compareTo(MIN))
assertTrue(MIN.compareTo(MAX) < 0)
assertTrue(MAX.compareTo(MIN) > 0)
assertEquals(0, MAX.compareTo(MAX))
}
@Test def test_isAfter(): Unit = {
assertFalse(MIN.isAfter(MIN))
assertFalse(MIN.isAfter(MAX))
assertTrue(MAX.isAfter(MIN))
assertFalse(MAX.isAfter(MAX))
}
@Test def test_isBefore(): Unit = {
assertFalse(MIN.isBefore(MIN))
assertTrue(MIN.isBefore(MAX))
assertFalse(MAX.isBefore(MIN))
assertFalse(MAX.isBefore(MAX))
}
@Test def test_toString(): Unit = {
assertEquals("00:00", MIN.toString)
assertEquals("23:59:59.999999999", MAX.toString)
assertEquals("01:01", of(1, 1).toString)
assertEquals("01:01:01", of(1, 1, 1).toString)
assertEquals("01:01:01.000000001", of(1, 1, 1, 1).toString)
assertEquals("01:01:01.100", of(1, 1, 1, 100000000).toString)
assertEquals("01:01:01.100100", of(1, 1, 1, 100100000).toString)
assertEquals("01:01:01.100100100", of(1, 1, 1, 100100100).toString)
}
@Test def test_now(): Unit = {
assertNotNull(now())
}
@Test def test_of(): Unit = {
testDateTime(of(0, 0))(MIN)
testDateTime(of(0, 0, 0))(MIN)
testDateTime(of(0, 0, 0, 0))(MIN)
testDateTime(of(23, 59))(of(23, 59, 0, 0))
testDateTime(of(23, 59, 59))(of(23, 59, 59, 0))
testDateTime(of(23, 59, 59, 999999999))(MAX)
expectThrows(classOf[DateTimeException], of(-1, 0))
expectThrows(classOf[DateTimeException], of(0, -1))
expectThrows(classOf[DateTimeException], of(0, 0, -1))
expectThrows(classOf[DateTimeException], of(0, 0, 0, -1))
expectThrows(classOf[DateTimeException], of(24, 0))
expectThrows(classOf[DateTimeException], of(0, 60))
expectThrows(classOf[DateTimeException], of(0, 0, 60))
expectThrows(classOf[DateTimeException], of(0, 0, 0, 1000000000))
}
@Test def test_ofSecondOfDay(): Unit = {
testDateTime(ofSecondOfDay(0))(MIN)
testDateTime(ofSecondOfDay(1))(of(0, 0, 1))
testDateTime(ofSecondOfDay(60))(of(0, 1))
testDateTime(ofSecondOfDay(86399))(of(23, 59, 59))
expectThrows(classOf[DateTimeException], ofSecondOfDay(-1))
expectThrows(classOf[DateTimeException], ofSecondOfDay(86400))
}
@Test def test_ofNanoOfDay(): Unit = {
testDateTime(ofNanoOfDay(0))(MIN)
testDateTime(ofNanoOfDay(1))(of(0, 0, 0, 1))
testDateTime(ofNanoOfDay(1000000000))(of(0, 0, 1))
testDateTime(ofNanoOfDay(86399999999999L))(MAX)
expectThrows(classOf[DateTimeException], ofNanoOfDay(-1))
expectThrows(classOf[DateTimeException], ofNanoOfDay(86400000000000L))
}
@Test def test_from(): Unit = {
for (t <- samples)
testDateTime(from(t))(t)
expectThrows(classOf[DateTimeException], from(LocalDate.of(2012, 2, 29)))
expectThrows(classOf[DateTimeException], from(Month.JANUARY))
expectThrows(classOf[DateTimeException], from(DayOfWeek.MONDAY))
}
}
| sjrd/scala-js-java-time | testSuite/shared/src/test/scala/org/scalajs/testsuite/javalib/time/LocalTimeTest.scala | Scala | bsd-3-clause | 22,940 |
//val sc:org.apache.spark.SparkContext = null
/**
* To start Sparkling Water please type
cd path/to/sparkling/water
export SPARK_HOME=/Users/zelleta/spark-1.2.1-bin-hadoop1
export MASTER="local-cluster[3,2,4096]"
bin/sparkling-shell --conf spark.executor.memory=3G
*/
// Input data
val DATAFILE="examples/smalldata/smsData.txt"
import hex.deeplearning.{DeepLearningModel, DeepLearning}
import hex.deeplearning.DeepLearningParameters
import org.apache.spark.examples.h2o.DemoUtils._
import org.apache.spark.h2o._
import org.apache.spark.mllib
import org.apache.spark.mllib.feature.{IDFModel, IDF, HashingTF}
import org.apache.spark.rdd.RDD
import water.Key
// One training message
case class SMS(target: String, fv: mllib.linalg.Vector)
// Data loader
def load(dataFile: String): RDD[Array[String]] = {
sc.textFile(dataFile).map(l => l.split("\\t")).filter(r => !r(0).isEmpty)
}
// Tokenizer
def tokenize(data: RDD[String]): RDD[Seq[String]] = {
val ignoredWords = Seq("the", "a", "", "in", "on", "at", "as", "not", "for")
val ignoredChars = Seq(',', ':', ';', '/', '<', '>', '"', '.', '(', ')', '?', '-', '\\'','!','0', '1')
val texts = data.map( r=> {
var smsText = r.toLowerCase
for( c <- ignoredChars) {
smsText = smsText.replace(c, ' ')
}
val words =smsText.split(" ").filter(w => !ignoredWords.contains(w) && w.length>2).distinct
words.toSeq
})
texts
}
def buildIDFModel(tokens: RDD[Seq[String]],
minDocFreq:Int = 4,
hashSpaceSize:Int = 1 << 10): (HashingTF, IDFModel, RDD[mllib.linalg.Vector]) = {
// Hash strings into the given space
val hashingTF = new HashingTF(hashSpaceSize)
val tf = hashingTF.transform(tokens)
// Build term frequency-inverse document frequency
val idfModel = new IDF(minDocFreq = minDocFreq).fit(tf)
val expandedText = idfModel.transform(tf)
(hashingTF, idfModel, expandedText)
}
def buildDLModel(train: Frame, valid: Frame,
epochs: Int = 10, l1: Double = 0.001, l2: Double = 0.0,
hidden: Array[Int] = Array[Int](200, 200))
(implicit h2oContext: H2OContext): DeepLearningModel = {
import h2oContext._
// Build a model
val dlParams = new DeepLearningParameters()
dlParams._model_id = Key.make("dlModel.hex").asInstanceOf[water.Key[Frame]]
dlParams._train = train
dlParams._valid = valid
dlParams._response_column = 'target
dlParams._epochs = epochs
dlParams._l1 = l1
dlParams._hidden = hidden
// Create a job
val dl = new DeepLearning(dlParams)
val dlModel = dl.trainModel.get
// Compute metrics on both datasets
dlModel.score(train).delete()
dlModel.score(valid).delete()
dlModel
}
// Start H2O services
import org.apache.spark.h2o._
implicit val h2oContext = new H2OContext(sc).start()
import h2oContext._
// Initialize SQL context
import org.apache.spark.sql._
implicit val sqlContext = new SQLContext(sc)
import sqlContext.implicits._
// Data load
val data = load(DATAFILE)
// Extract response spam or ham
val hamSpam = data.map( r => r(0))
val message = data.map( r => r(1))
// Tokenize message content
val tokens = tokenize(message)
// Build IDF model
var (hashingTF, idfModel, tfidf) = buildIDFModel(tokens)
// Merge response with extracted vectors
val resultRDD: DataFrame = hamSpam.zip(tfidf).map(v => SMS(v._1, v._2)).toDF
val table:H2OFrame = resultRDD
// Split table
val keys = Array[String]("train.hex", "valid.hex")
val ratios = Array[Double](0.8)
val frs = split(table, keys, ratios)
val (train, valid) = (frs(0), frs(1))
table.delete()
// Build a model
val dlModel = buildDLModel(train, valid)
/*
* The following code is appended life during presentation.
*/
// Collect model metrics and evaluate model quality
val trainMetrics = binomialMM(dlModel, train)
val validMetrics = binomialMM(dlModel, valid)
println(trainMetrics.auc._auc)
println(validMetrics.auc._auc)
// Spam detector
def isSpam(msg: String,
dlModel: DeepLearningModel,
hashingTF: HashingTF,
idfModel: IDFModel,
hamThreshold: Double = 0.5):Boolean = {
val msgRdd = sc.parallelize(Seq(msg))
val msgVector: DataFrame = idfModel.transform(
hashingTF.transform (
tokenize (msgRdd))).map(v => SMS("?", v)).toDF
val msgTable: H2OFrame = msgVector
msgTable.remove(0) // remove first column
val prediction = dlModel.score(msgTable)
//println(prediction)
prediction.vecs()(1).at(0) < hamThreshold
}
println(isSpam("Michal, beer tonight in MV?", dlModel, hashingTF, idfModel))
println(isSpam("We tried to contact you re your reply to our offer of a Video Handset? 750 anytime any networks mins? UNLIMITED TEXT?", dlModel, hashingTF, idfModel))
| nvoron23/sparkling-water | examples/scripts/mlconf_2015_hamSpam.script.scala | Scala | apache-2.0 | 4,762 |
package org.bowlerframework.view
import json.BigDecimalSerializer
import org.bowlerframework.{Response, Request}
import net.liftweb.json.JsonAST._
import net.liftweb.json.Extraction._
import net.liftweb.json.Printer._
import org.bowlerframework.exception.HttpException
import net.liftweb.json.Formats
import net.liftweb.json.ext.JodaTimeSerializers
/**
* JSON implementation of ViewRenderer - will take a Model or Models and render a JSON representation of said Model
*/
class JsonViewRenderer(jsonFormats: Formats = (net.liftweb.json.DefaultFormats + new BigDecimalSerializer)) extends ViewRenderer {
implicit val formats = jsonFormats ++ JodaTimeSerializers.all
def onError(request: Request, response: Response, exception: Exception) = {
if (classOf[HttpException].isAssignableFrom(exception.getClass)) {
val http = exception.asInstanceOf[HttpException]
response.sendError(http.code)
throw exception
} else {
throw exception
}
}
def renderView(request: Request, response: Response, models: Seq[Any]) = {
response.setContentType("application/json")
if (models.size == 0) {
response.setStatus(204)
} else if (models.size == 1) {
models.foreach(f => {
response.getWriter.write(compact(render(decompose(f))))
})
} else {
var json: JValue = null
models.foreach(f => {
val alias = getModelAlias(f)
val value = getValue(f)
if (json == null) json = new JField(alias, value)
else json = json ++ JField(alias, value)
})
response.getWriter.write(compact(render(json)))
}
}
/**
* renders a no model view, in the case of JSON, this simply returns a HTTP 204 - No Content response.
*/
def renderView(request: Request, response: Response) = {
response.setContentType("application/json")
response.setStatus(204)
}
private def getValue(any: Any): JValue = decompose(getModelValue(any))
}
case class ValidationError(key: String, message: String)
object JsonViewRenderer{
var formats = (net.liftweb.json.DefaultFormats + new BigDecimalSerializer)
def apply() = new JsonViewRenderer
} | bowler-framework/Bowler | core/src/main/scala/org/bowlerframework/view/JsonViewRenderer.scala | Scala | bsd-3-clause | 2,155 |
package org.openurp.edu.eams.core.web.action
import org.beangle.commons.collection.Collections
import org.beangle.commons.collection.Order
import org.beangle.data.jpa.dao.OqlBuilder
import org.beangle.commons.lang.Strings
import org.openurp.base.Department
import org.openurp.edu.base.Direction
import org.openurp.edu.base.Project
import org.openurp.edu.eams.web.action.common.ProjectSupportAction
class DirectionSearchAction extends ProjectSupportAction {
def getEntityName(): String = classOf[Direction].getName
def index(): String = {
put("project", getProject)
forward()
}
def search(): String = {
put("directions", entityDao.search(buildDirectionQuery()))
forward()
}
protected def buildDirectionQuery(): OqlBuilder[Direction] = {
val query = OqlBuilder.from(classOf[Direction], "direction")
populateConditions(query)
val projects = getProjects
if (Collections.isNotEmpty(projects)) {
query.where("direction.major.project in (:projects)", projects)
}
val departments = getDeparts
if (Collections.isNotEmpty(departments)) {
query.where("(exists(from direction.departs dd where dd.depart in (:departs)) or size(direction.departs)=0)",
departments)
}
var orderBy = get("orderBy")
if (Strings.isEmpty(orderBy)) {
orderBy = "direction.code"
}
val departId = getInt("fake.department.id")
if (departId != null) {
query.where("exists(from direction.departs dd where dd.depart.id=:departId)", departId)
}
val educationId = getInt("fake.education.id")
if (educationId != null) {
query.where("exists(from direction.departs dd where dd.education.id = :educationId)", educationId)
}
query.limit(getPageLimit)
query.orderBy(Order.parse(orderBy))
query
}
protected def getExportDatas(): Iterable[Direction] = {
val query = buildDirectionQuery()
query.limit(null)
entityDao.search(query)
}
def info(): String = {
val directionId = getIntId("direction")
if (null == directionId) {
return forwardError(Array("entity.direction", "error.model.id.needed"))
}
put("direction", entityDao.get(classOf[Direction], directionId))
forward()
}
}
| openurp/edu-eams-webapp | web/src/main/scala/org/openurp/edu/eams/core/web/action/DirectionSearchAction.scala | Scala | gpl-3.0 | 2,229 |
/*
* Copyright 2014β2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.common.effect
import slamdata.Predef._
import quasar.fp.ski.ΞΊ
import simulacrum.typeclass
import scalaz._
import scalaz.syntax.functor._
/** A source of strings unique within `F[_]`, an implementation must have the
* property that, if Applicative[F], then (freshName |@| freshName)(_ != _).
*/
@typeclass trait NameGenerator[F[_]] {
/** Returns a fresh name, guaranteed to be unique among all the other names
* generated from `F`.
*/
def freshName: F[String]
/** Returns a fresh name, prefixed with the given string. */
def prefixedName(prefix: String)(implicit F: Functor[F]): F[String] =
freshName map (prefix + _)
}
object NameGenerator extends NameGeneratorInstances
sealed abstract class NameGeneratorInstances extends NameGeneratorInstances0 {
implicit def sequenceNameGenerator[F[_]](implicit F: MonadState[F, Long]): NameGenerator[F] =
new NameGenerator[F] {
def freshName = F.bind(F.get)(n => F.put(n + 1) as n.toString)
}
}
sealed abstract class NameGeneratorInstances0 {
implicit def eitherTNameGenerator[F[_]: NameGenerator : Functor, A]: NameGenerator[EitherT[F, A, ?]] =
new NameGenerator[EitherT[F, A, ?]] {
def freshName = EitherT.rightT(NameGenerator[F].freshName)
}
implicit def readerTNameGenerator[F[_]: NameGenerator, A]: NameGenerator[ReaderT[F, A, ?]] =
new NameGenerator[ReaderT[F, A, ?]] {
def freshName = ReaderT(ΞΊ(NameGenerator[F].freshName))
}
implicit def stateTNameGenerator[F[_]: NameGenerator : Monad, S]: NameGenerator[StateT[F, S, ?]] =
new NameGenerator[StateT[F, S, ?]] {
def freshName = StateT(s => NameGenerator[F].freshName strengthL s)
}
implicit def writerTNameGenerator[F[_]: NameGenerator : Functor, W: Monoid]: NameGenerator[WriterT[F, W, ?]] =
new NameGenerator[WriterT[F, W, ?]] {
def freshName = WriterT.put(NameGenerator[F].freshName)(Monoid[W].zero)
}
}
| slamdata/slamengine | common/src/main/scala/quasar/common/effect/NameGenerator.scala | Scala | apache-2.0 | 2,539 |
package com.innoq.leanpubclient
import akka.util.ByteString
import play.api.libs.json.JsValue
import play.api.libs.ws.{StandaloneWSResponse, WSCookie}
import scala.xml.Elem
case class StubResponse(status: Int, json: JsValue) extends StandaloneWSResponse {
override def allHeaders: Map[String, Seq[String]] = Map.empty
override def underlying[T]: T = ???
override def statusText: String = ""
override def header(key: String): Option[String] = None
override def cookies: Seq[WSCookie] = Seq.empty
override def cookie(name: String): Option[WSCookie] = None
override def body: String = ""
override def xml: Elem = ???
override def bodyAsBytes: ByteString = ByteString.empty
}
| innoq/leanpub-client | src/test/scala/com/innoq/leanpubclient/StubResponse.scala | Scala | mit | 701 |
/* Copyright (C) 2008-2014 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.model
/**
* @author sameer
*/
import cc.factorie.infer.VariablesSettingsSampler
import cc.factorie.variable.{CategoricalDomain, LabeledCategoricalVariable}
import junit.framework.Assert._
import junit.framework._
/**
* @author sameer
* @since Sep 5, 2011
*/
class TestProposalSamplers extends TestCase with cc.factorie.util.FastLogging {
val numLabels: Int = 3
// a binary variable that takes values 0 or 1
object LabelDomain extends CategoricalDomain[Int](0 until numLabels)
class BinVar(i: Int) extends LabeledCategoricalVariable(i) {
def domain = LabelDomain
}
import scala.language.existentials
private def newFactor2(n1: BinVar, n2: BinVar, scoreEqual: Double, scoreUnequal: Double) =
new TupleFactorWithStatistics2[BinVar, BinVar](n1, n2) {
factor =>
def score(s1:BinVar#Value, s2:BinVar#Value): Double = if (s1 == s2) scoreEqual else scoreUnequal
override def equalityPrerequisite = this
}
// short for exponential
private def e(num: Double) = math.exp(num)
val eps = 1e-5
override protected def setUp() {
super.setUp()
// initialize binary variables with two values
new BinVar(0)
new BinVar(1)
}
def testV2F1() = {
implicit val random = new scala.util.Random(0)
val samples = 10000
val v1 = new BinVar(0)
val v2 = new BinVar(0)
val model = new ItemizedModel(newFactor2(v1, v2, 5, 1))
val sampler = new VariablesSettingsSampler[BinVar](model)
val origScore = model.currentScore(Seq(v1, v2))
logger.debug("orig score: " + origScore)
val assignCounts = Array.fill(numLabels, numLabels)(0)
for (i <- 0 until samples) {
sampler.process(Seq(v1, v2))
assignCounts(v1.intValue)(v2.intValue) += 1
}
val totalCount = assignCounts.toSeq.foldLeft(0.0)((s, arr) => arr.toSeq.foldLeft(s)(_ + _))
var Z = 0.0
for (p <- sampler.proposals(Seq(v1, v2))) {
p.diff.redo()
val modelScore = model.currentScore(Seq(v1, v2))
Z += e(modelScore)
p.diff.undo()
}
for (p <- sampler.proposals(Seq(v1, v2))) {
p.diff.redo()
val modelScore = model.currentScore(Seq(v1, v2))
val sampleProb = assignCounts(v1.intValue)(v2.intValue) / totalCount
logger.debug("%d %d : true: %f, prop: %f, trueProb: %f, sample: %f".format(v1.intValue, v2.intValue, modelScore - origScore, p.modelScore, e(modelScore) / Z, sampleProb))
assertEquals(modelScore - origScore, p.modelScore, eps)
assertEquals(e(modelScore) / Z, sampleProb, 0.01)
p.diff.undo()
}
}
}
| zxsted/factorie | src/test/scala/cc/factorie/model/TestProposalSamplers.scala | Scala | apache-2.0 | 3,323 |
package io.github.raptros.bson
import org.scalatest.{GivenWhenThen, Matchers, FlatSpec}
import scalaz.syntax.std.option._
import scalaz.std.option._
import com.mongodb.{DBObject, BasicDBList, BasicDBObject}
import org.bson.types.ObjectId
import org.joda.time.DateTime
import EncodeBsonField._
import Builders._
class EncodeBsonFieldSpec extends FlatSpec with Matchers with GivenWhenThen {
behavior of "the provided EncodeBsonField instances"
they should "directly encode any basic values" in {
Given("a DBObject")
val dbo = new BasicDBObject()
When("writing several values to it")
dbo.write("boolean", true)
dbo.write("double", 3.45: Double)
dbo.write("string", "something")
val objectId = new ObjectId()
dbo.write("objectId", objectId)
Then("those values should present and castable")
dbo.keySet() should contain only ("boolean", "double", "string", "objectId")
dbo.getBoolean("boolean") shouldBe true
dbo.getDouble("double") shouldBe 3.45
dbo.getString("string") shouldBe "something"
dbo.getObjectId("objectId") shouldBe objectId
}
they should "encode a date time as a date" in {
val dbo = new BasicDBObject()
val now = DateTime.now()
dbo.write("now", now)
dbo.getDate("now") shouldBe now.toDate
}
they should "derive a list field encoder from EncodeBsons" in {
val dbo = new BasicDBObject()
val l = Vector("one", "two", "three")
dbo.write("l", l)
val dbl = dbo.get("l").asInstanceOf[BasicDBList]
dbl.keySet() should contain only ("0", "1", "2")
dbl.get(0) shouldBe "one"
dbl.get(1) shouldBe "two"
dbl.get(2) shouldBe "three"
}
they should "allow optional field encoding" in {
val dbo = new BasicDBObject()
val o1 = "howdily".some
val o2 = none[String]
dbo.write("k1", o1)
dbo.write("k2", o2)
dbo.keySet should contain only "k1"
dbo.get("k1") shouldBe "howdily"
}
behavior of "EncodeBsonField in the DBOKV/DBOBuilder/DBO() context"
it should "allow construction via DBO()" in {
val dbo = DBO("one" :> 344, "zerg" :> true, "gl" :> List(2, 5, 6))
dbo.keySet() should contain only ("one", "gl", "zerg")
dbo.get("one") shouldBe 344
dbo.get("zerg") shouldBe true
val dbl = dbo.get("gl").asInstanceOf[BasicDBList]
dbl.keySet() should contain only ("0", "1", "2")
dbl.get(0) shouldBe 2
dbl.get(1) shouldBe 5
dbl.get(2) shouldBe 6
}
it should "allow construction via +@+" in {
val dbo = DBO.empty +@+ ("yo" :> "dawg")
dbo.keySet() should contain only "yo"
dbo.get("yo") shouldBe "dawg"
}
it should "allow construction via ++@++" in {
val dbo = DBO("yo" :> "dawg") ++@++ List("we" :> true, "heard" :> 3, "you" :> "like", "objects" :> List("so", "we"))
dbo.keySet() should contain only ("yo", "we", "heard", "you", "objects")
dbo.get("you") shouldBe "like"
dbo.get("heard") shouldBe 3
}
it should "allow construction via +?+" in {
val dbo = DBO.empty +?+ ("in" :?> 44.some) +?+ ("out" :?> none[String]) +?+ ("alsoIn" :?> true.some)
dbo.keySet() should contain only ("in", "alsoIn")
dbo.get("in") shouldBe 44
dbo.get("alsoIn") shouldBe true
}
it should "allow embedding DBOs inside DBOs" in {
val dbo = DBO("embed" :> DBO("embedded" :> true, "other" :> "something"))
dbo.keySet() should contain only "embed"
val embedded = dbo.get("embed").asInstanceOf[DBObject]
embedded.keySet() should contain only ("embedded", "other")
embedded.get("embedded") shouldBe true
embedded.get("other") shouldBe "something"
}
}
| raptros/the-bson | core/src/test/scala/io/github/raptros/bson/EncodeBsonFieldSpec.scala | Scala | bsd-3-clause | 3,595 |
package pl.pholda.malpompaaligxilo.dsl.expr.date
import pl.pholda.malpompaaligxilo.dsl.DslFormExpr
import pl.pholda.malpompaaligxilo.form.FormInstance
import pl.pholda.malpompaaligxilo.util.Date
/**
 * DSL expression comparing two date-valued sub-expressions with the given
 * comparison `operator` ("<", "<=", "=", ">=", ">", "!=").
 */
case class DateCompare(operator: String, a: DslFormExpr[Any], b: DslFormExpr[Any]) extends DslFormExpr[Boolean] {

  /**
   * Evaluates both operands against `formInstance`, requiring each to produce
   * a [[Date]], then applies the comparison selected by `operator`.
   *
   * @throws Exception if an operand does not evaluate to a `Date`, or if
   *                   `operator` is not one of the supported comparison tokens
   *                   (previously an unknown operator crashed with a bare MatchError)
   */
  override def apply(formInstance: FormInstance[_]): Boolean = {
    val aDate = evalDate(a, formInstance)
    val bDate = evalDate(b, formInstance)
    operator match {
      case "<"  => aDate < bDate
      case "<=" => aDate <= bDate
      case "="  => aDate == bDate
      case ">=" => aDate >= bDate
      case ">"  => aDate > bDate
      case "!=" => aDate != bDate
      case other => throw new Exception(s"unknown date comparison operator: $other")
    }
  }

  // Evaluates `expr` and narrows the result to Date, failing loudly otherwise.
  // Extracted to remove the duplicated match blocks for the two operands.
  private def evalDate(expr: DslFormExpr[Any], formInstance: FormInstance[_]): Date =
    expr(formInstance) match {
      case d: Date => d
      case x => throw new Exception(s"date was expected, found: $x")
    }
}
package com.haskforce.cabal
import com.haskforce.cabal.settings.{CabalComponentType, AddCabalPackageOptions}
import com.haskforce.cabal.settings.ui.AddCabalPackageUtil
import com.intellij.testFramework.UsefulTestCase
/**
* Test to ensure proper behavior of the AddCabalPackage data types.
*/
class AddCabalPackageTest extends UsefulTestCase {

  // Fully-populated baseline options; individual tests tweak fields via copy(...).
  val defaultOptions = AddCabalPackageOptions(
    maybeModule = None,
    packageName = "my-package",
    packageVersion = "0.1",
    buildType = CabalComponentType.Library,
    rootDir = "path/to/my-package",
    sourceDir = "src",
    cabalVersion = ">=1.20",
    license = None,
    author = None,
    email = None,
    homepage = None,
    synopsis = None,
    category = None,
    language = "Haskell2010",
    generateComments = true
  )

  // A blank cabalVersion must not produce any cabal-version argument at all.
  def testBuildArgsIgnoresEmptyStrings(): Unit = {
    val optionsWithBlankVersion = defaultOptions.copy(cabalVersion = "")
    val builtArgs = AddCabalPackageUtil.buildArgs(optionsWithBlankVersion)
    val cabalVersionArgs = builtArgs.filter(_.contains("cabal-version"))
    assertEmpty(cabalVersionArgs)
  }

  // Fails with the offending contents when the sequence is non-empty.
  def assertEmpty(xs: Seq[_]): Unit = {
    assert(xs.isEmpty, s"Expected empty sequence, got $xs")
  }
}
| carymrobbins/intellij-haskforce | tests/com/haskforce/cabal/AddCabalPackageTest.scala | Scala | apache-2.0 | 1,098 |
package io.ddf.aws.ml
import java.lang
import com.amazonaws.services.machinelearning.model.MLModelType
import io.ddf.DDF
import io.ddf.aws.AWSDDFManager
import io.ddf.aws.ml.util.CrossValidation
import io.ddf.exception.DDFException
import io.ddf.jdbc.content.{DdlCommand, Representations, SqlArrayResult}
import io.ddf.misc.ADDFFunctionalGroupHandler
import io.ddf.ml.{CrossValidationSet, IModel, ISupportML}
import scala.collection.JavaConverters._
/**
 * Amazon ML implementation of DDF's [[ISupportML]]: training, batch prediction
 * and cross-validation are delegated to AWS Machine Learning, with data staged
 * through Redshift and S3.
 */
class MLSupporter(ddf: DDF) extends ADDFFunctionalGroupHandler(ddf) with ISupportML with Serializable {
  val ddfManager = ddf.getManager.asInstanceOf[AWSDDFManager]
  val awsProperties = AwsConfig.getPropertiesForAmazonML(ddfManager)
  val awsHelper = new AwsHelper(awsProperties.s3Properties)
  val awsMLHelper = new AwsMLHelper(awsProperties)

  def getAwsMLHelper = awsMLHelper

  def getAwsHelper = awsHelper

  override def applyModel(model: IModel): DDF = applyModel(model, hasLabels = true)

  override def applyModel(model: IModel, hasLabels: Boolean): DDF = applyModel(model, hasLabels, includeFeatures = true)

  /**
   * Runs a batch prediction of `model` over this DDF and returns a new DDF
   * holding the prediction results, copied back from S3 into Redshift.
   *
   * NOTE(review): `hasLabels` and `includeFeatures` are not consulted by this
   * implementation — confirm whether that is intentional.
   */
  override def applyModel(model: IModel, hasLabels: Boolean, includeFeatures: Boolean): DDF = {
    val awsModel = model.getRawModel.asInstanceOf[AwsModel]
    val tableName = ddf.getTableName
    val sql = awsHelper.selectSql(tableName)
    val dataSourceId = awsMLHelper.createDataSourceFromRedShift(ddf.getSchema, sql, awsModel.getMLModelType)
    //this will wait for batch prediction to complete
    val batchId = awsMLHelper.createBatchPrediction(awsModel, dataSourceId)
    //last column is target column for supervised learning
    val targetColumn = ddf.getSchema.getColumns.asScala.last
    // Multiclass results need one float8 column per factor level of the target.
    val uniqueTargetVal = if (awsModel.getMLModelType equals MLModelType.MULTICLASS) {
      ddf.setAsFactor(targetColumn.getName)
      ddf.getSchemaHandler.computeFactorLevelsAndLevelCounts()
      ddf.getSchema.getColumn(targetColumn.getName).getOptionalFactor.getLevels.asScala.map(value => "col_" + value + " " +
        "float8").mkString(",")
    }
    else ""
    val newTableName = Identifiers.newTableName
    val createTableSql = awsMLHelper.createTableSqlForModelType(awsModel.getMLModelType, newTableName, uniqueTargetVal)
    val newDDF = ddf.getManager.asInstanceOf[AWSDDFManager].create(createTableSql)
    //now copy the results to redshift
    val manifestPath = awsHelper.createResultsManifestForRedshift(batchId)
    val sqlToCopy = awsHelper.sqlToCopyFromS3ToRedshift(manifestPath, newTableName)
    implicit val cat = ddfManager.catalog
    DdlCommand(ddfManager.getConnection(), ddfManager.baseSchema, sqlToCopy)
    //and return the ddf
    newDDF
  }

  // NOTE(review): `seed` is ignored; CrossValidation picks its own randomness.
  override def CVRandom(k: Int, trainingSize: Double, seed: lang.Long): java.util.List[CrossValidationSet] = {
    val crossValidation: CrossValidation = new CrossValidation(ddf)
    crossValidation.CVRandom(k, trainingSize)
  }

  override def CVKFold(k: Int, seed: lang.Long): java.util.List[CrossValidationSet] = {
    val crossValidation: CrossValidation = new CrossValidation(ddf)
    crossValidation.CVK(k)
  }

  /**
   * Trains an AWS ML model of type `trainMethodKey` (BINARY / MULTICLASS /
   * REGRESSION) on this DDF. `args(0)`, if present, must be a
   * java.util.Map[String, String] of training parameters.
   *
   * @throws DDFException if `trainMethodKey` is not a valid [[MLModelType]]
   */
  override def train(trainMethodKey: String, args: AnyRef*): IModel = {
    val sql = awsHelper.selectSql(ddf.getTableName)
    val mlModelType = try {
      MLModelType.valueOf(trainMethodKey)
    }
    catch {
      case e: IllegalArgumentException => throw new DDFException(e)
    }
    val dataSourceId = awsMLHelper.createDataSourceFromRedShift(ddf.getSchema, sql, mlModelType)
    val paramsMap = if (args.length < 1 || args(0) == null) {
      new java.util.HashMap[String, String]()
    } else {
      args(0).asInstanceOf[java.util.Map[String, String]]
    }
    //this will wait for model creation to complete
    val modelId = awsMLHelper.createModel(dataSourceId, mlModelType, paramsMap)
    mlModelType match {
      case MLModelType.BINARY => Model(BinaryClassification(awsMLHelper, ddf.getSchema, modelId, mlModelType))
      case MLModelType.MULTICLASS => Model(MultiClassClassification(awsMLHelper, ddf.getSchema, modelId, mlModelType))
      case MLModelType.REGRESSION => Model(LinearRegression(awsMLHelper, ddf.getSchema, modelId, mlModelType))
    }
  }

  /**
   * Builds a 2x2 matrix by thresholding the last two columns of each
   * prediction row at `v`: index 0 = value <= v, index 1 = value > v.
   * The second-to-last column selects the row, the last column the column
   * (presumably actual vs. predicted value — confirm against the prediction
   * table layout produced by applyModel).
   *
   * @throws DDFException for non-regression models
   */
  def getConfusionMatrix(iModel: IModel, v: Double): Array[Array[Long]] = {
    if (iModel.getRawModel.asInstanceOf[AwsModel].getMLModelType != MLModelType.REGRESSION) {
      throw new DDFException("Confusion Matrix can only be evaluated for Regression model")
    }
    val predictedDDF = ddf.ML.applyModel(iModel)
    val matrix = Array.ofDim[Long](2, 2)
    val predictDDFAsSql = predictedDDF.getRepresentationHandler.get(Representations.SQL_ARRAY_RESULT)
      .asInstanceOf[SqlArrayResult].result
    val result = predictDDFAsSql map (row => row(row.length - 2).asInstanceOf[Double]) zip (predictDDFAsSql map (row
      => row(row.length - 1).asInstanceOf[Double]))
    // Simplified from the original `a < v || a == v` chains, which are just `a <= v`.
    for ((oldVal, newVal) <- result) {
      val i = if (oldVal <= v) 0 else 1
      val j = if (newVal <= v) 0 else 1
      matrix(i)(j) += 1
    }
    matrix
  }
}
| ddf-project/ddf-jdbc | aws/src/main/scala/io/ddf/aws/ml/MLSupporter.scala | Scala | apache-2.0 | 5,276 |
package com.arcusys.valamis.web.servlet.slides
import java.text.Normalizer
import javax.servlet.http.HttpServletResponse
import com.arcusys.learn.liferay.util.PortletName
import com.arcusys.valamis.lrssupport.lrs.service.util.TinCanVerbs
import com.arcusys.valamis.slide.model.Slide
import com.arcusys.valamis.slide.service.SlideService
import com.arcusys.valamis.uri.model.TincanURIType
import com.arcusys.valamis.uri.service.TincanURIService
import com.arcusys.valamis.web.portlet.base.ViewPermission
import PortletName.LessonStudio
import com.arcusys.valamis.web.servlet.base.{BaseApiController, PermissionUtil}
import com.arcusys.valamis.web.servlet.file.FileUploading
import com.arcusys.valamis.web.servlet.slides.request.SlideRequest
import com.arcusys.valamis.web.servlet.slides.response.SlideConverter._
/**
 * REST servlet for lesson-studio slides: list/create/update/delete slides and
 * read/replace a slide's background image. Every route checks the
 * lesson-studio view permission first.
 */
class SlideServlet extends BaseApiController with FileUploading {

  private lazy val slideService = inject[SlideService]
  private lazy val uriService = inject[TincanURIService]

  // Typed accessor over the current request's parameters.
  private val req = SlideRequest(this)

  // Normalizes the background image name to Unicode NFC so byte-equivalent
  // names compare equal.
  // NOTE(review): this helper is not referenced by any route below — the
  // change-bg-image route uses req.bgImage directly; confirm which is intended.
  private def bgImage = Some(Normalizer.normalize(req.bgImage.getOrElse(""), Normalizer.Form.NFC))

  // Resolves the request's statement verb: standard TinCan verbs pass through
  // unchanged; anything else is registered as a local Tincan verb URI.
  private def verbUUID = req.statementVerb.map { verbId =>
    val verbName = verbId.substring(verbId.lastIndexOf("/") + 1)
    if (TinCanVerbs.all.contains(verbName))
      verbId
    else
      uriService.getOrCreate(uriService.getLocalURL(), verbId, TincanURIType.Verb, Some(verbName)).objId
  }

  // Registers/resolves the statement category as a local Tincan category URI.
  private def categoryUUID = req.statementCategoryId.map { categoryId =>
    uriService.getOrCreate(uriService.getLocalURL(), categoryId, TincanURIType.Category, Some(categoryId)).objId
  }

  // GET /slides — template slides when isTemplate is set, otherwise the slides
  // of the requested slide set.
  get("/slides(/)")(jsonAction {
    PermissionUtil.requirePermissionApi(ViewPermission, LessonStudio)
    val slideList = if (req.isTemplate)
      slideService.getTemplateSlides
    else
      slideService.getSlides(req.slideSetIdOption.get)
    slideList.map(_.convertSlideModel)
  })

  // GET /slides/:id/logo — streams the slide's background image as PNG;
  // 404 when the slide (or its image) is missing.
  get("/slides/:id/logo") {
    PermissionUtil.requirePermissionApi(ViewPermission, LessonStudio)
    val content = slideService.getBgImage(req.id) getOrElse {
      halt(HttpServletResponse.SC_NOT_FOUND, s"Slide with id: ${req.id} doesn't exist")
    }
    response.reset()
    response.setStatus(HttpServletResponse.SC_OK)
    response.setContentType("image/png")
    response.getOutputStream.write(content)
  }

  // POST /slides/:id — updates an existing slide from the request fields.
  post("/slides/:id(/)")(jsonAction {
    PermissionUtil.requirePermissionApi(ViewPermission, LessonStudio)
    slideService.update(
      Slide(
        id = req.id,
        title = req.title,
        bgColor = req.bgColor,
        font = req.font,
        questionFont = req.questionFont,
        answerFont = req.answerFont,
        answerBg = req.answerBg,
        duration = req.duration,
        leftSlideId = req.leftSlideId,
        topSlideId = req.topSlideId,
        slideSetId = req.slideSetId,
        statementVerb = verbUUID,
        statementObject = req.statementObject,
        statementCategoryId = categoryUUID,
        isTemplate = req.isTemplate,
        isLessonSummary = req.isLessonSummary,
        playerTitle = req.playerTitleOption,
        properties = req.slideProperties
      )
    ).convertSlideModel
  })

  // POST /slides — creates a new slide (same fields as update, no id).
  post("/slides(/)")(jsonAction {
    PermissionUtil.requirePermissionApi(ViewPermission, LessonStudio)
    slideService.create(
      Slide(
        title = req.title,
        bgColor = req.bgColor,
        font = req.font,
        questionFont = req.questionFont,
        answerFont = req.answerFont,
        answerBg = req.answerBg,
        duration = req.duration,
        leftSlideId = req.leftSlideId,
        topSlideId = req.topSlideId,
        slideSetId = req.slideSetId,
        statementVerb = verbUUID,
        statementObject = req.statementObject,
        statementCategoryId = categoryUUID,
        isTemplate = req.isTemplate,
        isLessonSummary = req.isLessonSummary,
        playerTitle = req.playerTitleOption,
        properties = req.slideProperties
      )
    ).convertSlideModel
  })

  // POST /slides/:id/change-bg-image — replaces the slide's background image.
  post("/slides/:id/change-bg-image(/)")(jsonAction {
    PermissionUtil.requirePermissionApi(ViewPermission, LessonStudio)
    slideService.updateBgImage(req.id, req.bgImage)
  })

  // DELETE /slides/:id — removes the slide.
  delete("/slides/:id(/)")(jsonAction {
    PermissionUtil.requirePermissionApi(ViewPermission, LessonStudio)
    slideService.delete(req.id)
  })
}
| arcusys/Valamis | valamis-portlets/src/main/scala/com/arcusys/valamis/web/servlet/slides/SlideServlet.scala | Scala | gpl-3.0 | 4,327 |
/*
* Copyright 2014 The Instalk Project
*
* The Instalk Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package im.instalk
import scala.util.Random
import play.api.libs.functional.syntax._
import play.api.libs.json._
/** A user: either anonymous or authenticated (sealed — see the two case classes). */
sealed trait User {
  def username: String
  def info: UserInfo
}

/** Per-variant profile data attached to a [[User]]. */
sealed trait UserInfo {
  def name: String
}

// Anonymous users carry a display color instead of a gravatar.
case class AnonymousUserInfo(val name: String, color: String) extends UserInfo

// Authenticated users carry a gravatar reference.
case class AuthenticatedUserInfo(val name: String, gravatar: String) extends UserInfo

case class AuthenticatedUser(val username: String, val info: AuthenticatedUserInfo) extends User

case class AnonymousUser(val username: String, val info: AnonymousUserInfo) extends User
object User {

  /** Turns a dash-separated identifier into a space-separated display name. */
  def generateName(s: String): String = s.replace('-', ' ')

  /**
   * JSON (de)serialization for both user variants. The boolean "auth" field
   * selects the variant: true for authenticated users, anything else (or a
   * missing field) for anonymous users.
   */
  implicit val userFormat: Format[User] = new Format[User] {
    implicit val anonFmt = Json.format[AnonymousUserInfo]
    implicit val authFmt = Json.format[AuthenticatedUserInfo]

    def writes(o: User): JsValue = {
      val (auth, infoJson) = o match {
        case u: AnonymousUser     => (false, Json.toJson(u.info))
        case u: AuthenticatedUser => (true, Json.toJson(u.info))
      }
      Json.obj(
        "username" -> o.username,
        "auth" -> auth,
        "info" -> infoJson
      )
    }

    def reads(o: JsValue): JsResult[User] = {
      val isAuthenticated = (o \\ "auth").asOpt[Boolean].getOrElse(false)
      if (isAuthenticated) {
        val reader =
          ((__ \\ "username").read[String] and
            (__ \\ "info").read[AuthenticatedUserInfo]
          )(AuthenticatedUser.apply _)
        reader.reads(o)
      } else {
        val reader =
          ((__ \\ "username").read[String] and
            (__ \\ "info").read[AnonymousUserInfo]
          )(AnonymousUser.apply _)
        reader.reads(o)
      }
    }
  }

  // Hex alphabet used by generateColor.
  private[this] val colorStr = "ABCDEF0123456789"

  /** Picks a random guest handle like "Guest-1234". */
  def generateUsername: String = "Guest-" + Random.nextInt(5000)

  /** Builds a random 6-digit hex color string such as "#A3F019". */
  def generateColor: String = {
    val digits = for (_ <- 1 to 6) yield colorStr.charAt(Random.nextInt(16))
    "#" + digits.mkString("")
  }
}
| AhmedSoliman/instalk | app/im/instalk/User.scala | Scala | apache-2.0 | 2,689 |
package com.stderr.jmetagraphx
import java.io.File
import org.apache.log4j.{Level, Logger, BasicConfigurator}
import org.apache.spark.SparkContext
import org.apache.spark.SparkConf
import org.apache.spark.graphx._
import org.apache.spark.sql.SQLContext
import org.apache.commons.io.FileUtils
object Main {
  val logger = Logger.getLogger(Main.getClass)
  val homeDirectory = System.getProperty("user.home")
  val vertexFilePath: String = s"$homeDirectory/vertex.dat"
  val edgeFilePath: String = s"$homeDirectory/edge.dat"

  /**
   * Entry point: scans jars for method-call metadata, builds a GraphX graph
   * from the scanned classes/calls, persists both RDDs for later shell use,
   * then runs the analyses.
   */
  def main(args: Array[String]): Unit = {
    // Quiet the defaults from spark. Use INFO for development
    BasicConfigurator.configure()
    Logger.getRootLogger.setLevel(Level.ERROR)
    cleanupFiles
    // Extract the method call information from jar files
    scanJars(args)
    // Create Spark
    val conf = new SparkConf().setAppName("JMetaGraphX").setMaster("local")
    val sc = new SparkContext(conf)
    // Create RDDs from the scanned classes
    val vertexRDD = sc.parallelize(ClassVertex.toSeq)
    val edgeRDD = sc.parallelize(MethodCall.toSeq)
    // Save the RDDs for use in the spark shell
    vertexRDD.saveAsObjectFile(vertexFilePath)
    edgeRDD.saveAsObjectFile(edgeFilePath)
    // Analysis
    run(sc)
  }

  /** Visits every jar under args(0) (default: the local Maven repo) with the ASM scanner. */
  def scanJars(args: Array[String]) = {
    val mvnRepo = s"$homeDirectory/.m2/repository"
    val directoryToScan = if (args.isEmpty) new File(mvnRepo) else new File(args(0))
    val allJars = recursiveListFiles(directoryToScan).filter(_.getName.endsWith(".jar"))
    info(s"scanning ${allJars.length} jars")
    ASMClassVisitor.visit(allJars)
  }

  /** Runs the graph analyses: top-called classes, PageRank, degree stats, subgraph queries. */
  def run(sc: SparkContext) = {
    val vertexRDD = sc.objectFile[(VertexId, ClassVertex)](vertexFilePath)
    val edgeRDD = sc.objectFile[Edge[MethodCall]](edgeFilePath)
    val graph = Graph(vertexRDD, edgeRDD)
    // Create an inverted and swapped RDD of the indegree of the map
    // to create called stats
    val inDegreeVertexId = graph.inDegrees.map(t => (t._2 * -1, t._1))
    val topN = 10
    val topCalled = inDegreeVertexId.takeOrdered(topN).map(_._2)
    println(s"\\n\\n\\nTop $topN called classes")
    vertexRDD.filter(x => topCalled.contains(x._1)).map(_._2.name).foreach(println)
    println("\\n\\n")
    val pr = pageRank(graph, 0.0001)
    val importantClasses = pr.vertices.join(graph.vertices).sortBy(_._2._1, ascending=false).map(_._2._2)
    println("\\n\\nImportant pageranked classes\\n")
    importantClasses.take(500).foreach(v => println(v.name))
    val mostCalled = graph.inDegrees.join(graph.vertices).sortBy(_._2._1, ascending=false).take(20)
    println(s"\\n\\nMost called\\n")
    mostCalled.foreach(println)
    val callsMost = graph.outDegrees.join(graph.vertices).sortBy(_._2._1, ascending=false).take(20)
    println(s"\\n\\nCalls most\\n")
    callsMost.foreach(println)
    println("\\n\\nReferences com/google/protobuf\\n")
    graph.subgraph(isPackageCall("com/google/protobuf")).triplets.sortBy(_.srcAttr.name).map(_.srcAttr.name).distinct.take(30).foreach(println)
  }

  // True for edges that call INTO packageName from outside it.
  def isPackageCall(packageName: String)(e: EdgeTriplet[ClassVertex, MethodCall]): Boolean = {
    e.dstAttr.name.contains(packageName) && !e.srcAttr.name.contains(packageName)
  }

  def pageRank(graph: Graph[ClassVertex, MethodCall], tol: Double, resetProb: Double = 0.15): Graph[Double, Double] = {
    import org.apache.spark.graphx.lib.PageRank
    PageRank.runUntilConvergence(graph, tol, resetProb)
  }

  def info(msg: String): Unit = {
    logger.info(s"###\\n$msg\\n###")
  }

  def format(t: EdgeTriplet[ClassVertex, MethodCall]): Unit = {
    println(t.srcAttr.name + " calls " + t.dstAttr.name
      + "." + t.attr.name + "(" + t.attr.desc + ")")
  }

  /** Lists `f` and every file beneath it, recursively. */
  def recursiveListFiles(f: File): Array[File] = {
    // File.listFiles returns null when f is not a directory or on I/O error;
    // the original code NPE'd in that case.
    val these = Option(f.listFiles).getOrElse(Array.empty[File])
    these ++ these.filter(_.isDirectory).flatMap(recursiveListFiles)
  }

  /** Deletes previously saved RDD output directories (if any) so they can be rewritten. */
  def cleanupFiles: Unit = {
    for (fileName <- List(vertexFilePath, edgeFilePath)) {
      val file = new File(fileName)
      // saveAsObjectFile writes a directory, hence deleteDirectory rather than delete.
      if (file.exists()) FileUtils.deleteDirectory(file)
    }
  }
}
| SNoe925/JMetaGraphX | src/main/scala/com/stderr/jmetagraphx/Main.scala | Scala | apache-2.0 | 4,067 |
package jp.ken1ma.pg_select_pcap
import java.util.Properties
import java.sql._
import org.scalatest.FunSuite
/**
 * Integration test issuing SELECTs over a live PostgreSQL instance.
 * Requires the configured host/db/user to be reachable.
 */
class NoParamsTest extends FunSuite {
  // Connection settings for the test database; adjust to your environment.
  val host = "192.168.1.11"
  val db = "pcap_test_db"
  val user = "pcap_test_user"
  val password = "pcap_test_user"

  var con: Connection = _

  // Opens a connection and creates the scratch table around every test,
  // dropping the table and closing the connection afterwards.
  override def withFixture(test: NoArgTest) = {
    val props = new Properties
    props.setProperty("user", user)
    props.setProperty("password", password)
    con = DriverManager.getConnection(s"jdbc:postgresql://$host/$db", props)

    // Runs a one-off statement, always releasing the Statement handle.
    def execute(sql: String): Unit = {
      val s = con.createStatement
      try s.execute(sql) finally s.close()
    }

    try {
      execute("CREATE TABLE t0(id integer, name text)")
      super.withFixture(test)
    } finally {
      // Fix: previously a failing DROP would leak the connection; the
      // connection is now closed even if the cleanup statement throws.
      try execute("DROP TABLE T0") finally con.close()
    }
  }

  /*
  test("Statement.execute") {
    val s = con.createStatement
    try {
      s.execute("select * from T0")
    } finally {
      s.close
    }
  }
  test("PreparedStatement.executeQuery") {
    val ps = con.prepareStatement("select * from T0")
    try {
      ps.executeQuery
    } finally {
      ps.close
    }
  }
  */

  test("PreparedStatement.executeQuery with parameters") {
    val ps = con.prepareStatement("select * from T0 where (id = ? or id = ?) and (name = ? or name = ?)")
    ps.setInt(1, 3)
    ps.setInt(2, 1025)
    ps.setString(3, "foo")
    ps.setString(4, "bar")
    try {
      ps.executeQuery
    } finally {
      ps.close
    }
  }
}
| ken1ma/pg-select-pcap | src/test/scala/jp.ken1ma.pg_select_pcap/NoParamsTest.scala | Scala | apache-2.0 | 1,403 |
import types._list
import scala.collection.mutable
object env {

  /**
   * A Lisp-style environment: a mutable symbol table with a pointer to an
   * outer (enclosing) environment for scope-chain lookup.
   *
   * When both `binds` and `exprs` are given, each symbol drawn from `binds`
   * is bound to the next value drawn from `exprs`. The symbol '& switches to
   * variadic mode: the bind name following '&' receives all remaining exprs
   * collected into a list.
   *
   * NOTE: the constructor consumes `binds` via next() inside its own foreach,
   * so iteration order and single-pass iterator semantics are load-bearing here.
   */
  class Env(outer: Env = null,
            binds: Iterator[Any] = null,
            exprs: Iterator[Any] = null) {
    val data: mutable.Map[Symbol, Any] = mutable.Map()
    if (binds != null && exprs != null) {
      binds.foreach(b => {
        val k = b.asInstanceOf[Symbol]
        if (k == '&) {
          // Variadic bind: the name after '&' gets the rest of the arguments as a list.
          data(binds.next().asInstanceOf[Symbol]) = _list(exprs.toSeq:_*)
        } else {
          data(k) = exprs.next()
        }
      })
    }

    // Returns the nearest environment (this or an ancestor) that defines `key`,
    // or null when the symbol is unbound everywhere.
    def find(key: Symbol): Env = {
      if (data.contains(key)) {
        this
      } else if (outer != null) {
        outer.find(key)
      } else {
        null
      }
    }

    // Binds key to value in THIS environment (ignoring outer scopes) and returns the value.
    def set(key: Symbol, value: Any): Any = {
      data(key) = value
      value
    }

    // Looks up key through the scope chain; throws when unbound.
    def get(key: Symbol): Any = {
      val env = find(key)
      if (env == null) throw new Exception("'" + key.name + "' not found")
      env.data(key)
    }
  }
}
// vim:ts=2:sw=2
| nlfiedler/mal | scala/env.scala | Scala | mpl-2.0 | 1,000 |
package com.github.diegopacheco.sandbox.scala.netflix.karyon
import io.netty.buffer.ByteBuf;
import io.reactivex.netty.protocol.http.server.HttpServerRequest;
import io.reactivex.netty.protocol.http.server.HttpServerResponse;
import javax.inject.Inject;
import netflix.karyon.transport.interceptor.InboundInterceptor;
import rx.Observable;
import rx.functions.Func1;
/**
 * Inbound Karyon interceptor that runs each request through the injected
 * AuthenticationService before it reaches the router.
 *
 * NOTE(review): the Boolean authentication result is mapped to null (Void)
 * and otherwise discarded — presumably the service itself short-circuits
 * the response on failure; confirm against AuthenticationService.
 */
class AuthInterceptor
  extends InboundInterceptor[HttpServerRequest[ByteBuf], HttpServerResponse[ByteBuf]] {

  // Field injection, hence the mutable var initialized to null.
  @Inject
  var authService: AuthenticationService = null

  def in(request: HttpServerRequest[ByteBuf], response: HttpServerResponse[ByteBuf]): Observable[Void] = {
    return authService.authenticate(request).map(new Func1[Boolean, Void]() {
      def call(aBoolean: Boolean): Void = {
        null
      }
    })
  }
}
package org.vegas
import org.scalatest._
import org.vegas.compiler.Compiler
import scala.io.Source
/** Base class for unit tests: FlatSpec with OptionValues and Matchers mixed in. */
abstract class UnitTest extends FlatSpec
    with OptionValues
    with Matchers {

  /**
   * Reads the classpath resource at `resource` fully into a string.
   * Fixes a resource leak: the underlying Source is now closed after reading.
   */
  def resource(resource: String): String = {
    val src = Source.fromURL(getClass.getResource(resource))
    try src.mkString finally src.close()
  }
}
| rrdelaney/vegas | src/test/scala/org/vegas/VegasTest.scala | Scala | mit | 279 |
package com.twitter.finatra.http.test
import com.fasterxml.jackson.databind.JsonNode
import com.google.common.net.{HttpHeaders, MediaType}
import com.google.inject.Stage
import com.twitter.finagle.http._
import com.twitter.finatra.json.{FinatraObjectMapper, JsonDiff}
import com.twitter.inject.server.PortUtils.{ephemeralLoopback, loopbackAddressForPort}
import com.twitter.inject.server.{PortUtils, Ports}
import com.twitter.util.Try
import org.jboss.netty.handler.codec.http.{HttpMethod, HttpResponseStatus}
/**
 * Embedded Finatra HTTP server for feature tests: boots `twitterServer` on an
 * ephemeral loopback port (bound via `httpPortFlag`) and exposes
 * httpGet/httpPost/... helpers that issue requests and assert on the response.
 *
 * @param defaultHttpSecure when true, requests default to the HTTPS client
 * @param mapperOverride    replaces the injector-provided FinatraObjectMapper
 */
class EmbeddedHttpServer(
  twitterServer: Ports,
  clientFlags: Map[String, String] = Map(),
  extraArgs: Seq[String] = Seq(),
  waitForWarmup: Boolean = true,
  stage: Stage = Stage.DEVELOPMENT,
  useSocksProxy: Boolean = false,
  skipAppMain: Boolean = false,
  defaultRequestHeaders: Map[String, String] = Map(),
  defaultHttpSecure: Boolean = false,
  mapperOverride: Option[FinatraObjectMapper] = None,
  httpPortFlag: String = "http.port",
  streamResponse: Boolean = false)
  extends com.twitter.inject.server.EmbeddedTwitterServer(
    twitterServer,
    clientFlags + (httpPortFlag -> ephemeralLoopback),
    extraArgs,
    waitForWarmup,
    stage,
    useSocksProxy,
    skipAppMain,
    defaultRequestHeaders,
    streamResponse) {
  // Created lazily on first use; start() makes sure the server is up and the port is bound.
  protected lazy val httpClient = {
    start()
    createHttpClient(
      "httpClient",
      twitterServer.httpExternalPort.getOrElse(throw new Exception("External HTTP port not bound")))
  }

  protected lazy val httpsClient = {
    start()
    createHttpClient(
      "httpsClient",
      twitterServer.httpsExternalPort.getOrElse(throw new Exception("External HTTPs port not bound")),
      secure = true)
  }

  // JSON mapper used to pretty-print and parse bodies; overridable via the constructor.
  protected lazy val mapper = mapperOverride getOrElse injector.instance[FinatraObjectMapper]

  override protected def logAppStartup() {
    super.logAppStartup()
    println(s"ExternalHttp -> http://$externalHttpHostAndPort")
  }

  // Pretty-prints the response body as JSON, falling back to the raw string when parsing fails.
  override protected def printNonEmptyResponseBody(response: Response): Unit = {
    try {
      println(mapper.writePrettyString(
        response.getContentString()))
    } catch {
      case e: Exception =>
        println(response.contentString)
    }
    println()
  }

  // Renders a request body for logging: non-printable characters are masked and,
  // when possible, the body is pretty-printed as JSON.
  override protected def prettyRequestBody(request: Request): String = {
    val printableBody = request.contentString.replaceAll("[\\\\p{Cntrl}&&[^\\n\\t\\r]]", "?") //replace non-printable characters
    Try {
      mapper.writePrettyString(printableBody)
    } getOrElse {
      printableBody
    }
  }
  // External (loopback) HTTP port and host:port string of the running server.
  lazy val httpExternalPort = twitterServer.httpExternalPort.get
  lazy val externalHttpHostAndPort = PortUtils.loopbackAddressForPort(httpExternalPort)

  // Shuts down the server and any clients that were bound; idempotent via `closed`.
  override def close() {
    if (!closed) {
      super.close()
      if (twitterServer.httpExternalPort.isDefined) {
        httpClient.close()
      }
      if (twitterServer.httpsExternalPort.isDefined) {
        httpsClient.close()
      }
      closed = true
    }
  }
  /* TODO: Extract HTTP methods into HttpClient */

  /** Issues a GET to `path` and applies the supplied response expectations. */
  def httpGet(
    path: String,
    accept: MediaType = null,
    headers: Map[String, String] = Map(),
    suppress: Boolean = false,
    andExpect: HttpResponseStatus = Status.Ok,
    withLocation: String = null,
    withBody: String = null,
    withJsonBody: String = null,
    withJsonBodyNormalizer: JsonNode => JsonNode = null,
    withErrors: Seq[String] = null,
    routeToAdminServer: Boolean = false,
    secure: Option[Boolean] = None): Response = {
    val request = createApiRequest(path, HttpMethod.GET)
    jsonAwareHttpExecute(request, addAcceptHeader(accept, headers), suppress, andExpect, withLocation, withBody, withJsonBody, withJsonBodyNormalizer, withErrors, routeToAdminServer, secure = secure.getOrElse(defaultHttpSecure))
  }
def httpGetJson[T: Manifest](
path: String,
accept: MediaType = null,
headers: Map[String, String] = Map(),
suppress: Boolean = false,
andExpect: HttpResponseStatus = Status.Ok,
withLocation: String = null,
withBody: String = null,
withJsonBody: String = null,
withJsonBodyNormalizer: JsonNode => JsonNode = null,
normalizeJsonParsedReturnValue: Boolean = true,
withErrors: Seq[String] = null,
routeToAdminServer: Boolean = false,
secure: Option[Boolean] = None): T = {
val response =
httpGet(path, accept = MediaType.JSON_UTF_8, headers = headers, suppress = suppress,
andExpect = andExpect, withLocation = withLocation,
withJsonBody = withJsonBody, withJsonBodyNormalizer = withJsonBodyNormalizer)
jsonParseWithNormalizer(response, withJsonBodyNormalizer, normalizeJsonParsedReturnValue)
}
  /** Issues a POST of `postBody` (Content-Type defaults to JSON) and applies the expectations. */
  def httpPost(
    path: String,
    postBody: String,
    accept: MediaType = null,
    suppress: Boolean = false,
    contentType: String = Message.ContentTypeJson,
    headers: Map[String, String] = Map(),
    andExpect: HttpResponseStatus = null,
    withLocation: String = null,
    withBody: String = null,
    withJsonBody: String = null,
    withJsonBodyNormalizer: JsonNode => JsonNode = null,
    withErrors: Seq[String] = null,
    routeToAdminServer: Boolean = false,
    secure: Option[Boolean] = None): Response = {
    val request = createApiRequest(path, Method.Post)
    request.setContentString(postBody)
    request.headers.set(HttpHeaders.CONTENT_LENGTH, postBody.length)
    request.headers.set(HttpHeaders.CONTENT_TYPE, contentType)
    jsonAwareHttpExecute(request, addAcceptHeader(accept, headers), suppress, andExpect, withLocation, withBody, withJsonBody, withJsonBodyNormalizer, withErrors, routeToAdminServer, secure = secure.getOrElse(defaultHttpSecure))
  }

  /** POST variant that parses the JSON response body into ResponseType. */
  def httpPostJson[ResponseType: Manifest](
    path: String,
    postBody: String,
    suppress: Boolean = false,
    headers: Map[String, String] = Map(),
    andExpect: HttpResponseStatus = Status.Ok,
    withLocation: String = null,
    withBody: String = null,
    withJsonBody: String = null,
    withJsonBodyNormalizer: JsonNode => JsonNode = null,
    normalizeJsonParsedReturnValue: Boolean = false,
    withErrors: Seq[String] = null,
    routeToAdminServer: Boolean = false,
    secure: Option[Boolean] = None): ResponseType = {
    val response = httpPost(path, postBody, MediaType.JSON_UTF_8, suppress, Message.ContentTypeJson, headers, andExpect, withLocation, withBody, withJsonBody, withJsonBodyNormalizer, withErrors, routeToAdminServer, secure)
    jsonParseWithNormalizer(response, withJsonBodyNormalizer, normalizeJsonParsedReturnValue)
  }
  /** Issues a PUT of `putBody` and applies the expectations. */
  def httpPut(
    path: String,
    putBody: String,
    accept: MediaType = null,
    suppress: Boolean = false,
    headers: Map[String, String] = Map(),
    andExpect: HttpResponseStatus = null,
    withLocation: String = null,
    withBody: String = null,
    withJsonBody: String = null,
    withJsonBodyNormalizer: JsonNode => JsonNode = null,
    withErrors: Seq[String] = null,
    routeToAdminServer: Boolean = false,
    secure: Option[Boolean] = None): Response = {
    val request = createApiRequest(path, Method.Put)
    request.setContentString(putBody)
    request.headers().set(HttpHeaders.CONTENT_LENGTH, putBody.length)
    jsonAwareHttpExecute(request, addAcceptHeader(accept, headers), suppress, andExpect, withLocation, withBody, withJsonBody, withJsonBodyNormalizer, withErrors, routeToAdminServer, secure = secure.getOrElse(defaultHttpSecure))
  }

  /** PUT variant that parses the JSON response body into ResponseType. */
  def httpPutJson[ResponseType: Manifest](
    path: String,
    putBody: String,
    suppress: Boolean = false,
    headers: Map[String, String] = Map(),
    andExpect: HttpResponseStatus = Status.Ok,
    withLocation: String = null,
    withBody: String = null,
    withJsonBody: String = null,
    withJsonBodyNormalizer: JsonNode => JsonNode = null,
    normalizeJsonParsedReturnValue: Boolean = false,
    withErrors: Seq[String] = null,
    routeToAdminServer: Boolean = false,
    secure: Option[Boolean] = None): ResponseType = {
    val response = httpPut(path, putBody, MediaType.JSON_UTF_8, suppress, headers, andExpect, withLocation, withBody, withJsonBody, withJsonBodyNormalizer, withErrors, routeToAdminServer, secure)
    jsonParseWithNormalizer(response, withJsonBodyNormalizer, normalizeJsonParsedReturnValue)
  }
  /** Issues a DELETE (optionally with a body) and applies the expectations. */
  def httpDelete(
    path: String,
    deleteBody: String = null,
    accept: MediaType = null,
    suppress: Boolean = false,
    headers: Map[String, String] = Map(),
    andExpect: HttpResponseStatus = null,
    withLocation: String = null,
    withBody: String = null,
    withJsonBody: String = null,
    withJsonBodyNormalizer: JsonNode => JsonNode = null,
    withErrors: Seq[String] = null,
    routeToAdminServer: Boolean = false,
    secure: Option[Boolean] = None): Response = {
    val request = createApiRequest(path, Method.Delete)
    if (deleteBody != null) {
      request.setContentString(deleteBody)
    }
    jsonAwareHttpExecute(
      request,
      addAcceptHeader(accept, headers),
      suppress,
      andExpect,
      withLocation,
      withBody,
      withJsonBody,
      withJsonBodyNormalizer,
      withErrors,
      routeToAdminServer,
      secure = secure.getOrElse(defaultHttpSecure))
  }

  /** DELETE variant that parses the JSON response body into ResponseType. */
  def httpDeleteJson[ResponseType: Manifest](
    path: String,
    deleteBody: String,
    suppress: Boolean = false,
    headers: Map[String, String] = Map(),
    andExpect: HttpResponseStatus = Status.Ok,
    withLocation: String = null,
    withBody: String = null,
    withJsonBody: String = null,
    withJsonBodyNormalizer: JsonNode => JsonNode = null,
    normalizeJsonParsedReturnValue: Boolean = false,
    withErrors: Seq[String] = null,
    routeToAdminServer: Boolean = false,
    secure: Option[Boolean] = None): ResponseType = {
    val response = httpDelete(path, deleteBody, MediaType.JSON_UTF_8, suppress, headers, andExpect, withLocation, withBody, withJsonBody, withJsonBodyNormalizer, withErrors, routeToAdminServer, secure)
    jsonParseWithNormalizer(response, withJsonBodyNormalizer, normalizeJsonParsedReturnValue)
  }
  /** Issues an OPTIONS request and applies the expectations. */
  def httpOptions(
    path: String,
    accept: MediaType = null,
    headers: Map[String, String] = Map(),
    suppress: Boolean = false,
    andExpect: HttpResponseStatus = Status.Ok,
    withLocation: String = null,
    withBody: String = null,
    withJsonBody: String = null,
    withJsonBodyNormalizer: JsonNode => JsonNode = null,
    withErrors: Seq[String] = null,
    routeToAdminServer: Boolean = false,
    secure: Option[Boolean] = None): Response = {
    val request = createApiRequest(path, HttpMethod.OPTIONS)
    jsonAwareHttpExecute(request, addAcceptHeader(accept, headers), suppress, andExpect, withLocation, withBody, withJsonBody, withJsonBodyNormalizer, withErrors, routeToAdminServer, secure = secure.getOrElse(defaultHttpSecure))
  }

  /** Issues a PATCH of `patchBody` and applies the expectations. */
  def httpPatch(
    path: String,
    patchBody: String,
    accept: MediaType = null,
    suppress: Boolean = false,
    headers: Map[String, String] = Map(),
    andExpect: HttpResponseStatus = Status.Ok,
    withLocation: String = null,
    withBody: String = null,
    withJsonBody: String = null,
    withJsonBodyNormalizer: JsonNode => JsonNode = null,
    withErrors: Seq[String] = null,
    routeToAdminServer: Boolean = false,
    secure: Option[Boolean] = None): Response = {
    val request = createApiRequest(path, HttpMethod.PATCH)
    request.setContentString(patchBody)
    request.headers().set(HttpHeaders.CONTENT_LENGTH, patchBody.length)
    jsonAwareHttpExecute(request, addAcceptHeader(accept, headers), suppress, andExpect, withLocation, withBody, withJsonBody, withJsonBodyNormalizer, withErrors, routeToAdminServer, secure = secure.getOrElse(defaultHttpSecure))
  }
def httpPatchJson[ResponseType: Manifest](
path: String,
patchBody: String,
suppress: Boolean = false,
headers: Map[String, String] = Map(),
andExpect: HttpResponseStatus = Status.Ok,
withLocation: String = null,
withBody: String = null,
withJsonBody: String = null,
withJsonBodyNormalizer: JsonNode => JsonNode = null,
normalizeJsonParsedReturnValue: Boolean = false,
withErrors: Seq[String] = null,
routeToAdminServer: Boolean = false,
secure: Option[Boolean] = None): ResponseType = {
val response = httpPatch(path, patchBody, MediaType.JSON_UTF_8, suppress, headers, andExpect, withLocation, withBody, withJsonBody, withJsonBodyNormalizer, withErrors, routeToAdminServer, secure)
jsonParseWithNormalizer(response, withJsonBodyNormalizer, normalizeJsonParsedReturnValue)
}
/**
 * Issues an HTTP HEAD request and runs the standard response expectations.
 */
def httpHead(
  path: String,
  accept: MediaType = null,
  headers: Map[String, String] = Map(),
  suppress: Boolean = false,
  andExpect: HttpResponseStatus = Status.Ok,
  withLocation: String = null,
  withBody: String = null,
  withJsonBody: String = null,
  withJsonBodyNormalizer: JsonNode => JsonNode = null,
  withErrors: Seq[String] = null,
  routeToAdminServer: Boolean = false,
  secure: Option[Boolean] = None): Response = {
  val headRequest = createApiRequest(path, HttpMethod.HEAD)
  val effectiveHeaders = addAcceptHeader(accept, headers)
  jsonAwareHttpExecute(
    headRequest,
    effectiveHeaders,
    suppress,
    andExpect,
    withLocation,
    withBody,
    withJsonBody,
    withJsonBodyNormalizer,
    withErrors,
    routeToAdminServer,
    secure = secure.getOrElse(defaultHttpSecure))
}
/**
 * Issues a form-encoded (or multipart) POST built from the given params and
 * runs the standard response expectations.
 */
def httpFormPost(
  path: String,
  params: Map[String, String],
  multipart: Boolean = false,
  routeToAdminServer: Boolean = false,
  headers: Map[String, String] = Map.empty,
  andExpect: HttpResponseStatus = Status.Ok,
  withBody: String = null,
  withJsonBody: String = null,
  secure: Option[Boolean] = None): Response = {
  // Assemble the form POST with Finagle's RequestBuilder.
  val builder = RequestBuilder()
    .url(normalizeURL(path))
    .addHeaders(headers)
    .add(paramsToElements(params))
  val formRequest = builder.buildFormPost(multipart = multipart)
  jsonAwareHttpExecute(
    Request(formRequest),
    routeToAdminServer = routeToAdminServer,
    andExpect = andExpect,
    withBody = withBody,
    withJsonBody = withJsonBody,
    secure = secure.getOrElse(defaultHttpSecure))
}
/**
 * Executes a caller-supplied request as-is, propagating the request's own
 * headers, and runs the standard response expectations.
 */
def httpRequest(
  request: Request,
  suppress: Boolean = false,
  andExpect: HttpResponseStatus = null,
  withLocation: String = null,
  withBody: String = null,
  withJsonBody: String = null,
  withJsonBodyNormalizer: JsonNode => JsonNode = null,
  withErrors: Seq[String] = null,
  routeToAdminServer: Boolean = false,
  secure: Option[Boolean] = None): Response = {
  // The request carries its own headers; snapshot them into an immutable map.
  val requestHeaders = request.headerMap.toMap
  jsonAwareHttpExecute(
    request,
    requestHeaders,
    suppress,
    andExpect,
    withLocation,
    withBody,
    withJsonBody,
    withJsonBodyNormalizer,
    withErrors,
    routeToAdminServer,
    secure = secure.getOrElse(defaultHttpSecure))
}
/* Private */
/**
 * Executes the request against the appropriate server (admin/https/http),
 * runs the base expectations via httpExecute, then applies the JSON-specific
 * assertions (body diff and expected errors) when requested.
 */
private def jsonAwareHttpExecute(
  request: Request,
  headers: Map[String, String] = Map(),
  suppress: Boolean = false,
  andExpect: HttpResponseStatus = Status.Ok,
  withLocation: String = null,
  withBody: String = null,
  withJsonBody: String = null,
  withJsonBodyNormalizer: JsonNode => JsonNode = null,
  withErrors: Seq[String] = null, //TODO: Deprecate
  routeToAdminServer: Boolean = false,
  secure: Boolean): Response = {
  val (client, port) = chooseHttpClient(request.path, routeToAdminServer, secure)
  // Point the Host header at the loopback address of the chosen server.
  request.headers.set("Host", loopbackAddressForPort(port))
  val response = httpExecute(client, request, headers, suppress, andExpect, withLocation, withBody)
  // Null means "no JSON expectation"; empty string means "expect empty body".
  Option(withJsonBody).foreach { expectedJson =>
    if (expectedJson.isEmpty)
      response.contentString should equal("")
    else
      JsonDiff.jsonDiff(response.contentString, expectedJson, withJsonBodyNormalizer, verbose = false)
  }
  Option(withErrors).foreach { expectedErrors =>
    JsonDiff.jsonDiff(response.contentString, Map("errors" -> expectedErrors), withJsonBodyNormalizer)
  }
  response
}
/** Leaves absolute http:// URLs untouched; prefixes relative paths with the local server address. */
private def normalizeURL(path: String) =
  if (path.startsWith("http://")) path
  else s"http://localhost:8080$path"
/** Converts form parameters into the SimpleElement list RequestBuilder expects. */
private def paramsToElements(params: Map[String, String]): Seq[SimpleElement] =
  params.toSeq map { case (name, value) => SimpleElement(name, value) }
/**
 * Picks the (client, port) pair for the request: the admin server for /admin
 * paths or when forced, otherwise the HTTPS or plain HTTP external server.
 */
private def chooseHttpClient(path: String, forceAdmin: Boolean, secure: Boolean) = {
  val isAdminRequest = path.startsWith("/admin") || forceAdmin
  if (isAdminRequest)
    (httpAdminClient, twitterServer.httpAdminPort)
  else if (secure)
    (httpsClient, twitterServer.httpsExternalPort.get)
  else
    (httpClient, twitterServer.httpExternalPort.get)
}
/** Adds an Accept header for the given media type; passes headers through when accept is null. */
private def addAcceptHeader(
  accept: MediaType,
  headers: Map[String, String]): Map[String, String] =
  Option(accept)
    .map(mediaType => headers + (HttpHeaders.ACCEPT -> mediaType.toString))
    .getOrElse(headers)
/**
 * Parses the response body as JSON, optionally normalizing the parsed tree,
 * and maps it to T. Parse failures are logged with the offending body and
 * rethrown.
 */
private def jsonParseWithNormalizer[T: Manifest](
  response: Response,
  normalizer: JsonNode => JsonNode,
  normalizeParsedJsonNode: Boolean) = {
  val parsedNode = mapper.parse[JsonNode](response.contentString)
  // Only normalize when a normalizer was supplied AND normalization of the
  // parsed return value was explicitly requested.
  val jsonNode =
    if (normalizer != null && normalizeParsedJsonNode) normalizer(parsedNode)
    else parsedNode
  try mapper.parse[T](jsonNode)
  catch {
    case e: Exception =>
      println(s"Json parsing error $e trying to parse response $response with body " + response.contentString)
      throw e
  }
}
}
| nkhuyu/finatra | http/src/test/scala/com/twitter/finatra/http/test/EmbeddedHttpServer.scala | Scala | apache-2.0 | 17,196 |
package huhong.scala.common.test
import java.io.{StringReader, StringWriter}
import java.util
import java.util.Date
import huhong.scala.common._
import huhong.scala.common.dao.impl.CommonDao
import huhong.scala.common.lucene.analysis.CharAnalyzer
import huhong.scala.common.test.dao.{HotelDao, UserDao}
import huhong.scala.common.test.domain.Hotel
import huhong.scala.test.domain.User
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute
import org.springframework.context.support.ClassPathXmlApplicationContext
import scala.beans.BeanProperty
import scala.concurrent.ExecutionContext
import java.util.{List => JList}
import java.util.{Map => JMap}
/**
* Created by huhong on 14/12/22.
*/
object Main extends App {

  // Sample text mixing CJK characters, digits, ASCII letters and punctuation,
  // used to exercise the custom analyzer below.
  val str = "倩ζ°δΈι132abc@#$+-)(*&^.,/"

  // Standard Lucene TokenStream consume cycle:
  // reset -> incrementToken* -> end -> close; prints each emitted term.
  val analyzer = new CharAnalyzer
  val ts = analyzer.tokenStream("test", new StringReader(str))
  val term = ts.addAttribute(classOf[CharTermAttribute])
  ts.reset()
  while (ts.incrementToken()) {
    println(term.toString)
  }
  ts.end()
  ts.close()

  import huhong.scala.common.hibernate.query._

  // Bootstrap the Spring application context and fetch the DAO under test.
  val ctx = new ClassPathXmlApplicationContext("beans.xml")
  val hd = ctx.getBean(classOf[HotelDao])

  // Annotation-driven query definition: @query_field-annotated Option fields
  // are translated into query predicates by OptionFieldQuery.
  @querytables(tables = Array(new querytable(value = "User", alias = "u")))
  @orderby(value = "order by u.createDate desc")
  @sort(value = "address")
  class UserQuery extends OptionFieldQuery with Serializable {
    @query_field(value = "accout_name", op = "like", tablename = "u")
    var username: Option[String] = None

    //@where("u.username=?")
    @query_field(value = "address", op = "=", tablename = "u")
    var password: Option[String] = None
  }

  import scala.reflect.runtime.universe._
  import scala.reflect.api._
  import scala.reflect.runtime._
  import huhong.scala.common.json._

  // Build a query instance and print the generated HQL / parameter / index
  // query forms for manual inspection.
  val userQuery = new UserQuery
  userQuery.password = Some("123456")
  userQuery.username = Some("%huhong%")
  println(userQuery.toActualHql())
  println(userQuery.toParams())
  println(userQuery.toCountHQL())
  println(userQuery.toIndexQuery(hd.indexQueryBuilder[Hotel]))
  println(userQuery.toIndexSort())

  // --- earlier reflection experiments, kept for reference ---
  //  val ret = userQuery.getTypeTag(userQuery).tpe.members.filter(_.typeSignature <:< typeOf[Option[_]])
  //  val mirror = runtimeMirror(this.getClass.getClassLoader)
  //  val instanceMirror = mirror.reflect(userQuery)
  //  ret.foreach(f => {
  //
  //    val found = f.annotations.find(_.tpe =:= typeOf[tablename])
  //
  //    if (found.isDefined)
  //      println(found.get.javaArgs.map { case (name, value) =>
  //        name.toString -> value
  //      }.find(_._1.equals("value")).get._2.toString)
  //
  //  })
  //
  //  println(ret)
  //  val foundAnno = mirror.classSymbol(classOf[UserQuery]).annotations.find(_.tpe.typeConstructor <:< typeOf[querytables])
  //
  //  println(foundAnno)
  //
  //
  //
  //  userQuery.tables.toJson(System.out)
}
| wuxihuhong/scala.common | src/test/scala/huhong/scala/common/test/Main.scala | Scala | apache-2.0 | 2,879 |
package org.crudible.core.model
/** Mixin for model objects that expose a stable string identity. */
trait ModelWithIdentity {

  /** Returns the identifier of this model instance. */
  def identify(): String
} | rehei/crudible | crudible-core/src/main/scala/org/crudible/core/model/ModelWithIdentity.scala | Scala | apache-2.0 | 92 |
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package api
import akka.NotUsed
import com.lightbend.lagom.scaladsl.api._
import com.lightbend.lagom.scaladsl.api.transport.Method
trait FooService extends Service {

  /** Service call backing GET /foo; returns a plain string body. */
  def foo: ServiceCall[NotUsed, String]

  override def descriptor = {
    import Service._
    // NOTE(review): restCall is macro-based in Lagom's scaladsl and inspects
    // the `foo` method reference, so the call is kept inline here.
    named("foo-service")
      .withCalls(
        restCall(Method.GET, "/foo", foo)
      )
      // Auto-generate service-gateway ACLs from the declared calls.
      .withAutoAcl(true)
  }
}
| rcavalcanti/lagom | dev/sbt-plugin/src/sbt-test/sbt-plugin/service-discovery/a/api/src/main/scala/api/FooService.scala | Scala | apache-2.0 | 463 |
/*******************************************************************************
* (C) Copyright 2015 Haifeng Li
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
package smile.validation
import smile.classification.{SoftClassifier, Classifier}
import smile.regression.Regression
import smile.math.Math
import smile.util._
/** Model validation.
*
* @author Haifeng Li
*/
trait Operators {

  /** Test a generic classifier.
    * The accuracy will be measured and printed out on standard output.
    *
    * @param x training data.
    * @param y training labels.
    * @param testx test data.
    * @param testy test data labels.
    * @param parTest Parallel test if true.
    * @param trainer a code block to return a classifier trained on the given data.
    * @tparam T the type of training and test data.
    * @tparam C the concrete classifier type produced by the trainer.
    * @return the trained classifier.
    */
  def test[T, C <: Classifier[T]](x: Array[T], y: Array[Int], testx: Array[T], testy: Array[Int], parTest: Boolean = true)(trainer: => (Array[T], Array[Int]) => C): C = {
    println("training...")
    // time {} logs the wall-clock duration of the wrapped block.
    val classifier = time {
      trainer(x, y)
    }

    println("testing...")
    val pred = time {
      // Score the test set with a parallel collection when requested.
      if (parTest)
        testx.par.map(classifier.predict(_)).toArray
      else
        testx.map(classifier.predict(_))
    }

    println("Accuracy = %.2f%%" format (100.0 * new Accuracy().measure(testy, pred)))
    classifier
  }

  /** Test a binary classifier.
    * The accuracy, sensitivity, specificity, precision, F-1 score, F-2 score, and F-0.5 score will be measured
    * and printed out on standard output.
    *
    * @param x training data.
    * @param y training labels.
    * @param testx test data.
    * @param testy test data labels.
    * @param parTest Parallel test if true.
    * @param trainer a code block to return a binary classifier trained on the given data.
    * @tparam T the type of training and test data.
    * @return the trained classifier.
    */
  def test2[T, C <: Classifier[T]](x: Array[T], y: Array[Int], testx: Array[T], testy: Array[Int], parTest: Boolean = true)(trainer: => (Array[T], Array[Int]) => C): C = {
    println("training...")
    val classifier = time {
      trainer(x, y)
    }

    println("testing...")
    val pred = time {
      if (parTest)
        testx.par.map(classifier.predict(_)).toArray
      else
        testx.map(classifier.predict(_))
    }

    // Print the full suite of binary classification measures.
    println("Accuracy = %.2f%%" format (100.0 * new Accuracy().measure(testy, pred)))
    println("Sensitivity/Recall = %.2f%%" format (100.0 * new Sensitivity().measure(testy, pred)))
    println("Specificity = %.2f%%" format (100.0 * new Specificity().measure(testy, pred)))
    println("Precision = %.2f%%" format (100.0 * new Precision().measure(testy, pred)))
    println("F1-Score = %.2f%%" format (100.0 * new FMeasure().measure(testy, pred)))
    println("F2-Score = %.2f%%" format (100.0 * new FMeasure(2).measure(testy, pred)))
    println("F0.5-Score = %.2f%%" format (100.0 * new FMeasure(0.5).measure(testy, pred)))
    classifier
  }

  /** Test a binary soft classifier.
    * The accuracy, sensitivity, specificity, precision, F-1 score, F-2 score, F-0.5 score, and AUC will be measured
    * and printed out on standard output.
    *
    * @param x training data.
    * @param y training labels.
    * @param testx test data.
    * @param testy test data labels.
    * @param parTest Parallel test if true.
    * @param trainer a code block to return a binary classifier trained on the given data.
    * @tparam T the type of training and test data.
    * @return the trained classifier.
    */
  def test2soft[T, C <: SoftClassifier[T]](x: Array[T], y: Array[Int], testx: Array[T], testy: Array[Int], parTest: Boolean = true)(trainer: => (Array[T], Array[Int]) => C): C = {
    println("training...")
    val classifier = time {
      trainer(x, y)
    }

    println("testing...")
    val results = time {
      if (parTest)
        // Parallel branch: allocate a fresh posteriori buffer per element so
        // concurrent predictions do not share state.
        testx.par.map { xi =>
          val posteriori = Array(0.0, 0.0)
          val yi = classifier.predict(xi, posteriori)
          (yi, posteriori(1))
        }.toArray
      else {
        // Sequential branch: a single buffer is reused; its positive-class
        // probability (index 1) is copied out before the next prediction.
        val posteriori = Array(0.0, 0.0)
        testx.map { xi =>
          val yi = classifier.predict(xi, posteriori)
          (yi, posteriori(1))
        }
      }
    }

    // Split (label, positive-class probability) pairs for the measures below.
    val (pred, prob) = results.unzip
    println("Accuracy = %.2f%%" format (100.0 * new Accuracy().measure(testy, pred)))
    println("Sensitivity/Recall = %.2f%%" format (100.0 * new Sensitivity().measure(testy, pred)))
    println("Specificity = %.2f%%" format (100.0 * new Specificity().measure(testy, pred)))
    println("Precision = %.2f%%" format (100.0 * new Precision().measure(testy, pred)))
    println("F1-Score = %.2f%%" format (100.0 * new FMeasure().measure(testy, pred)))
    println("F2-Score = %.2f%%" format (100.0 * new FMeasure(2).measure(testy, pred)))
    println("F0.5-Score = %.2f%%" format (100.0 * new FMeasure(0.5).measure(testy, pred)))
    println("AUC = %.2f%%" format (100.0 * AUC.measure(testy, prob)))
    classifier
  }

  /** Returns the given measures, or Accuracy as the default when none are supplied. */
  private def measuresOrAccuracy(measures: Seq[ClassificationMeasure]): Seq[ClassificationMeasure] = {
    if (measures.isEmpty) Seq(new Accuracy) else measures
  }

  /** Returns the given measures, or RMSE as the default when none are supplied. */
  private def measuresOrRMSE(measures: Seq[RegressionMeasure]): Seq[RegressionMeasure] = {
    if (measures.isEmpty) Seq(new RMSE) else measures
  }

  /** Leave-one-out cross validation on a generic classifier. LOOCV uses a single observation
    * from the original sample as the validation data, and the remaining
    * observations as the training data. This is repeated such that each
    * observation in the sample is used once as the validation data. This is
    * the same as a K-fold cross-validation with K being equal to the number of
    * observations in the original sample. Leave-one-out cross-validation is
    * usually very expensive from a computational point of view because of the
    * large number of times the training process is repeated.
    *
    * @param x data samples.
    * @param y sample labels.
    * @param measures validation measures such as accuracy, specificity, etc.
    * @param trainer a code block to return a classifier trained on the given data.
    * @return measure results.
    */
  def loocv[T <: Object](x: Array[T], y: Array[Int], measures: ClassificationMeasure*)(trainer: => (Array[T], Array[Int]) => Classifier[T]): Array[Double] = {
    val n = x.length
    val predictions = new Array[Int](n)
    val split = new LOOCV(n)
    // Train n models, each predicting the single held-out sample.
    for (i <- 0 until n) {
      print(s"loocv ${i+1}...")
      val trainx = Math.slice[T](x, split.train(i))
      val trainy = Math.slice(y, split.train(i))
      val model = trainer(trainx, trainy)
      predictions(split.test(i)) = model.predict(x(split.test(i)))
    }

    // Evaluate each requested measure over the pooled predictions.
    measuresOrAccuracy(measures).map { measure =>
      val result = measure.measure(y, predictions)
      println(f"$measure%s: ${100*result}%.2f%%")
      result
    }.toArray
  }

  /** Leave-one-out cross validation on a generic regression model.
    *
    * @param x data samples.
    * @param y response variable.
    * @param measures validation measures such as MSE, AbsoluteDeviation, etc.
    * @param trainer a code block to return a regression model trained on the given data.
    * @return measure results.
    */
  def loocv[T <: Object](x: Array[T], y: Array[Double], measures: RegressionMeasure*)(trainer: => (Array[T], Array[Double]) => Regression[T]): Array[Double] = {
    val n = x.length
    val predictions = new Array[Double](n)
    val split = new LOOCV(n)
    for (i <- 0 until n) {
      print(s"loocv ${i+1}...")
      val trainx = Math.slice[T](x, split.train(i))
      val trainy = Math.slice(y, split.train(i))
      val model = trainer(trainx, trainy)
      predictions(split.test(i)) = model.predict(x(split.test(i)))
    }

    measuresOrRMSE(measures).map { measure =>
      val result = measure.measure(y, predictions)
      println(f"$measure%s: $result%.4f")
      result
    }.toArray
  }

  /** Cross validation on a generic classifier.
    * Cross-validation is a technique for assessing how the results of a
    * statistical analysis will generalize to an independent data set.
    * It is mainly used in settings where the goal is prediction, and one
    * wants to estimate how accurately a predictive model will perform in
    * practice. One round of cross-validation involves partitioning a sample
    * of data into complementary subsets, performing the analysis on one subset
    * (called the training set), and validating the analysis on the other subset
    * (called the validation set or testing set). To reduce variability, multiple
    * rounds of cross-validation are performed using different partitions, and the
    * validation results are averaged over the rounds.
    *
    * @param x data samples.
    * @param y sample labels.
    * @param k k-fold cross validation.
    * @param measures validation measures such as accuracy, specificity, etc.
    * @param trainer a code block to return a classifier trained on the given data.
    * @return measure results.
    */
  def cv[T <: Object](x: Array[T], y: Array[Int], k: Int, measures: ClassificationMeasure*)(trainer: => (Array[T], Array[Int]) => Classifier[T]): Array[Double] = {
    val n = x.length
    val predictions = new Array[Int](n)
    val split = new CrossValidation(n, k)
    // Train one model per fold; each fills in the predictions of its test fold.
    for (i <- 0 until k) {
      print(s"cv ${i+1}...")
      val trainx = Math.slice[T](x, split.train(i))
      val trainy = Math.slice(y, split.train(i))
      val model = trainer(trainx, trainy)

      split.test(i).foreach { j =>
        predictions(j) = model.predict(x(j))
      }
    }

    measuresOrAccuracy(measures).map { measure =>
      val result = measure.measure(y, predictions)
      println(f"$measure%s: ${100*result}%.2f%%")
      result
    }.toArray
  }

  /** Cross validation on a generic regression model.
    *
    * @param x data samples.
    * @param y response variable.
    * @param k k-fold cross validation.
    * @param measures validation measures such as MSE, AbsoluteDeviation, etc.
    * @param trainer a code block to return a regression model trained on the given data.
    * @return measure results.
    */
  def cv[T <: Object](x: Array[T], y: Array[Double], k: Int, measures: RegressionMeasure*)(trainer: => (Array[T], Array[Double]) => Regression[T]): Array[Double] = {
    val n = x.length
    val predictions = new Array[Double](n)
    val split = new CrossValidation(n, k)
    for (i <- 0 until k) {
      print(s"cv ${i+1}...")
      val trainx = Math.slice[T](x, split.train(i))
      val trainy = Math.slice(y, split.train(i))
      val model = trainer(trainx, trainy)

      split.test(i).foreach { j =>
        predictions(j) = model.predict(x(j))
      }
    }

    measuresOrRMSE(measures).map { measure =>
      val result = measure.measure(y, predictions)
      println(f"$measure%s: $result%.4f")
      result
    }.toArray
  }

  /** Bootstrap validation on a generic classifier.
    * The bootstrap is a general tool for assessing statistical accuracy. The basic
    * idea is to randomly draw datasets with replacement from the training data,
    * each sample the same size as the original training set. This is done many
    * times (say k = 100), producing k bootstrap datasets. Then we refit the model
    * to each of the bootstrap datasets and examine the behavior of the fits over
    * the k replications.
    *
    * @param x data samples.
    * @param y sample labels.
    * @param k k-round bootstrap estimation.
    * @param measures validation measures such as accuracy, specificity, etc.
    * @param trainer a code block to return a classifier trained on the given data.
    * @return measure results.
    */
  def bootstrap[T <: Object](x: Array[T], y: Array[Int], k: Int, measures: ClassificationMeasure*)(trainer: => (Array[T], Array[Int]) => Classifier[T]): Array[Double] = {
    val split = new Bootstrap(x.length, k)
    val m = measuresOrAccuracy(measures)

    // One row of measure results per bootstrap round.
    val results = (0 until k).map { i =>
      print(s"bootstrap ${i+1}...")
      val trainx = Math.slice[T](x, split.train(i))
      val trainy = Math.slice(y, split.train(i))
      val model = trainer(trainx, trainy)

      // Evaluate on the out-of-bag samples of this round.
      val nt = split.test(i).length
      val truth = new Array[Int](nt)
      val predictions = new Array[Int](nt)
      for (j <- 0 until nt) {
        val l = split.test(i)(j)
        truth(j) = y(l)
        predictions(j) = model.predict(x(l))
      }

      m.map { measure =>
        val result = measure.measure(truth, predictions)
        println(f"$measure%s: ${100*result}%.2f%%")
        result
      }.toArray
    }.toArray

    // Column-wise mean over the k rounds, one value per measure.
    val avg = Math.colMean(results)
    println("Bootstrap average:")
    for (i <- 0 until avg.length) {
      println(f"${m(i)}%s: ${100*avg(i)}%.2f%%")
    }
    avg
  }

  /** Bootstrap validation on a generic regression model.
    *
    * @param x data samples.
    * @param y response variable.
    * @param k k-round bootstrap estimation.
    * @param measures validation measures such as MSE, AbsoluteDeviation, etc.
    * @param trainer a code block to return a regression model trained on the given data.
    * @return measure results.
    */
  def bootstrap[T <: Object](x: Array[T], y: Array[Double], k: Int, measures: RegressionMeasure*)(trainer: => (Array[T], Array[Double]) => Regression[T]): Array[Double] = {
    val split = new Bootstrap(x.length, k)
    val m = measuresOrRMSE(measures)

    val results = (0 until k).map { i =>
      print(s"bootstrap ${i+1}...")
      val trainx = Math.slice[T](x, split.train(i))
      val trainy = Math.slice(y, split.train(i))
      val model = trainer(trainx, trainy)

      val nt = split.test(i).length
      val truth = new Array[Double](nt)
      val predictions = new Array[Double](nt)
      for (j <- 0 until nt) {
        val l = split.test(i)(j)
        truth(j) = y(l)
        predictions(j) = model.predict(x(l))
      }

      m.map { measure =>
        val result = measure.measure(truth, predictions)
        println(f"$measure%s: $result%.4f")
        result
      }.toArray
    }.toArray

    val avg = Math.colMean(results)
    println("Bootstrap average:")
    for (i <- 0 until avg.length) {
      println(f"${m(i)}%s: ${avg(i)}%.4f")
    }
    avg
  }
} | arehart13/smile | scala/src/main/scala/smile/validation/Operators.scala | Scala | apache-2.0 | 14,903 |
package org.dele.misc.bookFastDS
import org.apache.spark.sql.SparkSession
/**
* Created by dele on 2017-03-02.
*/
object Ch9ReadWrite extends App {

  // Local Spark session using all available cores.
  val spark = SparkSession.builder()
    .master("local[*]")
    .appName("Read Write tests")
    .config("spark.logConf", "true")
    .config("spark.logLevel", "ERROR")
    .getOrCreate()

  // Read a CSV that has a header row, then inspect row count, inferred
  // schema (all columns are strings since no inferSchema option is set),
  // and the first five rows.
  val cars = spark.read.option("header", "true").csv("res/data/cars.csv")
  println(cars.count())
  cars.printSchema()
  cars.show(5)

  // Write the data back as CSV (with header) and as Parquet partitioned by
  // the "year" column; mode("overwrite") replaces any previous output.
  cars.write.mode("overwrite").option("header", "true").csv("res/data/cars-out.csv")
  cars.write.mode("overwrite").partitionBy("year").parquet("res/data/cars-out-parquet")

  spark.close()
}
| new2scala/text-util | misc/src/test/scala/org/dele/misc/bookFastDS/Ch9ReadWrite.scala | Scala | apache-2.0 | 675 |
package scife
package enumeration
package benchmarks
package parallel
package test
import dependent._
import memoization._
import scife.{ enumeration => e }
import e.parallel._
import scife.util._
import scife.util.logging._
import java.util.concurrent._
import benchmarks._
import org.scalatest._
import org.scalatest.prop._
import org.scalatest.matchers._
import scala.language.existentials
import scife.enumeration.memoization.scope.AccumulatingScope
// Benchmark-style test that enumerates binary search trees in parallel and
// measures the wall-clock time taken by a fixed number of worker threads.
class BinarySearchTree extends FunSuite with HasLogger {
  import structures._
  import BSTrees._

  // Dependent enumerator keyed by (tree size, value range).
  type EnumType = Depend[(Int, Range), Tree]

  val count = new java.util.concurrent.atomic.AtomicInteger(0)
  var beg = System.currentTimeMillis()
  var end = System.currentTimeMillis()

  test("measure code") {
    val scope = new AccumulatingScope
    this.tdEnum = constructEnumerator(scope)
    val size = 14
    // Warm-up pass: materialize every enumerator value for sizes 1..14.
    // NOTE: the loop variable deliberately shadows the outer `size` val.
    for (size <- 1 to size) {
      val enum = tdEnum.getEnum((size, 1 to size))
      for (i <- 0 until enum.size) enum(i)
    }
    // Drop memoized state accumulated during warm-up before timing.
    scope.clear

    //    for (numOfThreads <- 2 to Runtime.getRuntime.availableProcessors) {
    val numOfThreads = 2
    // After the warm-up loop, `size` refers again to the outer val (14).
    this.size = size
    initExecutor(numOfThreads)
    val runnerList = runners(numOfThreads)
    beg = System.currentTimeMillis()
    // invokeAll blocks until every worker has finished.
    exec.invokeAll(runnerList)
    end = System.currentTimeMillis()
    exec.shutdown()
    info(s"size=$size, threads=$numOfThreads, time=${end - beg}")
    scope.clear
    //    }
    //    println("EEEEEEEEEEEEEEEEEEND")
    //    System.out.flush
  }

  // Tree size shared with the workers; set before the executor starts.
  private[this] var size: Int = _
  // Published to worker threads, hence @volatile.
  @volatile
  private[this] var tdEnum: EnumType = _
  private[this] var exec: ExecutorService = _ //Executors.newFixedThreadPool(numberOfThreads)

  def initExecutor(numberOfThreads: Int) = exec = Executors.newFixedThreadPool(numberOfThreads)

  var i = 0
  // Builds one Runnable per thread; worker `myInd` processes enumerator
  // indices myInd, myInd + numOfThreads, myInd + 2*numOfThreads, ...
  def runners(numberOfThreads: Int): java.util.Collection[Callable[Object]] =
    {
      val al = new java.util.ArrayList[Callable[Object]]()
      i = 0
      while (i < numberOfThreads) {
        al add Executors.callable(new Runnable {
          // Captured at construction time, before `i` is incremented.
          val myInd = i
          val increment = numberOfThreads
          def run = {
            //            try {
            //            var myInd = Thread.currentThread().getId.toInt
            info(s"my id is $myInd, my incrmeent $increment, tdEnum=${tdEnum.hashCode()}")
            val s = size
            //            for (s <- 1 to size) {
            info(s"getting enum")
            val enum = tdEnum.getEnum((s, 1 to s))
            info(s"got enum")
            // Strided iteration: each worker visits a disjoint index subset.
            var ind = myInd
            while (ind < enum.size) {
              info(s"running, myInd is $myInd")
              enum(ind)
              ind += increment
              //              }
            }
            //            } catch {
            //              case t: Throwable =>
            //                println(s"Thrown $t:${t.getStackTrace.mkString("\n")} at $i")
            //            }
            //        val cnt = count.incrementAndGet()
            //        if (cnt == numberOfThreads-1) {
            //          end = System.currentTimeMillis()
            //          println(s"size=$size, threads=$numberOfThreads, time=${end-beg}")
            //          throw new RuntimeException
            //        } else {
            //          while (true) {}
            //        }
          }
        })
        i += 1
      }
      //    println(s"list has ${al.size}")
      if (al.size != numberOfThreads) throw new RuntimeException("al.size")
      al
    }

  def tearDown(i: Int, tdEnum: EnumType, memScope: e.memoization.MemoizationScope): Unit = {
    exec.shutdown()
  }

  // Recursive enumerator of BSTs of a given size over a given value range:
  // pick a root and a left-subtree size, then combine all (left, right)
  // subtree pairs enumerated recursively via `self`.
  val enumeratorFunction =
    (self: Depend[(Int, Range), Tree], pair: (Int, Range)) => {
      val (size, range) = pair

      if (size <= 0) e.Singleton(Leaf)
      else if (size == 1)
        e.WrapArray(range map { v => Node(Leaf, v, Leaf) })
      else {
        val roots = e.Enum(range)
        val leftSizes = e.Enum(0 until size)

        val rootLeftSizePairs = e.Product(leftSizes, roots)

        // Left subtrees hold values strictly below the chosen root.
        val leftTrees: Depend[(Int, Int), Tree] = InMap(self, { (par: (Int, Int)) =>
          val (leftSize, median) = par
          (leftSize, range.start to (median - 1))
        })

        // Right subtrees hold the remaining values above the root.
        val rightTrees: Depend[(Int, Int), Tree] =
          InMap(self, { (par: (Int, Int)) =>
            val (leftSize, median) = par
            (size - leftSize - 1, (median + 1) to range.end)
          })

        val leftRightPairs: Depend[(Int, Int), (Tree, Tree)] =
          Product(leftTrees, rightTrees)

        memoization.Chain.breadthSearchPar[(Int, Int), (Tree, Tree), Node](rootLeftSizePairs, leftRightPairs,
          (p1: (Int, Int), p2: (Tree, Tree)) => {
            val ((leftSize, currRoot), (leftTree, rightTree)) = (p1, p2)
            Node(leftTree, currRoot, rightTree)
          })
      }
    }

  def constructEnumerator(implicit ms: e.memoization.MemoizationScope) = {
    // TODO the opt one freezes the program
    val enum = Depend.memoizedConcurrentNoScope(enumeratorFunction)
    //    val enum = Depend.memoizedConcurrentOptNoScope(enumeratorFunction)
    ms.add(enum)
    enum
  }
} | kaptoxic/SciFe | src/bench/test/scala/scife/enumeration/parallel/test/BinarySearchTree.scala | Scala | gpl-2.0 | 5,181 |
package com.taig.tmpltr.reflect
import Reflection.mirror
import scala.reflect.runtime.universe._
/**
 * Reflection helper wrapping the `ModuleSymbol` of a Scala companion object.
 *
 * @tparam C the type of the class this companion object accompanies
 */
case class Companion[C]( companion: ModuleSymbol ) extends Reflection
{
  // Resolve the companion's module symbol from a runtime Java class.
  def this( `class`: java.lang.Class[_] ) = this( mirror.moduleSymbol( `class` ) )

  // Resolve the companion's module symbol from a fully qualified name.
  def this( name: String ) = this( mirror.staticModule( name ) )

  // The class that this companion object accompanies, looked up by full name.
  def getAccompanyingClass: Class[C] = Class[C]( companion.fullName )
}
/** Factory methods mirroring the auxiliary constructors of [[Companion]]. */
object Companion
{
  def apply[C]( `class`: java.lang.Class[_] ): Companion[C] = new Companion[C]( `class` )

  def apply[C]( name: String ): Companion[C] = new Companion[C]( name )
} | Taig/Play-Tmpltr | app/com/taig/tmpltr/reflect/Companion.scala | Scala | mit | 573 |
/*
* The MIT License (MIT)
*
* Copyright (c) 2016 GatlingCql developers
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package io.github.gatling.cql.request
import akka.actor.ActorSystem
import com.datastax.driver.core.Statement
import com.google.common.util.concurrent.{Futures, MoreExecutors}
import io.gatling.commons.stats.KO
import io.gatling.commons.util.TimeHelper.nowMillis
import io.gatling.commons.validation.Validation
import io.gatling.core.action.{Action, ExitableAction}
import io.gatling.core.session.Session
import io.gatling.core.stats.StatsEngine
import io.gatling.core.stats.message.ResponseTimings
import io.github.gatling.cql.response.CqlResponseHandler
// Gatling action that resolves a CQL statement from the virtual-user session,
// executes it asynchronously against Cassandra, and hands completion to a
// CqlResponseHandler.
class CqlRequestAction(val name: String, val next: Action, system: ActorSystem, val statsEngine: StatsEngine, protocol: CqlProtocol, attr: CqlAttributes)
  extends ExitableAction {

  def execute(session: Session): Unit = {
    // Resolving the statement may fail (e.g. missing session attributes);
    // Validation carries either the Statement or an error message.
    val stmt: Validation[Statement] = attr.statement(session)

    // Failure path: record a KO response with zero duration and move the
    // user on, marked as failed.
    stmt.onFailure(err => {
      statsEngine.logResponse(session, name, ResponseTimings(nowMillis, nowMillis), KO, None, Some("Error setting up prepared statement: " + err), Nil)
      next ! session.markAsFailed
    })

    // Success path: apply the configured consistency levels, start the async
    // execution, and let the response handler log timing and run the checks.
    stmt.onSuccess({ stmt =>
      stmt.setConsistencyLevel(attr.cl)
      stmt.setSerialConsistencyLevel(attr.serialCl)
      val start = nowMillis
      val result = protocol.session.executeAsync(stmt)
      // NOTE(review): sameThreadExecutor runs the callback on the thread that
      // completes the future (typically a driver thread) — confirm the
      // handler does no blocking work there.
      Futures.addCallback(result, new CqlResponseHandler(next, session, system, statsEngine, start, attr.tag, stmt, attr.checks), MoreExecutors.sameThreadExecutor)
    })
  }
}
| Mishail/GatlingCql | src/main/scala/io/github/gatling/cql/request/CqlRequestAction.scala | Scala | mit | 2,600 |
/***
* Excerpted from "Seven Concurrency Models in Seven Weeks",
* published by The Pragmatic Bookshelf.
* Copyrights apply to this code. It may not be used to create training material,
* courses, books, articles, and the like. Contact us if you are in doubt.
* We make no guarantees that this code is fit for any purpose.
* Visit http://www.pragmaticprogrammer.com/titles/pb7con for more book information.
***/
package com.paulbutcher
import akka.actor._
import akka.cluster._
import akka.cluster.ClusterEvent._
import scopt.mutable.OptionParser
object HelloCluster extends App {

  val opts = parseCommandline

  // Mutable holder for the command-line options, pre-populated with defaults.
  class Options {
    var localHost = "127.0.0.1"
    var localPort = "2552"
    var clusterHost = "127.0.0.1"
    var clusterPort = 2552
  }

  // Parses args into an Options instance; exits the process on invalid input.
  def parseCommandline = new Options {
    val optionParser = new OptionParser {
      opt("h", "local-host", "hostname", { localHost = _ })
      opt("p", "local-port", "port", { localPort = _ })
      opt("H", "cluster-host", "hostname", { clusterHost = _ })
      intOpt("P", "cluster-port", "port", { clusterPort = _ })
    }
    if (!optionParser.parse(args))
      sys.exit(0)
  }

  // These properties configure Akka's remote transport and are set before the
  // ActorSystem below is created so that it picks them up.
  System.setProperty("akka.remote.netty.hostname", opts.localHost)
  System.setProperty("akka.remote.netty.port", opts.localPort)

  val system = ActorSystem("ClusterTest")
  val testActor = system.actorOf(Props[TestActor], "test-actor")

  // Subscribe the test actor to membership events, then join the cluster via
  // the given seed node address.
  Cluster(system).subscribe(testActor, classOf[MemberEvent])
  Cluster(system).join(
    Address("akka", "ClusterTest", opts.clusterHost, opts.clusterPort))
}
| XBOOS/concurrency | code/ActorsScala/HelloCluster/src/main/scala/com/paulbutcher/HelloCluster.scala | Scala | gpl-2.0 | 1,553 |
/*
* Copyright 2013 MaurΓcio Linhares
*
* MaurΓcio Linhares licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.github.mauricio.async.db.column
/**
 * Column codec for `Double` values.
 *
 * Encoding is inherited from [[ColumnEncoderDecoder]]; decoding delegates to
 * the JDK's string-to-double parser (exactly what `String#toDouble` does).
 */
object DoubleEncoderDecoder extends ColumnEncoderDecoder {
  override def decode(value: String): Double = java.lang.Double.parseDouble(value)
}
| outbrain/postgresql-async | db-async-common/src/main/scala/com/github/mauricio/async/db/column/DoubleEncoderDecoder.scala | Scala | apache-2.0 | 807 |
/**
* Copyright (C) 2007 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.processor.converter
import org.orbeon.oxf.util.NetUtils
import org.orbeon.oxf.processor.impl.CacheableTransformerOutputImpl
import org.orbeon.oxf.pipeline.api.PipelineContext
import org.orbeon.oxf.processor.{ProcessorImpl, ProcessorOutput}
import org.orbeon.oxf.externalcontext.ServletURLRewriter
import org.orbeon.oxf.rewrite.Rewrite
import org.orbeon.oxf.xml.XMLConstants.XHTML_NAMESPACE_URI
import org.orbeon.oxf.xml.XMLReceiver
/**
* This rewriter always rewrites using ServletURLRewriter.
*/
class XHTMLServletRewrite extends XHTMLRewrite {
// Produce a cacheable transformer output that streams the DATA input through
// a URL-rewriting SAX receiver, always using a ServletURLRewriter.
override def createOutput(name: String): ProcessorOutput =
addOutput(name, new CacheableTransformerOutputImpl(this, name) {
def readImpl(pipelineContext: PipelineContext, xmlReceiver: XMLReceiver): Unit = {
val externalContext = NetUtils.getExternalContext
val rewriter =
Rewrite.getRewriteXMLReceiver(
rewriter = new ServletURLRewriter(externalContext.getRequest),
xmlReceiver = xmlReceiver,
// Rewrite a whole document (not a fragment), restricted to the XHTML namespace.
fragment = false,
rewriteURI = XHTML_NAMESPACE_URI
)
// Read the DATA input as SAX events, funneling them through the rewriter.
readInputAsSAX(pipelineContext, ProcessorImpl.INPUT_DATA, rewriter)
}
})
} | orbeon/orbeon-forms | src/main/scala/org/orbeon/oxf/processor/converter/XHTMLServletRewrite.scala | Scala | lgpl-2.1 | 1,872 |
package leo.datastructures.impl
import java.util.NoSuchElementException
import leo.datastructures.{ClauseProxy, MultiPriorityQueue, Prettier, Signature}
import scala.collection.mutable
/**
* Created by lex on 13.06.16.
*/
// Multi-priority queue: one mutable.PriorityQueue per registered ordering, all
// sharing the same ObjectProxy instances. Dequeuing from one queue clears the
// shared proxy, so the element is lazily skipped by the other queues. Removal
// is likewise lazy, via the `deletedObjects` tombstone set.
class MultiPriorityQueueImpl[A] extends MultiPriorityQueue[A] {
// Shared wrapper around an element. `eternalElem` keeps the value for ordering
// even after `clear()` nulls `elem` (the "already dequeued elsewhere" marker).
private final class ObjectProxy(x: A) {
protected[impl] val eternalElem : A = x
private var elem: A = x
def get: A = elem
def clear(): Unit = {elem = null.asInstanceOf[A]}
// NOTE(review): equality is based on the clearable `elem`, so two proxies of
// the same element compare unequal once one is cleared — confirm intended.
override def equals(o: Any) = o match {
case x:ObjectProxy @unchecked => x.elem == elem
case _ => false
}
override def hashCode: Int = if (elem == null) 0 else elem.hashCode()
}
// Lifts an element ordering to proxies, ordering by the immutable eternalElem.
private final def toProxyOrdering(ord: Ordering[A]): Ordering[ObjectProxy] = {
new Ordering[ObjectProxy] {
@inline final def compare(x: ObjectProxy, y: ObjectProxy): OrderingKey = {
ord.compare(x.eternalElem, y.eternalElem)
}
}
}
// One priority queue per registered ordering; index = OrderingKey.
private var priorityQueues: Seq[mutable.PriorityQueue[ObjectProxy]] = Vector.empty
private var priorities0: Seq[Ordering[A]] = Vector.empty
// True once the first ordering has been added; before that, operations no-op.
private var initialized = false
// Tombstones for lazily removed elements.
private var deletedObjects: Set[A] = Set.empty
// Inserts x into every queue (one shared proxy). Re-inserting a tombstoned
// element just cancels the tombstone instead of enqueuing again.
def insert(x: A): Unit = {
if (!initialized) return
if (deletedObjects.contains(x))
deletedObjects = deletedObjects - x
else {
val queues = priorityQueues.size
var i = 0
val op = new ObjectProxy(x)
while (i < queues) {
priorityQueues(i).enqueue(op)
i = i+1
}
}
}
// Lazy removal: mark as deleted; queues drop it when it surfaces.
def remove(x: A): Unit = {
if (!initialized) return
deletedObjects = deletedObjects + x
}
// Registers a new ordering and returns its key. Later-added queues are seeded
// from the head queue's current proxies.
def addPriority(p: Ordering[A]): OrderingKey = {
val newPrioQueue = mutable.PriorityQueue.empty(toProxyOrdering(p))
val key = priorityQueues.size
if (!initialized) initialized = true
else newPrioQueue ++= priorityQueues.head
priorityQueues = priorityQueues :+ newPrioQueue
priorities0 = priorities0 :+ p
key
}
def priorityCount: Int = priorityQueues.size
def priority(key: OrderingKey): Ordering[A] = priorities0(key)
// Dequeues the best live element w.r.t. ordering k, skipping cleared proxies
// and tombstoned elements (recursing until a live one is found).
def dequeue(k: OrderingKey): A = {
if (priorityQueues.size-1 < k) throw new NoSuchElementException
else {
val result = priorityQueues(k).dequeue()
if (result.get == null) dequeue(k)
else {
val resultElement = result.get
// Clearing the shared proxy invalidates this element in all other queues.
result.clear()
if (deletedObjects.contains(resultElement)) {
deletedObjects = deletedObjects - resultElement
dequeue(k)
}
else resultElement
}
}
}
def size: Int = {
if (!initialized) throw new IllegalStateException
else {
priorityQueues.head.size // FIXME: ALso possibily counts the removed ones
}
}
// Checks the head queue, draining dead (cleared/tombstoned) proxies as a side
// effect until a live head or emptiness is observed.
def isEmpty: Boolean = {
if (!initialized) throw new IllegalStateException
else {
val pq = priorityQueues.head
if (pq.isEmpty) true
else {
val result = pq.head
if (result.get == null) {
priorityQueues.head.dequeue()
isEmpty
} else {
val elem = result.get
if (deletedObjects.contains(elem)) {
priorityQueues.head.dequeue()
isEmpty
} else false
}
}
}
}
// Peeks the best live element for ordering k, draining dead proxies as it goes.
def head(k: OrderingKey): A = {
if (priorityQueues.size-1 < k) throw new NoSuchElementException
else {
val result = priorityQueues(k).head
if (result.get == null) {
priorityQueues(k).dequeue()
head(k)
} else {
val elem = result.get
if (deletedObjects.contains(elem)) {
priorityQueues(k).dequeue()
head(k)
} else elem
}
}
}
// Snapshot of live elements (dead proxies filtered out, tombstones honored).
def toSet: Set[A] = if (priorityQueues.isEmpty) Set() else priorityQueues.head.view.filterNot(elem => elem.get == null || deletedObjects.contains(elem.get)).map(_.get).toSet
// Debug dump of all queues' live contents, one line per queue.
def pretty : String = {
val sb = new StringBuilder
sb.append("Multipriority Queue:")
var qC = 0
val it = priorityQueues.iterator
while(it.hasNext){
val q = it.next()
sb.append(s"\n > Queue $qC: ")
qC += 1
// Clone before dequeueAll so the real queue is left untouched.
val itE = q.clone().dequeueAll.iterator
while(itE.hasNext){
val e = itE.next().get
if(e != null && !deletedObjects.contains(e)){
sb.append(if(e.isInstanceOf[ClauseProxy]) s"[${e.asInstanceOf[ClauseProxy].id}]" else e.toString)
if(itE.hasNext) {sb.append(", ")}
}
}
}
sb.toString()
}
}
| lex-lex/Leo-III | src/main/scala/leo/datastructures/impl/MultiPriorityQueueImpl.scala | Scala | bsd-3-clause | 4,419 |
package lila.relay
package command
import org.joda.time.DateTime
import scala.util.matching.Regex
import scala.util.{ Try, Success, Failure }
// A command sent to the FICS relay: knows its wire string (`str`) and how to
// parse the server's reply lines into a typed Result.
sealed trait Command extends FICS.Stashable {
type Result
def str: String
// Returns None when the given reply lines do not belong to this command.
def parse(lines: List[String]): Option[Result]
}
// Lists tournaments currently in progress on the relay.
case object ListTourney extends Command {
type Result = List[Tourney]
val str = "tell relay listtourney"
// Only parses when the marker line is present; each matching line yields a
// Tourney (lines with non-numeric ids are dropped via parseIntOption/flatten).
def parse(lines: List[String]) = {
lines.exists(_ contains "The following tournaments are currently in progress:")
} option {
lines.collect {
case Regexp(id, name, status) => parseIntOption(id) map {
Tourney(_, name.trim, status match {
case "Round Started" => Relay.Status.Started
case "Round Over" => Relay.Status.Finished
case _ => Relay.Status.Unknown
})
}
}.flatten
}
case class Tourney(ficsId: Int, name: String, status: Relay.Status)
// NOTE(review): backslashes appear doubled inside this triple-quoted string
// (`\\d` is a literal backslash + d in triple quotes); this looks like an
// escaping artifact of the copy — verify against the upstream source.
private val Regexp = """^:(\\d+)\\s+(.+)\\s{2,}(.+)$""".r
}
// Lists the games of tournament `id` on the relay.
case class ListGames(id: Int) extends Command {
type Result = ListGames.Result
val str = s"tell relay listgame $id"
// Unknown tournament => empty list; otherwise parse only when the
// "There are ..." marker is present, collecting one Game per matching line.
def parse(lines: List[String]) =
if (lines.exists(_ contains "There is no tournament with id")) Nil.some
else lines.exists(_ contains "There are ") option {
lines.collect {
case ListGames.Regexp(ficsId, white, black) => parseIntOption(ficsId) map {
ListGames.Game(_, white, black)
}
}.flatten
}
}
case object ListGames {
type Result = List[Game]
// One relayed game: FICS game id plus white/black player handles.
case class Game(ficsId: Int, white: String, black: String)
// NOTE(review): doubled backslashes in this triple-quoted regex look like a
// copy/escaping artifact (triple quotes need no escaping) — verify upstream.
private val Regexp = """(?i)^:(\\d+)\\s+([a-z0-9]+)\\s+([a-z0-9]+).+$""".r
}
// Queries the remaining clock times of the game `player` is in.
case class GetTime(player: String) extends Command {
type Result = GetTime.Result
import GetTime._
val str = s"time $player"
// Joins the reply lines and matches the clock regex. Returns None when the
// reply isn't a time report; otherwise Some(Success(Times)) or Some(Failure)
// when the times are malformed or report a different player.
def parse(lines: List[String]) =
lines.mkString("\\n") match {
case Regexp(name, white, black) =>
// Both clocks must convert (applicative |@|) to build a Times.
if (name == player) toTenths(white) |@| toTenths(black) apply Times.apply match {
case Some(data) => Success(data).some
case None => Failure(new Exception(s"Invalid times $lines")).some
}
else Failure(new Exception(s"Got times for the wrong player $player != $name")).some
case _ => none
}
}
object GetTime {
type Result = Try[Times]
// Remaining time per side, in tenths of a second.
case class Times(white: Int, black: Int)
// NOTE(review): doubled backslashes here look like a copy/escaping artifact
// (triple quotes need no escaping) — verify against the upstream source.
private val Regexp =
("""(?s)Game \\d+: (\\w+).*White Clock : ([0-9:\\.]+).*Black Clock : ([0-9:\\.]+)""").r.unanchored
// White Clock : 11:01.033
// White Clock : 1:31:00.000
// Converts a clock string to tenths of a second. Normalizes '.' to ':' so the
// fields split uniformly, then dispatches on how many fields are present
// (ss.mmm / mm:ss.mmm / hh:mm:ss.mmm). Unparseable clocks log and yield None.
def toTenths(clock: String): Option[Int] =
clock.replace(".", ":").split(":").flatMap(parseIntOption) match {
case Array(seconds, millis) => Some(seconds * 10 + millis / 100)
case Array(minutes, seconds, millis) => Some((60 * minutes + seconds) * 10 + millis / 100)
case Array(hours, minutes, seconds, millis) => Some((60 * 60 * hours + 60 * minutes + seconds) * 10 + millis / 100)
case _ =>
println(s"[relay] invalid clock $clock")
none
}
}
// Fetches the move list of FICS game `ficsId`; parsing delegates to the
// Moves companion.
case class Moves(ficsId: Int) extends Command {
type Result = Moves.Result
val str = s"moves $ficsId"
def parse(lines: List[String]) = Moves.parse(ficsId, lines)
}
case object Moves {
type Result = Try[Game]
// Parses a FICS movelist reply: None when it's for another game, otherwise
// Some(Success(Game)) built from the players header and the move lines, or
// Some(Failure) when the header line cannot be matched.
def parse(ficsId: Int, lines: List[String]) =
lines.find(_ contains s"Movelist for game $ficsId") map { firstLine =>
lines collectFirst {
case PlayersR(wt, wn, wr, bt, bn, br, date) => Game(
white = Player(wn, wt.some.filter(_.nonEmpty), parseIntOption(wr)),
black = Player(bn, bt.some.filter(_.nonEmpty), parseIntOption(br)),
// Take the contiguous run of move lines and strip "(...)" clock comments.
pgn = MoveCommentR.replaceAllIn(
lines.dropWhile { l =>
!matches(MoveLineR, l)
}.takeWhile { l =>
matches(MoveLineR, l)
}.mkString(" ").trim,
""),
date = DateTime.now,
title = firstLine.trim)
} match {
case None => Failure(new Exception(s"Invalid moves data ${lines.headOption}"))
case Some(res) => Success(res)
}
}
// A side's player: handle, optional FIDE-style title, optional rating.
case class Player(name: String, title: Option[String], rating: Option[Int]) {
def ficsName = s"${~title}$name"
// Splits CamelCase handles into words ("MagnusCarlsen" -> "Magnus Carlsen").
def splitName = name.split("(?=\\\\p{Upper})") mkString " "
}
case class Game(white: Player, black: Player, pgn: String, date: DateTime, title: String)
// NOTE(review): backslashes in the regex literals below appear doubled
// (triple quotes need no escaping) — likely a copy/escaping artifact; verify
// against the upstream source before relying on these patterns.
private val MoveLineR = """^\\d+\\.(\\s+[^\\s]+){2,4}""".r
private val MoveCommentR = """\\([^\\)]+\\)""".r
private val TitleR = """(CM|NM|FM|IM|GM|WGM|WIM|WFM|)"""
private val NameR = """(\\w+)\\s\\((.+)\\)"""
private val PlayersR = (s"""^${TitleR}${NameR}\\\\svs\\\\.\\\\s${TitleR}${NameR}\\\\s---\\\\s(.+)$$""").r
private def matches(r: Regex, str: String) = r.pattern.matcher(str).matches
}
| pavelo65/lila | modules/relay/src/main/command.scala | Scala | mit | 4,720 |
package breeze.stats.distributions
/*
Copyright 2009 David Hall, Daniel Ramage
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import org.scalatest._;
import org.scalatest.junit._;
import org.scalatest.prop._;
import org.scalacheck._;
import org.junit.runner.RunWith
import org.apache.commons.math3.random.MersenneTwister
@RunWith(classOf[JUnitRunner])
// Sampling-based moment tests for the Pareto distribution (via MomentsTestBase).
class ParetoTest extends FunSuite with Checkers with MomentsTestBase[Double] {
import Arbitrary.arbitrary
override val numSamples = 40000
// The tested value type is Double itself, so the conversions are identities.
def asDouble(x: Double) = x
def fromDouble(x: Double) = x
// Generator keeping parameters away from degenerate regions: location > 1.1,
// scale in [4, 12) so the low moments stay finite; RNG is seeded for determinism.
implicit def arbDistr: Arbitrary[Pareto] = Arbitrary {
for(location <- arbitrary[Double].map{x => math.abs(x) % 1000.0 + 1.1}; // Pareto pdf at 0 not defined when location == 1
scale <- arbitrary[Double].map {x => math.abs(x) % 8.0 + 4.0}) yield new Pareto(location,scale)(new RandBasis(new MersenneTwister(0)))
}
override type Distr = Pareto
}
| dlwh/breeze | math/src/test/scala/breeze/stats/distributions/ParetoTest.scala | Scala | apache-2.0 | 1,415 |
package edu.arizona.sista.twitter4food
import edu.arizona.sista.struct._
import Mixins._
import edu.arizona.sista.learning._
import scala.collection.immutable.IndexedSeq
import scala.collection.mutable.ArrayBuffer
import scala.collection.Set
/**
* Created by dfried on 2/2/14.
*/
// Marker ADT selecting how feature counters are normalized (see CounterProcessor).
trait NormalizationType
// Leave counts unchanged.
case object NoNorm extends NormalizationType
// Scale each datum's counter by that datum's own maximum count.
case object DatumNorm extends NormalizationType
// Scale each feature by its maximum count across all example counters.
case object FeatureNorm extends NormalizationType
// Decides, per feature, whether it should be kept in the feature set.
abstract class FeatureSelector[L, F] {
def useFeature(f: F): Boolean
}
// Feature selector keeping the `numToRetain` highest-scoring features; the
// score function is supplied by subclasses via `featureScores`.
// `forceFeatures`, when set, restricts/extends which features may pass.
abstract class ThresholdScore[L, F](val data: Seq[Counter[F]],
val labels: Seq[L],
val numToRetain: Int,
val forceFeatures: Option[Set[F]] = None) extends FeatureSelector[L,F]{
assert(numToRetain >= 1)
// Union of all features seen in the data (assumes `data` is non-empty, else reduce throws).
val featureSet: Set[F] = data.map(_.keySet).reduce(_++_)
val labelSet: Set[L] = labels.toSet
// Subclass-provided score per feature; higher is better.
val featureScores: Map[F, Double]
// Score of the numToRetain-th best feature; features scoring >= cutoff are kept.
lazy val cutoff = if (numToRetain < featureScores.size)
featureScores.toSeq.sortBy(_._2).reverse(numToRetain - 1)._2
else
0
// NOTE(review): when forceFeatures is None the first disjunct is always true,
// so EVERY feature passes and the score threshold is bypassed entirely.
// Possibly `&&` between the two parenthesized groups was intended — confirm.
def useFeature(f: F): Boolean = {
(forceFeatures == None || forceFeatures.get.contains(f)) || (featureScores.contains(f) && featureScores(f) >= cutoff)
}
// Drops from `counter` every feature that is not selected.
def apply(counter: Counter[F]): Counter[F] = counter.filter({ case (f, x) => useFeature(f) })
}
// Scores features by average pointwise mutual information with the labels:
// score(f) = sum over labels l of P(l) * PMI(f, l).
class PointwiseMutualInformation[L, F](data: Seq[Counter[F]],
labels: Seq[L],
numToRetain: Int,
forceFeatures: Option[Set[F]] = None) extends ThresholdScore[L, F](data,
labels, numToRetain, forceFeatures) {
// Number of data points carrying each label.
val labelCounts: Map[L, Int] = labels.groupBy(identity).mapValues(_.size)
val N = data.size
assert(labels.size == N)
// PMI(f, l) = log( P(f, l) / (P(f) P(l)) ) estimated from co-occurrence counts.
// Note: log(0) = -Infinity when f never co-occurs with l.
def pmi(f: F, l: L, countL: Int) = {
val hasF: Counter[F] => Boolean = (_.keySet.contains(f))
// The `case _ => false` branch is unreachable (the first case matches all pairs).
val countBoth = ((data zip labels).toStream filter {
case (datum, l1) => l == l1 && hasF(datum)
case _ => false
}).size
val countF = (data.toStream filter hasF).size
// println(s"f: $f, l: $l")
// println(s"countF: $countF, countL: $countL, countBoth: $countBoth")
val ratio = countBoth * N.toFloat / (countF * countL)
// println(s"prob ratio: $ratio ")
Math.log(ratio)
}
// Label-probability-weighted average of pmi over all labels.
def averageMI(f: F) = {
// average mutal information
labelCounts.toSeq.map({
case (l, countL) => countL.toFloat / N * pmi(f, l, countL)
}).sum
}
lazy val featureScores = (for {
feature <- featureSet.toSeq
} yield (feature -> averageMI(feature))).toMap
}
// Scores features by information gain: the drop in label entropy obtained by
// splitting the data on feature presence/absence.
class MutualInformation[L, F](data: Seq[Counter[F]],
labels: Seq[L],
numToRetain: Int,
forceFeatures: Option[Set[F]] = None) extends ThresholdScore[L, F](data, labels, numToRetain, forceFeatures) {
// aka information gain, in Yang's paper
// Shannon entropy (natural log) of an empirical outcome distribution.
def entropy[O](outcomes: Seq[O]) = {
val groupedOutcomes: Seq[Seq[O]] = outcomes.groupBy(identity).values.toSeq
val probs = groupedOutcomes.map(_.size.toFloat / outcomes.size).toSeq
-1 * probs.map(x => x * Math.log(x)).sum
}
// IG(f) = H(labels) - [ P(f) H(labels | f) + P(~f) H(labels | ~f) ].
def informationGain(f: F) = {
// mutual information
val groupedLabels = (data zip labels).groupBy({ case (datum, label) => datum.keySet.contains(f) }).mapValues(_
.map(_._2))
val (positiveLabels, negativeLabels) = (groupedLabels.get(true).getOrElse(List()),
groupedLabels.get(false).getOrElse(List()))
// calculate marginal probability of feature
val p_f: Float = positiveLabels.size.toFloat / data.size
val p_not_f: Float = negativeLabels.size.toFloat / data.size
assert(Math.abs(1 - (p_f + p_not_f)) < 1e-6, "probabilities do not sum to 1")
entropy(labels) - (p_f * entropy(positiveLabels) + p_not_f * entropy(negativeLabels))
}
lazy val featureScores = {
println("calculating feature scores")
val stuff = (for {
feature <- featureSet.toSeq
} yield (feature -> informationGain(feature))).toMap
println("done")
stuff
}
}
// Pipeline over feature counters: thresholding (by value or by cumulative
// percentage of total mass) followed by normalization. Both steps are fitted
// once from `exampleCounters` and then applied per counter via `apply`.
class CounterProcessor[A](val exampleCounters: Seq[Counter[A]],
val normalizationType: NormalizationType = NoNorm,
val thresholdPercentage: Option[Double] = None,
val thresholdValue: Option[Double] = None) extends Serializable{
// At most one thresholding mode may be chosen.
assert(thresholdPercentage == None || thresholdValue == None)
val normalizer: (Counter[A] => Counter[A]) = normalizationType match {
case NoNorm => identity _
case DatumNorm => (counter => counter / counter.values.max)
case FeatureNorm => {
// Per-feature max across all example counters (features never seen default to 1.0).
val maxByFeature: Counter[A] = exampleCounters.reduce(_.zipWith(Math.max)(_))
counter => (counter / Counter.withDefault(1.0)(maxByFeature)).filterValues(x => x != 0)
}
}
val threshold: (Counter[A] => Counter[A]) ={
thresholdPercentage match {
case None => thresholdValue match {
case None =>
identity _
// Keep only features whose count is at least k.
case Some(k) => {
counter => counter.filterValues(_ >= k)
}
}
case Some(pct) => {
// Keep the top features until `pct` of the total example mass is covered.
// Assumes counter.sorted yields (feature, count) pairs best-first — confirm.
val totalCounts = exampleCounters.reduce(_+_)
val threshold = totalCounts.values.sum * pct
counter => {
// sstream.head throws on an empty counter — presumably never empty here; verify.
val sstream = counter.sorted.toStream
val cumulative = sstream.tail.scanLeft(sstream.head) {
case ((_, s), (t, c)) => (t, s + c)
}
val newCounter = new Counter[A]
// NOTE(review): `count` here is the CUMULATIVE running sum, not the
// feature's own count — storing it via setCount looks like a bug; confirm.
for ((key, count) <- cumulative.takeWhile { case (t, c) => c <= threshold })
newCounter.setCount(key, count)
newCounter
}
}
}
}
// Applies normalization to the thresholded counter.
def apply(counter: Counter[A]): Counter[A] = normalizer(threshold(counter))
}
| VivianLuwenHuangfu/twitter4food | src/main/scala/edu/arizona/sista/twitter4food/Preprocessing.scala | Scala | apache-2.0 | 5,735 |
/*
* @author Daniel Strebel
*
* Copyright 2012 University of Zurich
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.signalcollect.configuration
import akka.actor.ActorSystem
/**
 * Global, thread-safe registry of actor systems, keyed by system name.
 * Both mutation and lookup synchronize on this object.
 */
object ActorSystemRegistry {
  var systems = Map[String, ActorSystem]()

  /** Registers `system` under its name, replacing any previous entry. */
  def register(system: ActorSystem) {
    synchronized {
      systems = systems.updated(system.name, system)
    }
  }

  /** Looks up a previously registered actor system by name. */
  def retrieve(name: String): Option[ActorSystem] = synchronized {
    systems.get(name)
  }
}
/*
* Copyright (C) 2016 University of Basel, Graphics and Vision Research Group
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package scalismo.ui.rendering.actor.mixin
import vtk.vtkActor
/**
* This trait is only used as a flag to indicate that this actor is rendering images.
* It is used to prioritize the order of adding actors to a renderer.
*
* See the [[scalismo.ui.rendering.RendererPanel]] implementation for more details.
*/
trait IsImageActor extends vtkActor
| unibas-gravis/scalismo-ui | src/main/scala/scalismo/ui/rendering/actor/mixin/IsImageActor.scala | Scala | gpl-3.0 | 1,090 |
/*
* Wire
* Copyright (C) 2016 Wire Swiss GmbH
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.waz.service.conversation
import com.waz.api
import com.waz.api.IConversation.{Access, AccessRole}
import com.waz.api.Message
import com.waz.api.NetworkMode.{OFFLINE, WIFI}
import com.waz.api.impl._
import com.waz.service.assets2._
import com.waz.content._
import com.waz.log.BasicLogging.LogTag.DerivedLogTag
import com.waz.log.LogSE._
import com.waz.model.ConversationData.{ConversationType, getAccessAndRoleForGroupConv}
import com.waz.model.GenericContent.{Location, MsgEdit}
import com.waz.model.UserData.ConnectionStatus
import com.waz.model._
import com.waz.model.sync.ReceiptType
import com.waz.service.AccountsService.InForeground
import com.waz.service.ZMessaging.currentBeDrift
import com.waz.service._
import com.waz.service.assets2.Asset.Video
import com.waz.service.conversation.ConversationsService.generateTempConversationId
import com.waz.service.messages.{MessagesContentUpdater, MessagesService}
import com.waz.service.tracking.TrackingService
import com.waz.sync.SyncServiceHandle
import com.waz.sync.client.{ConversationsClient, ErrorOr}
import com.waz.threading.{CancellableFuture, Threading}
import com.waz.utils._
import com.waz.utils.events.EventStream
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.implicitConversions
import scala.util.control.NonFatal
// UI-facing conversation operations: sending/editing messages, conversation
// state changes, membership management, and conversation creation.
trait ConversationsUiService {
import ConversationsUiService._
// --- message sending ---
// `exp`: None = use the conversation's ephemeral setting; Some(e) = override with e.
def sendTextMessage(convId: ConvId, text: String, mentions: Seq[Mention] = Nil, exp: Option[Option[FiniteDuration]] = None): Future[Some[MessageData]]
def sendTextMessages(convs: Seq[ConvId], text: String, mentions: Seq[Mention] = Nil, exp: Option[FiniteDuration]): Future[Unit]
def sendReplyMessage(replyTo: MessageId, text: String, mentions: Seq[Mention] = Nil, exp: Option[Option[FiniteDuration]] = None): Future[Option[MessageData]]
// `confirmation` is consulted before uploading large assets (see WifiWarningConfirmation).
def sendAssetMessage(convId: ConvId,
content: ContentForUpload,
confirmation: WifiWarningConfirmation = DefaultConfirmation,
exp: Option[Option[FiniteDuration]] = None): Future[Some[MessageData]]
def sendLocationMessage(convId: ConvId, l: api.MessageContent.Location): Future[Some[MessageData]] //TODO remove use of MessageContent.Location
// --- message editing/removal ---
def updateMessage(convId: ConvId, id: MessageId, text: String, mentions: Seq[Mention] = Nil): Future[Option[MessageData]]
def deleteMessage(convId: ConvId, id: MessageId): Future[Unit]
def recallMessage(convId: ConvId, id: MessageId): Future[Option[MessageData]]
// --- conversation state ---
def setConversationArchived(id: ConvId, archived: Boolean): Future[Option[ConversationData]]
def setConversationMuted(id: ConvId, muted: MuteSet): Future[Option[ConversationData]]
def setConversationName(id: ConvId, name: Name): Future[Option[ConversationData]]
// --- membership ---
def addConversationMembers(conv: ConvId, users: Set[UserId]): Future[Option[SyncId]]
def removeConversationMember(conv: ConvId, user: UserId): Future[Option[SyncId]]
def leaveConversation(conv: ConvId): Future[Unit]
def clearConversation(id: ConvId): Future[Option[ConversationData]]
def readReceiptSettings(convId: ConvId): Future[ReadReceiptSettings]
def setReceiptMode(id: ConvId, receiptMode: Int): Future[Option[ConversationData]]
def knock(id: ConvId): Future[Option[MessageData]]
def setLastRead(convId: ConvId, msg: MessageData): Future[Option[ConversationData]]
def setEphemeral(id: ConvId, expiration: Option[FiniteDuration]): Future[Unit]
def setEphemeralGlobal(id: ConvId, expiration: Option[FiniteDuration]): ErrorOr[Unit]
//conversation creation methods
def getOrCreateOneToOneConversation(other: UserId): Future[ConversationData]
def createGroupConversation(name: Option[Name] = None, members: Set[UserId] = Set.empty, teamOnly: Boolean = false, receiptMode: Int = 0): Future[(ConversationData, SyncId)]
// --- asset upload events ---
def assetUploadCancelled : EventStream[Mime]
def assetUploadFailed : EventStream[ErrorResponse]
}
object ConversationsUiService {
// Callback asked before uploading an asset of the given size (bytes);
// resolving to false aborts the send.
type WifiWarningConfirmation = Long => Future[Boolean]
// Confirmation that always approves, regardless of asset size.
val DefaultConfirmation = (_: Long) => Future.successful(true)
val LargeAssetWarningThresholdInBytes = 3145728L // 3MiB
}
class ConversationsUiServiceImpl(selfUserId: UserId,
teamId: Option[TeamId],
assets: AssetService,
usersStorage: UsersStorage,
messages: MessagesService,
messagesStorage: MessagesStorage,
messagesContent: MessagesContentUpdater,
members: MembersStorage,
convsContent: ConversationsContentUpdater,
convStorage: ConversationStorage,
network: NetworkModeService,
convs: ConversationsService,
sync: SyncServiceHandle,
client: ConversationsClient,
accounts: AccountsService,
tracking: TrackingService,
errors: ErrorsService,
propertiesService: PropertiesService) extends ConversationsUiService with DerivedLogTag {
import ConversationsUiService._
import Threading.Implicits.Background
override val assetUploadCancelled = EventStream[Mime]() //size, mime
override val assetUploadFailed = EventStream[ErrorResponse]()
override def sendTextMessage(convId: ConvId, text: String, mentions: Seq[Mention] = Nil, exp: Option[Option[FiniteDuration]] = None) =
for {
rr <- readReceiptSettings(convId)
msg <- messages.addTextMessage(convId, text, rr, mentions, exp)
_ <- updateLastRead(msg)
_ <- sync.postMessage(msg.id, convId, msg.editTime)
} yield Some(msg)
override def sendTextMessages(convs: Seq[ConvId], text: String, mentions: Seq[Mention] = Nil, exp: Option[FiniteDuration]) =
Future.sequence(convs.map(id => sendTextMessage(id, text, mentions, Some(exp)))).map(_ => {})
override def sendReplyMessage(quote: MessageId, text: String, mentions: Seq[Mention] = Nil, exp: Option[Option[FiniteDuration]] = None) =
for {
Some(q) <- messagesStorage.get(quote)
rr <- readReceiptSettings(q.convId)
res <- messages.addReplyMessage(quote, text, rr, mentions, exp).flatMap {
case Some(m) =>
for {
_ <- updateLastRead(m)
_ <- sync.postMessage(m.id, m.convId, m.editTime)
} yield Some(m)
case None =>
Future.successful(None)
}
} yield res
override def sendAssetMessage(convId: ConvId,
content: ContentForUpload,
confirmation: WifiWarningConfirmation = DefaultConfirmation,
exp: Option[Option[FiniteDuration]] = None): Future[Some[MessageData]] = {
verbose(l"sendAssetMessage($convId, $content)")
val messageId = MessageId()
for {
retention <- messages.retentionPolicy2ById(convId)
rr <- readReceiptSettings(convId)
rawAsset <- assets.createAndSaveUploadAsset(content, AES_CBC_Encryption.random, public = false, retention, Some(messageId))
message <- messages.addAssetMessage(convId, messageId, rawAsset, rr, exp)
_ <- updateLastRead(message)
_ <- Future.successful(tracking.assetContribution(AssetId(rawAsset.id.str), selfUserId)) //TODO Maybe we can track raw assets contribution separately?
shouldSend <- checkSize(convId, rawAsset, message, confirmation)
_ <- if (shouldSend) sync.postMessage(message.id, convId, message.editTime) else Future.successful(())
} yield Some(message)
}
override def sendLocationMessage(convId: ConvId, l: api.MessageContent.Location): Future[Some[MessageData]] = {
for {
rr <- readReceiptSettings(convId)
msg <- messages.addLocationMessage(convId, Location(l.getLongitude, l.getLatitude, l.getName, l.getZoom, rr.selfSettings))
_ <- updateLastRead(msg)
_ <- sync.postMessage(msg.id, convId, msg.editTime)
} yield Some(msg)
}
override def updateMessage(convId: ConvId, id: MessageId, text: String, mentions: Seq[Mention] = Nil): Future[Option[MessageData]] = {
verbose(l"updateMessage($convId, $id, $mentions")
messagesStorage.update(id, {
case m if m.convId == convId && m.userId == selfUserId =>
val (tpe, ct) = MessageData.messageContent(text, mentions, weblinkEnabled = true)
verbose(l"updated content: ${(tpe, ct)}")
m.copy(
msgType = tpe,
content = ct,
protos = Seq(GenericMessage(Uid(), MsgEdit(id, GenericContent.Text(text, ct.flatMap(_.mentions), Nil, m.protoQuote, m.protoReadReceipts.getOrElse(false))))),
state = Message.Status.PENDING,
editTime = (m.time max m.editTime) + 1.millis max LocalInstant.Now.toRemote(currentBeDrift)
)
case m =>
warn(l"Can not update msg: $m")
m
}) flatMap {
case Some((_, m)) => sync.postMessage(m.id, m.convId, m.editTime) map { _ => Some(m) } // using PostMessage sync request to use the same logic for failures and retrying
case None => Future successful None
}
}
override def deleteMessage(convId: ConvId, id: MessageId): Future[Unit] = for {
_ <- messagesContent.deleteOnUserRequest(Seq(id))
_ <- sync.postDeleted(convId, id)
} yield ()
override def recallMessage(convId: ConvId, id: MessageId): Future[Option[MessageData]] =
messages.recallMessage(convId, id, selfUserId, time = LocalInstant.Now.toRemote(currentBeDrift)) flatMap {
case Some(msg) =>
sync.postRecalled(convId, msg.id, id) map { _ => Some(msg) }
case None =>
warn(l"could not recall message $convId, $id")
Future successful None
}
private def updateLastRead(msg: MessageData) = convsContent.updateConversationLastRead(msg.convId, msg.time)
override def setConversationArchived(id: ConvId, archived: Boolean): Future[Option[ConversationData]] = convs.setConversationArchived(id, archived)
override def setConversationMuted(id: ConvId, muted: MuteSet): Future[Option[ConversationData]] =
convsContent.updateLastEvent(id, LocalInstant.Now.toRemote(currentBeDrift)).flatMap { _ =>
convsContent.updateConversationMuted(id, muted) map {
case Some((_, conv)) =>
sync.postConversationState(
id,
ConversationState(muted = Some(conv.muted.oldMutedFlag), muteTime = Some(conv.muteTime), mutedStatus = Some(conv.muted.toInt))
)
Some(conv)
case None => None
}
}
override def setConversationName(id: ConvId, name: Name): Future[Option[ConversationData]] = {
verbose(l"setConversationName($id, $name)")
convsContent.updateConversationName(id, name) flatMap {
case Some((_, conv)) if conv.name.contains(name) =>
sync.postConversationName(id, conv.name.getOrElse(Name.Empty))
messages.addRenameConversationMessage(id, selfUserId, name).map(_ => Some(conv))
case conv =>
warn(l"Conversation name could not be changed for: $id, conv: $conv")
CancellableFuture.successful(None)
}
}
override def addConversationMembers(conv: ConvId, users: Set[UserId]) = {
(for {
true <- canModifyMembers(conv)
added <- members.add(conv, users) if added.nonEmpty
_ <- messages.addMemberJoinMessage(conv, selfUserId, added.map(_.userId))
syncId <- sync.postConversationMemberJoin(conv, added.map(_.userId).toSeq)
} yield Option(syncId))
.recover {
case NonFatal(e) =>
warn(l"Failed to add members: $users to conv: $conv", e)
Option.empty[SyncId]
}
}
override def removeConversationMember(conv: ConvId, user: UserId) = {
(for {
true <- canModifyMembers(conv)
Some(_) <- members.remove(conv, user)
_ <- messages.addMemberLeaveMessage(conv, selfUserId, user)
syncId <- sync.postConversationMemberLeave(conv, user)
} yield Option(syncId))
.recover {
case NonFatal(e) =>
warn(l"Failed to remove member: $user from conv: $conv", e)
Option.empty[SyncId]
}
}
private def canModifyMembers(convId: ConvId) =
for {
selfActive <- members.isActiveMember(convId, selfUserId)
isGroup <- convs.isGroupConversation(convId)
isWithService <- convs.isWithService(convId)
} yield selfActive && (isGroup || isWithService)
override def leaveConversation(conv: ConvId) = {
verbose(l"leaveConversation($conv)")
for {
_ <- convsContent.setConvActive(conv, active = false)
_ <- removeConversationMember(conv, selfUserId)
_ <- convsContent.updateConversationArchived(conv, archived = true)
} yield {}
}
override def clearConversation(id: ConvId): Future[Option[ConversationData]] = convsContent.convById(id) flatMap {
case Some(conv) if conv.convType == ConversationType.Group || conv.convType == ConversationType.OneToOne =>
verbose(l"clearConversation($conv)")
convsContent.updateConversationCleared(conv.id, conv.lastEventTime) flatMap {
case Some((_, c)) =>
for {
_ <- convsContent.updateConversationLastRead(c.id, c.cleared.getOrElse(RemoteInstant.Epoch))
_ <- convsContent.updateConversationArchived(c.id, archived = true)
_ <- c.cleared.fold(Future.successful({}))(sync.postCleared(c.id, _).map(_ => ()))
} yield Some(c)
case None =>
verbose(l"updateConversationCleared did nothing - already cleared")
Future successful None
}
case Some(conv) =>
warn(l"conversation of type ${conv.convType} can not be cleared")
Future successful None
case None =>
warn(l"conversation to be cleared not found: $id")
Future successful None
}
/** Returns the 1:1 conversation with `other`, creating it if necessary.
  *
  * Personal accounts (and guests of our team) use a "real" 1:1 conversation whose
  * `ConvId` is derived from the other user's id. Team members instead use a "fake"
  * 1:1: a regular group conversation that contains exactly the two of us and has
  * no name.
  */
override def getOrCreateOneToOneConversation(other: UserId) = {

  // Real 1:1: the conversation id is derived deterministically from the other user's id.
  def createReal1to1() =
    convsContent.convById(ConvId(other.str)) flatMap {
      case Some(conv) => Future.successful(conv)
      case _ => usersStorage.get(other).flatMap {
        // Incoming (ignored) connection: create a hidden conversation carrying the connect-request message.
        case Some(u) if u.connection == ConnectionStatus.Ignored =>
          for {
            conv <- convsContent.createConversationWithMembers(ConvId(other.str), u.conversation.getOrElse(RConvId()), ConversationType.Incoming, other, Set(selfUserId), hidden = true)
            _ <- messages.addMemberJoinMessage(conv.id, other, Set(selfUserId), firstMessage = true)
            _ <- u.connectionMessage.fold(Future.successful(conv))(messages.addConnectRequestMessage(conv.id, other, selfUserId, _, u.name).map(_ => conv))
          } yield conv
        // Default: create a private 1:1 conversation locally and post it to the backend.
        case _ =>
          for {
            _ <- sync.postConversation(ConvId(other.str), Set(other), None, None, Set(Access.PRIVATE), AccessRole.PRIVATE, None)
            conv <- convsContent.createConversationWithMembers(ConvId(other.str), RConvId(), ConversationType.OneToOne, selfUserId, Set(other))
            _ <- messages.addMemberJoinMessage(conv.id, selfUserId, Set(other), firstMessage = true)
          } yield conv
      }
    }

  // Fake 1:1 (teams): find an existing unnamed team conversation containing exactly
  // {self, other}, or create a fresh group conversation with just the two of us.
  def createFake1To1(tId: TeamId) = {
    verbose(l"Checking for 1:1 conversation with user: $other")
    (for {
      allConvs <- this.members.getByUsers(Set(other)).map(_.map(_.convId))
      allMembers <- this.members.getByConvs(allConvs.toSet).map(_.map(m => m.convId -> m.userId))
      // Keep only conversations whose member set is exactly {other, self}.
      onlyUs = allMembers.groupBy { case (c, _) => c }.map { case (cid, us) => cid -> us.map(_._2).toSet }.collect { case (c, us) if us == Set(other, selfUserId) => c }
      convs <- convStorage.getAll(onlyUs).map(_.flatten)
    } yield {
      verbose(l"allConvs size: ${allConvs.size}")
      verbose(l"allMembers size: ${allMembers.size}")
      verbose(l"OnlyUs convs size: ${onlyUs.size}")
      if (convs.size > 1)
        warn(l"Found ${convs.size} available team conversations with user: $other, returning first conversation found")
      else verbose(l"Found ${convs.size} convs with other user: $other")
      // The conversation must belong to our team and be unnamed to count as a 1:1 stand-in.
      convs.find(c => c.team.contains(tId) && c.name.isEmpty)
    }).flatMap {
      case Some(conv) => Future.successful(conv)
      case _ => createAndPostConversation(ConvId(), None, Set(other)).map(_._1)
    }
  }

  teamId match {
    case Some(tId) =>
      for {
        user <- usersStorage.get(other)
        // Guests of our team get a real 1:1; fellow team members get the fake variant.
        conv <- if (user.exists(_.isGuest(tId))) createReal1to1() else createFake1To1(tId)
      } yield conv
    case None => createReal1to1()
  }
}
/** Creates a new group conversation locally and posts it to the backend. */
override def createGroupConversation(name: Option[Name] = None, members: Set[UserId] = Set.empty, teamOnly: Boolean = false, receiptMode: Int = 0) =
  createAndPostConversation(ConvId(), name = name, members = members, teamOnly = teamOnly, receiptMode = receiptMode)
/** Creates a group conversation locally (with a temporary remote id), adds the
  * conversation-start system message, and posts the new conversation to the backend.
  *
  * @return the locally created conversation together with the sync id of the post request
  */
private def createAndPostConversation(id: ConvId, name: Option[Name], members: Set[UserId], teamOnly: Boolean = false, receiptMode: Int = 0) = {
  // Access settings depend on whether the conversation is restricted to the team.
  val (ac, ar) = getAccessAndRoleForGroupConv(teamOnly, teamId)
  for {
    conv <- convsContent.createConversationWithMembers(id, generateTempConversationId(members + selfUserId), ConversationType.Group, selfUserId, members, name, access = ac, accessRole = ar, receiptMode = receiptMode)
    _ = verbose(l"created: $conv")
    _ <- messages.addConversationStartMessage(conv.id, selfUserId, members, name, conv.readReceiptsAllowed)
    syncId <- sync.postConversation(id, members, conv.name, teamId, ac, ar, Some(receiptMode))
  } yield (conv, syncId)
}
/** Reads the effective read-receipt configuration: the user's own setting, plus the
  * per-conversation setting (as 0/1) when the conversation is a group.
  */
override def readReceiptSettings(convId: ConvId): Future[ReadReceiptSettings] =
  propertiesService.readReceiptsEnabled.head.flatMap { selfSetting =>
    convs.isGroupConversation(convId).flatMap { isGroup =>
      val convSettingFuture =
        if (isGroup) convStorage.get(convId).map(conv => Some(conv.exists(_.readReceiptsAllowed)))
        else Future.successful(Option.empty[Boolean])
      convSettingFuture.map { convSetting =>
        ReadReceiptSettings(selfSetting, convSetting.map(enabled => if (enabled) 1 else 0))
      }
    }
  }
/** Changes the conversation's receipt mode, first adding the system message announcing the change. */
override def setReceiptMode(id: ConvId, receiptMode: Int): Future[Option[ConversationData]] =
  for {
    _       <- messages.addReceiptModeChangeMessage(id, selfUserId, receiptMode)
    updated <- convs.setReceiptMode(id, receiptMode)
  } yield updated
/** Sends a "knock" (ping) into the conversation and schedules it for posting. */
override def knock(id: ConvId): Future[Option[MessageData]] =
  readReceiptSettings(id).flatMap { rr =>
    messages.addKnockMessage(id, selfUserId, rr).flatMap { msg =>
      sync.postMessage(msg.id, id, msg.editTime).map(_ => Some(msg))
    }
  }
/** Moves the conversation's last-read marker to `msg`'s timestamp.
  *
  * If the marker actually advanced, the new position is synced to other clients,
  * and read receipts are sent for messages in the newly-read range that expect
  * them (subject to the effective read-receipt settings).
  *
  * @return the updated conversation data, or `None` when the marker did not move
  */
override def setLastRead(convId: ConvId, msg: MessageData): Future[Option[ConversationData]] = {

  // Posts read receipts for messages between `from` and `to` that were sent by
  // others and are flagged as expecting a read confirmation.
  def sendReadReceipts(from: RemoteInstant, to: RemoteInstant, readReceiptSettings: ReadReceiptSettings): Future[Seq[SyncId]] = {
    // Nothing to send when receipts are disabled personally and unset for this conversation.
    if (!readReceiptSettings.selfSettings && readReceiptSettings.convSetting.isEmpty) {
      Future.successful(Seq())
    } else {
      messagesStorage.findMessagesBetween(convId, from, to).flatMap { messages =>
        val msgs = messages.filter { m =>
          m.userId != selfUserId && m.expectsRead.contains(true)
        }
        // One receipt request per sender, posted sequentially.
        RichFuture.traverseSequential(msgs.groupBy(_.userId).toSeq)( { case (u, ms) if ms.nonEmpty =>
          sync.postReceipt(convId, ms.map(_.id), u, ReceiptType.Read)
        })
      }
    }
  }

  for {
    readReceipts <- readReceiptSettings(convId)
    update <- convsContent.updateConversationLastRead(convId, msg.time)
    // `update` is None when the marker did not move; skip syncing in that case.
    _ <- update.fold(Future.successful({})) {
      case (_, newConv) => sync.postLastRead(convId, newConv.lastRead).map(_ => {})
    }
    _ <- update.fold(Future.successful({})) {
      case (oldConv, newConv) =>
        sendReadReceipts(oldConv.lastRead, newConv.lastRead, readReceipts).map(_ => {})
    }
  } yield update.map(_._2)
}
/** Sets the local (per-device) ephemeral message expiration for a conversation. */
override def setEphemeral(id: ConvId, expiration: Option[FiniteDuration]) =
  convStorage.update(id, conv => conv.copy(localEphemeral = expiration)).map(_ => ())
/** Sets the conversation-wide (synced) ephemeral message timer via the backend.
  *
  * NOTE(review): `Some(conv) <- ... if ...` is a pattern filter — when the conversation
  * is missing or the timer is unchanged, this returns a *failed* Future (there is no
  * `recover` here). Confirm callers rely on that before changing it.
  */
override def setEphemeralGlobal(id: ConvId, expiration: Option[FiniteDuration]) =
  for {
    Some(conv) <- convsContent.convById(id) if conv.globalEphemeral != expiration
    resp <- client.postMessageTimer(conv.remoteId, expiration).future
    // Only update local state and add the timer-changed system message if the backend accepted the change.
    _ <- resp.mapFuture(_ => convStorage.update(id, _.copy(globalEphemeral = expiration)))
    _ <- resp.mapFuture(_ => messages.addTimerChangedMessage(id, selfUserId, expiration, LocalInstant.Now.toRemote(currentBeDrift)))
  } yield resp
//TODO Refactor this. Maybe move some part of this method into UI project
/** Validates an asset's size before upload.
  *
  * Outcomes, based on the asset's size:
  *  - over the hard limit (videos exempt): mark the message FAILED, record an
  *    asset-too-large error, signal `assetUploadFailed`, and return `false`;
  *  - over the warning threshold while on mobile data with the app in foreground:
  *    mark the message FAILED and ask the user via `confirmation` (retry on yes,
  *    delete on no), returning `false`;
  *  - otherwise return `true` (upload may proceed).
  */
private def checkSize(convId: ConvId, rawAsset: UploadAsset, message: MessageData, confirmation: WifiWarningConfirmation) = {
  val isAssetLarge = rawAsset.size > LargeAssetWarningThresholdInBytes
  // Videos are never rejected outright; the max size depends on whether this is a team account.
  val isAssetTooLarge: Boolean = rawAsset.details match {
    case _: Video => false
    case _ => rawAsset.size > AssetData.maxAssetSizeInBytes(teamId.isDefined)
  }
  if (isAssetTooLarge) {
    for {
      _ <- messages.updateMessageState(convId, message.id, Message.Status.FAILED)
      _ <- errors.addAssetTooLargeError(convId, message.id)
      _ <- Future.successful(assetUploadFailed ! ErrorResponse.internalError("asset too large"))
    } yield false
  } else if (isAssetLarge) {
    for {
      mode <- network.networkMode.head
      inForeground <- accounts.accountState(selfUserId).map(_ == InForeground).head
      res <- if (!Set(OFFLINE, WIFI).contains(mode) && inForeground)
        // will mark message as failed and ask user if it should really be sent
        // marking as failed ensures that user has a way to retry even if he doesn't respond to this warning
        // this is possible if app is paused or killed in meantime, we don't want to be left with message in state PENDING without a sync request
        messages.updateMessageState(convId, message.id, Message.Status.FAILED).map { _ =>
          // The confirmation callback is handled on the UI thread (see explicit executor below).
          confirmation(rawAsset.size).foreach {
            case true => messages.retryMessageSending(convId, message.id)
            case false => messagesContent.deleteMessage(message).map(_ => assetUploadCancelled ! rawAsset.mime)
          }
          false
        }(Threading.Ui)
      else Future.successful(true)
    } yield res
  } else {
    Future.successful(true)
  }
}
}
| wireapp/wire-android-sync-engine | zmessaging/src/main/scala/com/waz/service/conversation/ConversationsUiService.scala | Scala | gpl-3.0 | 23,449 |
package io.fabric8.fab.osgi.itests
import org.ops4j.pax.exam.{MavenUtils, Option}
import scala.Array
import org.ops4j.pax.exam.CoreOptions._
import org.osgi.framework.{Bundle, BundleContext}
/**
* The basic Pax Exam configuration for our integration tests
*/
/**
 * The basic Pax Exam configuration for our integration tests.
 *
 * Mixing this trait into a test provides the standard set of framework bundles
 * (Felix services, Aries blueprint, Karaf shell, ...) plus Scala and FAB support,
 * together with helpers for building `fab:` URLs and for observing which bundles
 * a block of code installs.
 */
trait FabIntegrationTestSupport {

  /** Version of the project under test, injected by the build. */
  lazy val VERSION = System.getProperty("project.version")

  /** Camel version from the dependency metadata, with a system-property fallback. */
  lazy val CAMEL_VERSION = try {
    MavenUtils.getArtifactVersion("org.apache.camel", "camel-core")
  } catch {
    // MavenUtils throws a RuntimeException when the artifact metadata is unavailable.
    case _: RuntimeException => System.getProperty("camel.version")
  }

  /** Karaf version from the dependency metadata, with a system-property fallback. */
  lazy val KARAF_VERSION = try {
    MavenUtils.getArtifactVersion("org.apache.karaf.features", "org.apache.karaf.features.core")
  } catch {
    case _: RuntimeException => System.getProperty("karaf.version")
  }

  /** Local Maven repository used to resolve bundles. */
  lazy val LOCAL_REPOSITORY = System.getProperty("org.ops4j.pax.url.mvn.localRepository")

  /** Remote repositories to resolve from, as a comma-separated list. */
  lazy val REPOSITORIES = Array("http://repo1.maven.org/maven2/",
    "https://repo.fusesource.com/nexus/content/repositories/public",
    "https://repo.fusesource.com/nexus/content/groups/ea",
    "http://repo.fusesource.com/nexus/groups/m2-proxy").mkString(",")

  /**
   * The base Pax Exam configuration
   */
  val baseConfiguration: Array[Option] = Array(
    junitBundles(),
    // vmOption( "-Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=5006" ),
    systemProperty("project.version").value(VERSION),
    systemProperty("org.ops4j.pax.url.mvn.localRepository").value(LOCAL_REPOSITORY),
    systemProperty("karaf.etc").value("src/test/resources"),
    // we need the boot delegation to allow the Spring/Blueprint XML parsing with JAXP to succeed
    bootDelegationPackage("com.sun.*"),
    mavenBundle("org.ops4j.pax.logging", "pax-logging-api").versionAsInProject(),
    mavenBundle("org.ops4j.pax.url", "pax-url-mvn").versionAsInProject(),
    mavenBundle("org.apache.felix", "org.apache.felix.fileinstall").versionAsInProject(),
    mavenBundle("org.apache.felix", "org.apache.felix.configadmin").versionAsInProject(),
    mavenBundle("org.apache.felix", "org.apache.felix.gogo.runtime").versionAsInProject(),
    mavenBundle("org.apache.karaf.jaas", "org.apache.karaf.jaas.boot").versionAsInProject(),
    mavenBundle("org.apache.karaf.jaas", "org.apache.karaf.jaas.config").versionAsInProject(),
    mavenBundle("org.apache.karaf.jaas", "org.apache.karaf.jaas.modules").versionAsInProject(),
    mavenBundle("org.apache.servicemix.bundles", "org.apache.servicemix.bundles.asm").versionAsInProject(),
    mavenBundle("org.apache.aries", "org.apache.aries.util").versionAsInProject(),
    mavenBundle("org.apache.aries.proxy", "org.apache.aries.proxy").versionAsInProject(),
    mavenBundle("org.apache.aries.blueprint", "org.apache.aries.blueprint.api").versionAsInProject(),
    mavenBundle("org.apache.aries.blueprint", "org.apache.aries.blueprint.core").versionAsInProject(),
    mavenBundle("org.apache.karaf.features", "org.apache.karaf.features.core").versionAsInProject(),
    mavenBundle("org.apache.mina", "mina-core").versionAsInProject(),
    mavenBundle("org.apache.sshd", "sshd-core").versionAsInProject(),
    mavenBundle("org.apache.karaf.shell", "org.apache.karaf.shell.console").versionAsInProject(),
    mavenBundle("org.apache.karaf.shell", "org.apache.karaf.shell.osgi").versionAsInProject(),
    // and then add a few extra bundles to it to enable Scala- and FAB-support
    mavenBundle("org.apache.servicemix.bundles", "org.apache.servicemix.bundles.scala-library").versionAsInProject(),
    mavenBundle("io.fabric8", "common-util").versionAsInProject(),
    mavenBundle("io.fabric8.fab", "fab-osgi").versionAsInProject()
  )

  /**
   * Get the fab: url for a given example
   *
   * @param groupId the artifact's group id
   * @param artifactId the artifact id
   * @return a fab: url
   */
  def fab(groupId: String, artifactId: String) = "fab:mvn:%s/%s/%s".format(groupId, artifactId, VERSION)

  /**
   * Determines the list of bundles added to the bundle context while executing a block of code
   *
   * @param context the bundle context
   * @param block the block of code to be executed
   * @return a set of bundles that have been added, together with the result of the code block
   */
  def bundlesChanged[R](context: BundleContext)(block: => R): (Set[Bundle], R) = {
    val start = context.getBundles
    val result = block
    (context.getBundles.toSet -- start, result)
  }
}
| alexeev/jboss-fuse-mirror | fab/tests/fab-itests/src/test/scala/io/fabric8/fab/osgi/itests/FabIntegrationTestSupport.scala | Scala | apache-2.0 | 4,389 |
package de.htwg.zeta.common.models.frontend
import java.util.UUID
import de.htwg.zeta.common.models.frontend.SafeFormats.safeRead
import de.htwg.zeta.common.models.frontend.SafeFormats.safeWrite
import play.api.libs.json.Format
import play.api.libs.json.JsArray
import play.api.libs.json.Json
import play.api.libs.json.JsResult
import play.api.libs.json.JsValue
/**
 * Representation of a change of the model.
 * E.g. Model Saved, Entity created etc..
 */
sealed trait ModelChanged extends UserRequest

/** A request originating from a user working on a concrete model. */
sealed trait UserRequest extends Request {

  /**
   * The model from which the request was sent
   */
  val modelId: UUID
}
/** Request to run the bonded task `taskId` in the context of model `modelId`. */
case class ExecuteBondedTask(modelId: UUID, taskId: UUID) extends UserRequest

object ExecuteBondedTask {

  /** JSON wire format: `{"type": "ExecuteBondedTask", "model": <uuid>, "task": <uuid>}`. */
  implicit val format: Format[ExecuteBondedTask] = new Format[ExecuteBondedTask] {

    override def writes(o: ExecuteBondedTask): JsValue = safeWrite {
      Json.obj(
        "type" -> "ExecuteBondedTask",
        "model" -> o.modelId,
        "task" -> o.taskId
      )
    }

    override def reads(json: JsValue): JsResult[ExecuteBondedTask] = safeRead {
      json.\\("model").validate[UUID].flatMap { modelId =>
        json.\\("task").validate[UUID].map { taskId =>
          ExecuteBondedTask(modelId, taskId)
        }
      }
    }
  }
}
/** Signals that the model `modelId` has been saved. */
case class SavedModel(modelId: UUID) extends UserRequest with ModelChanged

object SavedModel {

  /** JSON wire format: `{"type": "SavedModel", "model": <uuid>}`. */
  implicit val format: Format[SavedModel] = new Format[SavedModel] {

    override def writes(o: SavedModel): JsValue = safeWrite {
      Json.obj(
        "type" -> "SavedModel",
        "model" -> o.modelId
      )
    }

    override def reads(json: JsValue): JsResult[SavedModel] = safeRead {
      json.\\("model").validate[UUID].map { modelId =>
        SavedModel(modelId)
      }
    }
  }
}
object UserRequest {

  /** Polymorphic JSON format for all [[UserRequest]] subtypes.
    *
    * Each sub-format tags its output with a `"type"` field (e.g. `"type": "SavedModel"`),
    * so deserialization dispatches on that same field — mirroring [[UserResponse.format]].
    * It previously dispatched on a non-existent `"action"` field, which made every
    * round-trip through this format fail.
    */
  implicit val format: Format[UserRequest] = new Format[UserRequest] {

    override def writes(o: UserRequest): JsValue = safeWrite {
      o match {
        case o: ExecuteBondedTask => ExecuteBondedTask.format.writes(o)
        case o: SavedModel => SavedModel.format.writes(o)
      }
    }

    override def reads(json: JsValue): JsResult[UserRequest] = safeRead {
      // Dispatch on the "type" tag written by the sub-formats above.
      json.\\("type").validate[String].flatMap {
        case "ExecuteBondedTask" => ExecuteBondedTask.format.reads(json)
        case "SavedModel" => SavedModel.format.reads(json)
      }
    }
  }
}
/** A response pushed back to the requesting user. */
sealed trait UserResponse extends Response

/** Tells the user why bonded task `taskId` could not be started. */
case class BondedTaskNotExecutable(taskId: UUID, reason: String) extends UserResponse

object BondedTaskNotExecutable {

  /** JSON wire format: `{"type": "BondedTaskNotExecutable", "task": <uuid>, "reason": <string>}`. */
  implicit val format: Format[BondedTaskNotExecutable] = new Format[BondedTaskNotExecutable] {

    override def writes(o: BondedTaskNotExecutable): JsValue = safeWrite {
      Json.obj(
        "type" -> "BondedTaskNotExecutable",
        "task" -> o.taskId,
        "reason" -> o.reason
      )
    }

    override def reads(json: JsValue): JsResult[BondedTaskNotExecutable] = safeRead {
      for {
        taskId <- json.\\("task").validate[UUID]
        reason <- json.\\("reason").validate[String]
      } yield {
        BondedTaskNotExecutable(taskId, reason)
      }
    }
  }
}
/** A single bonded-task menu entry: the task id plus the menu and item labels it appears under. */
case class Entry(taskId: UUID, menu: String, item: String)

object Entry {

  /** JSON wire format: `{"type": "Entry", "task": <uuid>, "menu": <string>, "item": <string>}`. */
  implicit val format: Format[Entry] = new Format[Entry] {

    override def writes(o: Entry): JsValue = safeWrite {
      Json.obj(
        "type" -> "Entry",
        "task" -> o.taskId,
        "menu" -> o.menu,
        "item" -> o.item
      )
    }

    override def reads(json: JsValue): JsResult[Entry] = safeRead {
      for {
        taskId <- json.\\("task").validate[UUID]
        menu <- json.\\("menu").validate[String]
        item <- json.\\("item").validate[String]
      } yield {
        Entry(taskId, menu, item)
      }
    }
  }
}
/** Response carrying the bonded-task menu entries available to the user. */
case class BondedTaskList(tasks: List[Entry]) extends UserResponse

object BondedTaskList {

  /** JSON wire format.
    *
    * The `"type"` tag must be `"BondedTaskList"` so that [[UserResponse.format]]
    * can dispatch back to this reader; it was previously written as `"Entry"`,
    * which made round-trips through the polymorphic format fail.
    */
  implicit val format: Format[BondedTaskList] = new Format[BondedTaskList] {

    override def writes(o: BondedTaskList): JsValue = safeWrite {
      Json.obj(
        "type" -> "BondedTaskList",
        "tasks" -> JsArray(o.tasks.map(Entry.format.writes))
      )
    }

    override def reads(json: JsValue): JsResult[BondedTaskList] = safeRead {
      for {
        tasks <- json.\\("tasks").validate[List[Entry]]
      } yield {
        BondedTaskList(tasks)
      }
    }
  }
}
/** Notifies the user that bonded task `taskId` finished with the given status code. */
case class BondedTaskCompleted(taskId: UUID, status: Int) extends UserResponse

object BondedTaskCompleted {

  /** JSON wire format: `{"type": "BondedTaskCompleted", "task": <uuid>, "status": <int>}`. */
  implicit val format: Format[BondedTaskCompleted] = new Format[BondedTaskCompleted] {

    override def writes(o: BondedTaskCompleted): JsValue = safeWrite {
      Json.obj(
        "type" -> "BondedTaskCompleted",
        "task" -> o.taskId,
        "status" -> o.status
      )
    }

    override def reads(json: JsValue): JsResult[BondedTaskCompleted] = safeRead {
      for {
        taskId <- json.\\("task").validate[UUID]
        status <- json.\\("status").validate[Int]
      } yield {
        BondedTaskCompleted(taskId, status)
      }
    }
  }
}
/** Notifies the user that bonded task `taskId` has started running. */
case class BondedTaskStarted(taskId: UUID) extends UserResponse

object BondedTaskStarted {

  /** JSON wire format: `{"type": "BondedTaskStarted", "task": <uuid>}`. */
  implicit val format: Format[BondedTaskStarted] = new Format[BondedTaskStarted] {

    override def writes(o: BondedTaskStarted): JsValue = safeWrite {
      Json.obj(
        "type" -> "BondedTaskStarted",
        "task" -> o.taskId
      )
    }

    override def reads(json: JsValue): JsResult[BondedTaskStarted] = safeRead {
      json.\\("task").validate[UUID].map { taskId =>
        BondedTaskStarted(taskId)
      }
    }
  }
}
object UserResponse {

  /** Polymorphic JSON format for all [[UserResponse]] subtypes: writes delegate to the
    * sub-formats (which tag their output with a `"type"` field), and reads dispatch
    * on that `"type"` tag.
    */
  implicit val format: Format[UserResponse] = new Format[UserResponse] {

    override def writes(o: UserResponse): JsValue = safeWrite {
      o match {
        case o: BondedTaskNotExecutable => BondedTaskNotExecutable.format.writes(o)
        case o: BondedTaskList => BondedTaskList.format.writes(o)
        case o: BondedTaskCompleted => BondedTaskCompleted.format.writes(o)
        case o: BondedTaskStarted => BondedTaskStarted.format.writes(o)
      }
    }

    override def reads(json: JsValue): JsResult[UserResponse] = safeRead {
      json.\\("type").validate[String].flatMap {
        case "BondedTaskNotExecutable" => BondedTaskNotExecutable.format.reads(json)
        case "BondedTaskList" => BondedTaskList.format.reads(json)
        case "BondedTaskCompleted" => BondedTaskCompleted.format.reads(json)
        case "BondedTaskStarted" => BondedTaskStarted.format.reads(json)
      }
    }
  }
}
| Zeta-Project/zeta | api/common/src/main/scala/de/htwg/zeta/common/models/frontend/User.scala | Scala | bsd-2-clause | 6,526 |
package ohnosequences.scarph.test
import vertexTypes._
import properties._
import ohnosequences.scarph._
import shapeless.record.FieldType
/** Example vertex implementations used by [[VertexSuite]] below. */
object vertices {

  /* A representation of the `User` vertex type; note how it is external to `User`, and it doesn't mention the corresponding type at all */
  case class UserImpl(
    val id: String,
    val name: String,
    val since: Int
  )

  /** Vertex wrapper binding the `User` vertex type to the [[UserImpl]] representation. */
  case object user extends Vertex(User) { self =>

    /* Now users can be created with `user ->> UserImpl(...)` */
    type Rep = UserImpl

    /* Provide implicits here (or elsewhere) for all (or some) properties */
    implicit object readId extends GetProperty(id) {
      def apply(rep: user.TaggedRep): id.Rep = (rep: user.Rep).id
    }

    implicit object readSince extends GetProperty(since) {
      def apply(rep: self.TaggedRep): since.Rep = (rep: self.Rep).since
    }
  }

  /** Vertex wrapper for `Org`; deliberately reuses [[UserImpl]] as its representation. */
  case object org extends Vertex(Org) { self =>

    /*
      We are lazy, so we will use the same representation for orgs
      even though we care only about the `name` property
    */
    type Rep = UserImpl

    implicit object readName extends GetProperty(name) {
      def apply(rep: self.TaggedRep) = rep.name
    }
  }
}
/** Exercises property retrieval on the example vertices, including implicits added
  * after the vertex definition (externally and locally in the test body).
  */
class VertexSuite extends org.scalatest.FunSuite {

  import vertices._

  test("retrieve vertex properties") {

    import vertices.user._
    import vertexTypes.User._

    val u = user ->> UserImpl(id = "1ad3a34df", name = "Robustiano SatrΓΊstegui", since = 2349965)

    val u_id = u.get(id)

    /*
      Too bad, no witness for `since`, so `u.get(since)` won't compile.
      But we can add it here!
    */
    implicit val userSince = User has since

    val u_since = u.get(since)
    val u_since_again = u get since

    /*
      We can also add a retriever for the `name` property externally:
    */
    implicit object readUserName extends GetProperty(name) {
      def apply(rep: user.TaggedRep) = rep.name
    }

    assert((u get id) === "1ad3a34df")
    assert((u get name) === "Robustiano SatrΓΊstegui")
    assert((u get since) === 2349965)

    /*
      Again, we are using user's representation for org,
      even though it has only the `name` property
    */
    import vertices.org._
    import vertexTypes.Org._

    val o = org ->> UserImpl(id = "NYSE:ORCL", name = "Oracle Inc.", since = 1977)

    assert((o get name) === "Oracle Inc.")

    /*
      Now we realized, that we can do more things with this
      representation of org so we just implement it in place:
    */
    implicit val orgFounded = Org has since

    implicit object readOrgSince extends org.GetProperty(since) {
      def apply(rep: org.TaggedRep) = rep.since
    }

    assert((o get since) === 1977)
  }
}
| alberskib/scarph | src/test/scala/ohnosequences/scarph/vertices.scala | Scala | agpl-3.0 | 2,716 |
object H { // NOTE: deliberate *negative* compiler test — trailing comments only, so line numbers in the expected-error check file stay valid
  def unapplySeq(m: Any): Seq[_] = List("") // invalid unapplySeq result type, exercised by the pattern in Test below
}
object Test { // NOTE: deliberate *negative* compiler test — trailing comments only, so line numbers stay valid
  def unapply(m: Any) = m match { // the `H(v)` pattern below must be rejected (see expected errors at end of file)
    case H(v) =>
    case _ =>
  }
}
// later: OK
// then: Seq[Any] is not a valid result type of an unapplySeq method of an extractor.
// and: The result type of an unapplySeq method must contain a member `get` to be used as an extractor pattern, no such member exists in Seq[Any]
// now: too many patterns for object H offering Boolean: expected 0, found 1
// was: result type Seq[_$2] of unapplySeq defined in method unapplySeq in object H does not conform to Option[_]
| scala/scala | test/files/neg/t8127a.scala | Scala | apache-2.0 | 592 |
/*
* Part of NDLA image-api
* Copyright (C) 2017 NDLA
*
* See LICENSE
*/
package no.ndla.imageapi.service
import java.awt.image.BufferedImage
import javax.imageio.ImageIO
import no.ndla.imageapi.TestData.NdlaLogoImage
import no.ndla.imageapi.model.domain.ImageStream
import no.ndla.imageapi.{TestData, TestEnvironment, UnitSuite}
import org.mockito.Mockito._
import org.scalactic.TolerantNumerics
/** Unit tests for [[ImageConverter]]: coordinate transformation, cropping, resizing,
  * and ratio-preserving dynamic crops, exercised against in-memory test images and a
  * mocked [[BufferedImage]].
  */
class ImageConverterTest extends UnitSuite with TestEnvironment {

  val service = new ImageConverter

  // Dimensions reported by the mocked source image used in the coordinate tests.
  val (imageWidth, imageHeight) = (1000, 1000)
  val image: BufferedImage = mock[BufferedImage]

  override def beforeEach() = {
    when(image.getWidth).thenReturn(imageWidth)
    when(image.getHeight).thenReturn(imageHeight)
  }

  test("transformCoordinates returns a CoordOptions object with correctly transformed coordinates") {
    // All four corner orderings must normalize to the same (top-left, bottom-right) pixel pair.
    service.transformCoordinates(image, PercentPoint(10, 5), PercentPoint(1, 20)) should equal(PixelPoint(10, 50),
                                                                                              PixelPoint(100, 200))
    service.transformCoordinates(image, PercentPoint(10, 20), PercentPoint(1, 5)) should equal(PixelPoint(10, 50),
                                                                                              PixelPoint(100, 200))
    service.transformCoordinates(image, PercentPoint(1, 5), PercentPoint(10, 20)) should equal(PixelPoint(10, 50),
                                                                                              PixelPoint(100, 200))
    service.transformCoordinates(image, PercentPoint(1, 20), PercentPoint(10, 5)) should equal(PixelPoint(10, 50),
                                                                                              PixelPoint(100, 200))
  }

  test("getWidthHeight returns the width and height of a segment to crop") {
    service.getWidthHeight(PixelPoint(10, 200), PixelPoint(100, 50), image) should equal((90, 150))
  }

  test("getWidthHeight returns max values if one coordinate is outside of image") {
    service.getWidthHeight(PixelPoint(10, 200), PixelPoint(imageWidth + 1, imageHeight + 1), image) should equal(
      (990, 800))
  }

  test("toConvertedImage converts a BufferedImage to an ImageStream") {
    val bufferedImage = new BufferedImage(200, 100, BufferedImage.TYPE_INT_RGB)
    val origImage = mock[ImageStream]
    when(origImage.fileName).thenReturn("full/image.jpg")
    when(origImage.contentType).thenReturn("image/jpg")
    when(origImage.format).thenReturn("jpg")

    // Metadata of the original stream must be carried over unchanged.
    val result = service.toImageStream(bufferedImage, origImage)
    result.fileName should equal(origImage.fileName)
    result.contentType should equal(origImage.contentType)
    result.format should equal(origImage.format)
  }

  test("crop crops an image according to given settings") {
    val croppedImage = service.crop(NdlaLogoImage, PercentPoint(0, 0), PercentPoint(50, 50))
    croppedImage.isSuccess should equal(true)

    val image = ImageIO.read(croppedImage.get.stream)
    image.getWidth should equal(94)
    image.getHeight should equal(30)
  }

  test("resize resizes image height correctly") {
    val resizedImage = service.resizeHeight(NdlaLogoImage, 30)
    resizedImage.isSuccess should equal(true)

    val image = ImageIO.read(resizedImage.get.stream)
    image.getHeight should equal(30)
  }

  test("resize resizes image width correctly") {
    val resizedImage = service.resizeWidth(NdlaLogoImage, 30)
    resizedImage.isSuccess should equal(true)

    val image = ImageIO.read(resizedImage.get.stream)
    image.getWidth should equal(30)
  }

  test("resize not resizes image if height is to big") {
    // Requesting a height larger than the source leaves the original size untouched.
    val resizedImage = service.resizeHeight(NdlaLogoImage, 400)
    resizedImage.isSuccess should equal(true)

    val image = ImageIO.read(resizedImage.get.stream)
    image.getHeight should equal(60)
  }

  test("resize not resizes image if width is to big") {
    val resizedImage = service.resizeWidth(NdlaLogoImage, 400)
    resizedImage.isSuccess should equal(true)

    val image = ImageIO.read(resizedImage.get.stream)
    image.getWidth should equal(189)
  }

  test("resize resizes an image according to image orientation if both height and width is specified") {
    val croppedImage = service.resize(NdlaLogoImage, 100, 60)
    croppedImage.isSuccess should equal(true)

    val image = ImageIO.read(croppedImage.get.stream)
    image.getWidth should equal(100)
    image.getHeight should not equal 60
  }

  test("dynamic cropping should work as expected") {
    val croppedImage = service.dynamicCrop(NdlaLogoImage, PercentPoint(0, 0), Some(10), Some(30), None)
    val image = ImageIO.read(croppedImage.get.stream)
    image.getWidth should equal(10)
    image.getHeight should equal(30)
  }

  test("dynamic cropping should scale according to original image size if only one dimension size is specified") {
    val image = ImageIO.read(service.dynamicCrop(NdlaLogoImage, PercentPoint(0, 0), Some(100), None, None).get.stream)
    image.getWidth should equal(100)
    image.getHeight should equal(31)

    val image2 = ImageIO.read(service.dynamicCrop(NdlaLogoImage, PercentPoint(0, 0), None, Some(50), None).get.stream)
    image2.getWidth should equal(157)
    image2.getHeight should equal(50)
  }

  test("dynamic crop should not manipulate image if neither target width or target height is specified") {
    val image = ImageIO.read(service.dynamicCrop(NdlaLogoImage, PercentPoint(0, 0), None, None, None).get.stream)
    image.getWidth should equal(NdlaLogoImage.sourceImage.getWidth)
    image.getHeight should equal(NdlaLogoImage.sourceImage.getHeight)
  }

  test("minimalCropSizesToPreserveRatio calculates correct image sizes given ratio") {
    service.minimalCropSizesToPreserveRatio(640, 426, 0.81) should equal(345, 426)
    service.minimalCropSizesToPreserveRatio(851, 597, 1.5) should equal(850, 567)
    service.minimalCropSizesToPreserveRatio(851, 597, 1.2) should equal(716, 597)
  }

  test(
    "minimalCropSizesToPreserveRatio calculates image sizes with (about) correct aspect ratio for lots of ratios and image sizes") {
    def testRatio(ratio: Double, width: Int, height: Int) = {
      // Ratios can't always be hit exactly with integer pixel sizes; allow a small tolerance.
      implicit val doubleEquality = TolerantNumerics.tolerantDoubleEquality(0.1)
      val (newWidth, newHeight) = service.minimalCropSizesToPreserveRatio(width, height, ratio)
      val calculatedRatio = newWidth.toDouble / newHeight.toDouble
      calculatedRatio should equal(ratio)
    }
    // Cartesian product of ratios x widths x heights.
    for {
      ratio <- Seq(0.1, 0.2, 0.81, 1, 1.1, 1.5, 2, 5, 10)
      width <- LazyList.range(10, 1000, 10)
      height <- LazyList.range(10, 1000, 10)
    } yield testRatio(ratio, width, height)
  }

  test("dynamic cropping with ratios should return image with (about) correct aspect ratio") {
    testRatio(0.81, 57, 50, 345, 426)
    testRatio(0.81, 0, 0, 345, 426)
    testRatio(0.81, 10, 10, 345, 426)
    testRatio(0.81, 90, 90, 345, 426)
    testRatio(1.5, 50, 50, 639, 426)
    testRatio(1.2, 50, 50, 511, 426)

    def testRatio(ratio: Double, focalX: Int, focalY: Int, expectedWidth: Int, expectedHeight: Int): Unit = {
      implicit val doubleEquality = TolerantNumerics.tolerantDoubleEquality(0.01)
      val croppedImage =
        service.dynamicCrop(TestData.ChildrensImage, PercentPoint(focalX, focalY), Some(100), Some(100), Some(ratio))
      val image = ImageIO.read(croppedImage.get.stream)
      val calculatedRatio = image.getWidth.toDouble / image.getHeight.toDouble
      image.getWidth should equal(expectedWidth)
      image.getHeight should equal(expectedHeight)
      calculatedRatio should equal(ratio)
    }
  }
}
| NDLANO/image-api | src/test/scala/no/ndla/imageapi/service/ImageConverterTest.scala | Scala | gpl-3.0 | 7,623 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.graphx
import scala.reflect.ClassTag
import org.apache.spark.Logging
/**
* Implements a Pregel-like bulk-synchronous message-passing API.
*
* Unlike the original Pregel API, the GraphX Pregel API factors the sendMessage computation over
* edges, enables the message sending computation to read both vertex attributes, and constrains
* messages to the graph structure. These changes allow for substantially more efficient
* distributed execution while also exposing greater flexibility for graph-based computation.
*
* @example We can use the Pregel abstraction to implement PageRank:
* {{{
* val pagerankGraph: Graph[Double, Double] = graph
* // Associate the degree with each vertex
* .outerJoinVertices(graph.outDegrees) {
* (vid, vdata, deg) => deg.getOrElse(0)
* }
* // Set the weight on the edges based on the degree
* .mapTriplets(e => 1.0 / e.srcAttr)
* // Set the vertex attributes to the initial pagerank values
* .mapVertices((id, attr) => 1.0)
*
* def vertexProgram(id: VertexId, attr: Double, msgSum: Double): Double =
* resetProb + (1.0 - resetProb) * msgSum
* def sendMessage(id: VertexId, edge: EdgeTriplet[Double, Double]): Iterator[(VertexId, Double)] =
* Iterator((edge.dstId, edge.srcAttr * edge.attr))
* def messageCombiner(a: Double, b: Double): Double = a + b
* val initialMessage = 0.0
* // Execute Pregel for a fixed number of iterations.
* Pregel(pagerankGraph, initialMessage, numIter)(
* vertexProgram, sendMessage, messageCombiner)
* }}}
*
*/
object Pregel extends Logging {
/**
* Execute a Pregel-like iterative vertex-parallel abstraction. The
* user-defined vertex-program `vprog` is executed in parallel on
* each vertex receiving any inbound messages and computing a new
* value for the vertex. The `sendMsg` function is then invoked on
* all out-edges and is used to compute an optional message to the
* destination vertex. The `mergeMsg` function is a commutative
* associative function used to combine messages destined to the
* same vertex.
*
* On the first iteration all vertices receive the `initialMsg` and
* on subsequent iterations if a vertex does not receive a message
* then the vertex-program is not invoked.
*
* This function iterates until there are no remaining messages, or
* for `maxIterations` iterations.
*
* @tparam VD the vertex data type
* @tparam ED the edge data type
* @tparam A the Pregel message type
*
* @param graph the input graph.
*
* @param initialMsg the message each vertex will receive at the first
* iteration
*
* @param maxIterations the maximum number of iterations to run for
*
* @param activeDirection the direction of edges incident to a vertex that received a message in
* the previous round on which to run `sendMsg`. For example, if this is `EdgeDirection.Out`, only
* out-edges of vertices that received a message in the previous round will run. The default is
* `EdgeDirection.Either`, which will run `sendMsg` on edges where either side received a message
* in the previous round. If this is `EdgeDirection.Both`, `sendMsg` will only run on edges where
* *both* vertices received a message.
*
* @param vprog the user-defined vertex program which runs on each
* vertex and receives the inbound message and computes a new vertex
* value. On the first iteration the vertex program is invoked on
* all vertices and is passed the default message. On subsequent
* iterations the vertex program is only invoked on those vertices
* that receive messages.
*
* @param sendMsg a user supplied function that is applied to out
* edges of vertices that received messages in the current
* iteration
*
* @param mergeMsg a user supplied function that takes two incoming
* messages of type A and merges them into a single message of type
* A. ''This function must be commutative and associative and
* ideally the size of A should not increase.''
*
* @return the resulting graph at the end of the computation
*
*/
def apply[VD: ClassTag, ED: ClassTag, A: ClassTag]
    (graph: Graph[VD, ED],
     initialMsg: A,
     maxIterations: Int = Int.MaxValue,
     activeDirection: EdgeDirection = EdgeDirection.Either)
    (vprog: (VertexId, VD, A) => VD,
     sendMsg: EdgeTriplet[VD, ED] => Iterator[(VertexId, A)],
     mergeMsg: (A, A) => A)
  : Graph[VD, ED] =
{
  // Superstep 0: run the vertex program on every vertex with the initial message.
  var g = graph.mapVertices((vid, vdata) => vprog(vid, vdata, initialMsg)).cache()
  // compute the messages
  var messages = g.mapReduceTriplets(sendMsg, mergeMsg)
  // count() materializes `messages` eagerly; the count doubles as the termination signal.
  var activeMessages = messages.count()
  // Loop
  var prevG: Graph[VD, ED] = null
  var i = 0
  while (activeMessages > 0 && i < maxIterations) {
    // Receive the messages and update the vertices.
    prevG = g
    g = g.joinVertices(messages)(vprog).cache()
    val oldMessages = messages
    // Send new messages, skipping edges where neither side received a message. We must cache
    // messages so it can be materialized on the next line, allowing us to uncache the previous
    // iteration.
    messages = g.mapReduceTriplets(
      sendMsg, mergeMsg, Some((oldMessages, activeDirection))).cache()
    // The call to count() materializes `messages` and the vertices of `g`. This hides oldMessages
    // (depended on by the vertices of g) and the vertices of prevG (depended on by oldMessages
    // and the vertices of g).
    activeMessages = messages.count()
    logInfo("Pregel finished iteration " + i)
    // Unpersist the RDDs hidden by newly-materialized RDDs
    oldMessages.unpersist(blocking = false)
    prevG.unpersistVertices(blocking = false)
    prevG.edges.unpersist(blocking = false)
    // count the iteration
    i += 1
  }
  // Return the final graph; `maxIterations` or message exhaustion terminated the loop.
  g
} // end of apply
} // end of class Pregel
| pronix/spark | graphx/src/main/scala/org/apache/spark/graphx/Pregel.scala | Scala | apache-2.0 | 6,723 |
package clarifai
/** Imports scalaj library for handling HTTP request & response. */
import scalaj.http._
import scala.util.parsing.json.JSON
/** Configuration setting for the client wrapper. */
/** Shared configuration for the client wrapper: where the Clarifai API lives. */
object Config {
  // Root endpoint of the Clarifai REST API.
  val baseURL: String = "https://api.clarifai.com"
  // API version path segment appended to every request URL.
  val version: String = "v1"
}
/** Client wrapper for accessing Clarifai endpoints.
*
* Currently supports the following endpoints:
* - Tag
* - Feedback
* - Color (beta)
* - Info
* - Usage
* You would need a Clarifai developer account for using the service.
* Developer page: https://developer.clarifai.com
*
* @constructor create a new client object for accessing endpoints.
* @param id client id for your Clarifai API application
* @param secret client secret for your Clarifai API application
*/
class ClarifaiClient(id: String, secret: String) {
  // Application credentials used to request OAuth tokens.
  val clientID = id
  val clientSecret = secret
  // OAuth access token; starts out invalid and is requested/refreshed when the API answers 401.
  var accessToken = "unassigned"
  // True while the API reports this client as rate-limited (last response was HTTP 429).
  var throttled = false
  /** High-level functions for accessing Clarifai endpoints.
   *
   * The following functions allow users to access the Clarifai service directly
   * and receive the response in JSON-like class structure.
   */
  /** Feedback - provides the ability to give feedback to the API about images and videos that were previously tagged.
   *
   * @param feedbackReq map of values for providing feedback to the API; must contain exactly one
   *                    of "url" or "docids" (each an Array[String] with at least one element)
   * @return Left(error message) on failure, Right(FeedbackResp) on success
   */
  def feedback(feedbackReq: Map[String, Any]): Either[Option[String], FeedbackResp] = {
    if ((!feedbackReq.contains("url") || feedbackReq.get("url").get.asInstanceOf[Array[String]].length < 1 )
      && (!feedbackReq.contains("docids") || feedbackReq.get("docids").get.asInstanceOf[Array[String]].length < 1 )) {
      return Left(Some("Needs at least one url or docid"))
    }
    if (feedbackReq.contains("url") && feedbackReq.contains("docids")) {
      return Left(Some("Request must provide exactly one of the following fields: urls or docids"))
    }
    /** Converts the user input into string format. */
    // NOTE(review): values are concatenated without URL-encoding — confirm callers pass
    // pre-encoded values, otherwise special characters will corrupt the form body.
    var data = ""
    data = data.concat(_extractStrArrWithAmp(feedbackReq, "add_tags"))
    data = data.concat(_extractStrArrWithAmp(feedbackReq, "remove_tags"))
    data = data.concat(_extractStrArrWithAmp(feedbackReq, "similar_docids"))
    data = data.concat(_extractStrArrWithAmp(feedbackReq, "similar_url"))
    data = data.concat(_extractStrArrWithAmp(feedbackReq, "dissimilar_docids"))
    data = data.concat(_extractStrArrWithAmp(feedbackReq, "dissimilar_url"))
    data = data.concat(_extractStrArrWithAmp(feedbackReq, "search_click"))
    data = data.concat(_extractStrArrWithAmp(feedbackReq, "url"))
    data = data.concat(_extractStrArrWithAmp(feedbackReq, "docids"))
    data = data.dropRight(1) // Removes the last "&" character.
    /** Sends the HTTP request. */
    val response = _commonHTTPRequest(Some(data), "feedback", "POST", false)
    /** Returns the HTTP response into JSON-like class structure. */
    // NOTE(review): the `.get` chains below throw NoSuchElementException if the API response
    // is missing a field or is not valid JSON — confirm whether that is acceptable to callers.
    response match {
      case Left(err) => Left(err)
      case Right(result) => {
        val rmap = JSON.parseFull(result).get.asInstanceOf[Map[String, Any]]
        Right(
          FeedbackResp(
            rmap.get("status_code").get.asInstanceOf[String],
            rmap.get("status_msg").get.asInstanceOf[String]
          )
        )
      }
    }
  }
  /** Color (beta) - retrieves the dominant colors present in your images or videos.
   *
   * @param colorReq array of image/video urls for color detection (at least one)
   * @return Left(error message) on failure, Right(ColorResp) on success
   */
  def color(colorReq: Array[String]): Either[Option[String], ColorResp] = {
    if (colorReq.length < 1 ) {
      return Left(Some("Needs at least one url"))
    }
    /** Converts the user input into string format. */
    var data = ""
    for (str <- colorReq) { data = data.concat("url=" + str + "&") }
    data = data.dropRight(1) // Removes the last "&" character.
    /** Sends the HTTP request. */
    val response = _commonHTTPRequest(Some(data), "color", "POST", false)
    /** Returns the HTTP response into JSON-like class structure. */
    response match {
      case Left(err) => Left(err)
      case Right(result) => {
        val rmap = JSON.parseFull(result).get.asInstanceOf[Map[String, Any]]
        val results = rmap.get("results").get.asInstanceOf[List[Map[String, Any]]]
        // Build one ColorResults per input document, each carrying its detected colors.
        // (`:::=` prepends, so the final lists are in reverse API order.)
        var resultsArr = List[ColorResults]()
        results.foreach((itemR: Map[String, Any]) => {
          val colors = itemR.get("colors").get.asInstanceOf[List[Map[String, Any]]]
          var colorsArr = List[ResultsColors]()
          colors.foreach((itemC: Map[String, Any]) => {
            val w3c_color = itemC.get("w3c").get.asInstanceOf[Map[String, Any]]
            val rColor:ResultsColors = ResultsColors(
              Colorw3c(
                w3c_color.get("hex").get.asInstanceOf[String],
                w3c_color.get("name").get.asInstanceOf[String]
              ),
              itemC.get("hex").get.asInstanceOf[String],
              itemC.get("density").get.asInstanceOf[Double]
            )
            colorsArr :::= List(rColor)
          })
          val cResult:ColorResults = ColorResults(
            itemR.get("docid").get.asInstanceOf[Double],
            itemR.get("url").get.asInstanceOf[String],
            itemR.get("docid_str").get.asInstanceOf[String],
            colorsArr
          )
          resultsArr :::= List(cResult)
        })
        Right(
          ColorResp(
            rmap.get("status_code").get.asInstanceOf[String],
            rmap.get("status_msg").get.asInstanceOf[String],
            resultsArr
          )
        )
      }
    }
  }
  /** Info - returns the current API details as well as any usage limits your account has.
   *
   * @return Left(error message) on failure, Right(InfoResp) on success
   */
  def info(): Either[Option[String], InfoResp] = {
    /** Sends the HTTP request. */
    val response = _commonHTTPRequest(None, "info", "GET", false)
    /** Returns the HTTP response into JSON-like class structure. */
    response match {
      case Left(err) => Left(err)
      case Right(result) => {
        val rmap = JSON.parseFull(result).get.asInstanceOf[Map[String, Any]]
        val results = rmap.get("results").get.asInstanceOf[Map[String, Any]]
        Right(
          InfoResp(
            rmap.get("status_code").get.asInstanceOf[String],
            rmap.get("status_msg").get.asInstanceOf[String],
            InfoResults(
              results.get("max_image_size").get.asInstanceOf[Double],
              results.get("default_language").get.asInstanceOf[String],
              results.get("max_video_size").get.asInstanceOf[Double],
              results.get("max_image_bytes").get.asInstanceOf[Double],
              results.get("min_image_size").get.asInstanceOf[Double],
              results.get("default_model").get.asInstanceOf[String],
              results.get("max_video_bytes").get.asInstanceOf[Double],
              results.get("max_video_duration").get.asInstanceOf[Double],
              results.get("max_batch_size").get.asInstanceOf[Double],
              results.get("max_video_batch_size").get.asInstanceOf[Double],
              results.get("min_video_size").get.asInstanceOf[Double],
              results.get("api_version").get.asInstanceOf[Double]
            )
          )
        )
      }
    }
  }
  /** Usage - returns your API usage for the current month and hour.
   *
   * @return Left(error message) on failure, Right(UsageResp) on success
   */
  def usage(): Either[Option[String], UsageResp] = {
    /** Sends the HTTP request. */
    val response = _commonHTTPRequest(None, "usage", "GET", false)
    /** Returns the HTTP response into JSON-like class structure. */
    response match {
      case Left(err) => Left(err)
      case Right(result) => {
        val rmap = JSON.parseFull(result).get.asInstanceOf[Map[String, Any]]
        val results = rmap.get("results").get.asInstanceOf[Map[String, Any]]
        // Collect per-user throttle entries (prepended, so reverse API order).
        var utArr = List[UsageResultUT]()
        val uThrottles = results.get("user_throttles").get.asInstanceOf[List[Map[String, Any]]]
        uThrottles.foreach((item: Map[String, Any]) => {
          val uThrottle:UsageResultUT = UsageResultUT(
            item.get("name").get.asInstanceOf[String],
            item.get("consumed").get.asInstanceOf[Double],
            item.get("consumed_percentage").get.asInstanceOf[Double],
            item.get("limit").get.asInstanceOf[Double],
            item.get("units").get.asInstanceOf[String],
            item.get("wait").get.asInstanceOf[Double]
          )
          utArr :::= List(uThrottle)
        })
        Right(
          UsageResp(
            rmap.get("status_code").get.asInstanceOf[String],
            rmap.get("status_msg").get.asInstanceOf[String],
            UsageResults(
              utArr,
              results.get("app_throttles").get.asInstanceOf[Map[String, Any]]
            )
          )
        )
      }
    }
  }
  /** Tag - tags the contents of your images or videos.
   *
   * @param tagReq map of request values; requires "url" (Array[String], at least one entry),
   *               optionally "model" and "language" (String)
   * @return Left(error message) on failure, Right(TagResp) on success
   */
  def tag(tagReq: Map[String, Any]): Either[Option[String], TagResp] = {
    if (!tagReq.contains("url") || tagReq.get("url").get.asInstanceOf[Array[String]].length < 1 ) {
      return Left(Some("Needs at least one url"))
    }
    /** Converts the user input into string format. */
    var data = ""
    data = data.concat(_extractStringWithAmp(tagReq, "model"))
    data = data.concat(_extractStringWithAmp(tagReq, "language"))
    /** TODO: select classes. */
    for (str <- tagReq.get("url").get.asInstanceOf[Array[String]]) {
      data = data.concat("url=" + str + "&")
    }
    data = data.dropRight(1) // Removes the last "&" character.
    val response = _commonHTTPRequest(Some(data), "tag", "POST", false)
    /** Returns the HTTP response into JSON-like class structure. */
    response match {
      case Left(err) => Left(err)
      case Right(result) => {
        val rmap = JSON.parseFull(result).get.asInstanceOf[Map[String, Any]]
        val meta = rmap.get("meta").get.asInstanceOf[Map[String, Any]]
        val meta_tag = meta.get("tag").get.asInstanceOf[Map[String, Any]]
        val results = rmap.get("results").get.asInstanceOf[List[Map[String, Any]]]
        // One TagResult per input document (prepended, so reverse API order).
        var resultsArr = List[TagResult]()
        results.foreach((item: Map[String, Any]) => {
          val res = item.get("result").get.asInstanceOf[Map[String, Any]]
          val res_tag = res.get("tag").get.asInstanceOf[Map[String, Any]]
          val tResult:TagResult = TagResult(
            item.get("docid").get.asInstanceOf[Double],
            item.get("url").get.asInstanceOf[String],
            item.get("status_code").get.asInstanceOf[String],
            item.get("status_msg").get.asInstanceOf[String],
            item.get("local_id").get.asInstanceOf[String],
            TagResultRes(
              TagResultResTag(
                // res_tag.get("concept_ids").get.asInstanceOf[List[String]],
                res_tag.get("classes").get.asInstanceOf[List[String]],
                res_tag.get("probs").get.asInstanceOf[List[Double]]
              )
            ),
            item.get("docid_str").get.asInstanceOf[String]
          )
          resultsArr :::= List(tResult)
        })
        Right(
          TagResp(
            rmap.get("status_code").get.asInstanceOf[String],
            rmap.get("status_msg").get.asInstanceOf[String],
            TagMeta(
              TagMetaTag(
                meta_tag.get("timestamp").get.asInstanceOf[Double],
                meta_tag.get("model").get.asInstanceOf[String],
                meta_tag.get("config").get.asInstanceOf[String]
              )
            ),
            resultsArr
          )
        )
      }
    }
  }
  /** Functions for establishing the underlying connection with the Clarifai API service.
   *
   * The following functions should be private. They help to establish HTTP connection
   * and handle the user input and response error.
   */
  /** Requests access token from the Clarifai API service.
   *
   * @return None on success (token stored via _setAccessToken), Some(error code) otherwise
   */
  private def _requestAccessToken(): Option[String] = {
    val form = Seq("grant_type" -> "client_credentials",
      "client_id" -> clientID,
      "client_secret" -> clientSecret)
    val url = _buildURL("token")
    // NOTE(review): this sends the (possibly stale) Authorization header on the token
    // request itself; the credentials in the form body are what authenticate — confirm
    // the extra header is harmless against the API.
    val response: HttpResponse[String] = Http(url).postForm(form)
      .header("Authorization", ("Bearer " + accessToken))
      .header("content-type", "application/x-www-form-urlencoded")
      .asString
    if (response.isError) return Some("4XX OR 5XX ERROR")
    val json_body:Map[String,Any] = JSON.parseFull(response.body).get.asInstanceOf[Map[String, Any]]
    if (json_body.isEmpty) return Some("EMPTY_JSON")
    val token = json_body.get("access_token").get.asInstanceOf[String]
    if (token == "") return Some("EMPTY_TOKEN")
    _setAccessToken(token)
    None
  }
  /** Sends HTTP request to the specified endpoint with the user input data.
   *
   * Retries exactly once after refreshing the access token when the API answers 401.
   *
   * @param data optional url-encoded form body (POST only)
   * @param endpoint API endpoint name, e.g. "tag"
   * @param verb HTTP method, "POST" or "GET"
   * @param retry true when this call is already the post-refresh retry (prevents loops)
   * @return Left(error code) on failure, Right(raw response body) on success
   */
  private def _commonHTTPRequest(data:Option[String] ,endpoint: String, verb:String, retry: Boolean)
  : Either[Option[String], String] = {
    val req_data = data match {
      case Some(i) => i
      case None => ""
    }
    val url = _buildURL(endpoint)
    var response: HttpResponse[String] = null
    /** Sends HTTP request based on the method type. */
    verb match {
      case "POST" => {
        response = Http(url).postData(req_data)
          .header("Authorization", ("Bearer " + accessToken))
          .header("content-type", "application/x-www-form-urlencoded")
          .asString
      }
      case "GET" => {
        response = Http(url).header("Authorization", ("Bearer " + accessToken)).asString
      }
      case _ => {
        return Left(Some("ILLEGAL_VERB"))
      }
    }
    /** Matches HTTP response code to the corresponding return value. */
    response.code match {
      case 200|201 => {
        // Success clears any previous throttled state.
        if (throttled) {
          _setThrottle(false)
        }
        Right(response.body)
      }
      case 401 => {
        // Token expired/invalid: refresh it and retry once.
        if (!retry) {
          val err = _requestAccessToken()
          if (err != None) {
            return Left(err)
          }
          return _commonHTTPRequest(data, endpoint, verb, true)
        }
        Left(Some("TOKEN_INVALID"))
      }
      case 429 => {
        // Rate limited by the API; remember it so a later success can clear the flag.
        _setThrottle(true)
        Left(Some("THROTTLED"))
      }
      case 400 => {
        Left(Some("ALL_ERROR"))
      }
      case 500 => {
        Left(Some("CLARIFAI_ERROR"))
      }
      case _ => {
        Left(Some("UNEXPECTED_STATUS_CODE"))
      }
    }
  }
  /** Helper functions for modifying request and response data. */
  // Joins base URL, version and endpoint into a full request URL.
  def _buildURL(endpoint: String): String = {
    val parts = Array(Config.baseURL, Config.version, endpoint)
    return parts.mkString("/")
  }
  // Stores a freshly obtained OAuth token.
  def _setAccessToken(token: String) = {
    accessToken = token
  }
  // Records the current rate-limit state.
  def _setThrottle(throttle:Boolean) = {
    throttled = throttle
  }
  /** Renders `field=value&` when `field` is present in `obj` (value must be a String),
   * empty string otherwise. */
  def _extractStringWithAmp(obj: Map[String, Any], field: String): String = {
    if (obj.contains(field)) {
      field + "=" + obj.get(field).get.asInstanceOf[String] + "&"
    }
    else {
      ""
    }
  }
  /** Renders `field=v1,v2,...&` when `field` is present in `obj` (value must be an
   * Array[String]), empty string otherwise.
   * NOTE(review): an empty array yields "field&" (the dropRight removes the "=") —
   * confirm callers never pass empty arrays. */
  def _extractStrArrWithAmp(obj: Map[String, Any], field: String): String= {
    if (obj.contains(field)) {
      var data = field + "="
      for (str <- obj.get(field).get.asInstanceOf[Array[String]]) {
        data = data.concat(str + ",")
      }
      data = data.dropRight(1) // Removes the last "," character.
      data + "&"
    }
    else {
      ""
    }
  }
} | vic317yeh/clarifai-scala | src/main/scala/clarifai/Clarifai.scala | Scala | mit | 15,733 |
package ru.tmtool.math.algebra
import ru.tmtool.math.plane.Point
import Math._
/**
* User: Sergey Kozlov skozlov@poidem.ru
* Date: 15.08.2014
* Time: 15:49
*/
/**
 * Immutable polynomial over BigDecimal, represented by its coefficients from the
 * highest degree down to the constant term. Usable as a function `BigDecimal => BigDecimal`.
 *
 * Invariants: the coefficient list is non-empty, and the leading coefficient is
 * non-zero unless the polynomial is a constant (degree 0).
 */
case class Polynomial(coefficients: Seq[BigDecimal]) extends (BigDecimal => BigDecimal){
  require(coefficients.nonEmpty)
  // Degree equals the number of coefficients minus one.
  val degree = coefficients.size - 1
  // A constant polynomial has a single coefficient.
  val constant = degree == 0
  require(coefficients.head != 0 || constant)
  /** Monic form used for degree-insensitive equality: leading coefficient scaled to 1
   * (the zero polynomial is left unchanged). */
  lazy val normalized: Polynomial =
    if(coefficients.head == 0 || coefficients.head == 1) this
    else Polynomial(coefficients map (_ / coefficients.head))
  /** Human-readable rendering such as "f(x) = 2*x^2 + x + 3". */
  override lazy val toString: String = {
    val builder = new StringBuilder("f(x) = ")
    // Recursively emits each non-zero term, inserting " + " between terms.
    def serialize(coefficients: Seq[BigDecimal], needPlus: Boolean = false): Unit = {
      if(coefficients.head != 0){
        if(needPlus) builder append " + "
        if(coefficients.size == 1) builder append coefficients.head
        else{
          if(coefficients.head != 1) builder.append(coefficients.head).append('*')
          val degree = coefficients.size - 1
          builder.append('x')
          if(degree > 1) builder.append('^').append(degree)
        }
      }
      if(coefficients.size > 1) serialize(coefficients.tail, needPlus = true)
    }
    if(coefficients.size == 1) builder append coefficients.head
    else serialize(coefficients)
    builder.toString()
  }
  /** Evaluates the polynomial at `x` using Horner's scheme. */
  override def apply(x: BigDecimal): BigDecimal = coefficients.foldLeft(BigDecimal(0)){
    (sum, c) => sum * x + c
  }
  // Equality and hashing are defined on the normalized coefficient list.
  override lazy val hashCode: Int = normalized.coefficients.hashCode()
  override def equals(obj: scala.Any): Boolean = obj match {
    case that: Polynomial if that != null => (that canEqual this) && (this.normalized.coefficients == that.normalized.coefficients)
    case _ => false
  }
  override def canEqual(that: Any): Boolean = that != null && that.isInstanceOf[Polynomial]
  /** Polynomial addition.
   *
   * Coefficient sequences are aligned at the constant term; the overhang of the longer
   * polynomial is copied through unchanged.
   */
  def +(that: Polynomial): Polynomial = {
    val minSize = min(this.coefficients.size, that.coefficients.size)
    val (thisBegin, thisEnd) = this.coefficients splitAt (this.coefficients.size - minSize)
    val (thatBegin, thatEnd) = that.coefficients splitAt (that.coefficients.size - minSize)
    val sumEnd: Seq[BigDecimal] = thisEnd zip thatEnd map {
      case (a, b) => a + b
    }
    // Strip leading zeros produced by cancellation, keeping a single zero for the zero
    // polynomial. (The previous `dropWhile (_ != 0)` discarded leading *non-zero*
    // coefficients and could yield an empty, invariant-violating coefficient list.)
    val raw = (thisBegin ++ (thatBegin ++ sumEnd)) dropWhile (_ == 0)
    Polynomial(if (raw.isEmpty) Seq(BigDecimal(0)) else raw)
  }
  /** Adds a constant to the polynomial. */
  def +(that: BigDecimal) = Polynomial(coefficients.init :+ (coefficients.last + that))
  /** Scales every coefficient by `const`; multiplying by 0 yields the zero polynomial. */
  def *(const: BigDecimal) = Polynomial (if(const == 0) Seq(0) else coefficients map (_ * const))
  /** Polynomial multiplication via repeated shift-and-add over this polynomial's coefficients. */
  def *(that: Polynomial): Polynomial = coefficients.foldLeft(Polynomial(Seq(0))){
    (sum, c) => {
      // Shift the accumulator one degree up (multiply by x) unless it is still zero.
      val sumX = if(sum.coefficients.head == 0) sum else Polynomial(sum.coefficients :+ BigDecimal(0))
      sumX + that * c
    }
  }
  /** Divides every coefficient by `n`.
   *
   * @throws IllegalArgumentException if `n` is zero
   */
  def /(n: BigDecimal): Polynomial = {
    require(n != 0, "Division by zero")
    this * (1 / n)
  }
}
object Polynomial{
  /**
   * Builds the Lagrange interpolation polynomial passing through the given points.
   *
   * All points must have pairwise distinct x coordinates; at least two points are required.
   *
   * @throws IllegalArgumentException if two points share the same x coordinate
   */
  def apply(point1: Point, point2: Point, otherPoints: Point*): Polynomial = {
    val points = point1 +: point2 +: otherPoints
    // i-th Lagrange basis polynomial: product over j != i of (x - x_j) / (x_i - x_j).
    def basicPolynomial(i: Int): Polynomial = {
      val basicPoint = points(i)
      val otherPoints: Seq[Point] = (points take i) ++ (points drop (i+1))
      // Product of (x_i - x_j). The previous fold ignored its accumulator and therefore
      // returned only the *last* factor instead of the full product.
      val denominator: BigDecimal = otherPoints.foldLeft(BigDecimal(1)){
        case (res, p @ Point(x, _)) =>
          require(x != basicPoint.x, s"All points must have unique x, but found $basicPoint and $p")
          res * (basicPoint.x - x)
      }
      // Product of (x - x_j). This must start from the unit polynomial; the previous code
      // started from the zero polynomial, making every basis polynomial identically zero.
      val numerator: Polynomial = otherPoints.foldLeft(Polynomial(Seq(1))){
        case (p, Point(x, _)) => p * Polynomial(Seq(1, -x))
      }
      numerator / denominator
    }
    // Sum y_i * L_i(x) over all points.
    points.foldLeft((0, Polynomial(Seq(0)))){
      case ((i, sum), Point(x, y)) => (i + 1, sum + basicPolynomial(i) * y)
    }._2
  }
  /** Pimp for BigDecimal: lift a number into a constant polynomial. */
  implicit class SpecialDecimal(n: BigDecimal){
    def toPolynomial = Polynomial(Seq(n))
  }
} | tmtool/math | src/main/scala/ru/tmtool/math/algebra/Polynomial.scala | Scala | mit | 3,667 |
package chess
package variant
case object Atomic
    extends Variant(
      id = 7,
      key = "atomic",
      name = "Atomic",
      shortName = "Atom",
      title = "Nuke your opponent's king to win.",
      standardInitialPosition = true
    ) {
  /** Atomic uses the standard chess starting pieces. */
  def pieces = Standard.pieces
  /** Captures explode surrounding pieces, so moves require post-processing. */
  override def hasMoveEffects = true
  /** Move threatens to explode the opponent's king */
  private def explodesOpponentKing(situation: Situation)(move: Move): Boolean =
    move.captures && {
      situation.board.kingPosOf(!situation.color) exists move.dest.touches
    }
  /** Move threatens to illegally explode our own king */
  private def explodesOwnKing(situation: Situation)(move: Move): Boolean = {
    move.captures && (situation.kingPos exists move.dest.touches)
  }
  /** True when `to` is adjacent to the king of `color`. */
  private def protectedByOtherKing(board: Board, to: Pos, color: Color): Boolean =
    board.kingPosOf(color) exists to.touches
  /**
   * In atomic chess, a king cannot be threatened while it is in the perimeter of the other king as were the other player
   * to capture it, their own king would explode. This effectively makes a king invincible while connected with another
   * king.
   */
  override def kingThreatened(
      board: Board,
      color: Color,
      to: Pos,
      filter: Piece => Boolean = _ => true
  ): Boolean = {
    board.pieces exists {
      case (pos, piece) =>
        piece.color == color && filter(piece) && !protectedByOtherKing(
          board,
          to,
          color
        ) && piece.attacks(pos, board.occupied).has(to)
    }
  }
  // moves exploding opponent king are always playable
  override def kingSafety(m: Move, filter: Piece => Boolean, kingPos: Option[Pos]): Boolean = {
    !kingPos.exists(kingThreatened(m.after, !m.color, _, filter)) ||
    explodesOpponentKing(m.situationBefore)(m)
  } && !explodesOwnKing(m.situationBefore)(m)
  /** If the move captures, we explode the surrounding pieces. Otherwise, nothing explodes. */
  private def explodeSurroundingPieces(move: Move): Move = {
    if (move.captures) {
      val affectedPos = surroundingPositions(move.dest)
      val afterBoard  = move.after
      val destination = move.dest
      val boardPieces = afterBoard.pieces
      // Pawns are immune (for some reason), but all pieces surrounding the captured piece and the capturing piece
      // itself explode
      val piecesToExplode = affectedPos.filter(boardPieces.get(_).fold(false)(_.isNot(Pawn))) + destination
      val afterExplosions = boardPieces -- piecesToExplode
      val newBoard        = afterBoard withPieces afterExplosions
      move withAfter newBoard
    } else move
  }
  /**
   * The positions surrounding a given position on the board. Any square at the edge of the board has
   * less surrounding positions than the usual eight.
   */
  private[chess] def surroundingPositions(pos: Pos): PosSet = PosSet.kingAttacks(pos)
  /** Applies the explosion side effect after every capturing move. */
  override def addVariantEffect(move: Move): Move = explodeSurroundingPieces(move)
  /**
   * Since kings cannot confine each other, if either player has only a king
   * then either a queen or multiple pieces are required for checkmate.
   */
  private def insufficientAtomicWinningMaterial(board: Board) = {
    val kingsAndBishopsOnly = board.pieces forall { p =>
      (p._2 is King) || (p._2 is Bishop)
    }
    lazy val bishopsOnOppositeColors = InsufficientMatingMaterial.bishopsOnOppositeColors(board)
    lazy val kingsAndKnightsOnly = board.pieces forall { p =>
      (p._2 is King) || (p._2 is Knight)
    }
    lazy val kingsRooksAndMinorsOnly = board.pieces forall { p =>
      (p._2 is King) || (p._2 is Rook) || (p._2 is Bishop) || (p._2 is Knight)
    }
    // Bishops of opposite color (no other pieces) endgames are dead drawn
    // except if either player has multiple bishops so a helpmate is possible
    if (board.count(White) >= 2 && board.count(Black) >= 2)
      kingsAndBishopsOnly && board.pieces.size <= 4 && bishopsOnOppositeColors
    // Queen, rook + any, bishop + any (same piece color), or 3 knights can mate
    else if (kingsAndKnightsOnly) board.pieces.size <= 4
    else kingsRooksAndMinorsOnly && !bishopsOnOppositeColors && board.pieces.size <= 3
  }
  /*
   * Bishops on opposite coloured squares can never capture each other to cause a king to explode and a traditional
   * mate would be not be very likely. Additionally, a player can only mate another player with sufficient material.
   * We also look out for closed positions (pawns that cannot move and kings which cannot capture them.)
   */
  override def isInsufficientMaterial(board: Board) = {
    insufficientAtomicWinningMaterial(board) || atomicClosedPosition(board)
  }
  /**
   * Since a king cannot capture, K + P vs K + P where none of the pawns can move is an automatic draw
   */
  private def atomicClosedPosition(board: Board) = {
    // Every piece is either a blocked pawn, a king, or a bishop.
    val closedStructure = board.actors.values.forall(actor =>
      (actor.piece.is(Pawn) && actor.moves.isEmpty
        && InsufficientMatingMaterial.pawnBlockedByPawn(actor, board))
        || actor.piece.is(King) || actor.piece.is(Bishop)
    )
    // If bishops remain, they must be unable to interact with the pawn structure.
    val randomBishop = board.pieces.find { case (_, piece) => piece.is(Bishop) }
    val bishopsAbsentOrPawnitized = randomBishop match {
      case Some((pos, piece)) => bishopPawnitized(board, piece.color, pos.isLight)
      case None               => true
    }
    closedStructure && bishopsAbsentOrPawnitized
  }
  /** True when all remaining bishops of `sideWithBishop` sit on `bishopLight` squares and every
   * enemy pawn sits on the opposite colour, i.e. the bishops can never capture anything. */
  private def bishopPawnitized(board: Board, sideWithBishop: Color, bishopLight: Boolean) = {
    board.actors.values.forall(actor =>
      (actor.piece.is(Pawn) && actor.piece.is(sideWithBishop)) ||
        (actor.piece.is(Pawn) && actor.piece.is(!sideWithBishop) && actor.pos.isLight == !bishopLight) ||
        (actor.piece.is(Bishop) && actor.piece.is(sideWithBishop) && actor.pos.isLight == bishopLight) ||
        actor.piece.is(King)
    )
  }
  /**
   * In atomic chess, it is possible to win with a single knight, bishop, etc, by exploding
   * a piece in the opponent's king's proximity. On the other hand, a king alone or a king with
   * immobile pawns is not sufficient material to win with.
   */
  override def opponentHasInsufficientMaterial(situation: Situation) =
    situation.board.rolesOf(!situation.color) == List(King)
  /** Atomic chess has a special end where a king has been killed by exploding with an adjacent captured piece */
  override def specialEnd(situation: Situation) = situation.board.kingPos.size != 2
}
| niklasf/scalachess | src/main/scala/variant/Atomic.scala | Scala | mit | 6,471 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.kafka010
import java.{util => ju}
import java.util.concurrent.ConcurrentHashMap
import org.apache.commons.pool2.{BaseKeyedPooledObjectFactory, PooledObject, SwallowedExceptionListener}
import org.apache.commons.pool2.impl.{DefaultEvictionPolicy, DefaultPooledObject, GenericKeyedObjectPool, GenericKeyedObjectPoolConfig}
import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging
import org.apache.spark.sql.kafka010.InternalKafkaConsumerPool._
import org.apache.spark.sql.kafka010.KafkaDataConsumer.CacheKey
/**
* Provides object pool for [[InternalKafkaConsumer]] which is grouped by [[CacheKey]].
*
* This class leverages [[GenericKeyedObjectPool]] internally, hence providing methods based on
* the class, and same contract applies: after using the borrowed object, you must either call
* returnObject() if the object is healthy to return to pool, or invalidateObject() if the object
* should be destroyed.
*
* The soft capacity of pool is determined by "spark.kafka.consumer.cache.capacity" config value,
* and the pool will have reasonable default value if the value is not provided.
* (The instance will do its best effort to respect soft capacity but it can exceed when there's
* a borrowing request and there's neither free space nor idle object to clear.)
*
* This class guarantees that no caller will get pooled object once the object is borrowed and
* not yet returned, hence provide thread-safety usage of non-thread-safe [[InternalKafkaConsumer]]
* unless caller shares the object to multiple threads.
*/
private[kafka010] class InternalKafkaConsumerPool(
    objectFactory: ObjectFactory,
    poolConfig: PoolConfig) extends Logging {

  /** Convenience constructor building factory and config from a SparkConf. */
  def this(conf: SparkConf) = {
    this(new ObjectFactory, new PoolConfig(conf))
  }

  // the class is intended to have only soft capacity
  assert(poolConfig.getMaxTotal < 0)

  // Underlying Commons Pool 2 keyed pool; swallowed exceptions (e.g. on consumer close)
  // are logged rather than silently dropped.
  private val pool = {
    val internalPool = new GenericKeyedObjectPool[CacheKey, InternalKafkaConsumer](
      objectFactory, poolConfig)
    internalPool.setSwallowedExceptionListener(CustomSwallowedExceptionListener)
    internalPool
  }

  /**
   * Borrows [[InternalKafkaConsumer]] object from the pool. If there's no idle object for the key,
   * the pool will create the [[InternalKafkaConsumer]] object.
   *
   * If the pool doesn't have idle object for the key and also exceeds the soft capacity,
   * pool will try to clear some of idle objects.
   *
   * Borrowed object must be returned by either calling returnObject or invalidateObject, otherwise
   * the object will be kept in pool as active object.
   */
  def borrowObject(key: CacheKey, kafkaParams: ju.Map[String, Object]): InternalKafkaConsumer = {
    // Register params first so the factory can build a consumer for this key on demand.
    updateKafkaParamForKey(key, kafkaParams)

    if (size >= poolConfig.softMaxSize) {
      logWarning("Pool exceeds its soft max size, cleaning up idle objects...")
      pool.clearOldest()
    }

    pool.borrowObject(key)
  }

  /** Returns borrowed object to the pool. */
  def returnObject(consumer: InternalKafkaConsumer): Unit = {
    pool.returnObject(extractCacheKey(consumer), consumer)
  }

  /** Invalidates (destroy) borrowed object to the pool. */
  def invalidateObject(consumer: InternalKafkaConsumer): Unit = {
    pool.invalidateObject(extractCacheKey(consumer), consumer)
  }

  /** Invalidates all idle consumers for the key */
  def invalidateKey(key: CacheKey): Unit = {
    pool.clear(key)
  }

  /**
   * Closes the keyed object pool. Once the pool is closed,
   * borrowObject will fail with [[IllegalStateException]], but returnObject and invalidateObject
   * will continue to work, with returned objects destroyed on return.
   *
   * Also destroys idle instances in the pool.
   */
  def close(): Unit = {
    pool.close()
  }

  def reset(): Unit = {
    // this is the best-effort of clearing up. otherwise we should close the pool and create again
    // but we don't want to make it "var" only because of tests.
    pool.clear()
  }

  // Pool-size accessors, overall and per key. size = idle + active.
  def numIdle: Int = pool.getNumIdle

  def numIdle(key: CacheKey): Int = pool.getNumIdle(key)

  def numActive: Int = pool.getNumActive

  def numActive(key: CacheKey): Int = pool.getNumActive(key)

  def size: Int = numIdle + numActive

  def size(key: CacheKey): Int = numIdle(key) + numActive(key)

  // TODO: revisit the relation between CacheKey and kafkaParams - for now it looks a bit weird
  // as we force all consumers having same (groupId, topicPartition) to have same kafkaParams
  // which might be viable in performance perspective (kafkaParams might be too huge to use
  // as a part of key), but there might be the case kafkaParams could be different -
  // cache key should be differentiated for both kafkaParams.
  private def updateKafkaParamForKey(key: CacheKey, kafkaParams: ju.Map[String, Object]): Unit = {
    // We can assume that kafkaParam should not be different for same cache key,
    // otherwise we can't reuse the cached object and cache key should contain kafkaParam.
    // So it should be safe to put the key/value pair only when the key doesn't exist.
    val oldKafkaParams = objectFactory.keyToKafkaParams.putIfAbsent(key, kafkaParams)
    require(oldKafkaParams == null || kafkaParams == oldKafkaParams, "Kafka parameters for same " +
      s"cache key should be equal. old parameters: $oldKafkaParams new parameters: $kafkaParams")
  }

  /** Rebuilds the pool key from a consumer's topic-partition and params. */
  private def extractCacheKey(consumer: InternalKafkaConsumer): CacheKey = {
    new CacheKey(consumer.topicPartition, consumer.kafkaParams)
  }
}
private[kafka010] object InternalKafkaConsumerPool {

  /** Logs exceptions that Commons Pool would otherwise swallow (e.g. consumer close errors). */
  object CustomSwallowedExceptionListener extends SwallowedExceptionListener with Logging {
    override def onSwallowException(e: Exception): Unit = {
      logError(s"Error closing Kafka consumer", e)
    }
  }

  /** Pool configuration derived from SparkConf; enforces soft-capacity semantics only. */
  class PoolConfig(conf: SparkConf) extends GenericKeyedObjectPoolConfig[InternalKafkaConsumer] {
    private var _softMaxSize = Int.MaxValue

    // Soft capacity: advisory only, enforced by the pool class before borrowing.
    def softMaxSize: Int = _softMaxSize

    init()

    def init(): Unit = {
      _softMaxSize = conf.get(CONSUMER_CACHE_CAPACITY)

      val jmxEnabled = conf.get(CONSUMER_CACHE_JMX_ENABLED)
      val minEvictableIdleTimeMillis = conf.get(CONSUMER_CACHE_TIMEOUT)
      val evictorThreadRunIntervalMillis = conf.get(
        CONSUMER_CACHE_EVICTOR_THREAD_RUN_INTERVAL)

      // NOTE: Below lines define the behavior, so do not modify unless you know what you are
      // doing, and update the class doc accordingly if necessary when you modify.

      // 1. Set min idle objects per key to 0 to avoid creating unnecessary object.
      // 2. Set max idle objects per key to 3 but set total objects per key to infinite
      // which ensures borrowing per key is not restricted.
      // 3. Set max total objects to infinite which ensures all objects are managed in this pool.
      setMinIdlePerKey(0)
      setMaxIdlePerKey(3)
      setMaxTotalPerKey(-1)
      setMaxTotal(-1)

      // Set minimum evictable idle time which will be referred from evictor thread
      setMinEvictableIdleTimeMillis(minEvictableIdleTimeMillis)
      setSoftMinEvictableIdleTimeMillis(-1)

      // evictor thread will run test with ten idle objects
      setTimeBetweenEvictionRunsMillis(evictorThreadRunIntervalMillis)
      setNumTestsPerEvictionRun(10)
      setEvictionPolicy(new DefaultEvictionPolicy[InternalKafkaConsumer]())

      // Immediately fail on exhausted pool while borrowing
      setBlockWhenExhausted(false)

      setJmxEnabled(jmxEnabled)
      setJmxNamePrefix("kafka010-cached-simple-kafka-consumer-pool")
    }
  }

  /** Creates/destroys pooled consumers; kafka params must be registered per key before create. */
  class ObjectFactory extends BaseKeyedPooledObjectFactory[CacheKey, InternalKafkaConsumer] {

    // Key -> kafka params mapping, populated by the pool before each borrow.
    val keyToKafkaParams = new ConcurrentHashMap[CacheKey, ju.Map[String, Object]]()

    override def create(key: CacheKey): InternalKafkaConsumer = {
      Option(keyToKafkaParams.get(key)) match {
        case Some(kafkaParams) => new InternalKafkaConsumer(key.topicPartition, kafkaParams)
        case None => throw new IllegalStateException("Kafka params should be set before " +
          "borrowing object.")
      }
    }

    override def wrap(value: InternalKafkaConsumer): PooledObject[InternalKafkaConsumer] = {
      new DefaultPooledObject[InternalKafkaConsumer](value)
    }

    override def destroyObject(key: CacheKey, p: PooledObject[InternalKafkaConsumer]): Unit = {
      p.getObject.close()
    }
  }
}
| pgandhi999/spark | external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/InternalKafkaConsumerPool.scala | Scala | apache-2.0 | 9,181 |
package org.dbpedia.spotlight.db.io
import io.Source
import java.io.{File, FileInputStream, InputStream}
import org.dbpedia.spotlight.model.Factory.OntologyType
import scala.collection.JavaConverters._
import java.util.NoSuchElementException
import scala.collection.mutable.HashSet
import org.dbpedia.spotlight.db.WikipediaToDBpediaClosure
import org.dbpedia.spotlight.log.SpotlightLog
import scala.Predef._
import scala.Array
import org.dbpedia.spotlight.model._
import org.dbpedia.spotlight.exceptions.NotADBpediaResourceException
import org.semanticweb.yars.nx.parser.NxParser
import org.dbpedia.extraction.util.WikiUtil
/**
* Represents a source of DBpediaResources.
*
* Type definitions must be prepared beforehand, see
* src/main/scripts/types.sh
*
* @author Joachim Daiber
*/
object DBpediaResourceSource {
/**
 * Builds a DBpediaResource -> support-count map from three TSV streams:
 * a concept list (one URI per line), per-URI counts ("uri<TAB>count"), and
 * type definitions ("uri<TAB>typeURI"). Sequential ids are assigned from 0.
 */
def fromTSVInputStream(
conceptList: InputStream,
counts: InputStream,
instanceTypes: InputStream
): java.util.Map[DBpediaResource, Int] = {
var id = 0
//The list of concepts may contain non-unique elements, hence convert it to a Set first to make sure
//we do not count elements more than once.
val resourceMap: Map[String, DBpediaResource] = (Source.fromInputStream(conceptList).getLines().toSet map {
line: String => {
val res = new DBpediaResource(line.trim)
res.id = id
id += 1
Pair(res.uri, res)
}
}).toMap
//Read counts:
Source.fromInputStream(counts).getLines() foreach {
line: String => {
val Array(uri: String, count: String) = line.trim().split('\\t')
// Counts are attached to the canonical resource object created above.
resourceMap(new DBpediaResource(uri).uri).setSupport(count.toInt)
}
}
//Read types:
val uriNotFound = HashSet[String]()
Source.fromInputStream(instanceTypes).getLines() foreach {
line: String => {
val Array(id: String, typeURI: String) = line.trim().split('\\t')
try {
// Prepend the ontology type; URIs absent from the concept list are collected,
// not fatal.
resourceMap(new DBpediaResource(id).uri).types ::= OntologyType.fromURI(typeURI)
} catch {
case e: NoSuchElementException =>
//System.err.println("WARNING: DBpedia resource not in concept list %s (%s)".format(id, typeURI) )
uriNotFound += id
}
}
}
SpotlightLog.warn(this.getClass, "URI for %d type definitions not found!", uriNotFound.size)
// Flatten to resource -> support for the Java-facing API.
resourceMap.iterator.map( f => Pair(f._2, f._2.support) ).toMap.asJava
}
/**
* Normalize the URI resulting from Pig to a format useable for us.
* At the moment, the Pig URIs are in DBpedia format but double-encoded.
*
* @param uri the DBpedia URI returned by Pig
* @return
*/
def normalizePigURI(uri: String) = {
try {
//This seems a bit over the top (this is necessary because of the format of the data as it comes from pignlproc):
// Decode twice (the Pig output is double-encoded per the scaladoc above), then
// re-encode once to get a canonical wiki-encoded URI.
WikiUtil.wikiEncode(WikiUtil.wikiDecode(WikiUtil.wikiDecode(uri)))
} catch {
// Best-effort: on any failure, fall back to the raw input URI.
case e: Exception => println("Conversion to correct URI format failed at %s".format(uri)); uri
}
}
/**
 * Builds a DBpediaResource -> support-count map from pignlproc output.
 * Wikipedia URIs are resolved through the redirect/disambiguation closure; counts
 * for URIs that resolve to the same DBpedia resource are summed. Types may be
 * supplied either as TSV ("uri<TAB>typeURI") or as NTriples.
 */
def fromPigInputStreams(
wikipediaToDBpediaClosure: WikipediaToDBpediaClosure,
resourceCounts: InputStream,
instanceTypes: (String, InputStream),
namespace: String
): java.util.Map[DBpediaResource, Int] = {
SpotlightLog.info(this.getClass, "Creating DBepdiaResourceSource.")
var id = 1
val resourceMap = new java.util.HashMap[DBpediaResource, Int]()
val resourceByURI = scala.collection.mutable.HashMap[String, DBpediaResource]()
SpotlightLog.info(this.getClass, "Reading resources+counts...")
Source.fromInputStream(resourceCounts).getLines() foreach {
line: String => {
try {
val Array(wikiurl, count) = line.trim().split('\\t')
val res = new DBpediaResource(wikipediaToDBpediaClosure.wikipediaToDBpediaURI(normalizePigURI(wikiurl)))
resourceByURI.get(res.uri) match {
// Several Wikipedia URLs can redirect to the same DBpedia URI: merge counts.
case Some(oldRes) => {
oldRes.setSupport(oldRes.support + count.toInt)
resourceByURI.put(oldRes.uri, oldRes)
}
case None => {
res.id = id
id += 1
res.setSupport(count.toInt)
resourceByURI.put(res.uri, res)
}
}
} catch {
case e: NotADBpediaResourceException => //Ignore Disambiguation pages
case e: scala.MatchError => //Ignore lines with multiple tabs
}
}
}
//Read types:
if (instanceTypes != null && instanceTypes._1.equals("tsv")) {
SpotlightLog.info(this.getClass, "Reading types (tsv format)...")
val uriNotFound = HashSet[String]()
Source.fromInputStream(instanceTypes._2).getLines() foreach {
line: String => {
val Array(uri: String, typeURI: String) = line.trim().split('\\t')
try {
resourceByURI(new DBpediaResource(uri).uri).types ::= OntologyType.fromURI(typeURI)
} catch {
case e: java.util.NoSuchElementException =>
uriNotFound += uri
}
}
}
SpotlightLog.warn(this.getClass, "URI for %d type definitions not found!".format(uriNotFound.size) )
SpotlightLog.info(this.getClass, "Done.")
} else if (instanceTypes != null && instanceTypes._1.equals("nt")) {
SpotlightLog.info(this.getClass, "Reading types (nt format)...")
val uriNotFound = HashSet[String]()
val redParser = new NxParser(instanceTypes._2)
while (redParser.hasNext) {
val triple = redParser.next
val subj = triple(0).toString.replace(namespace, "")
// Subjects containing "__" are skipped — presumably intermediate/blank-node-style
// URIs; TODO confirm against the type-dataset conventions.
if (!subj.contains("__")) {
val obj = triple(2).toString.replace(namespace, "")
try {
// owl#Thing is too generic to be a useful type, so it is ignored.
if(!obj.endsWith("owl#Thing"))
resourceByURI(new DBpediaResource(subj).uri).types ::= OntologyType.fromURI(obj)
} catch {
case e: java.util.NoSuchElementException =>
uriNotFound += subj
}
}
}
// NOTE(review): this branch logs at info level while the tsv branch logs the same
// message at warn level — possibly unintentional inconsistency.
SpotlightLog.info(this.getClass, "URI for %d type definitions not found!".format(uriNotFound.size) )
SpotlightLog.info(this.getClass, "Done.")
}
resourceByURI foreach {
case (_, res) => resourceMap.put(res, res.support)
}
resourceMap
}
/**
 * File-based convenience wrapper around [[fromPigInputStreams]].
 * The type format is inferred from the file extension: "*nt" -> NTriples,
 * anything else -> TSV; a null instanceTypes file disables type loading.
 */
def fromPigFiles(
wikipediaToDBpediaClosure: WikipediaToDBpediaClosure,
counts: File,
instanceTypes: File,
namespace: String
): java.util.Map[DBpediaResource, Int] = fromPigInputStreams(
wikipediaToDBpediaClosure,
new FileInputStream(counts),
if(instanceTypes == null)
null
else
(
if(instanceTypes.getName.endsWith("nt")) "nt" else "tsv",
new FileInputStream(instanceTypes)
),
namespace
)
/** File-based convenience wrapper around [[fromTSVInputStream]]. */
def fromTSVFiles(
conceptList: File,
counts: File,
instanceTypes: File
): java.util.Map[DBpediaResource, Int] = fromTSVInputStream(
new FileInputStream(conceptList),
new FileInputStream(counts),
new FileInputStream(instanceTypes)
)
}
| Skunnyk/dbpedia-spotlight-model | index/src/main/scala/org/dbpedia/spotlight/db/io/DBpediaResourceSource.scala | Scala | apache-2.0 | 6,973 |
package sample.stream
object ExposeParser {
// Stub: ignores the incoming JSON and always succeeds with a fixed ExposeUpdate —
// presumably a placeholder until real parsing is implemented. TODO confirm intent.
def toExposeChange(js: JsValue): Either[ErrorMessage, ExposeChange] = Right(ExposeUpdate(1234, "expo"))
}
| franke1276/akka-stream-example | src/main/scala/sample/stream/ExposeParser.scala | Scala | cc0-1.0 | 155 |
/*
* Copyright 2013-2016 Tsukasa Kitachi
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package configs
import configs.testutil.instance.anyVal._
import configs.testutil.instance.result._
import scalaprops.Property._
import scalaprops.{Properties, Scalaprops, scalazlaws}
import scalaz.std.list._
import scalaz.{Applicative, MonadError, Traverse}
// Property-based law/consistency checks for Result, using scalaprops with the
// scalaz instances defined in ResultInstance below.
object ResultTest extends Scalaprops {
// val monadErrorLaw = {
// import ResultInstance.monadError
// scalazlaws.monadError.all[Result, ConfigError]
// }
// Applicative laws for Result.
val applicativeLaw = {
import ResultInstance.applicative
scalazlaws.applicative.all[Result]
}
// Result.traverse must agree with scalaz's Traverse[List].traverse.
val traverse = {
import ResultInstance.applicative
Properties.list(
Properties.single("list", forAll { (xs: List[Int], f: Int => Result[Long]) =>
Result.traverse(xs)(f) == Traverse[List].traverse(xs)(f)
})
)
}
// Result.sequence must agree with scalaz's Traverse[List].sequence.
val sequence = {
import ResultInstance.applicative
Properties.list(
Properties.single("list", forAll { xs: List[Result[Int]] =>
Result.sequence(xs) == Traverse[List].sequence(xs)
})
)
}
// Round-trip: converting to Either and back is the identity.
val toFromEither =
forAll { a: Result[Int] =>
Result.fromEither(a.toEither) == a
}
}
// scalaz type-class instances for Result, used by the law checks above.
object ResultInstance {
// MonadError instance: errors are ConfigError; handleError delegates to
// Result.handleWith with a catch-all partial function.
implicit val monadError: MonadError[Result, ConfigError] =
new MonadError[Result, ConfigError] {
def point[A](a: => A): Result[A] =
Result.successful(a)
def bind[A, B](fa: Result[A])(f: A => Result[B]): Result[B] =
fa.flatMap(f)
def raiseError[A](e: ConfigError): Result[A] =
Result.Failure(e)
def handleError[A](fa: Result[A])(f: ConfigError => Result[A]): Result[A] =
fa.handleWith { case e => f(e) }
}
// Plain Applicative instance built on Result.ap.
implicit val applicative: Applicative[Result] =
new Applicative[Result] {
def point[A](a: => A): Result[A] =
Result.successful(a)
def ap[A, B](fa: => Result[A])(f: => Result[A => B]): Result[B] =
fa.ap(f)
}
}
| kxbmap/configs | core/src/test/scala/configs/ResultTest.scala | Scala | apache-2.0 | 2,460 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package play.api.inject
import java.util.concurrent.atomic.AtomicBoolean
import java.util.concurrent.Callable
import java.util.concurrent.CompletionStage
import java.util.concurrent.ConcurrentLinkedDeque
import akka.Done
import javax.inject.Inject
import javax.inject.Singleton
import play.api.Logger
import scala.annotation.tailrec
import scala.compat.java8.FutureConverters
import scala.concurrent.Future
import scala.concurrent.Promise
import scala.util.Failure
import scala.util.Success
import scala.util.Try
/**
* Application lifecycle register.
*
* This is used to hook into Play lifecycle events, specifically, when Play is stopped. The reason Play only provides
* lifecycle callbacks for stopping is that constructors are considered the application start callback. This has
* several advantages:
*
* - It simplifies implementation, if you want to start something, just do it in the constructor.
* - It simplifies state, there's no transitional state where an object has been created but not started yet. Hence,
* as long as you have a reference to something, it's safe to use it.
* - It solves startup dependencies in a type safe manner - the order that components must be started is enforced by the
* order that they must be instantiated due to the component graph.
*
* Stop hooks are executed when the application is shutdown, in reverse from when they were registered. Due to this
* reverse ordering, a component can know that it is safe to use the components it depends on as long as it hasn't
* received a shutdown event.
*
* To use this, declare a dependency on ApplicationLifecycle, and then register the stop hook when the component is
* started. For example:
*
* {{{
* import play.api.inject.ApplicationLifecycle
* import javax.inject.Inject
*
* class SomeDatabase @Inject() (applicationLifecycle: ApplicationLifecycle) {
*
* private val connectionPool = new SomeConnectionPool()
* applicationLifecycle.addStopHook { () =>
* Future.successful(connectionPool.shutdown())
* }
*
* ...
* }
* }}}
*/
trait ApplicationLifecycle {
/**
 * Add a stop hook to be called when the application stops.
 *
 * The stop hook should redeem the returned future when it is finished shutting down. It is acceptable to stop
 * immediately and return a successful future.
 */
def addStopHook(hook: () => Future[_]): Unit
/**
 * Add a stop hook to be called when the application stops.
 *
 * The stop hook should redeem the returned future when it is finished shutting down. It is acceptable to stop
 * immediately and return a successful future.
 */
// Java-friendly overload: adapts a Callable returning a CompletionStage into the
// Scala Future-based variant above.
def addStopHook(hook: Callable[_ <: CompletionStage[_]]): Unit =
addStopHook(() => { val cs = hook.call(); FutureConverters.toScala(cs) })
/**
 * Call to shutdown the application and execute the registered hooks.
 *
 * Since 2.7.0, implementations of <code>stop</code> are expected to be idempotent so invoking the method
 * several times only runs the process once.
 *
 * @return A future that will be redeemed once all hooks have executed.
 */
@deprecated(
"Do not invoke stop() directly. Instead, use CoordinatedShutdown.run to stop and release your resources.",
"2.7.0"
)
def stop(): Future[_]
/**
 * @return the Java version for this Application Lifecycle.
 */
// Wraps this instance for the Java API; a new delegate is created on each call.
def asJava: play.inject.ApplicationLifecycle = new play.inject.DelegateApplicationLifecycle(this)
}
/**
* Default implementation of the application lifecycle.
*/
@Singleton
class DefaultApplicationLifecycle @Inject() () extends ApplicationLifecycle {
private val logger = Logger(getClass)
// Hooks are pushed onto the head of the deque, so polling during stop() yields
// them in reverse registration order (LIFO), as documented on the trait.
private val hooks = new ConcurrentLinkedDeque[() => Future[_]]()
override def addStopHook(hook: () => Future[_]): Unit = hooks.push(hook)
// Memoizes the result of the single stop() run so repeated calls share one future.
private val stopPromise: Promise[Done] = Promise()
// Guards the one-time execution of the hook chain.
private val started = new AtomicBoolean(false)
/**
 * Call to shutdown the application.
 *
 * @return A future that will be redeemed once all hooks have executed.
 */
override def stop(): Future[_] = {
// run the code only once and memoize the result of the invocation in a Promise.future so invoking
// the method many times causes a single run producing the same result in all cases.
if (started.compareAndSet(false, true)) {
// Do we care if one hook executes on another hooks redeeming thread? Hopefully not.
import play.core.Execution.Implicits.trampoline
// Chains hooks sequentially: each hook starts only after the previous future
// completes; the recursion is on the future being built, hence @tailrec holds.
@tailrec def clearHooks(previous: Future[Any] = Future.successful[Any](())): Future[Any] = {
val hook = hooks.poll()
if (hook != null) clearHooks(previous.flatMap { _ =>
// A hook that throws synchronously is converted into a failed future so the
// chain keeps running.
val hookFuture = Try(hook()) match {
case Success(f) => f
case Failure(e) => Future.failed(e)
}
// Failures are logged and swallowed so one bad hook cannot abort the rest.
hookFuture.recover {
case e => logger.error("Error executing stop hook", e)
}
})
else previous
}
stopPromise.completeWith(clearHooks().map(_ => Done))
}
stopPromise.future
}
}
| wegtam/playframework | core/play/src/main/scala/play/api/inject/ApplicationLifecycle.scala | Scala | apache-2.0 | 5,114 |
package recfun
object Main {
// Demo entry point: prints the first 11 rows of Pascal's triangle to stdout.
def main(args: Array[String]) {
println("Pascal's Triangle")
for (row <- 0 to 10) {
for (col <- 0 to row)
print(pascal(col, row) + " ")
println()
}
}
/**
 * Exercise 1: returns the value at column `c` of row `r` (both 0-based) of
 * Pascal's triangle. Both edges of the triangle are 1; every inner entry is the
 * sum of the two entries above it. Plain recursion, exponential time — fine for
 * the small rows printed by `main`.
 *
 * Fix: the original final case was `case default =>`, which silently binds the
 * scrutinee to an identifier named `default`; the idiomatic wildcard is `_`.
 */
def pascal(c: Int, r: Int): Int = (c, r) match {
  case (0, _) => 1                 // left edge
  case (x, y) if x == y => 1       // right edge (c == r)
  case _ => pascal(c - 1, r - 1) + pascal(c, r - 1) // sum of the two parents
}
/**
 * Exercise 2: checks whether the parentheses in `chars` are balanced; all other
 * characters are ignored. A closing paren may never appear before its matching
 * opening paren.
 *
 * Fixes over the original: no explicit `return`, no partial `head`/`tail` calls,
 * no `.equals` on Char (== is idiomatic), and the helper is provably
 * tail-recursive (annotated).
 */
def balance(chars: List[Char]): Boolean = {
  @scala.annotation.tailrec
  def loop(rest: List[Char], open: Int): Boolean = rest match {
    case Nil         => open == 0                              // every '(' was closed
    case '(' :: tail => loop(tail, open + 1)
    case ')' :: tail => if (open == 0) false else loop(tail, open - 1) // ')' before '('
    case _ :: tail   => loop(tail, open)                       // ignore other chars
  }
  loop(chars, 0)
}
/**
 * Exercise 3: counts the distinct ways to make `money` out of the
 * unlimited-supply coin denominations in `coins` (order within a combination
 * does not matter). Standard two-branch recursion: either use the first
 * denomination at least once, or never use it.
 *
 * Fixes over the original: the explicit `return`s and the redundant inner/outer
 * mutual recursion (with re-sorting on every outer call) are gone, and the
 * degenerate case is corrected — zero money now counts as exactly one way (the
 * empty combination), per the standard combinatorial definition; the old code
 * returned 0 for money == 0. Results for positive `money` are unchanged.
 */
def countChange(money: Int, coins: List[Int]): Int =
  if (money == 0) 1                      // one way: take no coins at all
  else if (money < 0 || coins.isEmpty) 0 // overshot, or no denominations left
  else countChange(money - coins.head, coins) + countChange(money, coins.tail)
} | Cs4r/LearningScala | src/main/scala/cs4r/labs/learningscala/fpprinciples/week1/Main.scala | Scala | gpl-3.0 | 1,284 |
import scala.quoted._
object Macros {
// Scala 3 `inline` method: the call is expanded at the call site, applying the
// by-name function argument to the constant 42.
inline def h(f: => Int => String): String = f(42)
}
| som-snytt/dotty | tests/run/i4431-b/quoted_1.scala | Scala | apache-2.0 | 93 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.scala.dsl
import builder.RouteBuilder
import org.apache.camel.model.PipelineDefinition
/**
* Scala enrichment for Camel's PipelineDefinition
*/
case class SPipelineDefinition(override val target: PipelineDefinition)(implicit val builder: RouteBuilder) extends SAbstractDefinition[PipelineDefinition] {
// Adds one or more endpoint URIs to the wrapped pipeline. A single URI is passed
// straight to the underlying Camel definition; several URIs are added one at a
// time via the single-URI path. Returns `this` to allow fluent chaining.
override def to(uris: String*) : SPipelineDefinition = {
uris.length match {
case 1 => target.to(uris(0))
case _ => {
for (uri <- uris) this.to(uri)
}
}
this
}
// Runs the block against the superclass implementation and re-wraps the result
// so the Scala-enriched type is preserved.
override def apply(block: => Unit) = wrap(super.apply(block))
}
| YMartsynkevych/camel | components/camel-scala/src/main/scala/org/apache/camel/scala/dsl/SPipelineDefinition.scala | Scala | apache-2.0 | 1,405 |
package com.zavakid.sbt
/**
*
* copy from https://github.com/jrudolph/sbt-dependency-graph/blob/master/src/main/scala/net/virtualvoid/sbt/graph/IvyGraphMLDependencies.scala
* and delete some unneed function
*/
import sbinary.{DefaultProtocol, Format}
import sbt.ConsoleLogger
import scala.collection.mutable
import scala.collection.mutable.{Set => MSet}
import scala.xml.parsing.ConstructingParser
import scala.xml.{Document, Node, NodeSeq}
object IvyGraphMLDependencies extends App {
// Maven-style module coordinates (organisation, name, version).
case class ModuleId(organisation: String,
name: String,
version: String) {
// Canonical "org:name:version" rendering used for sorting and display.
def idString: String = organisation + ":" + name + ":" + version
}
// A resolved module as reported by Ivy: optional license, free-form extra info,
// the version it was evicted by (if any), and a resolution error (if any).
case class Module(id: ModuleId,
license: Option[String] = None,
extraInfo: String = "",
evictedByVersion: Option[String] = None,
error: Option[String] = None) {
def hadError: Boolean = error.isDefined
// A module is "used" unless Ivy evicted it in favour of another version.
def isUsed: Boolean = !evictedByVersion.isDefined
}
// Directed dependency edge: (caller, callee).
type Edge = (ModuleId, ModuleId)
// Dependency graph over resolved modules; lazily derives lookup maps from edges.
case class ModuleGraph(nodes: Seq[Module], edges: Seq[Edge]) {
lazy val modules: Map[ModuleId, Module] =
nodes.map(n => (n.id, n)).toMap
def module(id: ModuleId): Module = modules(id)
// id -> modules it depends on (edge direction as stored).
lazy val dependencyMap: Map[ModuleId, Seq[Module]] =
createMap(identity)
// id -> modules that depend on it (edges reversed).
lazy val reverseDependencyMap: Map[ModuleId, Seq[Module]] =
createMap { case (a, b) => (b, a)}
// Builds an adjacency map after applying `bindingFor` to orient each edge;
// missing keys default to Nil and values are sorted for stable output.
def createMap(bindingFor: ((ModuleId, ModuleId)) => (ModuleId, ModuleId)): Map[ModuleId, Seq[Module]] = {
val m = new mutable.HashMap[ModuleId, MSet[Module]] with mutable.MultiMap[ModuleId, Module]
edges.foreach { entry =>
val (f, t) = bindingFor(entry)
m.addBinding(f, module(t))
}
m.toMap.mapValues(_.toSeq.sortBy(_.id.idString)).withDefaultValue(Nil)
}
}
// Parses an Ivy resolution-report XML file into a ModuleGraph.
def graph(ivyReportFile: String): ModuleGraph =
buildGraph(buildDoc(ivyReportFile))
// Extracts modules and caller->callee edges from an Ivy report document and
// prepends the report's own root module (from the <info> element).
def buildGraph(doc: Document): ModuleGraph = {
// One edge per <caller> element: caller (at callerrev) depends on `id`.
def edgesForModule(id: ModuleId, revision: NodeSeq): Seq[Edge] =
for {
caller <- revision \\ "caller"
callerModule = moduleIdFromElement(caller, caller.attribute("callerrev").get.text)
} yield (moduleIdFromElement(caller, caller.attribute("callerrev").get.text), id)
// One Module (plus its incoming edges) per <module>/<revision> pair.
val moduleEdges: Seq[(Module, Seq[Edge])] = for {
mod <- doc \\ "dependencies" \\ "module"
revision <- mod \\ "revision"
rev = revision.attribute("name").get.text
moduleId = moduleIdFromElement(mod, rev)
module = Module(moduleId,
(revision \\ "license").headOption.flatMap(_.attribute("name")).map(_.text),
evictedByVersion = (revision \\ "evicted-by").headOption.flatMap(_.attribute("rev").map(_.text)),
error = revision.attribute("error").map(_.text))
} yield (module, edgesForModule(moduleId, revision))
val (nodes, edges) = moduleEdges.unzip
// The <info> element describes the project whose report this is: the graph root.
val info = (doc \\ "info").head
def infoAttr(name: String): String =
info.attribute(name).getOrElse(throw new IllegalArgumentException("Missing attribute " + name)).text
val rootModule = Module(ModuleId(infoAttr("organisation"), infoAttr("module"), infoAttr("revision")))
ModuleGraph(rootModule +: nodes, edges.flatten)
}
// Builds the sub-graph of everything that (transitively) depends on `root`,
// with edges pointing from dependency to dependent. Depth-first walk with a
// visited set to cut cycles.
def reverseGraphStartingAt(graph: ModuleGraph, root: ModuleId): ModuleGraph = {
val depsMap = graph.reverseDependencyMap
def visit(module: ModuleId, visited: Set[ModuleId]): Seq[(ModuleId, ModuleId)] =
if (visited(module))
Nil
else
depsMap.get(module) match {
case Some(deps) =>
deps.flatMap { to =>
(module, to.id) +: visit(to.id, visited + module)
}
case None => Nil
}
val edges = visit(root, Set.empty)
// Node set = every endpoint of any collected edge.
val nodes = edges.foldLeft(Set.empty[ModuleId])((set, edge) => set + edge._1 + edge._2).map(graph.module)
ModuleGraph(nodes.toSeq, edges)
}
// Removes org.scala-lang:scala-library nodes and the edges into them, marking
// each module that depended on it with a " [S]" suffix in its extraInfo.
// NOTE(review): the `scalaVersion` parameter is never read — possibly vestigial.
def ignoreScalaLibrary(scalaVersion: String, graph: ModuleGraph): ModuleGraph = {
def isScalaLibrary(m: Module) = isScalaLibraryId(m.id)
def isScalaLibraryId(id: ModuleId) = id.organisation == "org.scala-lang" && id.name == "scala-library"
def dependsOnScalaLibrary(m: Module): Boolean =
graph.dependencyMap(m.id).exists(isScalaLibrary)
def addScalaLibraryAnnotation(m: Module): Module = {
if (dependsOnScalaLibrary(m))
m.copy(extraInfo = m.extraInfo + " [S]")
else
m
}
val newNodes = graph.nodes.map(addScalaLibraryAnnotation).filterNot(isScalaLibrary)
val newEdges = graph.edges.filterNot(e => isScalaLibraryId(e._2))
ModuleGraph(newNodes, newEdges)
}
// Reads organisation/name attributes off an XML element; version is supplied by
// the caller since it lives on a different attribute per element kind.
def moduleIdFromElement(element: Node, version: String): ModuleId =
ModuleId(element.attribute("organisation").get.text, element.attribute("name").get.text, version)
// Parses the Ivy report file; whitespace is not preserved.
private def buildDoc(ivyReportFile: String) = ConstructingParser.fromSource(io.Source.fromFile(ivyReportFile), preserveWS = false).document()
}
// sbinary serialization formats for the graph model, used for caching between runs.
object ModuleGraphProtocol extends DefaultProtocol {
import com.zavakid.sbt.IvyGraphMLDependencies._
// Seq is (de)serialized through List, since sbinary ships a List format.
implicit def seqFormat[T: Format]: Format[Seq[T]] = wrap[Seq[T], List[T]](_.toList, _.toSeq)
implicit val ModuleIdFormat: Format[ModuleId] = asProduct3(ModuleId)(ModuleId.unapply(_).get)
implicit val ModuleFormat: Format[Module] = asProduct5(Module)(Module.unapply(_).get)
implicit val ModuleGraphFormat: Format[ModuleGraph] = asProduct2(ModuleGraph)(ModuleGraph.unapply(_).get)
| lustefaniak/sbt-one-log | src/main/scala/com/zavakid/sbt/Util.scala | Scala | apache-2.0 | 5,479 |
import sbt._
import sbt.Keys._
import sbtassembly.Plugin._
import AssemblyKeys._
// sbt 0.1x Build definition for the sendmail project: a root aggregator plus a
// service module (Dropwizard) depending on an API module.
object ApplicationBuild extends Build {
// Runtime dependencies for the service module only.
lazy val serviceDependencies = Seq(
"com.yammer.dropwizard" % "dropwizard-core" % "0.6.2",
"com.sun.mail" % "javax.mail" % "1.5.1",
"com.sun.jersey" % "jersey-core" % "1.17.1"
)
// Shared across all modules.
lazy val commonDependencies = Seq(
"uk.gov.defra" % "capd-common" % "1.0.2"
)
lazy val serviceTestDependencies = Seq(
"org.easytesting" % "fest-assert-core" % "2.0M10" % "test",
"com.novocode" % "junit-interface" % "0.11" % "test",
"com.yammer.dropwizard" % "dropwizard-testing" % "0.6.2" % "test"
)
val appReleaseSettings = Seq(
// Publishing options:
publishMavenStyle := true,
publishArtifact in Test := false,
pomIncludeRepository := { x => false },
// Snapshot versions go to the snapshots repo, everything else to releases.
publishTo <<= version { (v: String) =>
val nexus = "https://defranexus.kainos.com/"
if (v.trim.endsWith("SNAPSHOT"))
Some("sonatype-snapshots" at nexus + "content/repositories/snapshots")
else
Some("sonatype-releases" at nexus + "content/repositories/releases")
},
// Credentials are read from the developer's local ivy config, not the repo.
credentials += Credentials(Path.userHome / ".ivy2" / ".credentials")
)
def defaultResolvers = Seq(
"DEFRA Nexus Release repo" at "https://defranexus.kainos.com/content/repositories/releases/"
)
// Pure-Java modules: no Scala library on the classpath, no cross-versioned paths.
def commonSettings = Defaults.defaultSettings ++ Seq(
organization := "uk.gov.defra",
autoScalaLibrary := false,
crossPaths := false,
exportJars := false,
scalaVersion := "2.10.2",
resolvers ++= defaultResolvers
)
def standardSettingsWithAssembly = commonSettings ++ assemblySettings ++ appReleaseSettings ++ Seq(
// about.html appears in several jars; rename instead of failing the fat-jar merge.
mergeStrategy in assembly <<= (mergeStrategy in assembly) { (old) =>
{
case "about.html" => MergeStrategy.rename
case x => old(x)
}
})
// Aggregator: building root builds both sub-modules.
lazy val root = Project("sendmail", file("."), settings = Defaults.defaultSettings ++ appReleaseSettings ++ Seq(
name := "uk.gov.defra.capd.mail",
resolvers ++= defaultResolvers,
libraryDependencies ++= commonDependencies
)) aggregate(SendEmailService, SendEmailApi)
lazy val SendEmailService: Project = Project("sendmail-service", file("uk.gov.defra.capd.mail.service"),
settings = standardSettingsWithAssembly ++ Seq(
name := "uk.gov.defra.capd.mail.service",
libraryDependencies ++= commonDependencies ++ serviceDependencies ++ serviceTestDependencies
)) dependsOn(SendEmailApi)
lazy val SendEmailApi = Project("sendmail-api", file("uk.gov.defra.capd.mail.api"),
settings = standardSettingsWithAssembly ++ Seq(
name := "uk.gov.defra.capd.mail.api",
libraryDependencies ++= commonDependencies
))
}
| Defra/send-email-service | project/Build.scala | Scala | mit | 2,705 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.keras.layers
import com.intel.analytics.bigdl.dllib.nn.abstractnn.{AbstractModule, IdentityOutputShape}
import com.intel.analytics.bigdl.dllib.nn.internal.KerasLayer
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.dllib.utils.Shape
import com.intel.analytics.bigdl.dllib.keras.Net
import com.intel.analytics.bigdl.dllib.keras.layers.utils.KerasUtils
import scala.reflect.ClassTag
/**
* Applies the soft shrinkage function element-wise to the input.
*
* β§ x - value, if x > value
* f(x) = β¨ x + value, if x < -value
* β© 0, otherwise
*
* When you use this layer as the first layer of a model, you need to provide
* the argument inputShape (a Single Shape, does not include the batch dimension).
*
* Remark: This layer is from Torch and wrapped in Keras style.
*
* @param value The threshold value. Default is 0.5.
* @param inputShape A Single Shape, does not include the batch dimension.
* @tparam T The numeric type of parameter(e.g. weight, bias). Only support float/double now.
*/
class SoftShrink[T: ClassTag](
val value: Double = 0.5,
val inputShape: Shape = null)(implicit ev: TensorNumeric[T])
extends KerasLayer[Tensor[T], Tensor[T], T](KerasUtils.addBatch(inputShape))
with IdentityOutputShape with Net {
// Keras wrapper over the Torch-style SoftShrink module; the threshold `value`
// is forwarded unchanged and the output shape equals the input shape.
override def doBuild(inputShape: Shape): AbstractModule[Tensor[T], Tensor[T], T] = {
val layer = com.intel.analytics.bigdl.dllib.nn.SoftShrink(value)
layer.asInstanceOf[AbstractModule[Tensor[T], Tensor[T], T]]
}
}
// Companion factory mirroring the class constructor defaults.
object SoftShrink {
def apply[@specialized(Float, Double) T: ClassTag](
value: Double = 0.5,
inputShape: Shape = null)(implicit ev: TensorNumeric[T]): SoftShrink[T] = {
new SoftShrink[T](value, inputShape)
}
}
| intel-analytics/BigDL | scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/keras/layers/SoftShrink.scala | Scala | apache-2.0 | 2,478 |
package com.technophobia.substeps.domain
import com.technophobia.substeps.domain.execution.RunResult
import com.technophobia.substeps.domain.repositories.SubstepRepository
// A scenario outline: running it runs every derived (example-instantiated)
// scenario and combines their results, starting from NoneRun.
class OutlinedScenario(outlineTitle: String, val derivedScenarios: Seq[BasicScenario], tags: Set[Tag]) extends Scenario(outlineTitle, tags) {
def run(): RunResult = {
derivedScenarios.foldLeft[RunResult](RunResult.NoneRun)((b, a) => b.combine(a.run()))
}
}
object OutlinedScenario {
// Expands an outline into one BasicScenario per example row: every
// "<placeholder>" token in each outline line is replaced by the row's value for
// that key, and the derived scenarios are titled "<outlineTitle>: <n>" from 1.
def apply(substepRepository: SubstepRepository, outlineTitle: String, outline: Seq[String], examples: List[Map[String, String]], tags: Set[Tag]): OutlinedScenario = {
def applyExampleToSubstepInvocation(example: Map[String, String], outlineLine: String) = {
example.foldLeft[String](outlineLine)((b, a) => b.replaceAll("<" + a._1 + ">", a._2))
}
val derivedStepsForAllExamples : Seq[Seq[String]] = examples.map((example) => outline.map(applyExampleToSubstepInvocation(example, _)))
// 1-based numbering for the derived scenario titles.
val derivedStepsWithIndexes: Seq[(Seq[String], Int)] = (derivedStepsForAllExamples zip Stream.from(1))
val derivedScenarios = derivedStepsWithIndexes.map{case (derivedSteps, index) => BasicScenario(substepRepository, outlineTitle + ": " + index, derivedSteps, tags)}
new OutlinedScenario(outlineTitle, derivedScenarios, tags)
}
} | G2G3Digital/substeps-scala-core | src/main/scala/com/technophobia/substeps/domain/OutlinedScenario.scala | Scala | lgpl-3.0 | 1,321 |
package statemachine.scala.model
/**
 * The states of the automaton (rule names, i.e. the nonterminal symbols).
 * (Original comment was mojibake-encoded Hungarian; translated.)
 *
 * States named LEGAL_STATE_* are the accepting (final) states.
 */
object StateMachineState extends Enumeration {
  type StateMachineState = Value

  val START_SIGNED,
  START_UNSIGNED,
  ILLEGAL_STATE_0,
  LEGAL_STATE_1,
  LEGAL_STATE_2,
  LEGAL_STATE_3,
  LEGAL_STATE_4,
  LEGAL_STATE_5 = Value

  // Set membership replaces the original hand-rolled five-way boolean chain;
  // behavior is identical but adding a new final state is now a one-line change.
  private val finalStates: Set[StateMachineState] =
    Set(LEGAL_STATE_1, LEGAL_STATE_2, LEGAL_STATE_3, LEGAL_STATE_4, LEGAL_STATE_5)

  /** True when `state` is an accepting (final) state of the automaton. */
  def isFinalState(state: StateMachineState): Boolean = finalStates.contains(state)
}
| zporky/langs-and-paradigms | projects/KD1OUR/automata/automata-scala/src/statemachine/scala/model/StateMachineState.scala | Scala | mit | 577 |
package org.cmt
// MUnit tests for the project's Math.add.
class MathSuite extends munit.FunSuite {
// add(i, 0) must be the identity for every i in 1..100.
test("Adding 0 to any integer value should return the same value") {
import Math._
for {
i <- 1 to 100
} assertEquals(add(i, 0), i)
}
// Spot-check addition with a negative operand.
test("Verify basic addition") {
import Math._
assertEquals(add(100, -5), 95)
}
}
| lightbend-training/course-management-tools | course-templates/scala3-cmt-template-no-common/step_000_initial_state/src/test/scala/org/cmt/MathSuite.scala | Scala | apache-2.0 | 311 |
package io.getquill.context.ndbc
import java.util.Properties
import scala.util.control.NonFatal
import com.typesafe.config.Config
import io.trane.ndbc.DataSource
case class NdbcContextConfig(config: Config) {
// Flattens every entry of the Typesafe Config into java.util.Properties,
// using the unwrapped value's string rendering (ndbc consumes Properties).
private def configProperties = {
import scala.jdk.CollectionConverters._
val p = new Properties
for (entry <- config.entrySet.asScala)
p.setProperty(entry.getKey, entry.getValue.unwrapped.toString)
p
}
// Builds an ndbc DataSource from the config, using "ndbc" as the property prefix.
// Non-fatal construction failures are wrapped in IllegalStateException.
// NOTE(review): the message interpolates the whole config, which may include
// credentials — confirm this cannot leak into logs.
def dataSource =
try
DataSource.fromProperties("ndbc", configProperties)
catch {
case NonFatal(ex) =>
throw new IllegalStateException(s"Failed to load data source for config: '$config'", ex)
}
} | getquill/quill | quill-ndbc/src/main/scala/io/getquill/context/ndbc/NdbcContextConfig.scala | Scala | apache-2.0 | 669 |
/*
* Shadowsocks - A shadowsocks client for Android
* Copyright (C) 2014 <max.c.lv@gmail.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*
* ___====-_ _-====___
* _--^^^#####// \\\\#####^^^--_
* _-^##########// ( ) \\\\##########^-_
* -############// |\\^^/| \\\\############-
* _/############// (@::@) \\\\############\\_
* /#############(( \\\\// ))#############\\
* -###############\\\\ (oo) //###############-
* -#################\\\\ / VV \\ //#################-
* -###################\\\\/ \\//###################-
* _#/|##########/\\######( /\\ )######/\\##########|\\#_
* |/ |#/\\#/\\#/\\/ \\#/\\##\\ | | /##/\\#/ \\/\\#/\\#/\\#| \\|
* ` |/ V V ` V \\#\\| | | |/#/ V ' V V \\| '
* ` ` ` ` / | | | | \\ ' ' ' '
* ( | | | | )
* __\\ | | | | /__
* (vvv(VVV)(VVV)vvv)
*
* HERE BE DRAGONS
*
*/
package com.github.shadowsocks
import java.util
import java.io.{OutputStream, InputStream, ByteArrayInputStream, ByteArrayOutputStream, IOException, FileOutputStream}
import java.util.Locale
import java.lang.Math
import android.app.backup.BackupManager
import android.app.{Activity, AlertDialog, ProgressDialog}
import android.content._
import android.content.pm.{PackageInfo, PackageManager}
import android.content.res.AssetManager
import android.graphics.{Bitmap, Color, Typeface}
import android.net.{Uri, VpnService}
import android.os._
import android.preference._
import android.util.{DisplayMetrics, Log}
import android.view.View.OnLongClickListener
import android.view._
import android.webkit.{WebView, WebViewClient}
import android.widget._
import com.github.mrengineer13.snackbar._
import com.github.shadowsocks.aidl.{IShadowsocksService, IShadowsocksServiceCallback}
import com.github.shadowsocks.database._
import com.github.shadowsocks.preferences.{PasswordEditTextPreference, ProfileEditTextPreference, SummaryEditTextPreference}
import com.github.shadowsocks.utils._
import com.google.android.gms.ads.{AdRequest, AdSize, AdView}
import com.google.android.gms.analytics.HitBuilders
import com.google.zxing.integration.android.IntentIntegrator
import com.nostra13.universalimageloader.core.download.BaseImageDownloader
import net.simonvt.menudrawer.MenuDrawer
import com.joanzapata.android.iconify.Iconify
import com.joanzapata.android.iconify.Iconify.IconValue
import com.joanzapata.android.iconify.IconDrawable
import net.glxn.qrgen.android.QRCode
import scala.collection.mutable.{ArrayBuffer, ListBuffer}
import scala.concurrent.ops._
/** ImageLoader downloader that renders profile icons locally instead of fetching
  * them over the network: the profile text (taken from the custom profile:// URI)
  * is mapped to one of six colors and drawn into a PNG served as an InputStream.
  */
class ProfileIconDownloader(context: Context, connectTimeout: Int, readTimeout: Int)
  extends BaseImageDownloader(context, connectTimeout, readTimeout) {

  def this(context: Context) {
    this(context, 0, 0)
  }

  override def getStreamFromOtherSource(imageUri: String, extra: AnyRef): InputStream = {
    // Strip the custom scheme prefix to recover the profile text.
    val text = imageUri.substring(Scheme.PROFILE.length)
    val size = Utils.dpToPx(context, 16).toInt
    // Fix: `text.getBytes.last % 6` was negative for bytes > 0x7F (any multi-byte
    // UTF-8 character) and crashed the Seq apply below; it also crashed on empty
    // text. Normalise into [0, 5] and default empty input to index 0. ASCII input
    // picks exactly the same color as before.
    val bytes = text.getBytes
    val idx = if (bytes.isEmpty) 0 else ((bytes.last % 6) + 6) % 6
    val color = Seq(Color.MAGENTA, Color.GREEN, Color.YELLOW, Color.BLUE, Color.DKGRAY, Color.CYAN)(
      idx)
    val bitmap = Utils.getBitmap(text, size, size, color)
    val os = new ByteArrayOutputStream()
    bitmap.compress(Bitmap.CompressFormat.PNG, 100, os)
    new ByteArrayInputStream(os.toByteArray)
  }
}
/** Loads typefaces from the APK assets, caching each result so an asset is
  * parsed at most once per path. */
object Typefaces {
  /** Returns the cached typeface for `assetPath`, loading it on first use.
    * Returns null when the asset cannot be loaded. */
  def get(c: Context, assetPath: String): Typeface = {
    // The Hashtable doubles as the lock guarding the check-then-put sequence.
    cache synchronized {
      if (!cache.containsKey(assetPath)) {
        try {
          val t: Typeface = Typeface.createFromAsset(c.getAssets, assetPath)
          cache.put(assetPath, t)
        } catch {
          case e: Exception =>
            Log.e(TAG, "Could not get typeface '" + assetPath + "' because " + e.getMessage)
            // Failure is not cached, so the next call retries the load.
            return null
        }
      }
      return cache.get(assetPath)
    }
  }

  private final val TAG = "Typefaces"
  private final val cache = new util.Hashtable[String, Typeface]
}
/** Companion holding constants shared with the activity plus small helpers
  * that push profile values into their preference widgets. */
object Shadowsocks {

  // Constants
  val TAG = "Shadowsocks"
  val REQUEST_CONNECT = 1
  val PREFS_NAME = "Shadowsocks"
  // Preference keys that mirror the per-server fields of a profile.
  val PROXY_PREFS = Array(Key.profileName, Key.proxy, Key.remotePort, Key.localPort, Key.sitekey,
    Key.encMethod)
  // Feature-toggle keys. NOTE(review): name keeps its historical misspelling
  // ("FEATRUE") because it is referenced throughout this file.
  val FEATRUE_PREFS = Array(Key.route, Key.isGlobalProxy, Key.proxyedApps,
    Key.isUdpDns, Key.isAutoConnect)
  // Native binaries shipped in assets; used by install/recovery below.
  val EXECUTABLES = Array(Executable.PDNSD, Executable.REDSOCKS, Executable.SS_TUNNEL, Executable.SS_LOCAL, Executable.TUN2SOCKS)

  // Helper functions: each casts the generic Preference to its concrete widget
  // type and pushes the given value (and, where applicable, the summary).
  def updateListPreference(pref: Preference, value: String) {
    pref.asInstanceOf[ListPreference].setValue(value)
  }

  def updatePasswordEditTextPreference(pref: Preference, value: String) {
    pref.setSummary(value)
    pref.asInstanceOf[PasswordEditTextPreference].setText(value)
  }

  def updateSummaryEditTextPreference(pref: Preference, value: String) {
    pref.setSummary(value)
    pref.asInstanceOf[SummaryEditTextPreference].setText(value)
  }

  def updateProfileEditTextPreference(pref: Preference, value: String) {
    pref.asInstanceOf[ProfileEditTextPreference].resetSummary(value)
    pref.asInstanceOf[ProfileEditTextPreference].setText(value)
  }

  def updateCheckBoxPreference(pref: Preference, value: Boolean) {
    pref.asInstanceOf[CheckBoxPreference].setChecked(value)
  }

  /** Dispatches on the preference key and copies the matching profile field
    * into the widget. Unknown keys are ignored. */
  def updatePreference(pref: Preference, name: String, profile: Profile) {
    name match {
      case Key.profileName => updateProfileEditTextPreference(pref, profile.name)
      case Key.proxy => updateSummaryEditTextPreference(pref, profile.host)
      case Key.remotePort => updateSummaryEditTextPreference(pref, profile.remotePort.toString)
      case Key.localPort => updateSummaryEditTextPreference(pref, profile.localPort.toString)
      case Key.sitekey => updatePasswordEditTextPreference(pref, profile.password)
      case Key.encMethod => updateListPreference(pref, profile.method)
      case Key.route => updateListPreference(pref, profile.route)
      case Key.isGlobalProxy => updateCheckBoxPreference(pref, profile.global)
      case Key.isUdpDns => updateCheckBoxPreference(pref, profile.udpdns)
      case _ =>
    }
  }
}
/** Main settings activity: hosts the preference screen, the sliding profile
  * drawer and the on/off switch, and talks to the background NAT/VPN service
  * over AIDL. */
class Shadowsocks
  extends PreferenceActivity
  with CompoundButton.OnCheckedChangeListener
  with MenuAdapter.MenuListener {

  // Flags
  val MSG_CRASH_RECOVER: Int = 1
  val STATE_MENUDRAWER = "com.github.shadowsocks.menuDrawer"
  val STATE_ACTIVE_VIEW_ID = "com.github.shadowsocks.activeViewId"
  // Tri-state cache for isSinglePane: -1 unknown, 1 single pane, 0 dual pane.
  var singlePane: Int = -1

  // Variables
  var switchButton: Switch = null
  var progressDialog: ProgressDialog = null
  // String resource id of the message shown in progressDialog, or -1 when none.
  var progressTag = -1
  var state = State.INIT
  var prepared = false
  var currentProfile = new Profile
  // Tri-state cache for isVpnEnabled: -1 unknown, 1 VPN mode, 0 NAT (root) mode.
  var vpnEnabled = -1

  // Services
  var currentServiceName = classOf[ShadowsocksNatService].getName
  var bgService: IShadowsocksService = null

  // AIDL callback: the background service reports state transitions here.
  val callback = new IShadowsocksServiceCallback.Stub {
    override def stateChanged(state: Int, msg: String) {
      onStateChanged(state, msg)
    }
  }

  // Connection to the background service; wires up the callback and syncs the
  // switch/preference UI with the service's current state.
  val connection = new ServiceConnection {
    override def onServiceConnected(name: ComponentName, service: IBinder) {
      // Initialize the background service
      bgService = IShadowsocksService.Stub.asInterface(service)
      try {
        bgService.registerCallback(callback)
      } catch {
        case ignored: RemoteException => // Nothing
      }
      // Update the UI
      if (switchButton != null) switchButton.setEnabled(true)
      if (State.isAvailable(bgService.getState)) {
        setPreferenceEnabled(enabled = true)
      } else {
        // Service is busy/running: reflect that on the switch and lock prefs.
        changeSwitch(checked = true)
        setPreferenceEnabled(enabled = false)
      }
      state = bgService.getState

      // set the listener
      switchButton.setOnCheckedChangeListener(Shadowsocks.this)

      // First launch of this app version: run crash recovery once and mark done.
      if (!status.getBoolean(getVersionName, false)) {
        status.edit.putBoolean(getVersionName, true).commit()
        recovery();
      }
    }

    override def onServiceDisconnected(name: ComponentName) {
      if (switchButton != null) switchButton.setEnabled(false)
      try {
        if (bgService != null) bgService.unregisterCallback(callback)
      } catch {
        case ignored: RemoteException => // Nothing
      }
      bgService = null
    }
  }

  private lazy val settings = PreferenceManager.getDefaultSharedPreferences(this)
  private lazy val status = getSharedPreferences(Key.status, Context.MODE_PRIVATE)
  private lazy val preferenceReceiver = new PreferenceBroadcastReceiver
  private lazy val drawer = MenuDrawer.attach(this)
  private lazy val menuAdapter = new MenuAdapter(this, getMenuList)
  private lazy val listView = new ListView(this)
  private lazy val profileManager =
    new ProfileManager(settings, getApplication.asInstanceOf[ShadowsocksApplication].dbHelper)
  private lazy val application = getApplication.asInstanceOf[ShadowsocksApplication]

  // Main-thread handler; created in onCreate, cleared in onDestroy.
  var handler: Handler = null
/** True when the display is at most 720 dp wide (phone-style single pane).
  * The answer is computed once and cached in the tri-state `singlePane` field
  * (-1 unknown, 1 single pane, 0 dual pane). */
def isSinglePane: Boolean = {
  if (singlePane == -1) {
    val metrics = new DisplayMetrics()
    getWindowManager.getDefaultDisplay.getMetrics(metrics)
    val widthDp = metrics.widthPixels / metrics.density
    singlePane = if (widthDp <= 720) 1 else 0
  }
  singlePane == 1
}
/** Programmatically sets the on/off switch without firing our own listener
  * (it is detached first and reattached last), then disables the switch for
  * one second to debounce rapid toggling. */
private def changeSwitch(checked: Boolean) {
  switchButton.setOnCheckedChangeListener(null)
  switchButton.setChecked(checked)
  if (switchButton.isEnabled) {
    switchButton.setEnabled(false)
    handler.postDelayed(new Runnable {
      override def run() {
        switchButton.setEnabled(true)
      }
    }, 1000)
  }
  switchButton.setOnCheckedChangeListener(this)
}

/** Shows an indeterminate, non-cancelable progress dialog with message `msg`
  * and returns a Handler; sending any message to it dismisses the dialog. */
private def showProgress(msg: Int): Handler = {
  clearDialog()
  progressDialog = ProgressDialog.show(this, "", getString(msg), true, false)
  progressTag = msg
  new Handler {
    override def handleMessage(msg: Message) {
      clearDialog()
    }
  }
}
/** Copies every asset under `path` (or the asset root when `path` is empty)
  * into Path.BASE. Individual copy failures are logged and skipped so one bad
  * asset does not abort the rest.
  *
  * Fix: the original closed the streams inside the `try` body, so both the
  * input and output stream leaked whenever open/copy threw; closing now
  * happens in `finally` for both streams unconditionally. */
private def copyAssets(path: String) {
  val assetManager: AssetManager = getAssets
  var files: Array[String] = null
  try {
    files = assetManager.list(path)
  } catch {
    case e: IOException =>
      Log.e(Shadowsocks.TAG, e.getMessage)
  }
  if (files != null) {
    for (file <- files) {
      var in: InputStream = null
      var out: OutputStream = null
      try {
        in = if (path.length > 0) assetManager.open(path + "/" + file)
             else assetManager.open(file)
        out = new FileOutputStream(Path.BASE + file)
        copyFile(in, out)
        out.flush()
      } catch {
        case ex: Exception =>
          Log.e(Shadowsocks.TAG, ex.getMessage)
      } finally {
        // Always release both streams, even when copyFile throws.
        if (in != null) try in.close() catch { case _: IOException => }
        if (out != null) try out.close() catch { case _: IOException => }
      }
    }
  }
}
/** Streams all bytes from `in` to `out` using a fixed 1 KiB buffer.
  * Neither stream is closed; callers own both lifecycles. */
private def copyFile(in: InputStream, out: OutputStream) {
  val chunk = new Array[Byte](1024)
  var n = in.read(chunk)
  while (n != -1) {
    out.write(chunk, 0, n)
    n = in.read(chunk)
  }
}
/** Best-effort cleanup after a crash: kills any leftover native daemons (both
  * the -nat and -vpn variants) recorded in their pid files, removes the stale
  * pid/conf files, and flushes the NAT iptables OUTPUT chain. */
private def crashRecovery() {
  val cmd = new ArrayBuffer[String]()

  // First pass (as root): make the pid files world-readable so the parse
  // below can read them without privileges.
  for (task <- Array("ss-local", "ss-tunnel", "pdnsd", "redsocks", "tun2socks")) {
    cmd.append("chmod 666 %s%s-nat.pid".formatLocal(Locale.ENGLISH, Path.BASE, task))
    cmd.append("chmod 666 %s%s-vpn.pid".formatLocal(Locale.ENGLISH, Path.BASE, task))
  }
  Console.runRootCommand(cmd.toArray)

  cmd.clear()
  for (task <- Array("ss-local", "ss-tunnel", "pdnsd", "redsocks", "tun2socks")) {
    try {
      val pid_nat = scala.io.Source.fromFile(Path.BASE + task + "-nat.pid").mkString.trim.toInt
      val pid_vpn = scala.io.Source.fromFile(Path.BASE + task + "-vpn.pid").mkString.trim.toInt
      cmd.append("kill -9 %d".formatLocal(Locale.ENGLISH, pid_nat))
      cmd.append("kill -9 %d".formatLocal(Locale.ENGLISH, pid_vpn))
      // Also kill via the Android API in case the shell commands fail.
      Process.killProcess(pid_nat)
      Process.killProcess(pid_vpn)
    } catch {
      // Missing/garbled pid file: nothing to kill for this task.
      case e: Throwable => Log.e(Shadowsocks.TAG, "unable to kill " + task)
    }
    cmd.append("rm -f %s%s-nat.pid".formatLocal(Locale.ENGLISH, Path.BASE, task))
    cmd.append("rm -f %s%s-nat.conf".formatLocal(Locale.ENGLISH, Path.BASE, task))
    cmd.append("rm -f %s%s-vpn.pid".formatLocal(Locale.ENGLISH, Path.BASE, task))
    cmd.append("rm -f %s%s-vpn.conf".formatLocal(Locale.ENGLISH, Path.BASE, task))
  }
  // Run both unprivileged and as root; either may succeed depending on setup.
  Console.runCommand(cmd.toArray)
  Console.runRootCommand(cmd.toArray)

  Console.runRootCommand(Utils.getIptables + " -t nat -F OUTPUT")
}

/** Returns this package's versionName, or a placeholder when lookup fails. */
private def getVersionName: String = {
  var version: String = null
  try {
    val pi: PackageInfo = getPackageManager.getPackageInfo(getPackageName, 0)
    version = pi.versionName
  } catch {
    case e: PackageManager.NameNotFoundException =>
      version = "Package name not found"
  }
  version
}
/** Shows an alert snackbar carrying `msg` and returns true when `s` is null
  * or empty; returns false (no UI shown) otherwise. */
def isTextEmpty(s: String, msg: String): Boolean = {
  if (s == null || s.length <= 0) {
    new SnackBar.Builder(this)
      .withMessage(msg)
      .withActionMessageId(R.string.error)
      .withStyle(SnackBar.Style.ALERT)
      .withDuration(SnackBar.LONG_SNACK)
      .show()
    return true
  }
  false
}

/** Aborts a pending start: drops the progress dialog and resets the switch. */
def cancelStart() {
  clearDialog()
  changeSwitch(checked = false)
}

/** Validates required settings before starting the service; each failing
  * check surfaces its own snackbar. `low = false` additionally rejects
  * privileged ports (<= 1024) — see checkNumber. */
def isReady(): Boolean = {
  if (!checkText(Key.proxy)) return false
  if (!checkText(Key.sitekey)) return false
  if (!checkNumber(Key.localPort, low = false)) return false
  if (!checkNumber(Key.remotePort, low = true)) return false
  if (bgService == null) return false
  true
}
/** Shows the connecting dialog and starts the service off the UI thread.
  * In VPN mode, user consent is requested via VpnService.prepare first; the
  * actual start then happens in onActivityResult. */
def prepareStartService() {
  showProgress(R.string.connecting)
  spawn {
    if (isVpnEnabled) {
      val intent = VpnService.prepare(this)
      if (intent != null) {
        startActivityForResult(intent, Shadowsocks.REQUEST_CONNECT)
      } else {
        // Consent already granted; jump straight to the result handler.
        onActivityResult(Shadowsocks.REQUEST_CONNECT, Activity.RESULT_OK, null)
      }
    } else {
      serviceStart()
    }
  }
}

/** Switch listener: starts or stops the proxy, then disables the switch for
  * one second to debounce rapid re-toggling. */
def onCheckedChanged(compoundButton: CompoundButton, checked: Boolean) {
  if (compoundButton eq switchButton) {
    checked match {
      case true =>
        if (isReady)
          prepareStartService()
        else
          changeSwitch(checked = false)
      case false =>
        serviceStop()
    }
    if (switchButton.isEnabled) {
      switchButton.setEnabled(false)
      handler.postDelayed(new Runnable {
        override def run() {
          switchButton.setEnabled(true)
        }
      }, 1000)
    }
  }
}
/** Walks up the view hierarchy from `view` and returns the first
  * LinearLayout ancestor, or null when the root is reached without one. */
def getLayoutView(view: ViewParent): LinearLayout = view match {
  case layout: LinearLayout => layout
  case null => null
  case other => getLayoutView(other.getParent)
}
/** Injects an AdMob banner at the top of the content view, but only for users
  * whose proxy is the project's own public server (hard-coded IP below). */
def initAdView() {
  if (settings.getString(Key.proxy, "") == "198.199.101.152") {
    // On API > 10 the drawer exposes the content child directly; older
    // versions walk up the hierarchy for a LinearLayout instead.
    val layoutView = {
      if (Build.VERSION.SDK_INT > 10) {
        drawer.getContentContainer.getChildAt(0)
      } else {
        getLayoutView(drawer.getContentContainer.getParent)
      }
    }
    if (layoutView != null) {
      val adView = new AdView(this)
      adView.setAdUnitId("ca-app-pub-9097031975646651/7760346322")
      adView.setAdSize(AdSize.SMART_BANNER)
      layoutView.asInstanceOf[ViewGroup].addView(adView, 0)
      adView.loadAd(new AdRequest.Builder().build())
    }
  }
}

/** Routes content through the menu drawer so it can host the sliding menu. */
override def setContentView(layoutResId: Int) {
  drawer.setContentView(layoutResId)
  initAdView()
  onContentChanged()
}
/** Activity entry point: loads the preference screen, selects the active
  * profile, wires up the drawer and the action-bar switch, registers the
  * preference broadcast receiver, and (asynchronously) probes for root
  * before binding to the background service. */
override def onCreate(savedInstanceState: Bundle) {
  super.onCreate(savedInstanceState)
  handler = new Handler()

  addPreferencesFromResource(R.xml.pref_all)

  // Update the profile: first launch of this app version creates a fresh one.
  if (!status.getBoolean(getVersionName, false)) {
    currentProfile = profileManager.create()
  }

  // Initialize the profile from the stored id, keeping the fallback above
  // when the id is unknown.
  currentProfile = {
    profileManager.getProfile(settings.getInt(Key.profileId, -1)) getOrElse currentProfile
  }

  // Initialize drawer
  menuAdapter.setActiveId(settings.getInt(Key.profileId, -1))
  menuAdapter.setListener(this)
  listView.setAdapter(menuAdapter)
  drawer.setMenuView(listView)
  if (Utils.isLollipopOrAbove) {
    drawer.setDrawerIndicatorEnabled(false)
  } else {
    // The drawable that replaces the up indicator in the action bar
    drawer.setSlideDrawable(R.drawable.ic_drawer)
    // Whether the previous drawable should be shown
    drawer.setDrawerIndicatorEnabled(true)
  }
  // Tablets keep the menu open by default.
  if (!isSinglePane) {
    drawer.openMenu(false)
  }

  // Initialize action bar: custom layout with title + on/off switch.
  val switchLayout = getLayoutInflater
    .inflate(R.layout.layout_switch, null)
    .asInstanceOf[RelativeLayout]
  val title: TextView = switchLayout.findViewById(R.id.title).asInstanceOf[TextView]
  val tf: Typeface = Typefaces.get(this, "fonts/Iceland.ttf")
  if (tf != null) title.setTypeface(tf)
  switchButton = switchLayout.findViewById(R.id.switchButton).asInstanceOf[Switch]
  getActionBar.setCustomView(switchLayout)
  getActionBar.setDisplayShowTitleEnabled(false)
  getActionBar.setDisplayShowCustomEnabled(true)
  if (Utils.isLollipopOrAbove) {
    getWindow.addFlags(WindowManager.LayoutParams.FLAG_DRAWS_SYSTEM_BAR_BACKGROUNDS);
    getWindow.setStatusBarColor(getResources().getColor(R.color.grey3));
    getActionBar.setDisplayHomeAsUpEnabled(true)
    getActionBar.setHomeAsUpIndicator(R.drawable.ic_drawer)
  } else {
    getActionBar.setIcon(R.drawable.ic_stat_shadowsocks)
  }

  // Hidden toggle: long-pressing the title flips NAT mode while the service
  // is idle (Lollipop+ only, where VPN is otherwise the default).
  title.setOnLongClickListener(new OnLongClickListener {
    override def onLongClick(v: View): Boolean = {
      if (Utils.isLollipopOrAbove && bgService != null
        && (bgService.getState == State.INIT || bgService.getState == State.STOPPED)) {
        val natEnabled = status.getBoolean(Key.isNAT, false)
        status.edit().putBoolean(Key.isNAT, !natEnabled).commit()
        if (!natEnabled) {
          Toast.makeText(getBaseContext, R.string.enable_nat, Toast.LENGTH_LONG).show()
        } else {
          Toast.makeText(getBaseContext, R.string.disable_nat, Toast.LENGTH_LONG).show()
        }
        true
      } else {
        false
      }
    }
  })

  // Register broadcast receiver
  registerReceiver(preferenceReceiver, new IntentFilter(Action.UPDATE_PREFS))

  // Bind to the service: root probe runs off-thread, the result is persisted
  // and the service attached back on the main thread.
  spawn {
    val isRoot = (!Utils.isLollipopOrAbove || status.getBoolean(Key.isNAT, false)) && Console.isRoot
    handler.post(new Runnable {
      override def run() {
        status.edit.putBoolean(Key.isRoot, isRoot).commit()
        attachService()
      }
    })
  }
}
/** Binds to (and explicitly starts) the NAT or VPN service depending on the
  * current mode; no-op when already bound. */
def attachService() {
  if (bgService == null) {
    val s = if (!isVpnEnabled) classOf[ShadowsocksNatService] else classOf[ShadowsocksVpnService]
    val intent = new Intent(this, s)
    intent.setAction(Action.SERVICE)
    bindService(intent, connection, Context.BIND_AUTO_CREATE)
    // startService keeps the service alive beyond the binding's lifetime.
    startService(new Intent(this, s))
  }
}

/** Unregisters the state callback and unbinds from the background service. */
def deattachService() {
  if (bgService != null) {
    try {
      bgService.unregisterCallback(callback)
    } catch {
      case ignored: RemoteException => // Nothing
    }
    bgService = null
    unbindService(connection)
  }
}
override def onRestoreInstanceState(inState: Bundle) {
  super.onRestoreInstanceState(inState)
  // Restore the drawer's open/closed state across activity recreation.
  drawer.restoreState(inState.getParcelable(STATE_MENUDRAWER))
}

override def onSaveInstanceState(outState: Bundle) {
  super.onSaveInstanceState(outState)
  outState.putParcelable(STATE_MENUDRAWER, drawer.saveState())
  outState.putInt(STATE_ACTIVE_VIEW_ID, currentProfile.id)
}

/** Back closes an open drawer first; only a further press leaves the activity. */
override def onBackPressed() {
  val drawerState = drawer.getDrawerState
  if (drawerState == MenuDrawer.STATE_OPEN || drawerState == MenuDrawer.STATE_OPENING) {
    drawer.closeMenu()
    return
  }
  super.onBackPressed()
}

// MenuAdapter.MenuListener: track the highlighted row in the drawer.
override def onActiveViewChanged(v: View, pos: Int) {
  drawer.setActiveView(v, pos)
}
/** Shows the "add profile" chooser: option 0 scans a QR code (preferring the
  * known scanner apps plus QuickMark), option 1 creates a manual profile. */
def newProfile(id: Int) {
  val builder = new AlertDialog.Builder(this)
  builder
    .setTitle(R.string.add_profile)
    .setItems(R.array.add_profile_methods, new DialogInterface.OnClickListener() {
      def onClick(dialog: DialogInterface, which: Int) {
        which match {
          case 0 =>
            dialog.dismiss()
            // Short progress flash while the scanner intent is prepared.
            val h = showProgress(R.string.loading)
            h.postDelayed(new Runnable() {
              def run() {
                val integrator = new IntentIntegrator(Shadowsocks.this)
                val list = new java.util.ArrayList(IntentIntegrator.TARGET_ALL_KNOWN)
                list.add("tw.com.quickmark")
                integrator.setTargetApplications(list)
                integrator.initiateScan()
                h.sendEmptyMessage(0)
              }
            }, 600)
          case 1 =>
            dialog.dismiss()
            addProfile(id)
          case _ =>
        }
      }
    })
  builder.create().show()
}
/** Re-reads the active profile from settings, refreshes the drawer list and
  * the preference screen. The 600 ms delay lets the drawer close animation
  * finish before the UI is rebuilt. */
def reloadProfile() {
  drawer.closeMenu(true)

  val h = showProgress(R.string.loading)

  handler.postDelayed(new Runnable {
    def run() {
      currentProfile = {
        profileManager.getProfile(settings.getInt(Key.profileId, -1)) getOrElse currentProfile
      }

      menuAdapter.updateList(getMenuList, currentProfile.id)

      updatePreferenceScreen()

      h.sendEmptyMessage(0)
    }
  }, 600)
}

/** Persists `profile` as a new/updated entry, makes it active and refreshes
  * the UI (used for profiles imported from QR codes). */
def addProfile(profile: Profile) {
  drawer.closeMenu(true)

  val h = showProgress(R.string.loading)

  handler.postDelayed(new Runnable {
    def run() {
      currentProfile = profile
      profileManager.createOrUpdateProfile(currentProfile)
      profileManager.reload(currentProfile.id)
      menuAdapter.updateList(getMenuList, currentProfile.id)

      updatePreferenceScreen()

      h.sendEmptyMessage(0)
    }
  }, 600)
}

/** Creates/activates the profile with the given id and refreshes the UI. */
def addProfile(id: Int) {
  drawer.closeMenu(true)

  val h = showProgress(R.string.loading)

  handler.postDelayed(new Runnable {
    def run() {
      currentProfile = profileManager.reload(id)
      profileManager.save()
      menuAdapter.updateList(getMenuList, currentProfile.id)

      updatePreferenceScreen()

      h.sendEmptyMessage(0)
    }
  }, 600)
}

/** Switches the active profile to `id` (drawer row tap) and refreshes the UI. */
def updateProfile(id: Int) {
  drawer.closeMenu(true)

  val h = showProgress(R.string.loading)

  handler.postDelayed(new Runnable {
    def run() {
      currentProfile = profileManager.reload(id)
      menuAdapter.setActiveId(id)
      menuAdapter.notifyDataSetChanged()

      updatePreferenceScreen()

      h.sendEmptyMessage(0)
    }
  }, 600)
}
/** Asks for confirmation and deletes profile `id`, then switches the UI to
  * the first remaining profile (or a fresh one, id -1, when none are left).
  * Returns false when the id is unknown, true once the dialog is shown.
  *
  * Cleanup: replaced `!profile.isDefined` + `profile.get` + `profiles(0)`
  * with a pattern match and `headOption`; behavior is unchanged. */
def delProfile(id: Int): Boolean = {
  drawer.closeMenu(true)

  profileManager.getProfile(id) match {
    case None => false
    case Some(profile) =>
      new AlertDialog.Builder(this)
        .setMessage(String.format(Locale.ENGLISH, getString(R.string.remove_profile), profile.name))
        .setCancelable(false)
        .setNegativeButton(R.string.no, new DialogInterface.OnClickListener() {
          override def onClick(dialog: DialogInterface, i: Int) = dialog.cancel()
        })
        .setPositiveButton(R.string.yes, new DialogInterface.OnClickListener() {
          override def onClick(dialog: DialogInterface, i: Int) {
            profileManager.delProfile(id)
            // Fall back to the first remaining profile, or -1 (new) when none exist.
            val profileId = profileManager.getAllProfiles
              .getOrElse(List.empty[Profile])
              .headOption.map(_.id).getOrElse(-1)
            currentProfile = profileManager.load(profileId)
            menuAdapter.updateList(getMenuList, currentProfile.id)
            updatePreferenceScreen()
            dialog.dismiss()
          }
        })
        .create()
        .show()
      true
  }
}
/** Builds the drawer rows for all stored profiles; tapping activates a
  * profile (updateProfile) and the secondary action deletes it (delProfile). */
def getProfileList: List[Item] = {
  val list = profileManager.getAllProfiles getOrElse List[Profile]()
  list.map(p => new IconItem(p.id, p.name, -1, updateProfile, delProfile))
}

/** Assembles the full drawer menu: profile rows plus the fixed settings
  * actions (recovery, DNS flush, QR export, about). Each action reports an
  * analytics event before running.
  * NOTE(review): id -400 is used for both the "add profile" and "about"
  * items — confirm drawer item ids need not be unique. */
def getMenuList: List[Any] = {

  val buf = new ListBuffer[Any]()

  buf += new Category(getString(R.string.profiles))

  buf ++= getProfileList

  buf +=
    new DrawableItem(-400, getString(R.string.add_profile), new IconDrawable(this, IconValue.fa_plus_circle)
      .colorRes(android.R.color.darker_gray).sizeDp(26), newProfile)

  buf += new Category(getString(R.string.settings))

  buf += new DrawableItem(-100, getString(R.string.recovery), new IconDrawable(this, IconValue.fa_recycle)
    .colorRes(android.R.color.darker_gray).sizeDp(26), _ => {
    // send event
    application.tracker.send(new HitBuilders.EventBuilder()
      .setCategory(Shadowsocks.TAG)
      .setAction("reset")
      .setLabel(getVersionName)
      .build())
    recovery()
  })

  buf +=
    new DrawableItem(-200, getString(R.string.flush_dnscache), new IconDrawable(this, IconValue.fa_refresh)
      .colorRes(android.R.color.darker_gray).sizeDp(26), _ => {
      // send event
      application.tracker.send(new HitBuilders.EventBuilder()
        .setCategory(Shadowsocks.TAG)
        .setAction("flush_dnscache")
        .setLabel(getVersionName)
        .build())
      flushDnsCache()
    })

  buf +=
    new DrawableItem(-300, getString(R.string.qrcode), new IconDrawable(this, IconValue.fa_qrcode)
      .colorRes(android.R.color.darker_gray).sizeDp(26), _ => {
      // send event
      application.tracker.send(new HitBuilders.EventBuilder()
        .setCategory(Shadowsocks.TAG)
        .setAction("qrcode")
        .setLabel(getVersionName)
        .build())
      showQrCode()
    })

  buf += new DrawableItem(-400, getString(R.string.about), new IconDrawable(this, IconValue.fa_info_circle)
    .colorRes(android.R.color.darker_gray).sizeDp(26), _ => {
    // send event
    application.tracker.send(new HitBuilders.EventBuilder()
      .setCategory(Shadowsocks.TAG)
      .setAction("about")
      .setLabel(getVersionName)
      .build())
    showAbout()
  })

  buf.toList
}
/** Handles action-bar item selection: the home button toggles the menu
  * drawer; everything else is delegated to the superclass.
  *
  * Fix: the original `match` had no default case, so selecting any item other
  * than android.R.id.home threw a scala.MatchError, and the trailing `super`
  * call was unreachable. */
override def onOptionsItemSelected(item: MenuItem): Boolean = {
  item.getItemId match {
    case android.R.id.home =>
      drawer.toggleMenu()
      true
    case _ =>
      super.onOptionsItemSelected(item)
  }
}
protected override def onPause() {
  super.onPause()
  // Detach the listener so background state pushes don't toggle the service
  // while the activity is not in the foreground.
  switchButton.setOnCheckedChangeListener(null)
  prepared = false
}

/** Re-syncs the switch with the service state, refreshes config, and reloads
  * the profile if another component changed the active profile id. */
protected override def onResume() {
  super.onResume()
  if (bgService != null) {
    bgService.getState match {
      case State.CONNECTED =>
        changeSwitch(checked = true)
      case State.CONNECTING =>
        changeSwitch(checked = true)
      case _ =>
        changeSwitch(checked = false)
    }
    state = bgService.getState

    // set the listener
    switchButton.setOnCheckedChangeListener(Shadowsocks.this)
  }

  ConfigUtils.refresh(this)

  // Check if profile list changed
  val id = settings.getInt(Key.profileId, -1)
  if (id != -1 && id != currentProfile.id)
    reloadProfile()
}
/** Enables/disables every proxy and feature preference. The global-proxy and
  * per-app-proxy entries additionally require Lollipop+ or NAT (non-VPN)
  * mode to be usable. */
private def setPreferenceEnabled(enabled: Boolean) {
  val natOnly = Seq(Key.isGlobalProxy, Key.proxyedApps)
  (Shadowsocks.PROXY_PREFS ++ Shadowsocks.FEATRUE_PREFS).foreach { name =>
    val pref = findPreference(name)
    if (pref != null) {
      val effective =
        if (natOnly.contains(name)) enabled && (Utils.isLollipopOrAbove || !isVpnEnabled)
        else enabled
      pref.setEnabled(effective)
    }
  }
}

/** Pushes every field of the active profile into its preference widget. */
private def updatePreferenceScreen() {
  val profile = currentProfile
  (Shadowsocks.PROXY_PREFS ++ Shadowsocks.FEATRUE_PREFS).foreach { name =>
    Shadowsocks.updatePreference(findPreference(name), name, profile)
  }
}
override def onStart() {
  super.onStart()
}

override def onStop() {
  super.onStop()
  // Never leave a progress dialog attached to a stopped activity.
  clearDialog()
}

override def onDestroy() {
  super.onDestroy()
  deattachService()
  unregisterReceiver(preferenceReceiver)
  // Let Android's backup service know preferences may have changed.
  new BackupManager(this).dataChanged()
  if (handler != null) {
    handler.removeCallbacksAndMessages(null)
    handler = null
  }
}

/** Copies the native binaries into /system/bin (root required; remounts
  * /system read-write for the duration). */
def copyToSystem() {
  val ab = new ArrayBuffer[String]
  ab.append("mount -o rw,remount -t yaffs2 /dev/block/mtdblock3 /system")
  for (executable <- Shadowsocks.EXECUTABLES) {
    ab.append("cp %s%s /system/bin/".formatLocal(Locale.ENGLISH, Path.BASE, executable))
    ab.append("chmod 755 /system/bin/" + executable)
    ab.append("chown root:shell /system/bin/" + executable)
  }
  ab.append("mount -o ro,remount -t yaffs2 /dev/block/mtdblock3 /system")
  Console.runRootCommand(ab.toArray)
}

/** Extracts the ABI-specific native binaries from assets and marks them executable. */
def install() {
  copyAssets(System.getABI)

  val ab = new ArrayBuffer[String]
  for (executable <- Shadowsocks.EXECUTABLES) {
    ab.append("chmod 755 " + Path.BASE + executable)
  }
  Console.runCommand(ab.toArray)
}

/** Full reset: kill leftovers, clean state files, reinstall binaries. */
def reset() {
  crashRecovery()

  install()
}

/** Stops the service and runs reset() off-thread behind a progress dialog. */
private def recovery() {
  serviceStop()
  val h = showProgress(R.string.recovering)
  spawn {
    reset()
    h.sendEmptyMessage(0)
  }
}
/** Converts density-independent pixels to physical pixels using the
  * display's horizontal dpi. */
private def dp2px(dp: Int): Int = {
  val metrics = getBaseContext.getResources.getDisplayMetrics()
  Math.round(dp * (metrics.xdpi / DisplayMetrics.DENSITY_DEFAULT))
}
/** Renders the current profile as a QR code bitmap and shows it in a dialog
  * so it can be scanned by another device. */
private def showQrCode() {
  val image = new ImageView(this)
  image.setPadding(0, dp2px(20), 0, dp2px(20))
  image.setLayoutParams(new LinearLayout.LayoutParams(-1, -1))
  val qrcode = QRCode.from(Parser.generate(currentProfile))
    .withSize(dp2px(250), dp2px(250)).asInstanceOf[QRCode]
  image.setImageBitmap(qrcode.bitmap())
  new AlertDialog.Builder(this)
    .setCancelable(true)
    .setNegativeButton(getString(R.string.close), new DialogInterface.OnClickListener() {
      override def onClick(dialog: DialogInterface, id: Int) {
        dialog.cancel()
      }
    })
    .setView(image)
    .create()
    .show()
}

/** Flushes the system DNS cache by toggling airplane mode (off-thread). */
private def flushDnsCache() {
  val h = showProgress(R.string.flushing)
  spawn {
    Utils.toggleAirplaneMode(getBaseContext)
    h.sendEmptyMessage(0)
  }
}

/** Two result paths share this callback: a QR scan result (imports the
  * scanned profile) and the VpnService consent dialog (starts the service on
  * OK, aborts otherwise). */
override def onActivityResult(requestCode: Int, resultCode: Int, data: Intent) {
  val scanResult = IntentIntegrator.parseActivityResult(requestCode, resultCode, data)
  if (scanResult != null) {
    Parser.parse(scanResult.getContents) match {
      case Some(profile) => addProfile(profile)
      case _ => // ignore
    }
  } else {
    resultCode match {
      case Activity.RESULT_OK =>
        prepared = true
        serviceStart()
      case _ =>
        cancelStart()
        Log.e(Shadowsocks.TAG, "Failed to start VpnService")
    }
  }
}
/** True when running in VPN mode (no root available), false for NAT mode.
  * Computed once from the persisted root flag and cached in the tri-state
  * `vpnEnabled` field (-1 unknown, 1 VPN, 0 NAT). */
def isVpnEnabled: Boolean = {
  if (vpnEnabled < 0) {
    vpnEnabled = if (status.getBoolean(Key.isRoot, false)) 0 else 1
  }
  vpnEnabled == 1
}
/** Asks the background service to stop, when bound. */
def serviceStop() {
  if (bgService != null) bgService.stop()
}

/** True when preference `key` holds non-empty text; otherwise shows an alert
  * snackbar and returns false.
  * NOTE(review): the proxy_empty message is shown for every key, including
  * sitekey — confirm that wording is intended. */
def checkText(key: String): Boolean = {
  val text = settings.getString(key, "")
  !isTextEmpty(text, getString(R.string.proxy_empty))
}

/** Validates that preference `key` holds a numeric port. When `low` is
  * false, privileged ports (<= 1024) are also rejected. Any failure shows an
  * alert snackbar and returns false. */
def checkNumber(key: String, low: Boolean): Boolean = {
  val text = settings.getString(key, "")
  if (isTextEmpty(text, getString(R.string.port_empty))) return false
  try {
    val port: Int = Integer.valueOf(text)
    if (!low && port <= 1024) {
      new SnackBar.Builder(this)
        .withMessageId(R.string.port_alert)
        .withActionMessageId(R.string.error)
        .withStyle(SnackBar.Style.ALERT)
        .withDuration(SnackBar.LONG_SNACK)
        .show()
      return false
    }
  } catch {
    // Non-numeric input (NumberFormatException) lands here.
    case ex: Exception =>
      new SnackBar.Builder(this)
        .withMessageId(R.string.port_alert)
        .withActionMessageId(R.string.error)
        .withStyle(SnackBar.Style.ALERT)
        .withDuration(SnackBar.LONG_SNACK)
        .show()
      return false
  }
  true
}
/** Called when connect button is clicked. */
def serviceStart() {
  bgService.start(ConfigUtils.load(settings))
  // NOTE(review): in VPN mode the switch is reset here; presumably the state
  // callback re-enables it once the tunnel is actually up — confirm.
  if (isVpnEnabled) {
    changeSwitch(checked = false)
  }
}

/** Shows the bundled about page in a WebView dialog; external links open in
  * the system browser. */
private def showAbout() {

  val web = new WebView(this)
  web.loadUrl("file:///android_asset/pages/about.html")
  web.setWebViewClient(new WebViewClient() {
    override def shouldOverrideUrlLoading(view: WebView, url: String): Boolean = {
      startActivity(new Intent(Intent.ACTION_VIEW, Uri.parse(url)))
      true
    }
  })

  var versionName = ""
  try {
    versionName = getPackageManager.getPackageInfo(getPackageName, 0).versionName
  } catch {
    case ex: PackageManager.NameNotFoundException =>
      versionName = ""
  }

  new AlertDialog.Builder(this)
    .setTitle(getString(R.string.about_title).formatLocal(Locale.ENGLISH, versionName))
    .setCancelable(false)
    .setNegativeButton(getString(R.string.ok_iknow), new DialogInterface.OnClickListener() {
      override def onClick(dialog: DialogInterface, id: Int) {
        dialog.cancel()
      }
    })
    .setView(web)
    .create()
    .show()
}

/** Dismisses the progress dialog, if any, and resets its tag. */
def clearDialog() {
  if (progressDialog != null) {
    progressDialog.dismiss()
    progressDialog = null
    progressTag = -1
  }
}
/** Service state callback (invoked from the AIDL stub): marshals onto the UI
  * thread and syncs the dialogs, switch and preference lock-state with the
  * new service state. `m`, when non-null, is an error message to surface. */
def onStateChanged(s: Int, m: String) {
  handler.post(new Runnable {
    override def run() {
      if (state != s) {
        state = s
        state match {
          case State.CONNECTING =>
            if (progressDialog == null) {
              progressDialog = ProgressDialog
                .show(Shadowsocks.this, "", getString(R.string.connecting), true, true)
              progressTag = R.string.connecting
            }
            setPreferenceEnabled(enabled = false)
          case State.CONNECTED =>
            // Only dismiss the dialog we put up ourselves for connecting.
            if (progressTag == R.string.connecting) {
              clearDialog()
            }
            changeSwitch(checked = true)
            setPreferenceEnabled(enabled = false)
          case State.STOPPED =>
            if (progressTag == R.string.stopping || progressTag == R.string.connecting) {
              clearDialog()
            }
            changeSwitch(checked = false)
            // A non-null message means the service stopped because of an error.
            if (m != null) {
              new SnackBar.Builder(Shadowsocks.this)
                .withMessage(getString(R.string.vpn_error).formatLocal(Locale.ENGLISH, m))
                .withActionMessageId(R.string.error)
                .withStyle(SnackBar.Style.ALERT)
                .withDuration(SnackBar.LONG_SNACK)
                .show()
            }
            setPreferenceEnabled(enabled = true)
          case State.STOPPING =>
            if (progressDialog == null) {
              progressDialog = ProgressDialog
                .show(Shadowsocks.this, "", getString(R.string.stopping), true, true)
              progressTag = R.string.stopping
            }
        }
      }
    }
  })
}

/** Reloads the persisted profile and refreshes the drawer when another
  * component broadcasts a preference update. */
class PreferenceBroadcastReceiver extends BroadcastReceiver {
  override def onReceive(context: Context, intent: Intent) {
    currentProfile = profileManager.save()
    menuAdapter.updateList(getMenuList, currentProfile.id)
  }
}
}
| Axure/shadowsocks-android | src/main/scala/com/github/shadowsocks/Shadowsocks.scala | Scala | gpl-3.0 | 36,280 |
/*
* Copyright 2013-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package laika.parse.rst
import laika.parse.core.Parser
import org.scalatest.FlatSpec
import org.scalatest.junit.JUnitRunner
import org.scalatest.Matchers
import laika.parse.helper.DefaultParserHelpers
import laika.parse.helper.ParseResultHelpers
import laika.tree.Elements.Span
import laika.tree.helper.ModelBuilder
import laika.parse.rst.Elements._
import laika.tree.Elements._
import laika.parse.rst.TextRoles.RoleDirectivePart
import laika.parse.rst.Directives.DirectivePart
class TableParsersSpec extends FlatSpec
with Matchers
with BlockParsers
with InlineParsers
with ParseResultHelpers
with DefaultParserHelpers[RootElement]
with ModelBuilder {
val defaultParser: Parser[RootElement] = rootElement

// Directive/text-role hooks required by the BlockParsers/InlineParsers
// mixins; this spec only exercises table markup, so none are provided.
def blockDirective (name: String): Option[DirectivePart[Block]] = None
def spanDirective (name: String): Option[DirectivePart[Span]] = None
def textRole (name: String): Option[RoleDirectivePart[String => Span]] = None

// Basic 2x2 grid table with no merged cells.
"The grid table parser" should "parse a small table with 2 rows and 2 cells" in {
  val input = """+---+---+
    || a | b |
    |+---+---+
    || c | d |
    |+---+---+""".stripMargin
  Parsing (input) should produce (root( table(strrow("a","b"), strrow("c","d"))))
}

// A missing '+' in the separator merges two columns horizontally (colspan 2).
it should "parse a table with horizontally merged cells in the first row" in {
  val input = """+---+---+
    || a b |
    |+---+---+
    || c | d |
    |+---+---+""".stripMargin
  Parsing (input) should produce (root( table(row(cell("a b", 2, 1)), strrow("c","d"))))
}

it should "parse a table with horizontally merged cells in the second row" in {
  val input = """+---+---+
    || a | b |
    |+---+---+
    || c d |
    |+---+---+""".stripMargin
  Parsing (input) should produce (root( table(strrow("a","b"), row(cell("c d", 2, 1)))))
}

// A '+' replaced by a continuation in the row separator merges vertically
// (rowspan 2); the merged cell keeps all three content lines.
it should "parse a table with vertically merged cells in the left column" in {
  val input = """+---+---+
    || a | d |
    |+ b +---+
    || c | e |
    |+---+---+""".stripMargin
  Parsing (input) should produce (root( table(row(cell("a\\nb\\nc", 1, 2), cell("d")), strrow("e"))))
}

it should "parse a table with vertically merged cells in the right column" in {
  val input = """+---+---+
    || a | b |
    |+---+ c +
    || e | d |
    |+---+---+""".stripMargin
  Parsing (input) should produce (root( table(row(cell("a"), cell("b\\nc\\nd", 1, 2)), strrow("e"))))
}

// Combined colspan 2 + rowspan 2 block in the lower-left corner.
it should "parse a table with vertically and horizontally merged cells" in {
  val input = """+---+---+---+
    || a | b | c |
    |+---+---+---+
    || 1-1 | d |
    || 2-2 +---+
    || 3-3 | e |
    |+---+---+---+""".stripMargin
  Parsing (input) should produce (root( table(strrow("a","b","c"), row(cell("1-1\\n2-2\\n3-3", 2, 2), cell("d")), strrow("e"))))
}

// Whitespace-only cells still produce (empty) cell nodes.
it should "parse tables with empty cells" in {
  val input = """+---+---+
    ||   |   |
    |+---+---+
    ||   |   |
    |+---+---+""".stripMargin
  Parsing (input) should produce (root( table( row(cell(),cell()), row(cell(),cell()))))
}
it should "fail in case of illegal merging of cells (variant 1)" in {
val input = """+---+---+
|| a | b |
|+ +---+
|| c d |
|+---+---+""".stripMargin
Parsing (input) should produce (root( p(input)))
}
it should "fail in case of illegal merging of cells (variant 2)" in {
val input = """+---+---+
|| a | b |
|+---+ +
|| c d |
|+---+---+""".stripMargin
Parsing (input) should produce (root( p(input)))
}
it should "fail in case of illegal merging of cells (variant 3)" in {
val input = """+---+---+
|| a b |
|+ +---+
|| c | d |
|+---+---+""".stripMargin
Parsing (input) should produce (root( p(input)))
}
it should "fail in case of illegal merging of cells (variant 4)" in {
val input = """+---+---+
|| a b |
|+---+ +
|| c | d |
|+---+---+""".stripMargin
Parsing (input) should produce (root( p(input)))
}
it should "parse cells containing multiple block elements" in {
val input = """+---+---------+
|| a | Text |
|| | |
|| | * Line1 |
|| | Line2 |
|| | |
|| | * Line3 |
|+---+---------+
|| c | d |
|+---+---------+""".stripMargin
Parsing (input) should produce (root( table(row(cell("a"), cell(p("Text"), bulletList() + "Line1\\nLine2" + "Line3")), strrow("c","d"))))
}
it should "parse tables with header cells" in {
val input = """+---+---+
|| a | b |
|+===+===+
|| c | d |
|+---+---+""".stripMargin
Parsing (input) should produce (root( Table(TableHead(List(row(Cell(HeadCell,List(p("a"))), Cell(HeadCell,List(p("b")))))),
TableBody(List(strrow("c","d"))))))
}
"The simple table parser" should "parse a small table with 2 rows and 2 cells" in {
val input = """=== ===
| a b
| c d
|=== ===""".stripMargin
Parsing (input) should produce (root( table(strrow("a","b"), strrow("c","d"))))
}
it should "parse a table with horizontally merged cells in the first row" in {
val input = """=== ===
| a b
|--------
| c d
|=== ===""".stripMargin
Parsing (input) should produce (root( table(row(cell("a b", 2, 1)), strrow("c","d"))))
}
it should "parse a table with horizontally merged cells in the second row" in {
val input = """=== ===
| a b
| c d
|========""".stripMargin
Parsing (input) should produce (root( table(strrow("a","b"), row(cell("c d", 2, 1)))))
}
it should "parse tables with empty cells" in {
val input = """=== ===
| a
| c
|=== ===""".stripMargin
Parsing (input) should produce (root( table(row(cell("a"),cell()), row(cell("c"),cell()))))
}
it should "parse cells containing multiple block elements" in {
val input = """=== ===
| a Text
|
| * Line1
| Line2
|
| * Line3
|
| c d
|=== ===""".stripMargin
Parsing (input) should produce (root( table(row(cell("a"), cell(p("Text"), bulletList() + "Line1\\nLine2" + "Line3")), strrow("c","d"))))
}
it should "parse tables with header cells" in {
val input = """=== ===
| a b
|=== ===
| c d
|=== ===""".stripMargin
Parsing (input) should produce (root( Table(TableHead(List(row(Cell(HeadCell,List(p("a"))), Cell(HeadCell,List(p("b")))))),
TableBody(List(strrow("c","d"))))))
}
}
| amuramatsu/Laika | core/src/test/scala/laika/parse/rst/TableParsersSpec.scala | Scala | apache-2.0 | 7,523 |
/**
 * Positive compiler test: a polymorphic `apply` returning a function value
 * must be callable in all three syntactic forms below.
 */
object Terminal {
  // Polymorphic factory for a function that discards its argument.
  def apply[a] : a => Unit = { a => () }
  val i0 = Terminal.apply[Int]  // explicit `apply` with type argument
  val i1 = (Terminal)[Int]      // parenthesized reference with implicit `apply`
  val i2 = Terminal[Int]        // plain application syntax
}
| AlexSikia/dotty | tests/pos/t1034.scala | Scala | bsd-3-clause | 144 |
/*
Copyright (c) 2014, Elliot Stirling
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package hardware
import scala.annotation.tailrec
import scala.util.Random
/**
 * A discrete event simulation engine.
 *
 * Actions are queued on an agenda sorted by simulated time; `run` drains the
 * agenda, advancing the simulated clock to each scheduled item's time stamp.
 */
abstract class Simulation {

  /** An action scheduled to fire at the given simulated time. */
  case class WorkItem(time: Long, action: Action)

  private type Agenda = List[WorkItem]

  private var agenda: Agenda = List()
  private var currentTime = 0L

  // add a small amount of variance
  private val random: Random = new Random()

  /**
   * Inserts `item` behind every queued item with an equal or earlier
   * time stamp, keeping the agenda sorted and FIFO for equal times.
   */
  private def insert(ag: Agenda, item: WorkItem): Agenda = ag match {
    case head :: tail if head.time <= item.time => head :: insert(tail, item)
    case _                                      => item :: ag
  }

  // delay lower bounds
  val GateDelay = 1000

  /** A small random component (0-9) to be added to gate delays. */
  def getGateVariance: Int = random.nextInt(10)

  /** Schedules `block` to run `delay` ticks after the current simulated time. */
  def afterDelay(delay: Int)(block: => Unit): Unit =
    agenda = insert(agenda, WorkItem(currentTime + delay, () => block))

  /** Pops the next work item, advances the clock to its time and runs it. */
  private def next(): Unit =
    agenda.headOption.foreach { item =>
      agenda = agenda.tail
      currentTime = item.time
      item.action()
    }

  // run the simulation till its stable
  def run(): Long = {
    val startedAt = currentTime
    while (agenda.nonEmpty)
      next()
    currentTime - startedAt
  }

  /** Attaches a probe that logs every signal change on `wire`. */
  def probe(name: String, wire: Wire): Unit =
    wire addAction { () =>
      println(name + " " + currentTime + " new_value = " + wire.getSignal)
    }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import java.util.{LinkedHashMap => JLinkedHashMap}
import java.util.Map.Entry
import scala.collection.mutable
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.execution.streaming.FileStreamSource.FileEntry
import org.apache.spark.sql.internal.SQLConf
/**
 * Metadata log used by the file stream source, recording which files each
 * batch processed. Entries belonging to compaction batches are additionally
 * kept in a bounded in-memory cache so a compaction batch's own data can be
 * served without re-reading the compacted log file.
 */
class FileStreamSourceLog(
    metadataLogVersion: Int,
    sparkSession: SparkSession,
    path: String)
  extends CompactibleFileStreamLog[FileEntry](metadataLogVersion, sparkSession, path) {

  import CompactibleFileStreamLog._

  // Configurations about metadata compaction
  protected override val defaultCompactInterval: Int =
    sparkSession.sessionState.conf.fileSourceLogCompactInterval
  require(defaultCompactInterval > 0,
    s"Please set ${SQLConf.FILE_SOURCE_LOG_COMPACT_INTERVAL.key} " +
      s"(was $defaultCompactInterval) to a positive value.")

  protected override val fileCleanupDelayMs =
    sparkSession.sessionState.conf.fileSourceLogCleanupDelay

  protected override val isDeletingExpiredLog = sparkSession.sessionState.conf.fileSourceLogDeletion

  // JSON (de)serialization format, required implicitly by json4s.
  private implicit val formats = Serialization.formats(NoTypeHints)

  // A fixed-size cache of the file entries belonging to compaction batches. It is
  // used to avoid scanning the compacted log file to retrieve a batch's own data.
  private val cacheSize = compactInterval
  private val fileEntryCache = new JLinkedHashMap[Long, Array[FileEntry]] {
    // Evict the oldest cached batch once the cache exceeds its capacity.
    override def removeEldestEntry(eldest: Entry[Long, Array[FileEntry]]): Boolean = {
      size() > cacheSize
    }
  }

  // Compaction keeps all entries as-is; nothing is merged or dropped.
  def compactLogs(logs: Seq[FileEntry]): Seq[FileEntry] = {
    logs
  }

  /**
   * Writes the batch to the underlying log and, for compaction batches,
   * also caches the entries in memory. Returns false if the write failed.
   */
  override def add(batchId: Long, logs: Array[FileEntry]): Boolean = {
    if (super.add(batchId, logs)) {
      if (isCompactionBatch(batchId, compactInterval)) {
        fileEntryCache.put(batchId, logs)
      }
      true
    } else {
      false
    }
  }

  /**
   * Returns the file entries for every batch in [startId, endId], serving
   * compaction batches from the cache when possible and falling back to the
   * latest compacted log file for batches whose log files were removed.
   */
  override def get(startId: Option[Long], endId: Option[Long]): Array[(Long, Array[FileEntry])] = {
    val startBatchId = startId.getOrElse(0L)
    val endBatchId = endId.orElse(getLatest().map(_._1)).getOrElse(0L)

    // Split batch ids into those we could resolve directly (cache or own log
    // file) and those whose log file appears to have been removed.
    val (existedBatches, removedBatches) = (startBatchId to endBatchId).map { id =>
      if (isCompactionBatch(id, compactInterval) && fileEntryCache.containsKey(id)) {
        (id, Some(fileEntryCache.get(id)))
      } else {
        val logs = super.get(id).map(_.filter(_.batchId == id))
        (id, logs)
      }
    }.partition(_._2.isDefined)

    // The code below can only run when an original metadata log file has been
    // removed, so we have to recover the batch from the latest compacted log
    // file. This is quite time-consuming and should not happen in the current
    // FileStreamSource code path, since we only fetch the latest metadata log file.
    val searchKeys = removedBatches.map(_._1)
    val retrievedBatches = if (searchKeys.nonEmpty) {
      logWarning(s"Get batches from removed files, this is unexpected in the current code path!!!")
      val latestBatchId = getLatestBatchId().getOrElse(-1L)
      if (latestBatchId < 0) {
        Map.empty[Long, Option[Array[FileEntry]]]
      } else {
        val latestCompactedBatchId = getAllValidBatches(latestBatchId, compactInterval)(0)
        val allLogs = new mutable.HashMap[Long, mutable.ArrayBuffer[FileEntry]]

        // Group every entry in the compacted file by its original batch id.
        super.get(latestCompactedBatchId).foreach { entries =>
          entries.foreach { e =>
            allLogs.put(e.batchId, allLogs.getOrElse(e.batchId, mutable.ArrayBuffer()) += e)
          }
        }

        searchKeys.map(id => id -> allLogs.get(id).map(_.toArray)).filter(_._2.isDefined).toMap
      }
    } else {
      Map.empty[Long, Option[Array[FileEntry]]]
    }

    val batches =
      (existedBatches ++ retrievedBatches).map(i => i._1 -> i._2.get).toArray.sortBy(_._1)
    if (startBatchId <= endBatchId) {
      // Sanity check: the resolved ids must form the requested contiguous range.
      HDFSMetadataLog.verifyBatchIds(batches.map(_._1), startId, endId)
    }
    batches
  }
}
object FileStreamSourceLog {
  // Format version of the serialized metadata log.
  val VERSION = 1
}
| dbtsai/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSourceLog.scala | Scala | apache-2.0 | 4,895 |
package auth.controllers
import java.time.Clock
import java.util.UUID
import auth.models.AuthToken
import auth.models.services.AuthTokenService
import com.mohiva.play.silhouette.api.repositories.AuthInfoRepository
import com.mohiva.play.silhouette.api.services.AvatarService
import com.mohiva.play.silhouette.api.util.{ PasswordHasher, PasswordHasherRegistry, PasswordInfo }
import com.mohiva.play.silhouette.test._
import core.AuthSpecification
import core.models.User
import core.models.services.UserService
import net.codingwell.scalaguice.ScalaModule
import org.specs2.control.NoLanguageFeatures
import org.specs2.mock.Mockito
import play.api.http.HeaderNames
import play.api.i18n.Messages
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.libs.json.Json
import play.api.libs.mailer.{ Email, MailerClient }
import play.api.test.CSRFTokenHelper._
import play.api.test.{ FakeRequest, WithApplication }
import test.ApiSpecification
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps
/**
* Test case for the [[SignUpController]] class.
*/
class SignUpControllerSpec
  extends ApiSpecification
  with AuthSpecification
  with Mockito
  with NoLanguageFeatures {
  sequential

  "The `signUp` action" should {
    // An already-authenticated user must not reach the sign-up endpoint.
    "return HTTP status 403 if the user is authenticated" in new Context {
      new WithApplication(application) {
        val request = FakeRequest().withAuthenticator(loginInfo)

        Response(
          FORBIDDEN,
          controller.signUp(request),
          "auth.forbidden",
          Messages("auth.forbidden")
        )
      }
    }

    // Form validation: malformed email address.
    "return HTTP status 400 if the `email` field is invalid" in new Context {
      new WithApplication(application) {
        val request = FakeRequest().withJsonBody(Json.obj(
          "email" -> "invalid",
          "password" -> password,
          "name" -> name
        )).withCSRFToken

        Response(
          BAD_REQUEST,
          controller.signUp(request),
          "auth.signUp.form.invalid",
          Messages("invalid.form"),
          Seq(FormError("email", Messages("error.email")))
        )
      }
    }

    // Form validation: empty password.
    "return HTTP status 400 if the `password` field is missing" in new Context {
      new WithApplication(application) {
        val request = FakeRequest().withJsonBody(Json.obj(
          "email" -> email,
          "password" -> "",
          "name" -> "John Doe"
        )).withCSRFToken

        Response(
          BAD_REQUEST,
          controller.signUp(request),
          "auth.signUp.form.invalid",
          Messages("invalid.form"),
          Seq(FormError("password", Messages("error.required")))
        )
      }
    }

    // Form validation: empty name.
    "return HTTP status 400 if the `name` field is missing" in new Context {
      new WithApplication(application) {
        val request = FakeRequest().withJsonBody(Json.obj(
          "email" -> email,
          "password" -> password,
          "name" -> ""
        )).withCSRFToken

        Response(
          BAD_REQUEST,
          controller.signUp(request),
          "auth.signUp.form.invalid",
          Messages("invalid.form"),
          Seq(FormError("name", Messages("error.required")))
        )
      }
    }

    // Existing account: respond as if sign-up succeeded (no account enumeration)
    // but send a notification email to the existing user instead.
    "send an email to an already existing user" in new Context {
      new WithApplication(application) {
        val request = FakeRequest().withJsonBody(Json.obj(
          "email" -> email,
          "password" -> password,
          "name" -> name
        )).withCSRFToken

        userService.retrieve(loginInfo) returns Future.successful(Some(user))

        Response(
          CREATED,
          controller.signUp(request),
          "auth.signUp.successful",
          Messages("auth.sign.up.email.sent")
        )
        there was one(mailerClient).send(any[Email])
      }
    }

    // Happy path: a new user is persisted with registration metadata taken
    // from the request, and an activation email is sent.
    "register a new user and send an account activation email" in new Context {
      new WithApplication(application) {
        val captor = capture[User]
        val host = "localhost:9000"
        val userAgent = "Chrome/58.0.3029.81 Safari/537.36"
        val avatarURL = "http://my.avatar/photo.jpg"
        val hashedPassword = "hashed-password"
        val passwordHasher = mock[PasswordHasher].smart
        val passwordInfo = PasswordInfo("test-hasher", hashedPassword)
        val request = FakeRequest()
          .withJsonBody(Json.obj(
            "email" -> email,
            "password" -> password,
            "name" -> "John Doe"
          ))
          .withHeaders(
            HeaderNames.HOST -> host,
            HeaderNames.USER_AGENT -> userAgent,
            HeaderNames.ACCEPT_LANGUAGE -> lang.code
          )
          .withCSRFToken

        passwordHasher.hash(password) returns passwordInfo
        passwordHasherRegistry.current returns passwordHasher
        userService.retrieve(loginInfo) returns Future.successful(None)
        avatarService.retrieveURL(email) returns Future.successful(Some(avatarURL))
        authInfoRepository.add(loginInfo, passwordInfo) returns Future.successful(passwordInfo)
        authTokenService.create(user.id, 5 minutes) returns Future.successful(authToken)
        userService.save(any[User]) returns Future.successful(user)

        Response(
          CREATED,
          controller.signUp(request),
          "auth.signUp.successful",
          Messages("auth.sign.up.email.sent")
        )
        there was one(mailerClient).send(any[Email])
        there was one(userService).save(captor)

        // Inspect the captured user to verify all registration fields.
        val u = captor.value
        u.loginInfo must be equalTo Seq(loginInfo)
        u.name must beSome(name)
        u.email must beSome(email)
        u.avatarURL must beSome(avatarURL)
        u.registration.lang must be equalTo lang
        u.registration.ip must be equalTo "127.0.0.1"
        u.registration.host must beSome(host)
        u.registration.userAgent must beSome(userAgent)
        u.registration.activated must beFalse
        u.registration.dateTime must be equalTo clock.instant()
        u.settings.lang must be equalTo lang
        u.settings.timeZone must beNone
      }
    }
  }

  /**
   * The context.
   */
  trait Context extends ApiContext[SignUpController] with AuthContext {

    /**
     * The user name.
     */
    val name = "John Doe"

    /**
     * The user password.
     */
    val password = "password"

    /**
     * A auth token ID.
     */
    val tokenID = UUID.randomUUID()

    /**
     * An auth token.
     */
    val authToken = AuthToken(tokenID, user.id, clock.instant())

    /**
     * The user service mock.
     */
    val userService = mock[UserService].smart

    /**
     * The auth info repository mock.
     */
    val authInfoRepository = mock[AuthInfoRepository].smart

    /**
     * The auth token service mock.
     */
    val authTokenService = mock[AuthTokenService].smart

    /**
     * The avatar service mock.
     */
    val avatarService = mock[AvatarService].smart

    /**
     * The password hasher registry mock.
     */
    val passwordHasherRegistry = mock[PasswordHasherRegistry].smart

    /**
     * The mailer client mock.
     */
    val mailerClient = mock[MailerClient].smart

    /**
     * The fake module used to instantiate the application.
     */
    override def fakeModule: ScalaModule = new ScalaModule {
      def configure(): Unit = {
        bind[UserService].toInstance(userService)
        bind[AuthInfoRepository].toInstance(authInfoRepository)
        bind[AuthTokenService].toInstance(authTokenService)
        bind[AvatarService].toInstance(avatarService)
        bind[PasswordHasherRegistry].toInstance(passwordHasherRegistry)
        bind[MailerClient].toInstance(mailerClient)
        bind[Clock].toInstance(clock)
      }
    }

    /**
     * The application builder.
     */
    override def applicationBuilder: GuiceApplicationBuilder =
      super.applicationBuilder
        .configure("ui.dev.url" -> "test")
        .configure("play.i18n.langs" -> Seq(lang.code))
  }
}
| akkie/silhouette-play-react-seed | app-auth/src/test/scala/auth/controllers/SignUpControllerSpec.scala | Scala | mit | 7,974 |
/*
* ******************************************************************************
* * Copyright (C) 2013 Christopher Harris (Itszuvalex)
* * Itszuvalex@gmail.com
* *
* * This program is free software; you can redistribute it and/or
* * modify it under the terms of the GNU General Public License
* * as published by the Free Software Foundation; either version 2
* * of the License, or (at your option) any later version.
* *
* * This program is distributed in the hope that it will be useful,
* * but WITHOUT ANY WARRANTY; without even the implied warranty of
* * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* * GNU General Public License for more details.
* *
* * You should have received a copy of the GNU General Public License
* * along with this program; if not, write to the Free Software
* * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
* *****************************************************************************
*/
package com.itszuvalex.femtocraft.industry.tiles
import java.util
import com.itszuvalex.femtocraft.api.core.{Configurable, Saveable}
import com.itszuvalex.femtocraft.api.industry.IAssemblerSchematic
import com.itszuvalex.femtocraft.api.power.PowerContainer
import com.itszuvalex.femtocraft.api.{AssemblerRecipe, EnumTechLevel}
import com.itszuvalex.femtocraft.core.tiles.TileEntityBase
import com.itszuvalex.femtocraft.core.traits.tile.{Inventory, MassTank}
import com.itszuvalex.femtocraft.industry.traits.IndustryBehavior
import com.itszuvalex.femtocraft.power.traits.PowerConsumer
import com.itszuvalex.femtocraft.utils.BaseInventory
import com.itszuvalex.femtocraft.{Femtocraft, FemtocraftGuiConstants}
import net.minecraft.item.ItemStack
import net.minecraftforge.fluids._
/**
 * Tuning constants for [[TileEntityEncoder]]; each is exposed to the config
 * system via its `@Configurable` annotation.
 */
object TileEntityEncoder {
  @Configurable(comment = "Power tech level.") val TECH_LEVEL = EnumTechLevel.MICRO
  @Configurable(comment = "Mass storage maximum.") val MASS_STORAGE = 1000
  @Configurable(comment = "Power storage maximum.") val POWER_STORAGE = 1200
  @Configurable(comment = "Power per item to begin processing.") val POWER_TO_ENCODE = 100
  @Configurable(comment = "Ticks required to process.") val TICKS_TO_ENCODE = 200
}
/**
 * Tile entity that encodes assembler recipes onto schematic items.
 *
 * Slot layout (derived from the code below): slots 0-8 form the recipe
 * grid, slot 9 shows a preview of the recipe output, slot 10 takes the
 * blank schematic, slot 11 receives the encoded schematic.
 */
@Configurable class TileEntityEncoder extends TileEntityBase with IndustryBehavior with Inventory with MassTank with PowerConsumer {
  // Persistent work-cycle state (saved via @Saveable).
  @Saveable var timeWorked = 0
  @Saveable private var encodingRecipe : AssemblerRecipe = null
  @Saveable private var encodingSchematic: ItemStack = null

  override def defaultInventory = new BaseInventory(12)
  override def defaultTank = new FluidTank(TileEntityEncoder.MASS_STORAGE)
  override def defaultContainer = new PowerContainer(TileEntityEncoder.TECH_LEVEL, TileEntityEncoder.POWER_STORAGE)
  override def hasGUI = true
  override def getGuiID = FemtocraftGuiConstants.EncoderGuiID
  // Slot 9 only previews the recipe output, so never return its contents on close.
  override def getStackInSlotOnClosing(i: Int) = if (i == 9) null else super.getStackInSlotOnClosing(i)
  override def getInventoryName = Femtocraft.ID.toLowerCase + "." + "InventoryEncoder"
  override def hasCustomInventoryName = false
  // Slot 9 is read-only; slot 10 only accepts schematic items.
  override def isItemValidForSlot(i: Int, itemstack: ItemStack): Boolean = i match {
    case 9 => false
    case 10 => itemstack.getItem.isInstanceOf[IAssemblerSchematic]
    case _ => super.isItemValidForSlot(i, itemstack)
  }
  override def isWorking = encodingRecipe != null
  // Refresh the output preview (slot 9) whenever the inventory changes.
  override def markDirty() {
    val recipe = getRecipe
    inventory.setInventorySlotContents(9, if (recipe == null) null else recipe.output.copy)
  }
  // Looks up the recipe matching slots 0-8; returns null unless the owner
  // has researched the recipe's technology.
  private def getRecipe: AssemblerRecipe = {
    val recipe = Femtocraft.recipeManager.assemblyRecipes.getRecipe(util.Arrays.copyOfRange(inventory.getInventory, 0, 9))
    if (recipe == null) {
      return null
    }
    val researched = Femtocraft.researchManager.hasPlayerResearchedTechnology(getOwner, recipe.tech)
    if (researched) recipe else null
  }
  // Progress scaled into [0, i] for GUI progress bars.
  def getProgressScaled(i: Int) = (timeWorked * i) / TileEntityEncoder.TICKS_TO_ENCODE
  def getMassAmount = massTank.getFluidAmount
  // Force the tank contents to `amount`, preserving the fluid type if present.
  def setFluidAmount(amount: Int) {
    if (massTank.getFluid != null) {
      massTank.setFluid(new FluidStack(massTank.getFluid.fluidID, amount))
    }
    else {
      massTank.setFluid(new FluidStack(Femtocraft.fluidMass, amount))
    }
  }
  def clearFluid() {
    massTank.setFluid(null)
  }
  def getMassCapacity = massTank.getCapacity
  // Work may start when: a researched recipe matches, the output slot (11) is
  // free, a schematic sits in slot 10, and enough power and mass are stored.
  override protected def canStartWork = {
    val recipe = getRecipe
    recipe != null && getStackInSlot(11) == null && getStackInSlot(10) != null && getCurrentPower >= TileEntityEncoder.POWER_TO_ENCODE && massTank.getFluidAmount >= getStackInSlot(10).getItem.asInstanceOf[IAssemblerSchematic].massRequired(recipe) && getStackInSlot(10).getItem.isInstanceOf[IAssemblerSchematic]
  }
  // Consume one schematic plus the required power and mass, then begin timing.
  override protected def startWork() {
    encodingSchematic = decrStackSize(10, 1)
    encodingRecipe = getRecipe
    timeWorked = 0
    consume(TileEntityEncoder.POWER_TO_ENCODE)
    massTank.drain(encodingSchematic.getItem.asInstanceOf[IAssemblerSchematic].massRequired(encodingRecipe), true)
  }
  override protected def continueWork() {
    timeWorked += 1
  }
  override protected def canFinishWork = timeWorked >= TileEntityEncoder.TICKS_TO_ENCODE
  // Write the recipe onto the schematic and deposit it in the output slot (11).
  override protected def finishWork() {
    timeWorked = 0
    encodingSchematic.getItem.asInstanceOf[IAssemblerSchematic].setRecipe(encodingSchematic, encodingRecipe)
    setInventorySlotContents(11, encodingSchematic)
    encodingSchematic = null
    encodingRecipe = null
  }
}
| Itszuvalex/Femtocraft-alpha-1 | src/main/java/com/itszuvalex/femtocraft/industry/tiles/TileEntityEncoder.scala | Scala | gpl-2.0 | 5,588 |
/*
* Copyright 2013-2015 Websudos, Limited.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Explicit consent must be obtained from the copyright owner, Websudos Limited before any redistribution is made.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.websudos.phantom.builder.serializers
import com.websudos.phantom.builder.QueryBuilder.Utils
import com.websudos.phantom.builder.query.CQLQuery
import com.websudos.phantom.builder.syntax.CQLSyntax
/**
 * Serializers for the comparison and index operators that may appear
 * inside CQL WHERE clauses.
 */
private[builder] class IndexModifiers extends BaseModifiers {

  /** Serializes an equality clause, e.g. `column = value`. */
  def eqs(column: String, value: String): CQLQuery =
    modifier(column, CQLSyntax.Operators.eqs, value)

  /** Symbolic alias producing the same equality clause as `eqs`. */
  def ==(column: String, value: String): CQLQuery =
    modifier(column, CQLSyntax.Operators.eqs, value)

  /** Serializes a strict "less than" clause, e.g. `column < value`. */
  def lt(column: String, value: String): CQLQuery =
    modifier(column, CQLSyntax.Operators.lt, value)

  /** Serializes a "less than or equal" clause, e.g. `column <= value`. */
  def lte(column: String, value: String): CQLQuery =
    modifier(column, CQLSyntax.Operators.lte, value)

  /** Serializes a strict "greater than" clause, e.g. `column > value`. */
  def gt(column: String, value: String): CQLQuery =
    modifier(column, CQLSyntax.Operators.gt, value)

  /** Serializes a "greater than or equal" clause, e.g. `column >= value`. */
  def gte(column: String, value: String): CQLQuery =
    modifier(column, CQLSyntax.Operators.gte, value)

  /** Serializes an IN clause from varargs values. */
  def in(column: String, values: String*): CQLQuery =
    modifier(column, CQLSyntax.Operators.in, Utils.join(values))

  /** Serializes an IN clause from a list of values. */
  def in(column: String, values: List[String]): CQLQuery =
    modifier(column, CQLSyntax.Operators.in, Utils.join(values))

  /** Serializes a function call with the given parameter list. */
  def fcall(name: String, params: String*): CQLQuery =
    CQLQuery(name).append(Utils.join(params))

  /** Wraps the given name in a `token(...)` call and returns the raw query string. */
  def token(name: String): String =
    CQLQuery(CQLSyntax.token).wrap(name).queryString

  /** Appends a WHERE keyword and the given condition to the query. */
  def where(qb: CQLQuery, condition: CQLQuery): CQLQuery =
    Utils.concat(qb, CQLSyntax.where, condition)

  /** Appends an AND keyword and the given clause to the query. */
  def and(qb: CQLQuery, clause: CQLQuery): CQLQuery =
    Utils.concat(qb, CQLSyntax.and, clause)

  /**
   * Creates a CONTAINS where clause applicable to SET columns.
   * @param column The name of the column in which to look for the value.
   * @param value The CQL serialized value of the element to look for in the CQL SET.
   * @return A CQL Query wrapping the contains clause.
   */
  def contains(column: String, value: String): CQLQuery =
    modifier(column, CQLSyntax.Operators.contains, value)

  /**
   * Creates a CONTAINS KEY where clause applicable to Map columns.
   * @param column The name of the column in which to look for the value.
   * @param value The CQL serialized value of the element to look for in the CQL SET.
   * @return A CQL Query wrapping the contains clause.
   */
  def containsKey(column: String, value: String): CQLQuery =
    modifier(column, CQLSyntax.Operators.containsKey, value)
}
| analytically/phantom | phantom-dsl/src/main/scala/com/websudos/phantom/builder/serializers/IndexModifiers.scala | Scala | bsd-2-clause | 3,960 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.internal.broker
import akka.persistence.query.Offset
import akka.stream.scaladsl.Source
import com.lightbend.lagom.scaladsl.api.broker.Subscriber
import com.lightbend.lagom.scaladsl.api.broker.Topic
import com.lightbend.lagom.scaladsl.persistence.AggregateEvent
import com.lightbend.lagom.scaladsl.persistence.AggregateEventTag
import scala.collection.immutable
/**
 * Base trait for topic implementations produced inside a service.
 *
 * Neither the topic id nor subscription is available on the producer side,
 * so both members deliberately fail fast with UnsupportedOperationException.
 */
trait InternalTopic[Message] extends Topic[Message] {
  final override def topicId: Topic.TopicId =
    throw new UnsupportedOperationException("Topic#topicId is not permitted in the service's topic implementation")

  final override def subscribe: Subscriber[Message] =
    throw new UnsupportedOperationException("Topic#subscribe is not permitted in the service's topic implementation.")
}
/**
 * Topic implementation backed by a tagged, offset-tracked read-side stream.
 *
 * @param tags the aggregate event tags whose event streams feed this topic
 * @param readSideStream builds the (message, offset) stream for one tag,
 *                       resuming from the given offset
 */
final class TaggedOffsetTopicProducer[Message, Event <: AggregateEvent[Event]](
    val tags: immutable.Seq[AggregateEventTag[Event]],
    val readSideStream: (AggregateEventTag[Event], Offset) => Source[(Message, Offset), _]
) extends InternalTopic[Message]
| lagom/lagom | service/scaladsl/broker/src/main/scala/com/lightbend/internal/broker/TopicProducers.scala | Scala | apache-2.0 | 1,107 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.adaptive
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{Attribute, Expression}
import org.apache.spark.sql.catalyst.plans.physical.{Partitioning, UnknownPartitioning}
import org.apache.spark.sql.execution._
import org.apache.spark.sql.execution.exchange.{ReusedExchangeExec, ShuffleExchangeLike}
import org.apache.spark.sql.execution.metric.{SQLMetric, SQLMetrics}
import org.apache.spark.sql.vectorized.ColumnarBatch
/**
* A wrapper of shuffle query stage, which follows the given partition arrangement.
*
* @param child It is usually `ShuffleQueryStageExec`, but can be the shuffle exchange
* node during canonicalization.
* @param partitionSpecs The partition specs that defines the arrangement.
*/
case class CustomShuffleReaderExec private(
child: SparkPlan,
partitionSpecs: Seq[ShufflePartitionSpec]) extends UnaryExecNode {
// If this reader is to read shuffle files locally, then all partition specs should be
// `PartialMapperPartitionSpec`.
if (partitionSpecs.exists(_.isInstanceOf[PartialMapperPartitionSpec])) {
assert(partitionSpecs.forall(_.isInstanceOf[PartialMapperPartitionSpec]))
}
// Columnar support is inherited unchanged from the wrapped stage.
override def supportsColumnar: Boolean = child.supportsColumnar

override def output: Seq[Attribute] = child.output

override lazy val outputPartitioning: Partitioning = {
  // If it is a local shuffle reader with one mapper per task, then the output partitioning is
  // the same as the plan before shuffle.
  // TODO this check is based on assumptions of callers' behavior but is sufficient for now.
  if (partitionSpecs.nonEmpty &&
      partitionSpecs.forall(_.isInstanceOf[PartialMapperPartitionSpec]) &&
      partitionSpecs.map(_.asInstanceOf[PartialMapperPartitionSpec].mapIndex).toSet.size ==
        partitionSpecs.length) {
    child match {
      case ShuffleQueryStageExec(_, s: ShuffleExchangeLike, _) =>
        s.child.outputPartitioning
      case ShuffleQueryStageExec(_, r @ ReusedExchangeExec(_, s: ShuffleExchangeLike), _) =>
        // Re-map attribute references so the partitioning expression points at
        // the reused exchange's output attributes.
        s.child.outputPartitioning match {
          case e: Expression => r.updateAttr(e).asInstanceOf[Partitioning]
          case other => other
        }
      case _ =>
        throw new IllegalStateException("operating on canonicalization plan")
    }
  } else {
    // Otherwise the arrangement does not match any known partitioning scheme.
    UnknownPartitioning(partitionSpecs.length)
  }
}
override def stringArgs: Iterator[Any] = {
val desc = if (isLocalReader) {
"local"
} else if (hasCoalescedPartition && hasSkewedPartition) {
"coalesced and skewed"
} else if (hasCoalescedPartition) {
"coalesced"
} else if (hasSkewedPartition) {
"skewed"
} else {
""
}
Iterator(desc)
}
def hasCoalescedPartition: Boolean =
partitionSpecs.exists(_.isInstanceOf[CoalescedPartitionSpec])
def hasSkewedPartition: Boolean =
partitionSpecs.exists(_.isInstanceOf[PartialReducerPartitionSpec])
def isLocalReader: Boolean =
partitionSpecs.exists(_.isInstanceOf[PartialMapperPartitionSpec])
private def shuffleStage = child match {
case stage: ShuffleQueryStageExec => Some(stage)
case _ => None
}
@transient private lazy val partitionDataSizes: Option[Seq[Long]] = {
if (partitionSpecs.nonEmpty && !isLocalReader && shuffleStage.get.mapStats.isDefined) {
Some(partitionSpecs.map {
case p: CoalescedPartitionSpec =>
assert(p.dataSize.isDefined)
p.dataSize.get
case p: PartialReducerPartitionSpec => p.dataSize
case p => throw new IllegalStateException(s"unexpected $p")
})
} else {
None
}
}
private def sendDriverMetrics(): Unit = {
val executionId = sparkContext.getLocalProperty(SQLExecution.EXECUTION_ID_KEY)
val driverAccumUpdates = ArrayBuffer.empty[(Long, Long)]
val numPartitionsMetric = metrics("numPartitions")
numPartitionsMetric.set(partitionSpecs.length)
driverAccumUpdates += (numPartitionsMetric.id -> partitionSpecs.length.toLong)
if (hasSkewedPartition) {
val skewedSpecs = partitionSpecs.collect {
case p: PartialReducerPartitionSpec => p
}
val skewedPartitions = metrics("numSkewedPartitions")
val skewedSplits = metrics("numSkewedSplits")
val numSkewedPartitions = skewedSpecs.map(_.reducerIndex).distinct.length
val numSplits = skewedSpecs.length
skewedPartitions.set(numSkewedPartitions)
driverAccumUpdates += (skewedPartitions.id -> numSkewedPartitions)
skewedSplits.set(numSplits)
driverAccumUpdates += (skewedSplits.id -> numSplits)
}
partitionDataSizes.foreach { dataSizes =>
val partitionDataSizeMetrics = metrics("partitionDataSize")
driverAccumUpdates ++= dataSizes.map(partitionDataSizeMetrics.id -> _)
// Set sum value to "partitionDataSize" metric.
partitionDataSizeMetrics.set(dataSizes.sum)
}
SQLMetrics.postDriverMetricsUpdatedByValue(sparkContext, executionId, driverAccumUpdates.toSeq)
}
@transient override lazy val metrics: Map[String, SQLMetric] = {
if (shuffleStage.isDefined) {
Map("numPartitions" -> SQLMetrics.createMetric(sparkContext, "number of partitions")) ++ {
if (isLocalReader) {
// We split the mapper partition evenly when creating local shuffle reader, so no
// data size info is available.
Map.empty
} else {
Map("partitionDataSize" ->
SQLMetrics.createSizeMetric(sparkContext, "partition data size"))
}
} ++ {
if (hasSkewedPartition) {
Map("numSkewedPartitions" ->
SQLMetrics.createMetric(sparkContext, "number of skewed partitions"),
"numSkewedSplits" ->
SQLMetrics.createMetric(sparkContext, "number of skewed partition splits"))
} else {
Map.empty
}
}
} else {
// It's a canonicalized plan, no need to report metrics.
Map.empty
}
}
private lazy val shuffleRDD: RDD[_] = {
shuffleStage match {
case Some(stage) =>
sendDriverMetrics()
stage.shuffle.getShuffleRDD(partitionSpecs.toArray)
case _ =>
throw new IllegalStateException("operating on canonicalized plan")
}
}
override protected def doExecute(): RDD[InternalRow] = {
shuffleRDD.asInstanceOf[RDD[InternalRow]]
}
override protected def doExecuteColumnar(): RDD[ColumnarBatch] = {
shuffleRDD.asInstanceOf[RDD[ColumnarBatch]]
}
override protected def withNewChildInternal(newChild: SparkPlan): CustomShuffleReaderExec =
copy(child = newChild)
}
| wangmiao1981/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/adaptive/CustomShuffleReaderExec.scala | Scala | apache-2.0 | 7,597 |
// Copyright: 2010 - 2017 https://github.com/ensime/ensime-server/graphs/contributors
// License: http://www.gnu.org/licenses/lgpl-3.0.en.html
package spray.json
import org.scalatest._
import Matchers._
import JsWriter.ops._
import JsReader.ops._
// Round-trip tests for the built-in collection JsWriter/JsReader instances:
// each group serializes a collection with `.toJson` and parses it back with
// `.as[...]`, checking both directions independently.
class CollectionFormatsSpec extends WordSpec {
"The listFormat" should {
val list = List(1, 2, 3)
val json = JsArray(JsNumber(1), JsNumber(2), JsNumber(3))
"convert a List[Int] to a JsArray of JsNumbers" in {
list.toJson shouldEqual json
}
"convert a JsArray of JsNumbers to a List[Int]" in {
// Parsing returns an Either-style result, hence the Right wrapper.
json.as[List[Int]] shouldEqual Right(list)
}
}
/*
// NOTE(review): the arrayFormat round-trip is currently disabled; left here
// intentionally as commented-out code.
"The arrayFormat" should {
import java.util.Arrays
val array = Array(1, 2, 3)
val json = JsArray(JsNumber(1), JsNumber(2), JsNumber(3))
"convert an Array[Int] to a JsArray of JsNumbers" in {
array.toJson shouldEqual json
}
"convert a JsArray of JsNumbers to an Array[Int]" in {
Arrays.equals(json.as[Array[Int]], array) shouldBe true
}
}*/
"The mapFormat" should {
val map = Map("a" -> 1, "b" -> 2, "c" -> 3)
val json =
JsObject("a" -> JsNumber(1), "b" -> JsNumber(2), "c" -> JsNumber(3))
"convert a Map[String, Long] to a JsObject" in {
map.toJson shouldEqual json
}
"be able to convert a JsObject to a Map[String, Long]" in {
json.as[Map[String, Long]] shouldEqual Right(map)
}
}
"The immutableSetFormat" should {
val set = Set(1, 2, 3)
val json = JsArray(JsNumber(1), JsNumber(2), JsNumber(3))
"convert a Set[Int] to a JsArray of JsNumbers" in {
set.toJson shouldEqual json
}
"convert a JsArray of JsNumbers to a Set[Int]" in {
json.as[Set[Int]] shouldEqual Right(set)
}
}
"The indexedSeqFormat" should {
val seq = collection.IndexedSeq(1, 2, 3)
val json = JsArray(JsNumber(1), JsNumber(2), JsNumber(3))
"convert a Set[Int] to a JsArray of JsNumbers" in {
seq.toJson shouldEqual json
}
"convert a JsArray of JsNumbers to a IndexedSeq[Int]" in {
json.as[collection.IndexedSeq[Int]] shouldEqual Right(seq)
}
}
}
| ensime/ensime-server | json/src/test/scala/spray/json/CollectionFormatsSpec.scala | Scala | gpl-3.0 | 2,128 |
package objsets
import TweetReader._
/**
* This represents a set of objects of type `Tweet` in the form of a binary search
* tree. Every branch in the tree has two children (two `TweetSet`s). There is an
* invariant which always holds: for every branch `b`, all elements in the left
* subtree are smaller than the tweet at `b`. The elements in the right subtree are
* larger.
*
* Note that the above structure requires us to be able to compare two tweets (we
* need to be able to say which of two tweets is larger, or if they are equal). In
* this implementation, the equality / order of tweets is based on the tweet's text
* (see `def incl`). Hence, a `TweetSet` could not contain two tweets with the same
* text from different users.
*
*
* The advantage of representing sets as binary search trees is that the elements
* of the set can be found quickly. If you want to learn more you can take a look
* at the Wikipedia page [1], but this is not necessary in order to solve this
* assignment.
*
* [1] http://en.wikipedia.org/wiki/Binary_search_tree
*/
abstract class TweetSet {
/**
* This method takes a predicate and returns a subset of all the elements
* in the original set for which the predicate is true.
*
* Question: Can we implement this method here, or should it remain abstract
* and be implemented in the subclasses?
*/
def filter(p: Tweet => Boolean): TweetSet = filter0(p, EmptySet)
/**
* This is a helper method for `filter` that propagates the accumulated tweets.
*/
def filter0(p: Tweet => Boolean, acc: TweetSet): TweetSet
/**
* Returns a new `TweetSet` that is the union of `TweetSet`s `this` and `that`.
*
* Question: Should we implement this method here, or should it remain abstract
* and be implemented in the subclasses?
*/
/*
// Alternative formulation kept for reference (inserts elements of `that`
// into `this` one by one instead of the other way around).
def union(that: TweetSet): TweetSet =
{
if (this.isEmpty) that
else if (that.isEmpty) this
else {
this.incl(that.head).union(that.tail)
}
}
*/
// Recurses once per element of `this`, moving each head into `that`.
// NOTE(review): recursion depth grows with the size of `this`; very large
// sets could overflow the stack — confirm expected input sizes.
def union(that: TweetSet): TweetSet =
{
if (this.isEmpty) that
else if (that.isEmpty) this
else {
this.tail union that.incl(this.head)
}
}
/**
* Returns a list containing all tweets of this set, sorted by retweet count
* in ascending order. In other words, the head of the resulting list should
* have the lowest retweet count.
*
* Hint: the method `remove` on TweetSet will be very useful.
* Question: Should we implement this method here, or should it remain abstract
* and be implemented in the subclasses?
*/
// Repeatedly extracts the minimum-retweet tweet (selection sort over the set).
def ascendingByRetweet: Trending =
{
if (isEmpty) {
// No `new`: EmptyTrending is a singleton object.
EmptyTrending
} else {
val min = findMin
// `new` is required here: NonEmptyTrending is a class.
new NonEmptyTrending(min, remove(min).ascendingByRetweet)
}
}
// The following methods are provided for you, and do not have to be changed
// -------------------------------------------------------------------------
/**
* Returns a new `TweetSet` which contains all elements of this set, and the
* the new element `tweet` in case it does not already exist in this set.
*
* If `this.contains(tweet)`, the current set is returned.
*/
def incl(x: Tweet): TweetSet
/**
* Tests if `tweet` exists in this `TweetSet`.
*/
def contains(x: Tweet): Boolean
def isEmpty: Boolean
def head: Tweet
def tail: TweetSet
/**
* This method takes a function and applies it to every element in the set.
*/
def foreach(f: Tweet => Unit): Unit = {
if (!isEmpty) {
f(head)
tail.foreach(f)
}
}
// Renders every tweet on its own line using the separator literal below.
override def toString = {
val sb = new StringBuilder
foreach { t => sb.append(t); sb.append("\\n") }
sb.toString
}
/**
* Returns a new `TweetSet` which excludes `tweet`.
*/
def remove(tw: Tweet): TweetSet
// Helper for findMin: threads the smallest tweet seen so far through the
// traversal, comparing by retweet count.
def findMin0(curr: Tweet): Tweet =
if (isEmpty) curr
else if (head.retweets < curr.retweets) tail.findMin0(head)
else tail.findMin0(curr)
// Minimum by retweet count; only valid on a non-empty set (uses `head`).
def findMin: Tweet =
tail.findMin0(head)
// -------------------------------------------------------------------------
}
| PiSurQuatre/fp-scala-public | objsets/src/main/scala/objsets/TweetSet.scala | Scala | mit | 4,168 |
package org.jetbrains.plugins.scala
package lang
package refactoring
package namesSuggester
import java.{util => ju}
import com.intellij.psi.codeStyle.SuggestedNameInfo
import com.intellij.psi.{PsiElement, PsiNamedElement}
import com.intellij.refactoring.rename.NameSuggestionProvider
import org.jetbrains.plugins.scala.extensions.PsiNamedElementExt
import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScTypeElement
import org.jetbrains.plugins.scala.lang.psi.api.expr.{ScExpression, ScNewTemplateDefinition}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScTypedDefinition
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScTemplateDefinition
import org.jetbrains.plugins.scala.lang.psi.types.result.Typeable
import scala.collection.JavaConverters._
/**
* User: Alexander Podkhalyuzin
* Date: 23.11.2008
*/
// Rename-refactoring name provider for Scala PSI elements. Dispatches on the
// element kind; the case order matters because the types are related (e.g. a
// ScNewTemplateDefinition must be matched before the general ScTemplateDefinition).
class ScalaNameSuggestionProvider extends AbstractNameSuggestionProvider {
import NameSuggester.suggestNames
import ScalaNameSuggestionProvider._
override protected def suggestedNames(element: PsiElement): Seq[String] = element match {
case definition: ScNewTemplateDefinition => suggestNames(definition)
case definition: ScTemplateDefinition => Seq(definition.name)
case typed: ScTypedDefinition =>
// Keep the current name first, then add type-derived suggestions.
typed.name +: suggestedNamesByType(typed)
case named: PsiNamedElement => Seq(named.name)
case expr: ScExpression => suggestNames(expr)
case typeElement: ScTypeElement => suggestedNamesByType(typeElement)
case _ => Seq.empty
}
}
object ScalaNameSuggestionProvider {

  import NameSuggester.suggestNamesByType

  /** Name suggestions derived from the element's inferred type; empty when
    * the type cannot be computed.
    */
  private def suggestedNamesByType(typeable: Typeable): Seq[String] =
    typeable.`type`().toOption match {
      case Some(scType) => suggestNamesByType(scType)
      case None         => Seq.empty
    }
}
// Template base: adapts the Scala-side Seq[String] suggestions to the
// platform's NameSuggestionProvider contract (mutable java.util.Set + info).
abstract class AbstractNameSuggestionProvider extends NameSuggestionProvider {
override final def getSuggestedNames(element: PsiElement, context: PsiElement, result: ju.Set[String]): SuggestedNameInfo = {
val names = suggestedNames(element)
// The platform collects suggestions via the mutable `result` set.
result.addAll(names.asJavaCollection)
new SuggestedNameInfo(names.toArray) {}
}
// Subclasses supply the actual suggestions for the given element.
protected def suggestedNames(element: PsiElement): Seq[String]
} | triplequote/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/refactoring/namesSuggester/ScalaNameSuggestionProvider.scala | Scala | apache-2.0 | 2,164 |
package reactivemongo
import java.util.UUID
import reactivemongo.core.errors.GenericDriverException
import reactivemongo.api.{
DistributedSession,
NodeSetSession,
Session,
SessionTransaction,
WriteConcern
}
// Stateful specification: the same two session instances are mutated across
// examples, so the examples MUST run in declaration order (`sequential`) and
// later examples depend on earlier ones (`stopOnFail`).
final class SessionSpec extends org.specs2.mutable.Specification {
"Session".title
sequential
stopOnFail
section("unit")
"NodeSet session" should {
// A single logical-session id shared by both variants under test.
val id = UUID.randomUUID()
val session1 = new NodeSetSession(lsid = id)
val session2 = new DistributedSession(lsid = id)
"be created (without transaction)" >> {
def spec(s: Session) = {
s.lsid must_=== id and {
s.causalConsistency must beTrue
} and {
s.transaction must beFailedTry[SessionTransaction]
} and {
s.operationTime must beNone
}
}
"when not distributed" in spec(session1)
"when distributed" in spec(session2)
}
// Anonymous block scoping the shared `specs` helper.
{
def specs(s: Session) = {
"without cluster time" in {
s.update(1L, None, None)
s.operationTime must beSome(1L)
}
"with cluster time" in {
// The operation time advances to the given value; cluster time is passed through.
s.update(2L, Some(3L), None)
s.operationTime must beSome(2L)
}
}
"be updated when not distributed" >> specs(session1)
"be updated when distributed" >> specs(session2)
}
"not end or flag transaction before it's started" >> {
def spec(s: Session) = s.endTransaction() must beNone and {
s.transactionToFlag() must beFalse
}
"when not distributed" in spec(session1)
"when distributed" in spec(session2)
}
"start transaction" >> {
"if none is already started (when not distributed)" in {
session1.startTransaction(WriteConcern.Default, None).
aka("started tx") must beSuccessfulTry[(SessionTransaction, Boolean)].like {
case (SessionTransaction(1L, Some(wc), None, false, None), true) =>
wc must_=== WriteConcern.Default
}
}
"when distributed" >> {
// A distributed session cannot start a transaction without a pinned node.
"with failure without pinned node" in {
session2.startTransaction(WriteConcern.Default, None).
aka("started tx") must beFailedTry[(SessionTransaction, Boolean)].
withThrowable[GenericDriverException](
".*Cannot start a distributed transaction without a pinned node.*")
}
"successfully with a pinned node if none already started" in {
session2.startTransaction(WriteConcern.Default, Some("pinnedNode")).
aka("started tx") must beSuccessfulTry[(SessionTransaction, Boolean)].like {
case (SessionTransaction(
1L, Some(wc), Some("pinnedNode"), false, None), true) =>
wc must_=== WriteConcern.Default
}
}
}
}
// Starting again while a tx is active returns the existing one (flag = false).
"not start transaction if already started" >> {
"when not distributed" in {
session1.startTransaction(WriteConcern.Default, None).
aka("start tx") must beSuccessfulTry[(SessionTransaction, Boolean)].
like {
case (SessionTransaction(
1L, Some(_), None, false, None),
startedNewTx) => startedNewTx must beFalse
}
}
"when distributed" in {
session2.startTransaction(WriteConcern.Default, Some("pinnedNode")).
aka("start tx") must beSuccessfulTry[(SessionTransaction, Boolean)].
like {
case (SessionTransaction(
1L, Some(_), Some("pinnedNode"), false, None), startedNewTx) =>
startedNewTx must beFalse
}
}
}
// First call returns the previous flag state (false), second returns true.
"flag transaction to start" >> {
def spec(s: Session) = s.transactionToFlag() must beFalse and {
s.transactionToFlag() must beTrue
}
"when not distributed" in spec(session1)
"when distributed" in spec(session2)
}
"end transaction" in {
def spec(s: Session) = {
// Ending returns the (now flagged) transaction once, then None.
s.endTransaction() must beSome[SessionTransaction].like {
case SessionTransaction(1L, Some(_), _, true, None) =>
s.endTransaction() must beNone
}
}
"when not distributed" in spec(session1)
"when distributed" in spec(session2)
}
// The transaction number increments to 2 for the next transaction.
"start a second transaction" >> {
"with pinned node ignored when not distributed" in {
session1.startTransaction(WriteConcern.Default, Some("node")).
aka("started tx") must beSuccessfulTry[(SessionTransaction, Boolean)].like {
case (SessionTransaction(2L, Some(wc), None, false, None), true) =>
wc must_=== WriteConcern.Default
}
}
"when distributed" in {
session2.startTransaction(WriteConcern.Default, Some("node2")).
aka("started tx") must beSuccessfulTry[(SessionTransaction, Boolean)].like {
case (SessionTransaction(
2L, Some(wc), Some("node2"), false, None), true) =>
wc must_=== WriteConcern.Default
}
}
}
}
section("unit")
}
| ReactiveMongo/ReactiveMongo | driver/src/test/scala/SessionSpec.scala | Scala | apache-2.0 | 5,000 |
/*
* Copyright (c) 2011-2017 Interfaculty Department of Geoinformatics, University of
* Salzburg (Z_GIS) & Institute of Geological and Nuclear Sciences Limited (GNS Science)
* in the SMART Aquifer Characterisation (SAC) programme funded by the New Zealand
* Ministry of Business, Innovation and Employment (MBIE)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package models
import play.api.libs.json.{Format, JsPath, Reads, Writes}
import play.api.libs.functional.syntax.{unlift, _}
/**
* This class should be returned as JSON in case of Controller returning anything that is not OK() as HTTP-Response
* like InternalServerError() etc.
*/
final case class ErrorResult(message: String, details: Option[String])
/** companion for {{ErrorResult}} case class
* Implements a JSON reader and writer
* */
object ErrorResult {
// JSON reader: "message" is mandatory, "details" may be absent or null.
implicit val reads: Reads[ErrorResult] = (
(JsPath \\ "message").read[String] and
(JsPath \\ "details").readNullable[String]
) (ErrorResult.apply _)
// JSON writer: omits "details" entirely when it is None.
implicit val writes: Writes[ErrorResult] = (
(JsPath \\ "message").write[String] and
(JsPath \\ "details").writeNullable[String]
) (unlift(ErrorResult.unapply))
// Combined Format built from the reader/writer pair above.
val format: Format[ErrorResult] = Format(reads, writes)
}
| ZGIS/smart-csw-ingester | app/models/ErrorResult.scala | Scala | apache-2.0 | 1,752 |
/*
* Copyright 2010-2011 Vilius Normantas <code@norma.lt>
*
* This file is part of Crossbow library.
*
* Crossbow is free software: you can redistribute it and/or modify it under the terms of the GNU
* General Public License as published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* Crossbow is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
* even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with Crossbow. If not,
* see <http://www.gnu.org/licenses/>.
*/
package lt.norma.crossbow.indicators
import ImplicitValueConverter._
/** Calculates normalized value of the indicator. If the first value of the target indicator is 0,
* `Normalized` will always result to `NaN` or `Infinity` values.
* {{{ Normalize = (Target[i] / Target[0] - 1) * 100 }}}*/
class Normalize(target: Indicator[Double]) extends FunctionalIndicator[Double] {
def name = "Normalize(" + target.name + ")"
// Baseline: the first value ever produced by `target` (Target[0] in the formula).
private val target0 = new FirstValue(target)
// Both the target and its captured first value must be computed before this one.
def dependencies = Set(target, target0)
// Percentage change relative to the first value; undefined until both
// `target` and `target0` hold a value. NOTE(review): the Double result of the
// first branch appears to be lifted via the imported ImplicitValueConverter —
// confirm against FunctionalIndicator's `calculate` signature.
def calculate = (target(), target0()) match {
case (Some(t), Some(t0)) => (t / t0 - 1) * 100
case _ => None
}
}
| ViliusN/Crossbow | crossbow-core/src/lt/norma/crossbow/indicators/Normalize.scala | Scala | gpl-3.0 | 1,380 |
import sbt._
import sbt.Keys._
import sbt.Keys._
object IeslPluginLoader extends Build {

  /** Meta-build project: registers the IESL Nexus resolvers and the sbt
    * plugins (iesl-sbt-base, Play, native-packager) used by the main build.
    */
  lazy val root =
    Project(id = "plugins", base = file("."))
      .settings(
        resolvers += "IESL Public Releases" at "http://dev-iesl.cs.umass.edu/nexus/content/groups/public",
        resolvers += "IESL Public Snapshots" at "http://dev-iesl.cs.umass.edu/nexus/content/groups/public-snapshots",
        addSbtPlugin("edu.umass.cs.iesl" %% "iesl-sbt-base" % "latest.release"),
        addSbtPlugin("com.typesafe.play" %% "sbt-plugin" % "2.3.4"),
        addSbtPlugin("com.typesafe.sbt" % "sbt-native-packager" % "0.6.3")
      )
}
| iesl/iesl-formkit | project/project/Plugins.scala | Scala | apache-2.0 | 641 |
/*************************************************************************
* *
* This file is part of the 20n/act project. *
* 20n/act enables DNA prediction for synthetic biology/bioengineering. *
* Copyright (C) 2017 20n Labs, Inc. *
* *
* Please direct all queries to act@20n.com. *
* *
* This program is free software: you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation, either version 3 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program. If not, see <http://www.gnu.org/licenses/>. *
* *
*************************************************************************/
package act.shared
import act.shared.ChemicalSymbols.{Atom, Br, C, Cl, F, H, I, MonoIsotopicMass, N, O, P, S}
import act.shared.ChemicalSymbols.Helpers.computeMassFromAtomicFormula
import java.io.PrintWriter
object EnumChemFormulae {
// Default upper bound on the monoisotopic mass of enumerated molecules.
val defaultMaxMass = 1000.0 // Da
// Default per-element atom-count ceilings, encoded as a formula string.
val defaultMaxFormula = "C30H100N30O30P30S30Cl5F5I5Br5"
// Parses a formula string into an Atom -> count map.
def toFormula(s: String) = MassToFormula.getFormulaMap(s)
val defaultMaxElementCounts = toFormula(defaultMaxFormula)
// CLI entry point: parses options, resolves bounds and output target, then
// runs the enumeration.
def main(args: Array[String]) {
val opts = List(optOutFile, optUptoFormula, optExcludeHalogens, optMaxMass)
val className = this.getClass.getCanonicalName
val cmdLine: CmdLineParser = new CmdLineParser(className, args, opts)
// Write to the given file if provided, otherwise to stdout.
val out: PrintWriter = {
if (cmdLine has optOutFile)
new PrintWriter(cmdLine get optOutFile)
else
new PrintWriter(System.out)
}
// Zeroes out the halogen counts so they are excluded from the enumeration.
def removeHalogens(f: Map[Atom, Int]) = f + (Cl->0, Br->0, I->0, F->0)
val maxCountsAll = toFormula(if (cmdLine has optUptoFormula) cmdLine.get(optUptoFormula) else defaultMaxFormula)
val maxCounts = if (cmdLine has optExcludeHalogens) removeHalogens(maxCountsAll) else maxCountsAll
val maxMass = new MonoIsotopicMass(if (cmdLine has optMaxMass) cmdLine.get(optMaxMass).toDouble else defaultMaxMass)
val doer = new EnumChemFormulae(maxCounts, maxMass)
doer.enumerate(out)
}
// --- CLI option descriptors ---
val optOutFile = new OptDesc(
param = "o",
longParam = "outjson",
name = "filename",
desc = "Output json of peaks, mz, rt, masses, formulae etc.",
isReqd = false, hasArg = true)
val optUptoFormula = new OptDesc(
param = "f",
longParam = "max-elem-counts",
name = "formula",
desc = s"Max atoms to enumerate, defaults to $defaultMaxFormula",
isReqd = false, hasArg = true)
val optMaxMass = new OptDesc(
param = "m",
longParam = "max-mass",
name = "Da",
desc = s"Upper bound on molecule's mass, defaults to $defaultMaxMass",
isReqd = false, hasArg = true)
val optExcludeHalogens = new OptDesc(
param = "x",
longParam = "exclude-halogens",
name = "",
desc = "By default CHNOPS+ClBrFI are enumerated over. If flag set, second set is excluded.",
isReqd = false, hasArg = false)
}
// Enumerates chemically stable formulae over CHNOPS + halogens, bounded by
// per-element atom counts (`maxElems`) and a total monoisotopic mass cap.
class EnumChemFormulae(maxElems: Map[Atom, Int] = EnumChemFormulae.defaultMaxElementCounts,
maxMass: MonoIsotopicMass = new MonoIsotopicMass(EnumChemFormulae.defaultMaxMass)) {
type ChemicalFormula = Map[Atom, Int]
// need an instance to be able to build the chemical formula string
val m2f = new MassToFormula
// only enumerate stable chemical formulae
val stableFormulae = new StableChemicalFormulae
// Streams a TSV of (mass, formula) rows to `out`, header first. Note that C
// and H start at 1 (every emitted formula has at least one of each) while the
// other elements may be absent. The writer is closed when enumeration finishes.
def enumerate(out: PrintWriter) = {
// print header
out.println(outformat(None))
// print enumeration
for (c <- 1 to maxElems(C);
h <- 1 to maxElems(H);
n <- 0 to maxElems(N);
o <- 0 to maxElems(O);
p <- 0 to maxElems(P);
s <- 0 to maxElems(S);
cl <- 0 to maxElems(Cl);
f <- 0 to maxElems(F);
b <- 0 to maxElems(Br);
i <- 0 to maxElems(I)
) {
val formula: ChemicalFormula = Map(C->c, H->h, N->n, O->o, P->p, S->s, Cl->cl, F->f, Br->b, I->i)
// Skip combinations that cannot form a stable molecule.
val isStableChemical = stableFormulae.isValid(formula)
if (isStableChemical) {
val mass = computeMassFromAtomicFormula(formula)
// Only emit formulae under the mass cap.
if (MonoIsotopicMass.isLt(mass, maxMass)) {
val formulaStr = m2f.buildChemFormulaA(formula)
out.println(outformat(Some((mass, formulaStr))))
}
}
}
// shall we close the file after running for a 100 years? Maybe, maybe not. :p
out close
}
// Formats one TSV row; None produces the header line instead of a data row.
def outformat(massFormula: Option[(MonoIsotopicMass, String)]): String = {
val row: List[String] = massFormula match {
case None => {
// output header
List("mass", "formula")
}
case Some(mF) => {
val (mass, formula) = mF
// Mass rendered with 6 decimal places.
List(mass.toString(6), formula)
}
}
row.mkString("\\t")
}
}
| 20n/act | reachables/src/main/scala/act/shared/EnumChemFormulae.scala | Scala | gpl-3.0 | 5,959 |
package com.hansod1.demo
import org.apache.spark.ml.feature._
import org.apache.spark.sql.SparkSession
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.types.{DataTypes, StructField, StructType}
import org.apache.spark.sql.functions.udf
// Spark ML pipeline: reads TSV pages, builds uni/bi/tri-gram token features,
// applies CountVectorizer -> IDF -> PCA, and saves the PCA features plus the
// learned vocabulary. Input/output locations come from Spark conf keys
// (spark.input.dir, spark.vocab.output.dir, spark.output.dir).
object SparkPCADemo {
def main(args: Array[String]): Unit = {
val spark = SparkSession.builder().appName("SparkPCADemo").getOrCreate()
import spark.implicits._
// Expected TSV columns, in order.
val fields = Array(
StructField("score",DataTypes.StringType),
StructField("uri", DataTypes.StringType),
StructField("title", DataTypes.StringType),
StructField("source_html",DataTypes.StringType)
)
val schema = StructType(fields)
val df = spark.read.schema(schema)
.option("delimiter","\\t")
.csv(spark.conf.get("spark.input.dir"))
.repartition(160)
// Whitespace tokenization of the raw HTML column.
val tokenizer = new Tokenizer()
.setInputCol("source_html")
.setOutputCol("source_words")
val tokenizedDF = tokenizer.transform(df)
val bigram = new NGram().setInputCol("source_words").setOutputCol("bigrams").setN(2)
val bigramDF = bigram.transform(tokenizedDF)
val trigram = new NGram().setInputCol("source_words").setOutputCol("trigrams").setN(3)
// Concatenates unigram, bigram and trigram arrays into one feature column.
val concat_array = udf((c1 : Seq[String], c2 : Seq[String], c3 : Seq[String]) => {
c1 ++ c2 ++ c3
})
val trigramDF = trigram.transform(bigramDF).withColumn("source_words_ngrams",concat_array($"source_words",$"bigrams",$"trigrams"))
// Term counts over the top-10000 n-grams seen in at least 2 documents.
val cvModel = new CountVectorizer()
.setInputCol("source_words_ngrams")
.setOutputCol("count_features")
.setVocabSize(10000)
.setMinDF(2)
.fit(trigramDF)
// Persist the learned vocabulary alongside the features.
spark.sparkContext.parallelize[String](cvModel.vocabulary).toDF("vocab").write.save(spark.conf.get("spark.vocab.output.dir"))
val countVectorizedDF = cvModel.transform(trigramDF)
val idf = new IDF()
.setInputCol("count_features")
.setOutputCol("idf_features")
.fit(countVectorizedDF)
val idfVectorizedDF = idf.transform(countVectorizedDF)
// Reduce the 10000-dim TF-IDF vectors to 1000 principal components.
val pca = new PCA()
.setInputCol("idf_features")
.setOutputCol("pca_features")
.setK(1000)
.fit(idfVectorizedDF)
val pcaDF = pca.transform(idfVectorizedDF)
pcaDF.write.save(spark.conf.get("spark.output.dir"))
}
}
| hansod1/spark-pca-demo | src/main/scala/com/hansod1/demo/SparkPCADemo.scala | Scala | apache-2.0 | 2,312 |
import scala.io.Source
// To manually test the converter against an individual file
class ManualSuite extends ConverterSuite {
  override val parseAsCompilationUnit: Boolean = true

  /** Fetches the source code at `url` (decoded as UTF-8) and registers a
    * conversion test for it. The underlying `Source` is closed once the
    * contents are read, so the stream is not leaked (the original left it open).
    */
  def checkUrl(url: String): Unit = {
    val source = Source.fromURL(url)("UTF-8")
    val code =
      try source.getLines().mkString("\\n")
      finally source.close()
    check(code)
  }

  /** Registers a test — named after the first non-empty line of `code` — that
    * converts the snippet to a scala.meta tree.
    */
  def check(code: String): Unit = {
    test(code.lines.filter(_.nonEmpty).take(1).mkString) {
      getConvertedMetaTree(code)
    }
  }

  // For example github raw url
  // checkUrl(
  //   "https://raw.githubusercontent.com/ornicar/lila/e5b897ada8f7212ed69de886fcbbc26ea52f3b82/modules/relation/src/main/RelationActor.scala")
  // or code
  // check("""
  //   |package object a {
  //   |  println(1)
  //   |}
  // """.stripMargin)
}
| xeno-by/paradise | tests/converters/src/test/scala/ManualSuite.scala | Scala | bsd-3-clause | 760 |
package com.shocktrade.common.forms
import com.shocktrade.common.forms.ResearchOptions.SortField
import scala.scalajs.js
/**
* Securities Research Options
* @author Lawrence Daniels <lawrence.daniels@gmail.com>
*/
// Scala.js facade object; every field is optional (js.undefined by default)
// so the client can populate only the filters the user actually set.
class ResearchOptions(var betaMax: js.UndefOr[Double] = js.undefined,
var betaMin: js.UndefOr[Double] = js.undefined,
var changeMax: js.UndefOr[Double] = js.undefined,
var changeMin: js.UndefOr[Double] = js.undefined,
var priceMax: js.UndefOr[Double] = js.undefined,
var priceMin: js.UndefOr[Double] = js.undefined,
var spreadMax: js.UndefOr[Double] = js.undefined,
var spreadMin: js.UndefOr[Double] = js.undefined,
var volumeMax: js.UndefOr[Double] = js.undefined,
var volumeMin: js.UndefOr[Double] = js.undefined,
var avgVolumeMax: js.UndefOr[Double] = js.undefined,
var avgVolumeMin: js.UndefOr[Double] = js.undefined,
// Sorting and paging controls; maxResults is the only field with a concrete default.
var sortFields: js.UndefOr[js.Array[SortField]] = js.undefined,
var sortBy: js.UndefOr[String] = js.undefined,
var reverse: js.UndefOr[Boolean] = js.undefined,
var maxResults: js.UndefOr[Int] = 25) extends js.Object
/**
* Research Options Companion
* @author Lawrence Daniels <lawrence.daniels@gmail.com>
*/
object ResearchOptions {
// One sort criterion: the field name and its direction encoded as an Int.
class SortField(val field: String, val direction: Int) extends js.Object
} | ldaniels528/shocktrade.js | app/shared/common/src/main/scala/com/shocktrade/common/forms/ResearchOptions.scala | Scala | apache-2.0 | 1,593 |
package org.template.recommendation
import org.apache.predictionio.controller.P2LAlgorithm
import org.apache.predictionio.controller.Params
import org.apache.predictionio.data.storage.BiMap
import org.apache.predictionio.data.storage.Event
import org.apache.predictionio.data.storage.Storage
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.mllib.recommendation.ALS
import org.apache.spark.mllib.recommendation.{Rating => MLlibRating}
import grizzled.slf4j.Logger
import scala.collection.mutable.PriorityQueue
import scala.concurrent.duration.Duration
import scala.concurrent.ExecutionContext.Implicits.global
// Hyper-parameters for the MLlib implicit-feedback ALS training below:
// rank = number of latent features, numIterations = ALS iterations,
// lambda = regularization, seed = optional RNG seed (defaults to nanoTime).
case class ALSAlgorithmParams(
rank: Int,
numIterations: Int,
lambda: Double,
seed: Option[Long]
) extends Params
// Trained model state: user/item latent-feature maps keyed by the Int indices
// produced during training, plus the String<->Int BiMaps needed to translate
// external IDs at query time. Item features are optional because an item may
// have had no training events.
class ALSModel(
val rank: Int,
val userFeatures: Map[Int, Array[Double]],
val productFeatures: Map[Int, (Item, Option[Array[Double]])],
val userStringIntMap: BiMap[String, Int],
val itemStringIntMap: BiMap[String, Int]
) extends Serializable {
// Reverse lookup (Int index -> item ID); lazy and non-serialized.
@transient lazy val itemIntStringMap = itemStringIntMap.inverse
// Compact summary (sizes plus a couple of sample entries per map).
override def toString = {
s" rank: ${rank}" +
s" userFeatures: [${userFeatures.size}]" +
s"(${userFeatures.take(2).toList}...)" +
s" productFeatures: [${productFeatures.size}]" +
s"(${productFeatures.take(2).toList}...)" +
s" userStringIntMap: [${userStringIntMap.size}]" +
s"(${userStringIntMap.take(2).toString}...)]" +
s" itemStringIntMap: [${itemStringIntMap.size}]" +
s"(${itemStringIntMap.take(2).toString}...)]"
}
}
/**
* Use ALS to build item x feature matrix
*/
class ALSAlgorithm(val ap: ALSAlgorithmParams)
extends P2LAlgorithm[PreparedData, ALSModel, Query, PredictedResult] {
@transient lazy val logger = Logger[this.type]
// Trains an implicit-feedback ALS model from aggregated view events.
// Pipeline: validate inputs -> map String IDs to Int indices -> aggregate
// (user, item) view counts into MLlib ratings -> ALS.trainImplicit -> package
// features and ID maps into an ALSModel.
def train(data: PreparedData): ALSModel = {
require(!data.viewEvents.take(1).isEmpty,
s"viewEvents in PreparedData cannot be empty." +
" Please check if DataSource generates TrainingData" +
" and Preprator generates PreparedData correctly.")
require(!data.users.take(1).isEmpty,
s"users in PreparedData cannot be empty." +
" Please check if DataSource generates TrainingData" +
" and Preprator generates PreparedData correctly.")
require(!data.items.take(1).isEmpty,
s"items in PreparedData cannot be empty." +
" Please check if DataSource generates TrainingData" +
" and Preprator generates PreparedData correctly.")
// create User and item's String ID to integer index BiMap
val userStringIntMap = BiMap.stringInt(data.users.keys)
val itemStringIntMap = BiMap.stringInt(data.items.keys)
val mllibRatings = data.viewEvents
.map { r =>
// Convert user and item String IDs to Int index for MLlib
val uindex = userStringIntMap.getOrElse(r.user, -1)
val iindex = itemStringIntMap.getOrElse(r.item, -1)
if (uindex == -1)
logger.info(s"Couldn't convert nonexistent user ID ${r.user}"
+ " to Int index.")
if (iindex == -1)
logger.info(s"Couldn't convert nonexistent item ID ${r.item}"
+ " to Int index.")
((uindex, iindex), 1)
}.filter { case ((u, i), v) =>
// keep events with valid user and item index
(u != -1) && (i != -1)
}
.reduceByKey(_ + _) // aggregate all view events of same user-item pair
.map { case ((u, i), v) =>
// MLlibRating requires integer index for user and item
MLlibRating(u, i, v)
}.cache()
// MLLib ALS cannot handle empty training data.
require(!mllibRatings.take(1).isEmpty,
s"mllibRatings cannot be empty." +
" Please check if your events contain valid user and item ID.")
// seed for MLlib ALS
val seed = ap.seed.getOrElse(System.nanoTime)
val m = ALS.trainImplicit(
ratings = mllibRatings,
rank = ap.rank,
iterations = ap.numIterations,
lambda = ap.lambda,
blocks = -1,
alpha = 1.0,
seed = seed)
// Collect user features to the driver for serving.
val userFeatures = m.userFeatures.collectAsMap.toMap
// convert ID to Int index
val items = data.items.map { case (id, item) =>
(itemStringIntMap(id), item)
}
// join item with the trained productFeatures
// (left outer join: items without any rating keep a None feature vector)
val productFeatures = items.leftOuterJoin(m.productFeatures)
.collectAsMap.toMap
new ALSModel(
rank = m.rank,
userFeatures = userFeatures,
productFeatures = productFeatures,
userStringIntMap = userStringIntMap,
itemStringIntMap = itemStringIntMap
)
}
def predict(model: ALSModel, query: Query): PredictedResult = {
val userFeatures = model.userFeatures
val productFeatures = model.productFeatures
// convert whiteList's string ID to integer index
val whiteList: Option[Set[Int]] = query.whiteList.map( set =>
set.map(model.itemStringIntMap.get(_)).flatten
)
val blackList: Set[String] = query.blackList.getOrElse(Set[String]())
// combine query's blackList
// into final blackList.
// convert seen Items list from String ID to interger Index
val finalBlackList: Set[Int] = blackList.map( x =>
model.itemStringIntMap.get(x)).flatten
val userFeature =
model.userStringIntMap.get(query.user).map { userIndex =>
userFeatures.get(userIndex)
}
// flatten Option[Option[Array[Double]]] to Option[Array[Double]]
.flatten
val topScores = if (userFeature.isDefined) {
// the user has feature vector
val uf = userFeature.get
val indexScores: Map[Int, Double] =
productFeatures.par // convert to parallel collection
.filter { case (i, (item, feature)) =>
feature.isDefined &&
isCandidateItem(
i = i,
item = item,
categories = query.categories,
whiteList = whiteList,
blackList = finalBlackList
)
}
.map { case (i, (item, feature)) =>
// NOTE: feature must be defined, so can call .get
val s = dotProduct(uf, feature.get)
// Can adjust score here
(i, s)
}
.filter(_._2 > 0) // only keep items with score > 0
.seq // convert back to sequential collection
val ord = Ordering.by[(Int, Double), Double](_._2).reverse
val topScores = getTopN(indexScores, query.num)(ord).toArray
topScores
} else {
// the user doesn't have feature vector.
// For example, new user is created after model is trained.
logger.info(s"No userFeature found for user ${query.user}.")
Array[(Int, Double)]()
}
val itemScores = topScores.map { case (i, s) =>
new ItemScore(
// convert item int index back to string ID
item = model.itemIntStringMap(i),
score = s
)
}
new PredictedResult(itemScores)
}
private
def getTopN[T](s: Iterable[T], n: Int)(implicit ord: Ordering[T]): Seq[T] = {
val q = PriorityQueue()
for (x <- s) {
if (q.size < n)
q.enqueue(x)
else {
// q is full
if (ord.compare(x, q.head) < 0) {
q.dequeue()
q.enqueue(x)
}
}
}
q.dequeueAll.toSeq.reverse
}
private
def dotProduct(v1: Array[Double], v2: Array[Double]): Double = {
val size = v1.size
var i = 0
var d: Double = 0
while (i < size) {
d += v1(i) * v2(i)
i += 1
}
d
}
private
def isCandidateItem(
i: Int,
item: Item,
categories: Option[Set[String]],
whiteList: Option[Set[Int]],
blackList: Set[Int]
): Boolean = {
// can add other custom filtering here
whiteList.map(_.contains(i)).getOrElse(true) &&
!blackList.contains(i) &&
// filter categories
categories.map { cat =>
item.categories.map { itemCat =>
// keep this item if has ovelap categories with the query
!(itemCat.toSet.intersect(cat).isEmpty)
}.getOrElse(false) // discard this item if it has no categories
}.getOrElse(true)
}
}
| alex9311/PredictionIO | examples/experimental/scala-parallel-recommendation-cat/src/main/scala/ALSAlgorithm.scala | Scala | apache-2.0 | 8,112 |
package com.kixeye.chassis.scala.transport
/*
* #%L
* Chassis Scala Transport Core
* %%
* Copyright (C) 2014 KIXEYE, Inc
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
import org.springframework.stereotype.Component
import org.springframework.beans.factory.annotation.Autowired
import org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter
import javax.annotation.PostConstruct
import org.springframework.web.method.support.HandlerMethodReturnValueHandler
import com.kixeye.chassis.scala.transport.http.ScalaFutureReturnValueHandler
import com.kixeye.chassis.transport.websocket.WebSocketAction
import com.kixeye.chassis.scala.transport.websocket.responseconverter.ScalaFutureWebSocketResponseConverter
import com.kixeye.chassis.transport.util.SpringContextWrapper
import org.springframework.context.ApplicationContext
/**
* Registers a ScalaFutureReturnValueHandler with spring mvc framework
*/
@Component
class ScalaTransportInstaller @Autowired(required = false) (applicationContext: ApplicationContext) {

  /**
   * Wires Scala Future support into both transports after the Spring context
   * is built: an HTTP return-value handler for Spring MVC and a response
   * converter for the web socket layer.
   */
  @PostConstruct
  def initialize() : Unit = {
    // HTTP: register the Scala Future handler on the main context first...
    registerReturnValueHandler(applicationContext)
    // ...then on the Spring MVC child context, if one is exposed. In chassis
    // the MVC context is a child of the main context, published to the parent
    // through a SpringContextWrapper bean.
    getBeanOfTypeQuietly(classOf[SpringContextWrapper], applicationContext)
      .map(_.getContext)
      .foreach(ctx => registerReturnValueHandler(ctx))
    // Web sockets: register a Scala Future response converter.
    WebSocketAction.addWebSocketResponseConverter( new ScalaFutureWebSocketResponseConverter() )
  }

  /** Registers the handler on the context's RequestMappingHandlerAdapter, if any. */
  def registerReturnValueHandler(applicationContext: ApplicationContext) : Unit = {
    registerReturnValueHandler(getBeanOfTypeQuietly(classOf[RequestMappingHandlerAdapter], applicationContext))
  }

  /**
   * Prepends a ScalaFutureReturnValueHandler to the adapter's handler list so
   * it takes precedence over the stock handlers. A no-op when the adapter is
   * absent.
   */
  def registerReturnValueHandler(requestMappingHandlerAdapterOption: Option[RequestMappingHandlerAdapter]) : Unit = {
    requestMappingHandlerAdapterOption.foreach { adapter =>
      val handlers = new java.util.ArrayList[HandlerMethodReturnValueHandler](adapter.getReturnValueHandlers)
      handlers.add(0, new ScalaFutureReturnValueHandler())
      adapter.setReturnValueHandlers(handlers)
    }
  }

  /** Looks up the first bean of the given type, or None when none is defined. */
  def getBeanOfTypeQuietly [T] (beanClass: Class[T], applicationContext: ApplicationContext): Option[T] = {
    val beans = applicationContext.getBeansOfType(beanClass)
    if (beans.isEmpty) None
    else Option(beans.values().iterator().next())
  }
}
| zzglitch/chassis | chassis-transport/chassis-scala-transport-core/src/main/scala/com/kixeye/chassis/scala/transport/ScalaTransportInstaller.scala | Scala | apache-2.0 | 3,354 |
package app
import java.lang.management.ManagementFactory
import java.util.concurrent.TimeUnit
import akka.actor.Status.Failure
import akka.actor._
import akka.io.IO
import akka.routing.RoundRobinPool
import app.Configs._
import app.adapters.database._
import app.adapters.database.support.{DbConfig, DbInitializer, DbProfile, TypesafeDbConfig}
import app.server.ServerSupervisor
import app.services.TaskService
import app.utils.ShutdownHook
import com.codahale.metrics.health.jvm.ThreadDeadlockHealthCheck
import com.codahale.metrics.health.{HealthCheck, HealthCheckRegistry}
import com.codahale.metrics.jvm.{BufferPoolMetricSet, GarbageCollectorMetricSet, MemoryUsageGaugeSet, ThreadStatesGaugeSet}
import com.codahale.metrics.{JmxReporter, MetricRegistry}
import com.typesafe.config.Config
import spray.can.Http
import scala.collection.JavaConverters._
import scala.concurrent.duration._
import scala.slick.driver.{JdbcProfile, PostgresDriver}
/**
 * Application entry point: boots the actor system, wires the database layer,
 * the HTTP supervisor, metrics/health-check reporting, and binds the HTTP port.
 */
object Main extends App with ShutdownHook{

  implicit val system = ActorSystem("main-system")
  // BUG FIX: the original literal was missing the `s` interpolator, so the
  // text "$system" was logged verbatim instead of the actor system's name.
  log.info(s"Actor system $system is up and running")

  private implicit val configuration: Config = Configs.configuration

  private val metricsRegistry = new MetricRegistry
  private val healthCheckRegistry = new HealthCheckRegistry

  private val dbProfile = createDbProfile
  private val taskDao = new TaskDAO(dbProfile)

  // TaskService actor; registries are injected via anonymous-subclass overrides.
  private val dbActor = system.actorOf(Props(new TaskService(taskDao){
    override val maybeMetricsRegistry: Option[AnyRef] = Some(metricsRegistry)
    override val maybeHealthCheckRegistry: Option[AnyRef] = Some(healthCheckRegistry)
  }), "database-actor")

  // Round-robin pool of HTTP supervisors handling incoming requests.
  private val mainHandler = system.actorOf(
    Props(new ServerSupervisor(metricsRegistry, dbActor))
      .withRouter(RoundRobinPool(nrOfInstances = 10)), "main-http-actor"
  )

  addDeadLetterLogger(system)
  initDatabase(dbProfile)
  log.info("Postgres is up and running")
  startReporters()
  log.info("Metrics started")

  IO(Http) ! Http.Bind(mainHandler, interface = Configs.interface, port = Configs.appPort)

  /** Registers JVM metric sets, starts the JMX reporter, and schedules health-check logging. */
  private def startReporters() {
    metricsRegistry.registerAll(new ThreadStatesGaugeSet())
    metricsRegistry.registerAll(new GarbageCollectorMetricSet())
    metricsRegistry.registerAll(new BufferPoolMetricSet(ManagementFactory.getPlatformMBeanServer()))
    metricsRegistry.registerAll(new MemoryUsageGaugeSet())
    val reporter = JmxReporter.forRegistry(metricsRegistry)
      .convertRatesTo(TimeUnit.SECONDS)
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .build()
    reporter.start()
    healthCheckRegistry.register("deadlocks", new ThreadDeadlockHealthCheck())
    logHealth(healthCheckRegistry)
  }

  /**
   * Periodically runs every registered health check and logs its result.
   * The interval comes from `app.healthchecks.reporting.interval.seconds`.
   */
  def logHealth(registry: HealthCheckRegistry)(implicit system :ActorSystem, config: Config): Unit = {
    import system.dispatcher
    val refreshInterval = config.getLong("app.healthchecks.reporting.interval.seconds")
    system.scheduler.schedule(FiniteDuration(refreshInterval, "seconds"), FiniteDuration(refreshInterval, "seconds")){
      registry.runHealthChecks().asScala.foreach{
        case (name: String, result: HealthCheck.Result) => log.info(s"The healthCheck $name reported status was $result")
      }
    }
  }

  /** Runs schema creation / migration for the configured database. */
  private def initDatabase(dbProfile: DbProfile): Unit = {
    new DbInitializer(dbProfile).initialize
  }

  /** Builds a Postgres Slick profile backed by the Typesafe configuration. */
  private def createDbProfile(implicit configuration: Config): DbProfile = new DbProfile {
    override val profile: JdbcProfile = PostgresDriver
    override val dbConfig: DbConfig = new TypesafeDbConfig {
      override def conf: Config = configuration
    }
  }

  /**
   * Subscribes a logging actor to dead letters so failed replies between
   * actors are surfaced in the logs instead of being silently dropped.
   * Returns the subscription result (true when newly subscribed).
   */
  private def addDeadLetterLogger(system: ActorSystem): Boolean = {
    val loggingActor = system.actorOf(Props(new Actor with ActorLogging {
      override def receive: Receive = {
        case DeadLetter(Failure(ex), sender, recipient) => log.error(ex, s"Failure in the communication between $sender and $recipient")
      }
    }))
    system.eventStream.subscribe(loggingActor, classOf[DeadLetter])
  }
}
} | mericano1/spray-akka-slick-postgres | src/main/scala/app/Main.scala | Scala | mit | 3,988 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.log
import java.io._
import java.nio.file.Files
import java.util.concurrent._
import kafka.admin.AdminUtils
import kafka.common.{KafkaException, KafkaStorageException}
import kafka.server.checkpoints.OffsetCheckpointFile
import kafka.server.{BrokerState, RecoveringFromUncleanShutdown, _}
import kafka.utils._
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.utils.Time
import scala.collection.JavaConverters._
import scala.collection._
/**
* The entry point to the kafka log management subsystem. The log manager is responsible for log creation, retrieval, and cleaning.
* All read and write operations are delegated to the individual log instances.
*
* The log manager maintains logs in one or more directories. New logs are created in the data directory
* with the fewest logs. No attempt is made to move partitions after the fact or balance based on
* size or I/O rate.
*
* A background thread handles log retention by periodically truncating excess log segments.
*/
@threadsafe
class LogManager(val logDirs: Array[File],
                 val topicConfigs: Map[String, LogConfig], // note that this doesn't get updated after creation
                 val defaultConfig: LogConfig,
                 val cleanerConfig: CleanerConfig,
                 ioThreads: Int,
                 val flushCheckMs: Long,
                 val flushRecoveryOffsetCheckpointMs: Long,
                 val flushStartOffsetCheckpointMs: Long,
                 val retentionCheckMs: Long,
                 val maxPidExpirationMs: Int,
                 scheduler: Scheduler,
                 val brokerState: BrokerState,
                 brokerTopicStats: BrokerTopicStats,
                 time: Time) extends Logging {

  // Well-known file names kept in every data directory.
  val RecoveryPointCheckpointFile = "recovery-point-offset-checkpoint"
  val LogStartOffsetCheckpointFile = "log-start-offset-checkpoint"
  val LockFile = ".lock"
  // Delay before the first run of every scheduled background task.
  val InitialTaskDelayMs = 30*1000

  // Serializes createLog/asyncDelete so a partition cannot be created and
  // deleted concurrently.
  private val logCreationOrDeletionLock = new Object
  private val logs = new Pool[TopicPartition, Log]()
  // Logs whose directories were renamed with the delete suffix and are
  // waiting for the background kafka-delete-logs task to remove them.
  private val logsToBeDeleted = new LinkedBlockingQueue[Log]()

  createAndValidateLogDirs(logDirs)
  private val dirLocks = lockLogDirs(logDirs)
  // One checkpoint file per data directory for recovery points / start offsets.
  private val recoveryPointCheckpoints = logDirs.map(dir => (dir, new OffsetCheckpointFile(new File(dir, RecoveryPointCheckpointFile)))).toMap
  private val logStartOffsetCheckpoints = logDirs.map(dir => (dir, new OffsetCheckpointFile(new File(dir, LogStartOffsetCheckpointFile)))).toMap
  loadLogs()

  // public, so we can access this from kafka.admin.DeleteTopicTest
  val cleaner: LogCleaner =
    if(cleanerConfig.enableCleaner)
      new LogCleaner(cleanerConfig, logDirs, logs, time = time)
    else
      null // NOTE: cleaner is null when compaction is disabled; callers null-check

  /**
   * Create and check validity of the given directories, specifically:
   * <ol>
   * <li> Ensure that there are no duplicates in the directory list
   * <li> Create each directory if it doesn't exist
   * <li> Check that each path is a readable directory
   * </ol>
   */
  private def createAndValidateLogDirs(dirs: Seq[File]) {
    if(dirs.map(_.getCanonicalPath).toSet.size < dirs.size)
      throw new KafkaException("Duplicate log directory found: " + logDirs.mkString(", "))
    for(dir <- dirs) {
      if(!dir.exists) {
        info("Log directory '" + dir.getAbsolutePath + "' not found, creating it.")
        val created = dir.mkdirs()
        if(!created)
          throw new KafkaException("Failed to create data directory " + dir.getAbsolutePath)
      }
      if(!dir.isDirectory || !dir.canRead)
        throw new KafkaException(dir.getAbsolutePath + " is not a readable log directory.")
    }
  }

  /**
   * Lock all the given directories
   *
   * @throws KafkaException when another process/thread already holds a lock
   */
  private def lockLogDirs(dirs: Seq[File]): Seq[FileLock] = {
    dirs.map { dir =>
      val lock = new FileLock(new File(dir, LockFile))
      if(!lock.tryLock())
        throw new KafkaException("Failed to acquire lock on file .lock in " + lock.file.getParentFile.getAbsolutePath +
                               ". A Kafka instance in another process or thread is using this directory.")
      lock
    }
  }

  /**
   * Recover and load all logs in the given data directories.
   * Directory loading is parallelized with `ioThreads` threads per data dir;
   * the clean-shutdown marker file controls whether recovery is needed and is
   * removed once its directory's jobs complete.
   */
  private def loadLogs(): Unit = {
    info("Loading logs.")
    val startMs = time.milliseconds
    val threadPools = mutable.ArrayBuffer.empty[ExecutorService]
    // keyed by clean-shutdown marker file; value is that directory's load jobs
    val jobs = mutable.Map.empty[File, Seq[Future[_]]]

    for (dir <- this.logDirs) {
      val pool = Executors.newFixedThreadPool(ioThreads)
      threadPools.append(pool)

      val cleanShutdownFile = new File(dir, Log.CleanShutdownFile)

      if (cleanShutdownFile.exists) {
        debug(
          "Found clean shutdown file. " +
          "Skipping recovery for all logs in data directory: " +
          dir.getAbsolutePath)
      } else {
        // log recovery itself is being performed by `Log` class during initialization
        brokerState.newState(RecoveringFromUncleanShutdown)
      }

      // a missing/corrupt checkpoint only resets to 0 (forces full recovery),
      // it does not fail startup
      var recoveryPoints = Map[TopicPartition, Long]()
      try {
        recoveryPoints = this.recoveryPointCheckpoints(dir).read
      } catch {
        case e: Exception =>
          warn("Error occurred while reading recovery-point-offset-checkpoint file of directory " + dir, e)
          warn("Resetting the recovery checkpoint to 0")
      }

      var logStartOffsets = Map[TopicPartition, Long]()
      try {
        logStartOffsets = this.logStartOffsetCheckpoints(dir).read
      } catch {
        case e: Exception =>
          warn("Error occurred while reading log-start-offset-checkpoint file of directory " + dir, e)
      }

      val jobsForDir = for {
        dirContent <- Option(dir.listFiles).toList
        logDir <- dirContent if logDir.isDirectory
      } yield {
        CoreUtils.runnable {
          debug("Loading log '" + logDir.getName + "'")

          val topicPartition = Log.parseTopicPartitionName(logDir)
          val config = topicConfigs.getOrElse(topicPartition.topic, defaultConfig)
          val logRecoveryPoint = recoveryPoints.getOrElse(topicPartition, 0L)
          val logStartOffset = logStartOffsets.getOrElse(topicPartition, 0L)

          val current = Log(
            dir = logDir,
            config = config,
            logStartOffset = logStartOffset,
            recoveryPoint = logRecoveryPoint,
            maxProducerIdExpirationMs = maxPidExpirationMs,
            scheduler = scheduler,
            time = time,
            brokerTopicStats = brokerTopicStats)

          // directories already renamed with the delete suffix resume deletion
          // instead of being re-registered as live logs
          if (logDir.getName.endsWith(Log.DeleteDirSuffix)) {
            this.logsToBeDeleted.add(current)
          } else {
            val previous = this.logs.put(topicPartition, current)
            if (previous != null) {
              throw new IllegalArgumentException(
                "Duplicate log directories found: %s, %s!".format(
                  current.dir.getAbsolutePath, previous.dir.getAbsolutePath))
            }
          }
        }
      }

      jobs(cleanShutdownFile) = jobsForDir.map(pool.submit)
    }

    try {
      for ((cleanShutdownFile, dirJobs) <- jobs) {
        dirJobs.foreach(_.get)
        // only delete the marker after every log in the directory loaded
        cleanShutdownFile.delete()
      }
    } catch {
      case e: ExecutionException => {
        error("There was an error in one of the threads during logs loading: " + e.getCause)
        throw e.getCause
      }
    } finally {
      threadPools.foreach(_.shutdown())
    }

    info(s"Logs loading complete in ${time.milliseconds - startMs} ms.")
  }

  /**
   * Start the background threads to flush logs and do log cleanup
   */
  def startup() {
    /* Schedule the cleanup task to delete old logs */
    if(scheduler != null) {
      info("Starting log cleanup with a period of %d ms.".format(retentionCheckMs))
      scheduler.schedule("kafka-log-retention",
                         cleanupLogs _,
                         delay = InitialTaskDelayMs,
                         period = retentionCheckMs,
                         TimeUnit.MILLISECONDS)
      info("Starting log flusher with a default period of %d ms.".format(flushCheckMs))
      scheduler.schedule("kafka-log-flusher",
                         flushDirtyLogs _,
                         delay = InitialTaskDelayMs,
                         period = flushCheckMs,
                         TimeUnit.MILLISECONDS)
      scheduler.schedule("kafka-recovery-point-checkpoint",
                         checkpointRecoveryPointOffsets _,
                         delay = InitialTaskDelayMs,
                         period = flushRecoveryOffsetCheckpointMs,
                         TimeUnit.MILLISECONDS)
      scheduler.schedule("kafka-log-start-offset-checkpoint",
                         checkpointLogStartOffsets _,
                         delay = InitialTaskDelayMs,
                         period = flushStartOffsetCheckpointMs,
                         TimeUnit.MILLISECONDS)
      scheduler.schedule("kafka-delete-logs",
                         deleteLogs _,
                         delay = InitialTaskDelayMs,
                         period = defaultConfig.fileDeleteDelayMs,
                         TimeUnit.MILLISECONDS)
    }
    if(cleanerConfig.enableCleaner)
      cleaner.startup()
  }

  /**
   * Close all the logs.
   * Shutdown order matters: cleaner first, then flush/close all logs in
   * parallel per directory, write final checkpoints, and only then create the
   * clean-shutdown markers and release the directory locks.
   */
  def shutdown() {
    info("Shutting down.")

    val threadPools = mutable.ArrayBuffer.empty[ExecutorService]
    val jobs = mutable.Map.empty[File, Seq[Future[_]]]

    // stop the cleaner first
    if (cleaner != null) {
      CoreUtils.swallow(cleaner.shutdown())
    }

    // close logs in each dir
    for (dir <- this.logDirs) {
      debug("Flushing and closing logs at " + dir)

      val pool = Executors.newFixedThreadPool(ioThreads)
      threadPools.append(pool)

      val logsInDir = logsByDir.getOrElse(dir.toString, Map()).values

      val jobsForDir = logsInDir map { log =>
        CoreUtils.runnable {
          // flush the log to ensure latest possible recovery point
          log.flush()
          log.close()
        }
      }

      jobs(dir) = jobsForDir.map(pool.submit).toSeq
    }

    try {
      for ((dir, dirJobs) <- jobs) {
        dirJobs.foreach(_.get)

        // update the last flush point
        debug("Updating recovery points at " + dir)
        checkpointLogRecoveryOffsetsInDir(dir)

        debug("Updating log start offsets at " + dir)
        checkpointLogStartOffsetsInDir(dir)

        // mark that the shutdown was clean by creating marker file
        debug("Writing clean shutdown marker at " + dir)
        CoreUtils.swallow(Files.createFile(new File(dir, Log.CleanShutdownFile).toPath))
      }
    } catch {
      case e: ExecutionException => {
        error("There was an error in one of the threads during LogManager shutdown: " + e.getCause)
        throw e.getCause
      }
    } finally {
      threadPools.foreach(_.shutdown())
      // regardless of whether the close succeeded, we need to unlock the data directories
      dirLocks.foreach(_.destroy())
    }

    info("Shutdown complete.")
  }

  /**
   * Truncate the partition logs to the specified offsets and checkpoint the recovery point to this offset
   *
   * @param partitionOffsets Partition logs that need to be truncated
   */
  def truncateTo(partitionOffsets: Map[TopicPartition, Long]) {
    for ((topicPartition, truncateOffset) <- partitionOffsets) {
      val log = logs.get(topicPartition)
      // If the log does not exist, skip it
      if (log != null) {
        //May need to abort and pause the cleaning of the log, and resume after truncation is done.
        val needToStopCleaner = cleaner != null && truncateOffset < log.activeSegment.baseOffset
        if (needToStopCleaner)
          cleaner.abortAndPauseCleaning(topicPartition)
        try {
          log.truncateTo(truncateOffset)
          if (needToStopCleaner)
            cleaner.maybeTruncateCheckpoint(log.dir.getParentFile, topicPartition, log.activeSegment.baseOffset)
        } finally {
          if (needToStopCleaner)
            cleaner.resumeCleaning(topicPartition)
        }
      }
    }
    checkpointRecoveryPointOffsets()
  }

  /**
   * Delete all data in a partition and start the log at the new offset
   * @param newOffset The new offset to start the log with
   */
  def truncateFullyAndStartAt(topicPartition: TopicPartition, newOffset: Long) {
    val log = logs.get(topicPartition)
    // If the log does not exist, skip it
    if (log != null) {
      //Abort and pause the cleaning of the log, and resume after truncation is done.
      if (cleaner != null)
        cleaner.abortAndPauseCleaning(topicPartition)
      log.truncateFullyAndStartAt(newOffset)
      if (cleaner != null) {
        cleaner.maybeTruncateCheckpoint(log.dir.getParentFile, topicPartition, log.activeSegment.baseOffset)
        cleaner.resumeCleaning(topicPartition)
      }
    }
    checkpointRecoveryPointOffsets()
  }

  /**
   * Write out the current recovery point for all logs to a text file in the log directory
   * to avoid recovering the whole log on startup.
   */
  def checkpointRecoveryPointOffsets() {
    this.logDirs.foreach(checkpointLogRecoveryOffsetsInDir)
  }

  /**
   * Write out the current log start offset for all logs to a text file in the log directory
   * to avoid exposing data that have been deleted by DeleteRecordsRequest
   */
  def checkpointLogStartOffsets() {
    this.logDirs.foreach(checkpointLogStartOffsetsInDir)
  }

  /**
   * Make a checkpoint for all logs in provided directory.
   */
  private def checkpointLogRecoveryOffsetsInDir(dir: File): Unit = {
    val recoveryPoints = this.logsByDir.get(dir.toString)
    if (recoveryPoints.isDefined) {
      this.recoveryPointCheckpoints(dir).write(recoveryPoints.get.mapValues(_.recoveryPoint))
    }
  }

  /**
   * Checkpoint log start offset for all logs in provided directory.
   * Only offsets that have moved past the first segment's base offset are
   * written; others can be re-derived from the segments themselves.
   */
  private def checkpointLogStartOffsetsInDir(dir: File): Unit = {
    val logs = this.logsByDir.get(dir.toString)
    if (logs.isDefined) {
      this.logStartOffsetCheckpoints(dir).write(
        logs.get.filter{case (tp, log) => log.logStartOffset > log.logSegments.head.baseOffset}.mapValues(_.logStartOffset))
    }
  }

  /**
   * Get the log if it exists, otherwise return None
   */
  def getLog(topicPartition: TopicPartition): Option[Log] = Option(logs.get(topicPartition))

  /**
   * Create a log for the given topic and the given partition
   * If the log already exists, just return a copy of the existing log
   */
  def createLog(topicPartition: TopicPartition, config: LogConfig): Log = {
    logCreationOrDeletionLock synchronized {
      // create the log if it has not already been created in another thread
      getLog(topicPartition).getOrElse {
        val dataDir = nextLogDir()
        val dir = new File(dataDir, topicPartition.topic + "-" + topicPartition.partition)
        Files.createDirectories(dir.toPath)

        val log = Log(
          dir = dir,
          config = config,
          logStartOffset = 0L,
          recoveryPoint = 0L,
          maxProducerIdExpirationMs = maxPidExpirationMs,
          scheduler = scheduler,
          time = time,
          brokerTopicStats = brokerTopicStats)

        logs.put(topicPartition, log)

        info("Created log for partition [%s,%d] in %s with properties {%s}."
          .format(topicPartition.topic,
            topicPartition.partition,
            dataDir.getAbsolutePath,
            config.originals.asScala.mkString(", ")))

        log
      }
    }
  }

  /**
   * Delete logs marked for deletion.
   * A failed deletion is re-queued at the tail; the loop stops once every
   * remaining queue entry has failed in this pass (failed == queue size).
   */
  private def deleteLogs(): Unit = {
    try {
      var failed = 0
      while (!logsToBeDeleted.isEmpty && failed < logsToBeDeleted.size()) {
        val removedLog = logsToBeDeleted.take()
        if (removedLog != null) {
          try {
            removedLog.delete()
            info(s"Deleted log for partition ${removedLog.topicPartition} in ${removedLog.dir.getAbsolutePath}.")
          } catch {
            case e: Throwable =>
              error(s"Exception in deleting $removedLog. Moving it to the end of the queue.", e)
              failed = failed + 1
              logsToBeDeleted.put(removedLog)
          }
        }
      }
    } catch {
      case e: Throwable =>
        error(s"Exception in kafka-delete-logs thread.", e)
    }
  }

  /**
   * Rename the directory of the given topic-partition "logdir" as "logdir.uuid.delete" and
   * add it in the queue for deletion.
   * @param topicPartition TopicPartition that needs to be deleted
   */
  def asyncDelete(topicPartition: TopicPartition) = {
    val removedLog: Log = logCreationOrDeletionLock synchronized {
      logs.remove(topicPartition)
    }
    if (removedLog != null) {
      //We need to wait until there is no more cleaning task on the log to be deleted before actually deleting it.
      if (cleaner != null) {
        cleaner.abortCleaning(topicPartition)
        cleaner.updateCheckpoints(removedLog.dir.getParentFile)
      }
      val dirName = Log.logDeleteDirName(removedLog.name)
      removedLog.close()
      val renamedDir = new File(removedLog.dir.getParent, dirName)
      val renameSuccessful = removedLog.dir.renameTo(renamedDir)
      if (renameSuccessful) {
        checkpointLogStartOffsetsInDir(removedLog.dir.getParentFile)
        removedLog.dir = renamedDir
        // change the file pointers for log and index file
        for (logSegment <- removedLog.logSegments) {
          logSegment.log.setFile(new File(renamedDir, logSegment.log.file.getName))
          logSegment.index.file = new File(renamedDir, logSegment.index.file.getName)
        }

        logsToBeDeleted.add(removedLog)
        removedLog.removeLogMetrics()
        info(s"Log for partition ${removedLog.topicPartition} is renamed to ${removedLog.dir.getAbsolutePath} and is scheduled for deletion")
      } else {
        throw new KafkaStorageException("Failed to rename log directory from " + removedLog.dir.getAbsolutePath + " to " + renamedDir.getAbsolutePath)
      }
    }
  }

  /**
   * Choose the next directory in which to create a log. Currently this is done
   * by calculating the number of partitions in each directory and then choosing the
   * data directory with the fewest partitions.
   */
  private def nextLogDir(): File = {
    if(logDirs.size == 1) {
      logDirs(0)
    } else {
      // count the number of logs in each parent directory (including 0 for empty directories
      val logCounts = allLogs.groupBy(_.dir.getParent).mapValues(_.size)
      val zeros = logDirs.map(dir => (dir.getPath, 0)).toMap
      val dirCounts = (zeros ++ logCounts).toBuffer

      // choose the directory with the least logs in it
      val leastLoaded = dirCounts.sortBy(_._2).head
      new File(leastLoaded._1)
    }
  }

  /**
   * Delete any eligible logs. Return the number of segments deleted.
   * Only consider logs that are not compacted.
   */
  def cleanupLogs() {
    debug("Beginning log cleanup...")
    var total = 0
    val startMs = time.milliseconds
    for(log <- allLogs; if !log.config.compact) {
      debug("Garbage collecting '" + log.name + "'")
      total += log.deleteOldSegments()
    }
    debug("Log cleanup completed. " + total + " files deleted in " +
                  (time.milliseconds - startMs) / 1000 + " seconds")
  }

  /**
   * Get all the partition logs
   */
  def allLogs(): Iterable[Log] = logs.values

  /**
   * Get a map of TopicPartition => Log
   */
  def logsByTopicPartition: Map[TopicPartition, Log] = logs.toMap

  /**
   * Map of log dir to logs by topic and partitions in that dir
   */
  private def logsByDir = {
    this.logsByTopicPartition.groupBy {
      case (_, log) => log.dir.getParent
    }
  }

  /**
   * Flush any log which has exceeded its flush interval and has unwritten messages.
   * A failure on one log is logged and does not stop flushing the others.
   */
  private def flushDirtyLogs() = {
    debug("Checking for dirty logs to flush...")

    for ((topicPartition, log) <- logs) {
      try {
        val timeSinceLastFlush = time.milliseconds - log.lastFlushTime
        debug("Checking if flush is needed on " + topicPartition.topic + " flush interval  " + log.config.flushMs +
              " last flushed " + log.lastFlushTime + " time since last flush: " + timeSinceLastFlush)
        if(timeSinceLastFlush >= log.config.flushMs)
          log.flush
      } catch {
        case e: Throwable =>
          error("Error flushing topic " + topicPartition.topic, e)
      }
    }
  }
}
object LogManager {

  /**
   * Factory used by the broker: derives the default log config from the broker
   * config, fetches per-topic overrides from ZooKeeper, builds the cleaner
   * config, and constructs the LogManager (which loads all logs on creation).
   */
  def apply(config: KafkaConfig,
            zkUtils: ZkUtils,
            brokerState: BrokerState,
            kafkaScheduler: KafkaScheduler,
            time: Time,
            brokerTopicStats: BrokerTopicStats): LogManager = {
    val defaultProps = KafkaServer.copyKafkaConfigToLog(config)
    val defaultLogConfig = LogConfig(defaultProps)

    // per-topic overrides layered on top of the broker defaults
    val topicConfigs = AdminUtils.fetchAllTopicConfigs(zkUtils).map { case (topic, configs) =>
      topic -> LogConfig.fromProps(defaultProps, configs)
    }

    // read the log configurations from zookeeper
    val cleanerConfig = CleanerConfig(numThreads = config.logCleanerThreads,
      dedupeBufferSize = config.logCleanerDedupeBufferSize,
      dedupeBufferLoadFactor = config.logCleanerDedupeBufferLoadFactor,
      ioBufferSize = config.logCleanerIoBufferSize,
      maxMessageSize = config.messageMaxBytes,
      maxIoBytesPerSecond = config.logCleanerIoMaxBytesPerSecond,
      backOffMs = config.logCleanerBackoffMs,
      enableCleaner = config.logCleanerEnable)

    new LogManager(logDirs = config.logDirs.map(new File(_)).toArray,
      topicConfigs = topicConfigs,
      defaultConfig = defaultLogConfig,
      cleanerConfig = cleanerConfig,
      ioThreads = config.numRecoveryThreadsPerDataDir,
      flushCheckMs = config.logFlushSchedulerIntervalMs,
      flushRecoveryOffsetCheckpointMs = config.logFlushOffsetCheckpointIntervalMs,
      flushStartOffsetCheckpointMs = config.logFlushStartOffsetCheckpointIntervalMs,
      retentionCheckMs = config.logCleanupIntervalMs,
      maxPidExpirationMs = config.transactionIdExpirationMs,
      scheduler = kafkaScheduler,
      brokerState = brokerState,
      time = time,
      brokerTopicStats = brokerTopicStats)
  }
}
| airbnb/kafka | core/src/main/scala/kafka/log/LogManager.scala | Scala | apache-2.0 | 23,003 |
package com.iheart.sqs
import java.util.regex.Pattern
/**
 * User-agent classification helpers (bots, tablets, phones).
 *
 * BUG FIX: the original patterns mixed casings — some were lowercase-only
 * ("ipad", "android.*(mobile|mini)") while others were mixed-case
 * ("Opera Mobi", "IEMobile", "GT-.*Build/GINGERBREAD") — with no
 * CASE_INSENSITIVE flag. Whatever casing the caller supplied (raw UA string
 * or pre-lowercased), one of the two groups could never match. All patterns
 * are now compiled case-insensitively, which preserves every match the old
 * code produced and fixes the dead patterns.
 */
object UserAgent {

  // Compile a classification pattern case-insensitively (see class note).
  private def ci(regex: String): Pattern =
    Pattern.compile(regex, Pattern.CASE_INSENSITIVE)

  // NOTE(review): the trailing "|" creates an empty alternative, so under
  // find() this is equivalent to plain "bot" and the named prefixes are
  // redundant — kept as-is to preserve matching breadth; confirm intent.
  val bot = ci("(ads|google|bing|msn|yandex|baidu|ro|career|)bot")
  val spider = ci("(baidu|jike|symantec)spider")
  val scanner = ci("scanner")
  // NOTE(review): the "(web)" group is mandatory, so plain "crawler" does NOT
  // match — confirm whether "(web)?crawler" was intended.
  val crawler = ci("(web)crawler")
  val ipad = ci("ipad")
  val iphone = ci("ip(hone|od)")
  val androidPhone = ci("android.*(mobile|mini)")
  val androidTablet = ci("android")
  val operaMobile = ci("Opera Mobi")
  val msPhone = ci("IEMobile")
  val gingerBread = ci("GT-.*Build/GINGERBREAD")

  /** True when the UA matches any bot/spider/scanner/crawler pattern. */
  def isBot(str: String) = bot.matcher(str).find() || spider.matcher(str).find() || scanner.matcher(str).find() || crawler.matcher(str).find()
  def isIpad(str: String) = ipad.matcher(str).find()
  def isiPhone(str: String) = iphone.matcher(str).find()
  /** Android phone, Opera Mobile, or Gingerbread GT-* device. */
  def isAndroidPhone(str: String) = androidPhone.matcher(str).find() || operaMobile.matcher(str).find() || gingerBread.matcher(str).find()
  // NOTE(review): also matches phones ("android...mobile" contains "android");
  // callers presumably test isAndroidPhone first — confirm call order.
  def isAndroidTablet(str: String) = androidTablet.matcher(str).find()
  def isMSPhone(str: String) = msPhone.matcher(str).find()
}
| iheartradio/fastly-sqs | src/main/scala/com/iheart/sqs/UserAgent.scala | Scala | apache-2.0 | 1,194 |
#!/usr/bin/env scala-script

// Demo script: greet, then echo each command-line argument on its own line.
// (`args` is supplied by the scala-script runner.)
println("hello")
args.foreach(println)
| rvanider/scala-script | sample.scala | Scala | mit | 72 |
package hr.element

/**
 * Package object for `cmrfs`: re-exports commonly used types and values so
 * that code in this package can refer to them without full imports. Mixes in
 * DecorateAsScala to make the Java-to-Scala collection decorators available
 * package-wide.
 */
package object cmrfs
  extends scala.collection.convert.DecorateAsScala {

  // Java interop.
  type Properties = java.util.Properties

  // scalax file/IO shorthands (both the type and its companion for Path).
  type Path = scalax.file.Path
  val Path = scalax.file.Path
  val Codec = scalax.io.Codec
  val Resource = scalax.io.Resource

  // Logging and collection-builder aliases.
  type StrictLogging = com.typesafe.scalalogging.StrictLogging
  type VectorBuilder[T] = scala.collection.immutable.VectorBuilder[T]

  /** Thin facade over java.text.NumberFormat. */
  object NumberFormat {
    def getIntegerInstance = java.text.NumberFormat.getIntegerInstance
  }
}
| melezov/cmrfs | src/main/scala/hr/element/cmrfs/package.scala | Scala | bsd-3-clause | 503 |
package org.jetbrains.plugins.scala.lang.formatting.settings
import com.intellij.application.options._
import com.intellij.psi.codeStyle.CodeStyleSettings
import org.jetbrains.plugins.scala.ScalaFileType
import org.jetbrains.plugins.scala.lang.rearranger.ScalaArrangementPanel
/**
* User: Alefas
* Date: 23.09.11
*/
/**
 * Scala code-style settings UI: builds on the platform's tabbed code-style
 * panel and registers the Scala-specific tabs on top of the common ones.
 */
class ScalaTabbedCodeStylePanel(currentSettings: CodeStyleSettings, settings: CodeStyleSettings)
  extends TabbedLanguageCodeStylePanel(ScalaFileType.SCALA_LANGUAGE, currentSettings, settings) {

  // Registration order here is the order the tabs appear in the settings
  // dialog, after the default tabs added by super.initTabs.
  protected override def initTabs(settings: CodeStyleSettings) {
    super.initTabs(settings)
    addTab(new ScalaDocFormattingPanel(settings))      // ScalaDoc formatting
    addTab(new ImportsPanel(settings))                 // import organization
    addTab(new MultiLineStringCodeStylePanel(settings)) // triple-quoted strings
    addTab(new TypeAnnotationsPanel(settings))         // type-annotation policy
    addTab(new ScalaArrangementPanel(settings))        // member arrangement
    addTab(new OtherCodeStylePanel(settings))          // everything else
  }
} | double-y/translation-idea-plugin | src/org/jetbrains/plugins/scala/lang/formatting/settings/ScalaTabbedCodeStylePanel.scala | Scala | apache-2.0 | 901 |
package com.basrikahveci
package cardgame.domain
import cardgame.messaging.Response
import cardgame.messaging.response.{GamePlayNotification, StartGameNotification}
import actors.Actor
import collection.mutable.ArrayBuffer
import java.util.concurrent.{TimeUnit, Executors}
// Internal actor protocol for Bot: deliver an outbound game Response to the
// bot, or tell it to leave its current game.
case class SendMessage(response: Response)
case object LeaveGameNotification

object Bot {
  // Shared scheduler used to delay bot reactions by one second so they feel
  // human-paced (see Bot._sendMessage).
  val Executor = Executors.newScheduledThreadPool(4)
}
/**
 * Computer-controlled player for the memory/matching card game. Runs as a
 * Scala actor: all interaction goes through SendMessage/LeaveGameNotification
 * messages, and reactions to game notifications are scheduled with a one
 * second delay on Bot.Executor.
 */
class Bot(_game: Game) extends Player with Actor {
  start

  /** Actor loop: serializes all state mutation through the mailbox. */
  def act {
    loop {
      react {
        case SendMessage(response) =>
          _sendMessage(response)
        case LeaveGameNotification =>
          _leaveGame
      }
    }
  }

  // Indices of cards currently face down, kept in the (shuffled) order the
  // bot will try them when it has no better information.
  var closedCards = scala.collection.mutable.ArrayBuffer[Int]()

  // Bots share a fixed sentinel id (equality below is id-based).
  def id = -1L

  val identity = new UserIdentity(id, "Bot", Playing.ordinal)

  // The game this bot is in; cleared when it leaves.
  var game: Option[Game] = Some(_game)

  def points = 0

  // Status/statistics hooks are no-ops for bots.
  def setStatus(status: UserStatus) {}
  def oneMoreWin {}
  def oneMoreLose {}
  def oneMoreLeave {}
  def addSessionPoints(pointsToAdd: Int) {}
  def join(game: Game) {}

  /** Asynchronously asks the actor to forget its game. */
  def leaveGame = this ! LeaveGameNotification

  private def _leaveGame = game = None

  /** Asynchronously delivers a game Response to the actor. */
  def sendMessage(response: Response) = this ! SendMessage(response)

  // Bounded "memory": closed-card indices the bot has seen, capped at
  // numberOfCardsToRemember (oldest entries dropped first).
  var rememberedCards = ArrayBuffer[Int]()
  // The first card the bot played in its current turn, if any.
  var previousCard: Option[Int] = None
  // Card faces by index for the current round.
  var cards = ArrayBuffer[String]()
  // Memory capacity, set to a fifth of the deck at round start.
  var numberOfCardsToRemember: Int = 0

  /**
   * Reacts to a game notification after a one second delay. On round start,
   * resets state and opens a random card if it is the bot's turn. On each
   * play, updates memory and — when it is the bot's turn — plays a remembered
   * match if one is known, otherwise a random closed card.
   */
  private def _sendMessage(response: Response) {
    Bot.Executor.schedule(new Runnable {
      def run() {
        response match {
          case startGameNotification: StartGameNotification =>
            // New round: rebuild the shuffled closed-card order and reset memory.
            closedCards.clear()
            startGameNotification.round.cards.zipWithIndex.foreach(eachCard => closedCards += eachCard._2)
            closedCards = scala.util.Random shuffle closedCards
            cards.clear()
            rememberedCards.clear()
            cards ++= startGameNotification.round.cards
            numberOfCardsToRemember = cards.size / 5
            if (startGameNotification.round.currentTurn == 1) {
              play(closedCards(0))
            }
          case gamePlayNotification: GamePlayNotification =>
            if (!gamePlayNotification.nextTurn.gameOver) {
              // Cards opened this turn are no longer closed (or memorable).
              gamePlayNotification.nextTurn.openCards.foreach {
                openCard =>
                  closedCards -= openCard
                  rememberedCards -= openCard
              }
              // Cards turned back down become closed again and are memorized.
              gamePlayNotification.nextTurn.closeCards.foreach {
                closeCard =>
                  closedCards += closeCard
                  if (!rememberedCards.contains(closeCard)) {
                    rememberedCards += closeCard
                  }
              }
              // Enforce the memory cap by forgetting the oldest cards.
              if (rememberedCards.size > numberOfCardsToRemember) {
                rememberedCards = rememberedCards.drop(rememberedCards.size - numberOfCardsToRemember)
              }
              if (gamePlayNotification.nextTurn.nextTurnPosition == 1) {
                previousCard match {
                  case Some(prev) =>
                    // Second card of our turn: try to match the first one.
                    rememberedCards.find(cards(_) == cards(prev)) match {
                      case Some(matching) =>
                        game.foreach(_.playedBy(Bot.this, matching))
                      case None =>
                        game.foreach(_.playedBy(Bot.this, closedCards(0)))
                    }
                    previousCard = None
                  case None =>
                    // First card of our turn: open a remembered pair if we
                    // know one, otherwise a random closed card.
                    val previousMatch =
                      for (prev1 <- rememberedCards; prev2 <- rememberedCards if (prev1 != prev2 && cards(prev1) == cards(prev2)))
                        yield prev1
                    if (!previousMatch.isEmpty) {
                      play(previousMatch.head)
                    } else {
                      play(closedCards(0))
                    }
                }
              }
            } else {
              // Round over: signal readiness for the next one.
              game.foreach(_.setReady(Bot.this))
            }
          case _ =>
        }
      }
    }, 1, TimeUnit.SECONDS)
  }

  /** Plays the given card and records it as the first card of this turn. */
  private def play(card: Int) {
    game.foreach(_.playedBy(this, card))
    previousCard = Some(card)
  }

  // Log instead of crashing the actor on unexpected errors.
  override def exceptionHandler = {
    case e: Exception =>
      User.logger.error("[exception-in-bot] Game Id: " + (if (game.isDefined) game.get.id else "-"), e)
  }

  // Identity is id-based, consistent with Player equality below.
  override def hashCode() = id.hashCode()

  override def equals(other: Any) = other match {
    case u: Player => id == u.id
    case _ => false
  }
}
| metanet/cardgame-server-scala | src/main/scala/com/basrikahveci/cardgame/domain/Bot.scala | Scala | mit | 4,400 |
package gv
package jleon3
import java.nio.file.{ Path β JPath, FileSystem β JFileSystem, FileAlreadyExistsException, NoSuchFileException }
import java.nio.channels.{
// ReadableByteChannel,
WritableByteChannel
// ,Channels
}
import language.{
postfixOps,
implicitConversions
// ,higherKinds,
// existentials
}
import util.{ Try, Success, Failure }
import concurrent.{ Future, ExecutionContext, Await }
import concurrent.duration._
//import akka.stream.scaladsl.{ Source, Flow, Sink }
import akka.stream.{ Materializer, ActorMaterializer }
import akka.actor.{ ActorSystem }
//import akka.http.scaladsl.server.{ Route, Directives, Directive0 }
//import akka.http.scaladsl.{ Http }
import com.typesafe.config.{ Config β TSConfig, ConfigFactory β TSConfigFactory }
import com.typesafe.scalalogging.{ StrictLogging }
//import isi.convertible._
//import isi.std.conversions._
import isi.{ ~~> }
/**
 * Application entry point and storage wiring for jleon.
 *
 * NOTE(review): throughout this object the character "β" appears where the
 * Scala arrow "⇒" would be expected (self-type alias, lambdas, function
 * types, by-name parameters). This looks like encoding damage in this copy;
 * confirm against the original source — the code is preserved verbatim here.
 */
object Main extends StrictLogging {
  app β

  /** Eagerly wires the actor system, materializer, config and storage factory. */
  final case class Main() {
    logger info "Creating Actor System"
    implicit val actorSystem: ActorSystem = ActorSystem("leon")
    logger info "Creating Stream Materializer"
    implicit val materializer: Materializer = ActorMaterializer()
    logger info "Loading Application Config"
    val config: TSConfig = TSConfigFactory.defaultApplication
    logger info "Acquiring File System"
    implicit val fileSystem = java.nio.file.FileSystems.getDefault
    logger info "Acquiring Execution Context"
    implicit val executionContext: ExecutionContext = materializer.executionContext
    logger info "Creating Storage Factory"
    val storageFactory = StorageFactory(config)
    // logger info "Importing Storage Components"
    // import storageFactory.{
    //   storageMap,
    //   storage
    // }
    //

    // Identity function that logs `msg` when the wrapped value arrives.
    private[this] def logFutureCompletion[T](msg: String): T β T =
      result β { logger info msg; result; }

    /** Terminates the actor system, logging on completion.
     *  Uses the global EC because the materializer's EC dies with the system. */
    def shutdown(): Future[akka.actor.Terminated] = {
      implicit val safeEc = ExecutionContext.Implicits.global
      logger info "Terminating Actor System"
      actorSystem
        .terminate()
        .map(logFutureCompletion(" [OK] Actor System Terminated"))(safeEc)
    }
  }

  // Type-lambda encoding: FactorySourceOf[T]#t[a] is "a convertible to T".
  type FactorySourceOf[T] = { type t[a] = a β T }

  /** Wrapper around a Typesafe Config that can resolve string keys to paths. */
  trait Config extends Any {
    type T <: TSConfig
    def self: T
    implicit def fileSystem: JFileSystem
    def path: String β JPath = (self getString _) andThen (fileSystem getPath _)
  }
  object Config {
    final type Aux[a <: TSConfig] = Config { type T = a }
    // Transparently unwrap back to the underlying Typesafe Config.
    implicit def toCore[T <: TSConfig](config: Config.Aux[T]): T = config.self
  }

  // Lifts a plain TSConfig into the Config wrapper, capturing the file system.
  implicit def TSConfigConfigFactory(implicit fs: JFileSystem): TSConfig β Config.Aux[TSConfig] =
    config β new Config {
      final type T = TSConfig
      val self: TSConfig = config
      val fileSystem: JFileSystem = fs
    }

  /** Minimal path-like abstraction: anything that can gain a file extension. */
  trait Path[T] extends Any {
    def self: T
    def addExt(filename: String): T
  }
  implicit class JPathPath(val self: JPath) extends AnyVal with Path[JPath] {
    def addExt(ext: String): JPath = self resolveSibling s"${self.getFileName}.$ext"
  }

  /** Maps a storage item name to its data, lock and failure-marker paths,
   *  all siblings of `base`. */
  final case class StorageMap(base: JPath) {
    final case class forItem(item: String) {
      val storage: JPath = base resolveSibling item
      val lock: JPath = storage addExt StorageMap.EXT_LOCK
      val failure: JPath = storage addExt StorageMap.EXT_FAILURE
    }
  }
  object StorageMap {
    final val EXT_LOCK = "lock"
    final val EXT_FAILURE = "failed"
  }

  /** Result ADT for lock acquisition; Locked/Failed double as exceptions. */
  trait StoragePackage {
    sealed trait LockResult
    object LockResult {
      final case class Acquired(channel: WritableByteChannel) extends LockResult
      final case class Locked(item: String, cause: FileAlreadyExistsException)
        extends Exception(s"item is locked: $item", cause) with LockResult
      final case class Failed(item: String)
        extends Exception(s"item is failed: $item") with LockResult
    }
  }

  /**
   * File-based locking for storage items. tryLock checks, in order:
   * a failure marker (Failed), a lock file it can create (treated as locked
   * by us — creation success is converted to a Failure so the chain falls
   * through), an existing lock (Locked), and finally opens the storage
   * channel (Acquired).
   */
  trait Storage {
    this: StoragePackage β
    import java.nio.file.{ Files, StandardOpenOption β oopt }
    type StorageMap <: app.StorageMap
    val storageMap: StorageMap
    private[this] type Map = storageMap.forItem
    private[this] object withMap {
      // Seed failure so the recoverWith chain below always starts.
      val nothing: Try[LockResult] = Failure(new Exception("nothing done yet"))
      def pf[T](t: β T): Any ~~> T = { case _ β t }
    }
    private[this] final case class withMap(map: Map) {
      import withMap.{ pf, nothing }
      val getLock: Try[LockResult] = Try {
        Files newByteChannel (map.lock, oopt.CREATE_NEW) close ()
      } flatMap { _ β
        Failure(new NoSuchFileException(map.lock.toString))
      } recover {
        case ex: FileAlreadyExistsException β LockResult.Locked(map.item, ex)
      }
      val getFailure: Try[LockResult] =
        if (Files exists map.failure)
          Success(LockResult.Failed(map.item))
        else
          Failure(new NoSuchFileException(map.failure.toString))
      val getStorage: Try[LockResult] = Try {
        LockResult.Acquired(Files newByteChannel map.storage)
      }
      val tryLock: Try[LockResult] =
        nothing recoverWith pf(getFailure) recoverWith pf(getLock) recoverWith pf(getStorage)
    }
    final val tryLock: String β Try[LockResult] =
      (storageMap forItem _) andThen withMap.apply andThen (_ tryLock)
  }

  /** Builds the storage layer from a Config (storage wiring commented out). */
  trait StorageFactory {
    type Config <: app.Config
    val config: Config
    // require(Option(config).isDefined)
    println(config)
    //
    // final val storageMap: StorageMap = StorageMap(
    //   base = config.fileSystem.getPath("lol") // config path StorageFactory.BASE_PATH
    // )
    //
    // final val storage: Storage = new Storage with StoragePackage {
    //   final type StorageMap = app.StorageMap
    //   final val storageMap: StorageMap = StorageFactory.this.storageMap
    // }
  }
  object StorageFactory {
    val BASE_PATH = "basePath"
    // Accepts anything convertible to a Config.Aux[T] (via FactorySourceOf).
    def apply[T <: TSConfig, S: FactorySourceOf[Config.Aux[T]]#t](configSource: S): StorageFactory =
      new {
        final val config: Config.Aux[T] = configSource
      } with StorageFactory {
        final type Config = Config.Aux[T]
      }
  }

  /** Starts the app, shuts it down, and waits up to 5 seconds for completion. */
  def main(args: Array[String]): Unit = {
    import ExecutionContext.Implicits.global
    val ready =
      Future {
        println("Hello this is leon")
      } map { _ β
        Main()
      } flatMap { main β
        main shutdown ()
      } map { _ β
        println("Byte byte leon")
      }
    Await.result(ready, 5 seconds)
  }
}
| mouchtaris/jleon | src/main/scala-2.12/gv/jleon3/Main.scala | Scala | mit | 6,507 |
// NOTE(review): appears to be a generated compiler crash-regression input
// (the pattern "(_* @ null)" is not valid Scala); presumably kept to assert
// the compiler reports an error instead of crashing. Intentionally left
// unchanged — do not "fix".
class i0 {
  val (_* @ null) = null
}
| som-snytt/dotty | tests/fuzzy/CCE-3f31ba00cea30842028ab580e5be0270046c682d.scala | Scala | apache-2.0 | 38 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE
* file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file
* to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package kafka.api
import java.util.concurrent._
import java.util.{Collection, Collections}
import kafka.admin.AdminClient
import kafka.server.KafkaConfig
import kafka.utils.{CoreUtils, Logging, ShutdownableThread, TestUtils}
import org.apache.kafka.clients.consumer._
import org.apache.kafka.clients.producer.{ProducerConfig, ProducerRecord}
import org.apache.kafka.common.TopicPartition
import org.junit.Assert._
import org.junit.{After, Before, Ignore, Test}
import scala.collection.JavaConverters._
/**
* Integration tests for the new consumer that cover basic usage as well as server failures
*/
/**
 * Integration tests for the new consumer that cover basic usage as well as server failures
 */
class ConsumerBounceTest extends IntegrationTestHarness with Logging {
  val producerCount = 1
  val consumerCount = 2
  val serverCount = 3

  val topic = "topic"
  val part = 0
  val tp = new TopicPartition(topic, part)

  // Time to process commit and leave group requests in tests when brokers are available
  val gracefulCloseTimeMs = 1000
  // Used both to bounce brokers on a delay and to run consumer.close() off-thread.
  val executor = Executors.newScheduledThreadPool(2)

  // configure the servers and clients
  this.serverConfig.setProperty(KafkaConfig.OffsetsTopicReplicationFactorProp, "3") // don't want to lose offset
  this.serverConfig.setProperty(KafkaConfig.OffsetsTopicPartitionsProp, "1")
  this.serverConfig.setProperty(KafkaConfig.GroupMinSessionTimeoutMsProp, "10") // set small enough session timeout
  this.serverConfig.setProperty(KafkaConfig.GroupInitialRebalanceDelayMsProp, "0")
  this.serverConfig.setProperty(KafkaConfig.UncleanLeaderElectionEnableProp, "true")
  this.serverConfig.setProperty(KafkaConfig.AutoCreateTopicsEnableProp, "false")
  this.producerConfig.setProperty(ProducerConfig.ACKS_CONFIG, "all")
  this.consumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "my-test")
  this.consumerConfig.setProperty(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, 4096.toString)
  this.consumerConfig.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "10000")
  this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "3000")
  this.consumerConfig.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")

  // Fixed ports so that bounced brokers come back on the same address.
  override def generateConfigs = {
    FixedPortTestUtils.createBrokerConfigs(serverCount, zkConnect, enableControlledShutdown = false)
      .map(KafkaConfig.fromProps(_, serverConfig))
  }

  @Before
  override def setUp() {
    super.setUp()

    // create the test topic with all the brokers as replicas
    createTopic(topic, 1, serverCount)
  }

  @After
  override def tearDown() {
    try {
      executor.shutdownNow()
      // Wait for any active tasks to terminate to ensure consumer is not closed while being used from another thread
      assertTrue("Executor did not terminate", executor.awaitTermination(5000, TimeUnit.MILLISECONDS))
    } finally {
      super.tearDown()
    }
  }

  @Test
  @Ignore // To be re-enabled once we can make it less flaky (KAFKA-4801)
  def testConsumptionWithBrokerFailures() = consumeWithBrokerFailures(10)

  /*
   * 1. Produce a bunch of messages
   * 2. Then consume the messages while killing and restarting brokers at random
   */
  def consumeWithBrokerFailures(numIters: Int) {
    val numRecords = 1000
    sendRecords(numRecords)
    this.producers.foreach(_.close)

    var consumed = 0L
    val consumer = this.consumers.head

    consumer.subscribe(Collections.singletonList(topic))

    val scheduler = new BounceBrokerScheduler(numIters)
    scheduler.start()

    while (scheduler.isRunning) {
      val records = consumer.poll(100).asScala
      assertEquals(Set(tp), consumer.assignment.asScala)

      // Offsets must arrive in order, with no gaps or duplicates.
      for (record <- records) {
        assertEquals(consumed, record.offset())
        consumed += 1
      }

      if (records.nonEmpty) {
        consumer.commitSync()
        assertEquals(consumer.position(tp), consumer.committed(tp).offset)

        // Wrap around to re-consume from the start once the log is exhausted.
        if (consumer.position(tp) == numRecords) {
          consumer.seekToBeginning(Collections.emptyList())
          consumed = 0
        }
      }
    }
    scheduler.shutdown()
  }

  @Test
  def testSeekAndCommitWithBrokerFailures() = seekAndCommitWithBrokerFailures(5)

  // Randomly seeks (to end, to a random offset) and commits while brokers bounce.
  def seekAndCommitWithBrokerFailures(numIters: Int) {
    val numRecords = 1000
    sendRecords(numRecords)
    this.producers.foreach(_.close)

    val consumer = this.consumers.head
    consumer.assign(Collections.singletonList(tp))
    consumer.seek(tp, 0)

    // wait until all the followers have synced the last HW with leader
    TestUtils.waitUntilTrue(() => servers.forall(server =>
      server.replicaManager.getReplica(tp).get.highWatermark.messageOffset == numRecords
    ), "Failed to update high watermark for followers after timeout")

    val scheduler = new BounceBrokerScheduler(numIters)
    scheduler.start()

    while(scheduler.isRunning) {
      val coin = TestUtils.random.nextInt(3)
      if (coin == 0) {
        info("Seeking to end of log")
        consumer.seekToEnd(Collections.emptyList())
        assertEquals(numRecords.toLong, consumer.position(tp))
      } else if (coin == 1) {
        val pos = TestUtils.random.nextInt(numRecords).toLong
        info("Seeking to " + pos)
        consumer.seek(tp, pos)
        assertEquals(pos, consumer.position(tp))
      } else if (coin == 2) {
        info("Committing offset.")
        consumer.commitSync()
        assertEquals(consumer.position(tp), consumer.committed(tp).offset)
      }
    }
  }

  // Subscribe before the topic exists; verify consumption starts once it is
  // created, and survives a full-cluster bounce.
  @Test
  def testSubscribeWhenTopicUnavailable() {
    val numRecords = 1000
    val newtopic = "newtopic"

    val consumer = this.consumers.head
    consumer.subscribe(Collections.singleton(newtopic))
    // Create the topic 2 seconds after the subscription.
    executor.schedule(new Runnable {
        def run() = TestUtils.createTopic(zkClient, newtopic, serverCount, serverCount, servers)
      }, 2, TimeUnit.SECONDS)
    consumer.poll(0)

    // Best-effort produce loop: retries failed sends until the deadline.
    def sendRecords(numRecords: Int, topic: String) {
      var remainingRecords = numRecords
      val endTimeMs = System.currentTimeMillis + 20000
      while (remainingRecords > 0 && System.currentTimeMillis < endTimeMs) {
        val futures = (0 until remainingRecords).map { i =>
          this.producers.head.send(new ProducerRecord(topic, part, i.toString.getBytes, i.toString.getBytes))
        }
        futures.map { future =>
          try {
            future.get
            remainingRecords -= 1
          } catch {
            case _: Exception =>
          }
        }
      }
      assertEquals(0, remainingRecords)
    }

    sendRecords(numRecords, newtopic)
    receiveRecords(consumer, numRecords, newtopic, 10000)

    servers.foreach(server => killBroker(server.config.brokerId))
    Thread.sleep(500)
    restartDeadBrokers()

    // Consume on a separate thread while producing on this one.
    val future = executor.submit(new Runnable {
      def run() = receiveRecords(consumer, numRecords, newtopic, 10000)
    })
    sendRecords(numRecords, newtopic)
    future.get
  }

  @Test
  def testClose() {
    val numRecords = 10
    sendRecords(numRecords)

    checkCloseGoodPath(numRecords, "group1")
    checkCloseWithCoordinatorFailure(numRecords, "group2", "group3")
    checkCloseWithClusterFailure(numRecords, "group4", "group5")
  }

  /**
   * Consumer is closed while cluster is healthy. Consumer should complete pending offset commits
   * and leave group. New consumer instance should be able join group and start consuming from
   * last committed offset.
   */
  private def checkCloseGoodPath(numRecords: Int, groupId: String) {
    val consumer = createConsumerAndReceive(groupId, false, numRecords)
    val future = submitCloseAndValidate(consumer, Long.MaxValue, None, Some(gracefulCloseTimeMs))
    future.get
    checkClosedState(groupId, numRecords)
  }

  /**
   * Consumer closed while coordinator is unavailable. Close of consumers using group
   * management should complete after commit attempt even though commits fail due to rebalance.
   * Close of consumers using manual assignment should complete with successful commits since a
   * broker is available.
   */
  private def checkCloseWithCoordinatorFailure(numRecords: Int, dynamicGroup: String, manualGroup: String) {
    val consumer1 = createConsumerAndReceive(dynamicGroup, false, numRecords)
    val consumer2 = createConsumerAndReceive(manualGroup, true, numRecords)

    val adminClient = AdminClient.createSimplePlaintext(this.brokerList)
    killBroker(adminClient.findCoordinator(dynamicGroup).id)
    killBroker(adminClient.findCoordinator(manualGroup).id)

    val future1 = submitCloseAndValidate(consumer1, Long.MaxValue, None, Some(gracefulCloseTimeMs))
    val future2 = submitCloseAndValidate(consumer2, Long.MaxValue, None, Some(gracefulCloseTimeMs))
    future1.get
    future2.get

    restartDeadBrokers()
    // The dynamic group could not commit (coordinator was down), so it
    // restarts from 0; the manual group committed against a live broker.
    checkClosedState(dynamicGroup, 0)
    checkClosedState(manualGroup, numRecords)
    adminClient.close()
  }

  /**
   * Consumer is closed while all brokers are unavailable. Cannot rebalance or commit offsets since
   * there is no coordinator, but close should timeout and return. If close is invoked with a very
   * large timeout, close should timeout after request timeout.
   */
  private def checkCloseWithClusterFailure(numRecords: Int, group1: String, group2: String) {
    val consumer1 = createConsumerAndReceive(group1, false, numRecords)

    val requestTimeout = 6000
    this.consumerConfig.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "5000")
    this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "1000")
    this.consumerConfig.setProperty(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, requestTimeout.toString)
    val consumer2 = createConsumerAndReceive(group2, true, numRecords)

    servers.foreach(server => killBroker(server.config.brokerId))
    val closeTimeout = 2000
    val future1 = submitCloseAndValidate(consumer1, closeTimeout, Some(closeTimeout), Some(closeTimeout))
    val future2 = submitCloseAndValidate(consumer2, Long.MaxValue, Some(requestTimeout), Some(requestTimeout))
    future1.get
    future2.get
  }

  /**
   * Consumer is closed during rebalance. Close should leave group and close
   * immediately if rebalance is in progress. If brokers are not available,
   * close should terminate immediately without sending leave group.
   */
  @Test
  def testCloseDuringRebalance() {
    val topic = "closetest"
    createTopic(topic, 10, serverCount)
    this.consumerConfig.setProperty(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, "60000")
    this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "1000")
    this.consumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")
    checkCloseDuringRebalance("group1", topic, executor, true)
  }

  private def checkCloseDuringRebalance(groupId: String, topic: String, executor: ExecutorService, brokersAvailableDuringClose: Boolean) {

    // Subscribes and polls once on the executor; the optional semaphore is
    // released when the consumer's partitions are revoked.
    def subscribeAndPoll(consumer: KafkaConsumer[Array[Byte], Array[Byte]], revokeSemaphore: Option[Semaphore] = None): Future[Any] = {
      executor.submit(CoreUtils.runnable {
          consumer.subscribe(Collections.singletonList(topic), new ConsumerRebalanceListener {
            def onPartitionsAssigned(partitions: Collection[TopicPartition]) {
            }
            def onPartitionsRevoked(partitions: Collection[TopicPartition]) {
              revokeSemaphore.foreach(s => s.release())
            }
          })
          consumer.poll(0)
        }, 0)
    }

    // Keeps the other group members polling until `future` completes, so the
    // rebalance can finish.
    def waitForRebalance(timeoutMs: Long, future: Future[Any], otherConsumers: KafkaConsumer[Array[Byte], Array[Byte]]*) {
      val startMs = System.currentTimeMillis
      while (System.currentTimeMillis < startMs + timeoutMs && !future.isDone)
          otherConsumers.foreach(consumer => consumer.poll(100))
      assertTrue("Rebalance did not complete in time", future.isDone)
    }

    // Adds a new member whose join triggers a rebalance that stays pending
    // (the other members are not polling).
    def createConsumerToRebalance(): Future[Any] = {
      val consumer = createConsumer(groupId)
      val rebalanceSemaphore = new Semaphore(0)
      val future = subscribeAndPoll(consumer, Some(rebalanceSemaphore))
      // Wait for consumer to poll and trigger rebalance
      assertTrue("Rebalance not triggered", rebalanceSemaphore.tryAcquire(2000, TimeUnit.MILLISECONDS))
      // Rebalance is blocked by other consumers not polling
      assertFalse("Rebalance completed too early", future.isDone)
      future
    }

    val consumer1 = createConsumer(groupId)
    waitForRebalance(2000, subscribeAndPoll(consumer1))
    val consumer2 = createConsumer(groupId)
    waitForRebalance(2000, subscribeAndPoll(consumer2), consumer1)
    val rebalanceFuture = createConsumerToRebalance()

    // consumer1 should leave group and close immediately even though rebalance is in progress
    val closeFuture1 = submitCloseAndValidate(consumer1, Long.MaxValue, None, Some(gracefulCloseTimeMs))

    // Rebalance should complete without waiting for consumer1 to timeout since consumer1 has left the group
    waitForRebalance(2000, rebalanceFuture, consumer2)

    // Trigger another rebalance and shutdown all brokers
    // This consumer poll() doesn't complete and `tearDown` shuts down the executor and closes the consumer
    createConsumerToRebalance()
    servers.foreach(server => killBroker(server.config.brokerId))

    // consumer2 should close immediately without LeaveGroup request since there are no brokers available
    val closeFuture2 = submitCloseAndValidate(consumer2, Long.MaxValue, None, Some(0))

    // Ensure futures complete to avoid concurrent shutdown attempt during test cleanup
    closeFuture1.get(2000, TimeUnit.MILLISECONDS)
    closeFuture2.get(2000, TimeUnit.MILLISECONDS)
  }

  // Creates a consumer in the given group and registers it for cleanup.
  private def createConsumer(groupId: String) : KafkaConsumer[Array[Byte], Array[Byte]] = {
    this.consumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId)
    val consumer = createNewConsumer
    consumers += consumer
    consumer
  }

  // Creates a consumer (subscribed or manually assigned) and consumes
  // `numRecords` before returning it.
  private def createConsumerAndReceive(groupId: String, manualAssign: Boolean, numRecords: Int) : KafkaConsumer[Array[Byte], Array[Byte]] = {
    val consumer = createConsumer(groupId)
    if (manualAssign)
      consumer.assign(Collections.singleton(tp))
    else
      consumer.subscribe(Collections.singleton(topic))
    receiveRecords(consumer, numRecords)
    consumer
  }

  // Polls until `numRecords` have been received or the timeout elapses.
  private def receiveRecords(consumer: KafkaConsumer[Array[Byte], Array[Byte]], numRecords: Int, topic: String = this.topic, timeoutMs: Long = 60000) {
    var received = 0L
    val endTimeMs = System.currentTimeMillis + timeoutMs
    while (received < numRecords && System.currentTimeMillis < endTimeMs)
      received += consumer.poll(1000).count()
    assertEquals(numRecords, received)
  }

  // Closes the consumer on the executor and asserts the close duration falls
  // within [minCloseTimeMs, maxCloseTimeMs + grace].
  private def submitCloseAndValidate(consumer: KafkaConsumer[Array[Byte], Array[Byte]],
      closeTimeoutMs: Long, minCloseTimeMs: Option[Long], maxCloseTimeMs: Option[Long]): Future[Any] = {
    executor.submit(CoreUtils.runnable {
      val closeGraceTimeMs = 2000
      val startNanos = System.nanoTime
      info("Closing consumer with timeout " + closeTimeoutMs + " ms.")
      consumer.close(closeTimeoutMs, TimeUnit.MILLISECONDS)
      val timeTakenMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime - startNanos)
      maxCloseTimeMs.foreach { ms =>
        assertTrue("Close took too long " + timeTakenMs, timeTakenMs < ms + closeGraceTimeMs)
      }
      minCloseTimeMs.foreach { ms =>
        assertTrue("Close finished too quickly " + timeTakenMs, timeTakenMs >= ms)
      }
      info("consumer.close() completed in " + timeTakenMs + " ms.")
    }, 0)
  }

  private def checkClosedState(groupId: String, committedRecords: Int) {
    // Check that close was graceful with offsets committed and leave group sent.
    // New instance of consumer should be assigned partitions immediately and should see committed offsets.
    val assignSemaphore = new Semaphore(0)
    val consumer = createConsumer(groupId)
    consumer.subscribe(Collections.singletonList(topic),  new ConsumerRebalanceListener {
      def onPartitionsAssigned(partitions: Collection[TopicPartition]) {
        assignSemaphore.release()
      }
      def onPartitionsRevoked(partitions: Collection[TopicPartition]) {
      }
    })
    consumer.poll(3000)
    assertTrue("Assigment did not complete on time", assignSemaphore.tryAcquire(1, TimeUnit.SECONDS))
    if (committedRecords > 0)
      assertEquals(committedRecords, consumer.committed(tp).offset)
    consumer.close()
  }

  // Background thread that kills and restarts a random broker `numIters`
  // times, pausing 500ms around each bounce.
  private class BounceBrokerScheduler(val numIters: Int) extends ShutdownableThread("daemon-bounce-broker", false)
  {
    var iter: Int = 0

    override def doWork(): Unit = {
      killRandomBroker()
      Thread.sleep(500)
      restartDeadBrokers()

      iter += 1
      if (iter == numIters)
        initiateShutdown()
      else
        Thread.sleep(500)
    }
  }

  // Produces `numRecords` synchronously (fails the test on any send error).
  private def sendRecords(numRecords: Int, topic: String = this.topic) {
    val futures = (0 until numRecords).map { i =>
      this.producers.head.send(new ProducerRecord(topic, part, i.toString.getBytes, i.toString.getBytes))
    }
    futures.map(_.get)
  }

}
| MyPureCloud/kafka | core/src/test/scala/integration/kafka/api/ConsumerBounceTest.scala | Scala | apache-2.0 | 17,717 |
/*
biojava-adam BioJava and ADAM integration.
Copyright (c) 2017-2022 held jointly by the individual authors.
This library is free software; you can redistribute it and/or modify it
under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation; either version 3 of the License, or (at
your option) any later version.
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; with out even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation,
Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
> http://www.fsf.org/licensing/licenses/lgpl.html
> http://www.opensource.org/licenses/lgpl-license.php
*/
import org.slf4j.LoggerFactory
import org.apache.log4j.{ Level, Logger }
import org.biojava.nbio.adam.BiojavaAdamContext

// Script logger, plus log4j level tuning for this script and BioJava.
val logger = LoggerFactory.getLogger("loadFastaRna")
Logger.getLogger("loadFastaRna").setLevel(Level.INFO)
Logger.getLogger("org.biojava").setLevel(Level.INFO)

// `sc` is presumably the SparkContext provided by the enclosing shell.
val bac = BiojavaAdamContext(sc)

// Both paths are required; bail out early when either is missing.
val inputPath = Option(System.getenv("INPUT"))
val outputPath = Option(System.getenv("OUTPUT"))
if (!(inputPath.isDefined && outputPath.isDefined)) {
  logger.error("INPUT and OUTPUT environment variables are required")
  System.exit(1)
}

// Load the RNA sequences and write them back out as a single file.
val sequences = bac.loadBiojavaFastaRna(inputPath.get)

logger.info("Saving RNA sequences to output path %s ...".format(outputPath.get))
sequences.save(outputPath.get, asSingleFile = true, disableFastConcat = false)

logger.info("Done")
System.exit(0)
| heuermh/biojava-adam | scripts/loadFastaRna.scala | Scala | lgpl-3.0 | 1,784 |
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.tools.status
import com.beust.jcommander.Parameters
import org.locationtech.geomesa.tools.Command
/**
* Note: this class is a placeholder for the 'classpath' function implemented in the 'geomesa-*' script, to get it
* to show up in the JCommander help
*/
/**
 * Placeholder command: the actual classpath printing is implemented in the
 * 'geomesa-*' launcher script; this class only makes the command show up in
 * the JCommander help output.
 */
class ClasspathCommand extends Command {
  override val name: String = "classpath"
  override val params: ClasspathParameters = new ClasspathParameters
  // Deliberately a no-op — the shell script does the real work.
  override def execute(): Unit = {}
}
// Empty JCommander parameter holder; the annotation supplies the help text.
@Parameters(commandDescription = "Display the GeoMesa classpath")
class ClasspathParameters {}
| aheyne/geomesa | geomesa-tools/src/main/scala/org/locationtech/geomesa/tools/status/ClasspathCommand.scala | Scala | apache-2.0 | 1,019 |
/*
* Copyright 2015-2016 IBM Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package whisk.core.container
import akka.event.Logging.ErrorLevel
import whisk.common.{ Logging, LoggingMarkers, SimpleExec, TransactionId }
/** Thin wrappers around the docker-runc CLI used by the invoker. */
object RuncUtils {

  /** Lists the containers known to runc. */
  def list()(implicit transid: TransactionId, logging: Logging): (Int, String) =
    runRuncCmd(false, Seq("list"))

  /** Pauses the container with the given id. */
  def pause(id: ContainerIdentifier)(implicit transid: TransactionId, logging: Logging): (Int, String) =
    runRuncCmd(false, Seq("pause", id.toString))

  /** Resumes the container with the given id. */
  def resume(id: ContainerIdentifier)(implicit transid: TransactionId, logging: Logging): (Int, String) =
    runRuncCmd(false, Seq("resume", id.toString))

  /**
   * Synchronously runs the given runc command returning stdout if successful.
   *
   * @param skipLogError when true, failures are recorded without the captured output
   * @param args the runc subcommand and its arguments
   * @return (exit code, trimmed output); -1 with an error message on exceptions
   */
  def runRuncCmd(skipLogError: Boolean, args: Seq[String])(implicit transid: TransactionId, logging: Logging): (Int, String) = {
    val start = transid.started(this, LoggingMarkers.INVOKER_RUNC_CMD(args(0)))
    try {
      val fullCmd = getRuncCmd() ++ args
      val (stdout, stderr, exitCode) = SimpleExec.syncRunCmd(fullCmd)
      exitCode match {
        case 0 =>
          transid.finished(this, start)
          (exitCode, stdout.trim)
        case failureCode =>
          if (skipLogError) {
            transid.failed(this, start)
          } else {
            transid.failed(this, start, s"stdout:\\n$stdout\\nstderr:\\n$stderr", ErrorLevel)
          }
          (failureCode, (stdout + stderr).trim)
      }
    } catch {
      case t: Throwable =>
        val errorMsg = "error: " + t.getMessage
        transid.failed(this, start, errorMsg, ErrorLevel)
        (-1, errorMsg)
    }
  }

  /** True when a runc invocation exited with code 0. */
  def isSuccessful(result: (Int, String)): Boolean = result._1 == 0

  /*
   * Any global flags are added here.
   */
  private def getRuncCmd(): Seq[String] = Seq("/usr/bin/docker-runc")
}
| CrowdFlower/incubator-openwhisk | core/invoker/src/main/scala/whisk/core/container/RuncUtils.scala | Scala | apache-2.0 | 2,667 |
package scala.scalanative
package compiler
package pass
import compiler.analysis.ClassHierarchy._
import compiler.analysis.ClassHierarchyExtractors._
import nir._, Inst.Let
/** Translates high-level object-oriented method calls into
* low-level dispatch based on vtables for classes
* and dispatch tables for interfaces.
*/
/** Translates high-level object-oriented method calls into
 *  low-level dispatch based on vtables for classes
 *  and dispatch tables for interfaces.
 */
class MethodLowering(implicit fresh: Fresh, top: Top) extends Pass {
  override def preInst = {
    // Virtual call on a class: load the object's type pointer, index into the
    // vtable embedded in the type struct, then load the method pointer.
    case Let(n, Op.Method(obj, MethodRef(cls: Class, meth)))
        if meth.isVirtual =>
      val tpePtr     = Val.Local(fresh(), Type.Ptr)
      val methPtrPtr = Val.Local(fresh(), Type.Ptr)
      Seq(
        Let(tpePtr.name, Op.Load(Type.Ptr, obj)),
        Let(methPtrPtr.name,
            Op.Elem(cls.typeStruct,
                    tpePtr,
                    Seq(Val.I32(0),
                        Val.I32(2), // index of vtable in type struct
                        Val.I32(meth.vindex)))),
        Let(n, Op.Load(Type.Ptr, methPtrPtr))
      )

    // Static call on a class: no indirection, resolve straight to the global symbol.
    case Let(n, Op.Method(obj, MethodRef(_: Class, meth))) if meth.isStatic =>
      Seq(Let(n, Op.Copy(Val.Global(meth.name, Type.Ptr))))

    // Trait (interface) call: read the runtime type id from the type struct and
    // index the global dispatch table with (type id, method id).
    case Let(n, Op.Method(obj, MethodRef(_: Trait, meth))) =>
      val tpePtr     = Val.Local(fresh(), Type.Ptr)
      val idPtr      = Val.Local(fresh(), Type.Ptr)
      val typeId     = Val.Local(fresh(), Type.I32)
      val methPtrPtr = Val.Local(fresh(), Type.Ptr)
      Seq(
        Let(tpePtr.name, Op.Load(Type.Ptr, obj)),
        Let(idPtr.name,
            Op.Elem(Rt.Type, tpePtr, Seq(Val.I32(0), Val.I32(0)))),
        Let(typeId.name, Op.Load(Type.I32, idPtr)),
        Let(methPtrPtr.name,
            Op.Elem(top.dispatchTy,
                    top.dispatchVal,
                    Seq(Val.I32(0), typeId, Val.I32(meth.id)))),
        Let(n, Op.Load(Type.Ptr, methPtrPtr))
      )
  }
}
/** Companion: constructs the pass from the compiler context's fresh-name supply and top scope. */
object MethodLowering extends PassCompanion {
  def apply(ctx: Ctx): MethodLowering = new MethodLowering()(ctx.fresh, ctx.top)
}
| cedricviaccoz/scala-native | tools/src/main/scala/scala/scalanative/compiler/pass/MethodLowering.scala | Scala | bsd-3-clause | 1,957 |
Subsets and Splits
Filtered Scala Code Snippets
This query retrieves a filtered sample of Scala code snippets matching specific criteria, offering a quick overview of the dataset's contents.