| code (string) | repo_name (string) | path (string) | language (string, 1 class) | license (string, 15 classes) | size (int64) |
|---|---|---|---|---|---|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import java.io.File
import java.util.concurrent.TimeUnit
import com.google.common.base.Charsets._
import com.google.common.io.Files
import org.apache.hadoop.io.{BytesWritable, LongWritable, Text}
import org.apache.hadoop.mapred.TextInputFormat
import org.apache.hadoop.mapreduce.lib.input.{TextInputFormat => NewTextInputFormat}
import org.apache.spark.util.Utils
import scala.concurrent.Await
import scala.concurrent.duration.Duration
import org.scalatest.Matchers._
class SparkContextSuite extends SparkFunSuite with LocalSparkContext {
test("Only one SparkContext may be active at a time") {
// Regression test for SPARK-4180
val conf = new SparkConf().setAppName("test").setMaster("local")
.set("spark.driver.allowMultipleContexts", "false")
sc = new SparkContext(conf)
// A SparkContext is already running, so we shouldn't be able to create a second one
intercept[SparkException] { new SparkContext(conf) }
// After stopping the running context, we should be able to create a new one
resetSparkContext()
sc = new SparkContext(conf)
}
test("Can still construct a new SparkContext after failing to construct a previous one") {
val conf = new SparkConf().set("spark.driver.allowMultipleContexts", "false")
// This is an invalid configuration (no app name or master URL)
intercept[SparkException] {
new SparkContext(conf)
}
// Even though those earlier calls failed, we should still be able to create a new context
sc = new SparkContext(conf.setMaster("local").setAppName("test"))
}
test("Check for multiple SparkContexts can be disabled via undocumented debug option") {
var secondSparkContext: SparkContext = null
try {
val conf = new SparkConf().setAppName("test").setMaster("local")
.set("spark.driver.allowMultipleContexts", "true")
sc = new SparkContext(conf)
secondSparkContext = new SparkContext(conf)
} finally {
Option(secondSparkContext).foreach(_.stop())
}
}
test("Test getOrCreate") {
var sc2: SparkContext = null
SparkContext.clearActiveContext()
val conf = new SparkConf().setAppName("test").setMaster("local")
sc = SparkContext.getOrCreate(conf)
assert(sc.getConf.get("spark.app.name").equals("test"))
sc2 = SparkContext.getOrCreate(new SparkConf().setAppName("test2").setMaster("local"))
assert(sc2.getConf.get("spark.app.name").equals("test"))
assert(sc === sc2)
assert(sc eq sc2)
// Try creating second context to confirm that it's still possible, if desired
sc2 = new SparkContext(new SparkConf().setAppName("test3").setMaster("local")
.set("spark.driver.allowMultipleContexts", "true"))
sc2.stop()
}
test("BytesWritable implicit conversion is correct") {
// Regression test for SPARK-3121
val bytesWritable = new BytesWritable()
val inputArray = (1 to 10).map(_.toByte).toArray
bytesWritable.set(inputArray, 0, 10)
bytesWritable.set(inputArray, 0, 5)
val converter = WritableConverter.bytesWritableConverter()
val byteArray = converter.convert(bytesWritable)
assert(byteArray.length === 5)
bytesWritable.set(inputArray, 0, 0)
val byteArray2 = converter.convert(bytesWritable)
assert(byteArray2.length === 0)
}
test("addFile works") {
val dir = Utils.createTempDir()
val file1 = File.createTempFile("someprefix1", "somesuffix1", dir)
val absolutePath1 = file1.getAbsolutePath
val file2 = File.createTempFile("someprefix2", "somesuffix2", dir)
val relativePath = file2.getParent + "/../" + file2.getParentFile.getName + "/" + file2.getName
val absolutePath2 = file2.getAbsolutePath
try {
Files.write("somewords1", file1, UTF_8)
Files.write("somewords2", file2, UTF_8)
val length1 = file1.length()
val length2 = file2.length()
sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local"))
sc.addFile(file1.getAbsolutePath)
sc.addFile(relativePath)
sc.parallelize(Array(1), 1).map(x => {
val gotten1 = new File(SparkFiles.get(file1.getName))
val gotten2 = new File(SparkFiles.get(file2.getName))
if (!gotten1.exists()) {
throw new SparkException("file doesn't exist : " + absolutePath1)
}
if (!gotten2.exists()) {
throw new SparkException("file doesn't exist : " + absolutePath2)
}
if (length1 != gotten1.length()) {
throw new SparkException(
s"file has different length $length1 than added file ${gotten1.length()} : " +
absolutePath1)
}
if (length2 != gotten2.length()) {
throw new SparkException(
s"file has different length $length2 than added file ${gotten2.length()} : " +
absolutePath2)
}
if (absolutePath1 == gotten1.getAbsolutePath) {
throw new SparkException("file should have been copied :" + absolutePath1)
}
if (absolutePath2 == gotten2.getAbsolutePath) {
throw new SparkException("file should have been copied : " + absolutePath2)
}
x
}).count()
} finally {
sc.stop()
}
}
test("addFile recursive works") {
val pluto = Utils.createTempDir()
val neptune = Utils.createTempDir(pluto.getAbsolutePath)
val saturn = Utils.createTempDir(neptune.getAbsolutePath)
val alien1 = File.createTempFile("alien", "1", neptune)
val alien2 = File.createTempFile("alien", "2", saturn)
try {
sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local"))
sc.addFile(neptune.getAbsolutePath, true)
sc.parallelize(Array(1), 1).map(x => {
val sep = File.separator
if (!new File(SparkFiles.get(neptune.getName + sep + alien1.getName)).exists()) {
throw new SparkException("can't access file under root added directory")
}
if (!new File(SparkFiles.get(neptune.getName + sep + saturn.getName + sep + alien2.getName))
.exists()) {
throw new SparkException("can't access file in nested directory")
}
if (new File(SparkFiles.get(pluto.getName + sep + neptune.getName + sep + alien1.getName))
.exists()) {
throw new SparkException("file exists that shouldn't")
}
x
}).count()
} finally {
sc.stop()
}
}
test("addFile recursive can't add directories by default") {
val dir = Utils.createTempDir()
try {
sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local"))
intercept[SparkException] {
sc.addFile(dir.getAbsolutePath)
}
} finally {
sc.stop()
}
}
test("Cancelling job group should not cause SparkContext to shutdown (SPARK-6414)") {
try {
sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local"))
val future = sc.parallelize(Seq(0)).foreachAsync(_ => {Thread.sleep(1000L)})
sc.cancelJobGroup("nonExistGroupId")
Await.ready(future, Duration(2, TimeUnit.SECONDS))
// In SPARK-6414, sc.cancelJobGroup will cause NullPointerException and cause
// SparkContext to shutdown, so the following assertion will fail.
assert(sc.parallelize(1 to 10).count() == 10L)
} finally {
sc.stop()
}
}
test("Comma separated paths for newAPIHadoopFile/wholeTextFiles/binaryFiles (SPARK-7155)") {
// Regression test for SPARK-7155
// dir1 and dir2 are used for wholeTextFiles and binaryFiles
val dir1 = Utils.createTempDir()
val dir2 = Utils.createTempDir()
val dirpath1 = dir1.getAbsolutePath
val dirpath2 = dir2.getAbsolutePath
// file1 and file2 are placed inside dir1, they are also used for
// textFile, hadoopFile, and newAPIHadoopFile
// file3, file4 and file5 are placed inside dir2, they are used for
// textFile, hadoopFile, and newAPIHadoopFile as well
val file1 = new File(dir1, "part-00000")
val file2 = new File(dir1, "part-00001")
val file3 = new File(dir2, "part-00000")
val file4 = new File(dir2, "part-00001")
val file5 = new File(dir2, "part-00002")
val filepath1 = file1.getAbsolutePath
val filepath2 = file2.getAbsolutePath
val filepath3 = file3.getAbsolutePath
val filepath4 = file4.getAbsolutePath
val filepath5 = file5.getAbsolutePath
try {
// Create 5 text files.
Files.write("someline1 in file1\nsomeline2 in file1\nsomeline3 in file1", file1, UTF_8)
Files.write("someline1 in file2\nsomeline2 in file2", file2, UTF_8)
Files.write("someline1 in file3", file3, UTF_8)
Files.write("someline1 in file4\nsomeline2 in file4", file4, UTF_8)
Files.write("someline1 in file2\nsomeline2 in file5", file5, UTF_8)
sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local"))
// Test textFile, hadoopFile, and newAPIHadoopFile for file1 and file2
assert(sc.textFile(filepath1 + "," + filepath2).count() == 5L)
assert(sc.hadoopFile(filepath1 + "," + filepath2,
classOf[TextInputFormat], classOf[LongWritable], classOf[Text]).count() == 5L)
assert(sc.newAPIHadoopFile(filepath1 + "," + filepath2,
classOf[NewTextInputFormat], classOf[LongWritable], classOf[Text]).count() == 5L)
// Test textFile, hadoopFile, and newAPIHadoopFile for file3, file4, and file5
assert(sc.textFile(filepath3 + "," + filepath4 + "," + filepath5).count() == 5L)
assert(sc.hadoopFile(filepath3 + "," + filepath4 + "," + filepath5,
classOf[TextInputFormat], classOf[LongWritable], classOf[Text]).count() == 5L)
assert(sc.newAPIHadoopFile(filepath3 + "," + filepath4 + "," + filepath5,
classOf[NewTextInputFormat], classOf[LongWritable], classOf[Text]).count() == 5L)
// Test wholeTextFiles, and binaryFiles for dir1 and dir2
assert(sc.wholeTextFiles(dirpath1 + "," + dirpath2).count() == 5L)
assert(sc.binaryFiles(dirpath1 + "," + dirpath2).count() == 5L)
} finally {
sc.stop()
}
}
test("calling multiple sc.stop() must not throw any exception") {
noException should be thrownBy {
sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local"))
val cnt = sc.parallelize(1 to 4).count()
sc.cancelAllJobs()
sc.stop()
// call stop second time
sc.stop()
}
}
test("No exception when both num-executors and dynamic allocation set.") {
noException should be thrownBy {
sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local")
.set("spark.dynamicAllocation.enabled", "true").set("spark.executor.instances", "6"))
assert(sc.executorAllocationManager.isEmpty)
assert(sc.getConf.getInt("spark.executor.instances", 0) === 6)
}
}
}
| ArvinDevel/onlineAggregationOnSparkV2 | core/src/test/scala/org/apache/spark/SparkContextSuite.scala | Scala | apache-2.0 | 11,678 |
package is.hail.utils
import scala.reflect.ClassTag
final class MissingArrayBuilder[@specialized T](initialCapacity: Int)(implicit tct: ClassTag[T]) {
private var b: Array[T] = new Array[T](initialCapacity)
private var missing: Array[Boolean] = new Array[Boolean](initialCapacity)
private var size_ : Int = 0
def this()(implicit tct: ClassTag[T]) = this(BoxedArrayBuilder.defaultInitialCapacity)
def size: Int = size_
def length: Int = size_
def isEmpty: Boolean = size_ == 0
def apply(i: Int): T = {
require(i >= 0 && i < size)
b(i)
}
def update(i: Int, x: T) {
require(i >= 0 && i < size)
b(i) = x
}
def ensureCapacity(n: Int) {
if (b.length < n) {
val newCapacity = (b.length * 2).max(n)
val newb = new Array[T](newCapacity)
Array.copy(b, 0, newb, 0, size_)
b = newb
val newmissing = new Array[Boolean](newCapacity)
Array.copy(missing, 0, newmissing, 0, size_)
missing = newmissing
}
}
def clear(): Unit = {
size_ = 0
}
def +=(x: T) {
ensureCapacity(size_ + 1)
b(size_) = x
missing(size_) = false
size_ += 1
}
def ++=(s: Seq[T]): Unit = s.foreach(x => this += x)
def ++=(a: Array[T]): Unit = ++=(a, a.length)
def ++=(a: Array[T], length: Int) {
require(length >= 0 && length <= a.length)
ensureCapacity(size_ + length)
System.arraycopy(a, 0, b, size_, length)
var i = 0
while (i < length) {
missing(size_ + i) = false
i += 1
}
size_ += length
}
def underlying(): Array[T] = b
def isMissing(i: Int): Boolean = {
require(i >= 0 && i < size)
missing(i)
}
def setMissing(i: Int, m: Boolean): Unit = {
require(i >= 0 && i < size)
missing(i) = m
}
def addMissing() {
ensureCapacity(size_ + 1)
missing(size_) = true
size_ += 1
}
}
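// Illustrative usage sketch (not from the original file): a minimal example of how the
// builder above tracks values and missingness side by side. The object name is
// hypothetical; it only relies on the class defined in this file and on
// BoxedArrayBuilder.defaultInitialCapacity, which the secondary constructor already uses.
object MissingArrayBuilderExample {
  def main(args: Array[String]): Unit = {
    val b = new MissingArrayBuilder[Int]()
    b += 1            // slot 0: present
    b.addMissing()    // slot 1: flagged missing, value slot left at the array default
    b += 3            // slot 2: present
    println(b.length)        // 3
    println(b.isMissing(1))  // true
    println(b(0) + b(2))     // 4
  }
}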
| hail-is/hail | hail/src/main/scala/is/hail/utils/MissingArrayBuilder.scala | Scala | mit | 1,855 |
package ingress.submission
import org.specs2.mock.Mockito
import org.specs2.mutable.Specification
import com.rabbitmq.client.{QueueingConsumer, Channel, Connection}
import play.api.libs.ws.WS
import play.api.test.{FakeApplication, FakeRequest}
import play.api.test.Helpers._
import com.rabbitmq.client.AMQP.BasicProperties
import app.ConfigProperties
import utils.WithApplication
import scala.concurrent.{Await, TimeoutException, ExecutionContext, Future}
import ExecutionContext.Implicits.global
import submission.SubmissionService
import submission.messaging._
import scala.concurrent.duration._
import app.ConfigProperties._
import scala.xml.XML
class SubmissionServiceIntegrationSpec extends Specification with Mockito{
"Ingress Service" should {
"Successfully publish a message into a live broker" in new WithServerConfig("queue.name" -> "IngresServiceIntegration1","env.name"->"Test") {
val queueName = TestUtils.declareQueue
val timestamp = "2013-10-23"
val endpoint = s"http://localhost:$port/submission"
val xml = XML.load (getClass getResourceAsStream "/ValidXMLWithRSASignature.xml")
val response = Await.result(WS.url(endpoint).post(<request>{timestamp}{xml}</request>),DurationInt(4).seconds)
try{
response.status mustEqual OK
}finally{
TestUtils.consumeMessage(queueName) must contain(timestamp)
}
}
"Failure for exceeded message capacity" in new WithApplication {
val service:SubmissionService = new SubmissionService {
override def messagingService: MessageSendingService = {
new MessageSenderImpl {
override def getQueueName:String = "IngressServiceIntegrationSpec_2"
}
}
}
val queueName = service.messagingService.getQueueName
val conn: Connection = ConnectionManager.factory.newConnection()
var channel: Channel = conn.createChannel()
val declareOk = channel.queueDeclare(queueName,true,false,false,null)
channel.confirmSelect()
for(i <- 0 to getIntProperty("rabbit.messages.max")) {
channel.basicPublish("",queueName,new BasicProperties().builder().deliveryMode(2).build(),("Message number "+i).getBytes)
channel.waitForConfirms()
}
channel.close()
val timestamp = "2013-10-24"
val request = FakeRequest().withXmlBody(<request>{timestamp}</request>)
try{
val response = Future(service.xmlProcessing(request))
status(response) mustEqual SERVICE_UNAVAILABLE
}finally {
channel = conn.createChannel()
channel.queuePurge(queueName)
channel.queueDelete(queueName)
channel.close()
conn.close()
}
}
"Failure because connection fails" in new WithApplication{
ConnectionManager.factory.setUri("amqp://nonexistinghost")
val service:SubmissionService = new SubmissionService {
override def messagingService: MessageSendingService = {
new MessageSenderImpl {
override def getQueueName:String = "IngressServiceIntegrationSpec_3"
}
}
}
val request = FakeRequest().withXmlBody(<request></request>)
try {
val response = Future(service.xmlProcessing(request))
status(response) mustEqual SERVICE_UNAVAILABLE
ConnectionManager.factory.setUri(ConnectionManager.readUri)
} catch {
case e: TimeoutException => success
case _: Throwable => failure
}
}
"Failure by createChannel throwing exceptions" in new WithApplication {
val service:SubmissionService = new SubmissionService {
override def messagingService: MessageSendingService = new MessageSenderImpl {
override def getQueueName:String = "IngressServiceIntegrationSpec_4"
override protected def createChannel(connection: Connection): Channel = {
throw new Exception("This is a test thrown exception")
}
}
}
val request = FakeRequest().withXmlBody(<request>{"2013-10-24"}</request>)
try{
val response = Future(service.xmlProcessing(request))
status(response) mustEqual SERVICE_UNAVAILABLE
}finally {
}
}
"Failure by withChannel throwing exceptions" in new WithApplication {
val service:SubmissionService = new SubmissionService {
override def messagingService: MessageSendingService = new MessageSenderImpl {
override def getQueueName:String = "IngressServiceIntegrationSpec_5"
protected override def withChannel(f: (Channel) => Result): Result = {
throw new Exception("This is a test thrown exception")
}
}
}
val request = FakeRequest().withXmlBody(<request>{"2013-10-24"}</request>)
try{
val response = Future(service.xmlProcessing(request))
status(response) mustEqual SERVICE_UNAVAILABLE
}finally {
}
}
}
section("integration")
}
| Department-for-Work-and-Pensions/ClaimReceived | cr/test/ingress/submission/SubmissionServiceIntegrationSpec.scala | Scala | mit | 5,002 |
/*                     __                                               *\
**     ________ ___   / /  ___     Scala API                            **
**    / __/ __// _ | / /  / _ |    (c) 2003-2013, LAMP/EPFL             **
**  __\ \/ /__/ __ |/ /__/ __ |    http://scala-lang.org/               **
** /____/\___/_/ |_/____/_/ | |                                         **
**                          |/                                          **
\*                                                                      */
package scala
package sys
package process
import processInternal._
import Process._
import java.io.{ FileInputStream, FileOutputStream }
import BasicIO.{ Uncloseable, Streamed }
import Uncloseable.protect
private[process] trait ProcessBuilderImpl {
self: ProcessBuilder.type =>
private[process] class DaemonBuilder(underlying: ProcessBuilder) extends AbstractBuilder {
final def run(io: ProcessIO): Process = underlying.run(io.daemonized())
}
private[process] class Dummy(override val toString: String, exitValue: => Int) extends AbstractBuilder {
override def run(io: ProcessIO): Process = new DummyProcess(exitValue)
override def canPipeTo = true
}
private[process] class URLInput(url: URL) extends IStreamBuilder(url.openStream, url.toString)
private[process] class FileInput(file: File) extends IStreamBuilder(new FileInputStream(file), file.getAbsolutePath)
private[process] class FileOutput(file: File, append: Boolean) extends OStreamBuilder(new FileOutputStream(file, append), file.getAbsolutePath)
private[process] class OStreamBuilder(
stream: => OutputStream,
label: String
) extends ThreadBuilder(label, _ writeInput protect(stream)) {
override def hasExitValue = false
}
private[process] class IStreamBuilder(
stream: => InputStream,
label: String
) extends ThreadBuilder(label, _ processOutput protect(stream)) {
override def hasExitValue = false
}
private[process] abstract class ThreadBuilder(
override val toString: String,
runImpl: ProcessIO => Unit
) extends AbstractBuilder {
override def run(io: ProcessIO): Process = {
val success = new SyncVar[Boolean]
def go(): Unit = {
var ok = false
try {
runImpl(io)
ok = true
} finally success.put(ok)
}
val t = Spawn(go(), io.daemonizeThreads)
new ThreadProcess(t, success)
}
}
/** Represents a simple command without any redirection or combination. */
private[process] class Simple(p: JProcessBuilder) extends AbstractBuilder {
override def run(io: ProcessIO): Process = {
val process = p.start() // start the external process
import io._
// spawn threads that process the input, output, and error streams using the functions defined in `io`
val inThread = Spawn(writeInput(process.getOutputStream), daemon = true)
val outThread = Spawn(processOutput(process.getInputStream), daemonizeThreads)
val errorThread =
if (p.redirectErrorStream) Nil
else List(Spawn(processError(process.getErrorStream), daemonizeThreads))
new SimpleProcess(process, inThread, outThread :: errorThread)
}
override def toString = p.command.toString
override def canPipeTo = true
}
private[scala] abstract class AbstractBuilder extends ProcessBuilder with Sink with Source {
protected def toSource = this
protected def toSink = this
def #|(other: ProcessBuilder): ProcessBuilder = {
require(other.canPipeTo, "Piping to multiple processes is not supported.")
new PipedBuilder(this, other, false)
}
def #||(other: ProcessBuilder): ProcessBuilder = new OrBuilder(this, other)
def #&&(other: ProcessBuilder): ProcessBuilder = new AndBuilder(this, other)
def ###(other: ProcessBuilder): ProcessBuilder = new SequenceBuilder(this, other)
def run(): Process = run(connectInput = false)
def run(connectInput: Boolean): Process = run(BasicIO.standard(connectInput))
def run(log: ProcessLogger): Process = run(log, connectInput = false)
def run(log: ProcessLogger, connectInput: Boolean): Process = run(BasicIO(connectInput, log))
def !! = slurp(None, withIn = false)
def !!(log: ProcessLogger) = slurp(Some(log), withIn = false)
def !!< = slurp(None, withIn = true)
def !!<(log: ProcessLogger) = slurp(Some(log), withIn = true)
def lineStream: Stream[String] = lineStream(withInput = false, nonZeroException = true, None)
def lineStream(log: ProcessLogger): Stream[String] = lineStream(withInput = false, nonZeroException = true, Some(log))
def lineStream_! : Stream[String] = lineStream(withInput = false, nonZeroException = false, None)
def lineStream_!(log: ProcessLogger): Stream[String] = lineStream(withInput = false, nonZeroException = false, Some(log))
def ! = run(connectInput = false).exitValue()
def !(io: ProcessIO) = run(io).exitValue()
def !(log: ProcessLogger) = runBuffered(log, connectInput = false)
def !< = run(connectInput = true).exitValue()
def !<(log: ProcessLogger) = runBuffered(log, connectInput = true)
/** Constructs a new builder which runs this command with all input/output threads marked
* as daemon threads. This allows the creation of a long running process while still
* allowing the JVM to exit normally.
*
* Note: not in the public API because it's not fully baked, but I need the capability
* for fsc.
*/
def daemonized(): ProcessBuilder = new DaemonBuilder(this)
private[this] def slurp(log: Option[ProcessLogger], withIn: Boolean): String = {
val buffer = new StringBuffer
val code = this ! BasicIO(withIn, buffer, log)
if (code == 0) buffer.toString
else scala.sys.error("Nonzero exit value: " + code)
}
private[this] def lineStream(
withInput: Boolean,
nonZeroException: Boolean,
log: Option[ProcessLogger]
): Stream[String] = {
val streamed = Streamed[String](nonZeroException)
val process = run(BasicIO(withInput, streamed.process, log))
Spawn(streamed done process.exitValue())
streamed.stream()
}
private[this] def runBuffered(log: ProcessLogger, connectInput: Boolean) =
log buffer run(log, connectInput).exitValue()
def canPipeTo = false
def hasExitValue = true
}
private[process] class URLImpl(url: URL) extends URLBuilder with Source {
protected def toSource = new URLInput(url)
}
private[process] class FileImpl(base: File) extends FileBuilder with Sink with Source {
protected def toSource = new FileInput(base)
protected def toSink = new FileOutput(base, false)
def #<<(f: File): ProcessBuilder = #<<(new FileInput(f))
def #<<(u: URL): ProcessBuilder = #<<(new URLInput(u))
def #<<(s: => InputStream): ProcessBuilder = #<<(new IStreamBuilder(s, "<input stream>"))
def #<<(b: ProcessBuilder): ProcessBuilder = new PipedBuilder(b, new FileOutput(base, true), false)
}
private[process] abstract class BasicBuilder extends AbstractBuilder {
protected[this] def checkNotThis(a: ProcessBuilder) = require(a != this, "Compound process '" + a + "' cannot contain itself.")
final def run(io: ProcessIO): Process = {
val p = createProcess(io)
p.start()
p
}
protected[this] def createProcess(io: ProcessIO): BasicProcess
}
private[process] abstract class SequentialBuilder(
a: ProcessBuilder,
b: ProcessBuilder,
operatorString: String
) extends BasicBuilder {
checkNotThis(a)
checkNotThis(b)
override def toString = " ( " + a + " " + operatorString + " " + b + " ) "
}
private[process] class PipedBuilder(
first: ProcessBuilder,
second: ProcessBuilder,
toError: Boolean
) extends SequentialBuilder(first, second, if (toError) "#|!" else "#|") {
override def createProcess(io: ProcessIO) = new PipedProcesses(first, second, io, toError)
}
private[process] class AndBuilder(
first: ProcessBuilder,
second: ProcessBuilder
) extends SequentialBuilder(first, second, "#&&") {
override def createProcess(io: ProcessIO) = new AndProcess(first, second, io)
}
private[process] class OrBuilder(
first: ProcessBuilder,
second: ProcessBuilder
) extends SequentialBuilder(first, second, "#||") {
override def createProcess(io: ProcessIO) = new OrProcess(first, second, io)
}
private[process] class SequenceBuilder(
first: ProcessBuilder,
second: ProcessBuilder
) extends SequentialBuilder(first, second, "###") {
override def createProcess(io: ProcessIO) = new ProcessSequence(first, second, io)
}
}
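// Illustrative usage sketch (not from the original file): the builders above back the
// public combinators on ProcessBuilder. This helper object is hypothetical and assumes a
// Unix-like environment with `echo`, `tr` and `true` on the PATH; it is a sketch of how
// the combinators are typically used, not part of the library itself.
private[process] object ProcessBuilderImplUsageExample {
  def demo(): Unit = {
    // "echo hello" becomes a Simple builder; #| wires it into a PipedBuilder and
    // !! (AbstractBuilder.slurp) runs the pipeline, capturing its stdout as a String.
    val shouted: String = ("echo hello" #| "tr a-z A-Z").!!
    // #&& produces an AndBuilder: the second command runs only if the first exits
    // with status 0; ! runs the combination and returns the overall exit code.
    val exitCode: Int = ("true" #&& "echo ran").!
    println(s"${shouted.trim} / exit=$exitCode")
  }
}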
| felixmulder/scala | src/library/scala/sys/process/ProcessBuilderImpl.scala | Scala | bsd-3-clause | 8,943 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package forms.constraints
import forms.constraints.utils.ValidationHelper.{validate, validateNot}
import play.api.data.validation.Constraint
import play.api.i18n.Messages
object EmailAddressConstraints {
// http://emailregex.com/
private val emailRegex =
"""^(?:[a-zA-Z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[a-zA-Z0-9!#$%&'*+/=?^_`{|}~-]+)*|"(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\
|x21\\x23-\\x5b\\x5d-\\x7f]|\\\\[\\x01-\\x09\\x0b\\x0c\\x0e-\\x7f])*")@(?:(?:[a-zA-Z0-9](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?\\.)+[
|a-zA-Z0-9](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?|\\[(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4]
|[0-9]|[01]?[0-9][0-9]?|[a-zA-Z0-9-]*[a-zA-Z0-9]:(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x21-\\x5a\\x53-\\x7f]|\\\\[\\x01-\\
|x09\\x0b\\x0c\\x0e-\\x7f])+)\\])$""".stripMargin
private val emailMaxLength = 132
def emailAddressFormat(message: String = "capture-email-address.error.incorrect_format"): Constraint[String] = Constraint("email_address.incorrect_format")(
emailAddress => validateNot(
constraint = emailAddress matches emailRegex,
errMsg = message
)
)
def emailAddressEmpty(message: String = "capture-email-address.error.nothing_entered"): Constraint[String] = Constraint("email_address.nothing_entered")(
emailAddress => validate(
constraint = emailAddress.isEmpty,
errMsg = message
)
)
def emailAddressLength(message: String = "capture-email-address.error.incorrect_length"): Constraint[String] = Constraint("email_address.incorrect_length")(
emailAddress => validate(
constraint = emailAddress.trim.length > emailMaxLength,
errMsg = message
)
)
}
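// Illustrative usage sketch (not from the original file): one plausible way to attach
// these Constraint[String] values to a Play form field. The object, form and field names
// are hypothetical; only the standard play.api.data API is assumed.
object CaptureEmailAddressFormExample {
  import play.api.data.Form
  import play.api.data.Forms._

  // Constraints are checked in order: empty check, then format, then length.
  val emailForm: Form[String] = Form(
    single(
      "email-address" -> text.verifying(
        EmailAddressConstraints.emailAddressEmpty(),
        EmailAddressConstraints.emailAddressFormat(),
        EmailAddressConstraints.emailAddressLength()
      )
    )
  )
}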
| hmrc/vat-registration-frontend | app/forms/constraints/EmailAddressConstraints.scala | Scala | apache-2.0 | 2,233 |
package pl.touk.nussknacker.engine.flink.util.transformer
import cats.data.NonEmptyList
import com.typesafe.config.ConfigFactory
import com.typesafe.config.ConfigValueFactory.fromAnyRef
import org.apache.flink.streaming.api.scala._
import org.scalatest.{FunSuite, Inside, Matchers}
import pl.touk.nussknacker.engine.api.process.{EmptyProcessConfigCreator, _}
import pl.touk.nussknacker.engine.api.typed.typing
import pl.touk.nussknacker.engine.api.typed.typing.Typed
import pl.touk.nussknacker.engine.api.{ProcessListener, ProcessVersion}
import pl.touk.nussknacker.engine.build.ScenarioBuilder
import pl.touk.nussknacker.engine.compiledgraph.part.PotentiallyStartPart
import pl.touk.nussknacker.engine.deployment.DeploymentData
import pl.touk.nussknacker.engine.flink.test.FlinkSpec
import pl.touk.nussknacker.engine.flink.util.source.EmitWatermarkAfterEachElementCollectionSource
import pl.touk.nussknacker.engine.graph.EspProcess
import pl.touk.nussknacker.engine.process.ExecutionConfigPreparer
import pl.touk.nussknacker.engine.process.compiler.FlinkProcessCompiler
import pl.touk.nussknacker.engine.process.registrar.FlinkProcessRegistrar
import pl.touk.nussknacker.engine.spel.Implicits._
import pl.touk.nussknacker.engine.testing.LocalModelData
import pl.touk.nussknacker.engine.testmode.{ResultsCollectingListener, ResultsCollectingListenerHolder, TestProcess, TestRunId, TestServiceInvocationCollector}
import java.time.Duration
import java.util.UUID
class ForEachTransformerSpec extends FunSuite with FlinkSpec with Matchers with Inside {
private val sinkId = "end"
private val resultVariableName = "resultVar"
private val forEachOutputVariableName = "forEachVar"
private val forEachNodeId = "for-each"
test("should produce results for each element in list") {
val collectingListener = initializeListener
val model = modelData(List(TestRecord()), collectingListener)
val testProcess = aProcessWithForEachNode(elements = "{'one', 'other'}", resultExpression = s"#$forEachOutputVariableName + '_1'")
val results = collectTestResults[String](model, testProcess, collectingListener)
extractResultValues(results) shouldBe List("one_1", "other_1")
}
test("should set return type based on element types") {
val collectingListener = initializeListener
val model = modelData(List(TestRecord()), collectingListener)
val testProcess = aProcessWithForEachNode(elements = "{'one', 'other'}", resultExpression = s"#$forEachOutputVariableName + '_1'")
val compiledSources: NonEmptyList[PotentiallyStartPart] = getCompiledSources(model, testProcess)
val forEachOutputVariable = compiledSources.head.nextParts.find(_.id == forEachNodeId)
.get.validationContext.variables(forEachOutputVariableName)
forEachOutputVariable shouldBe Typed[String]
}
test("should not produce any results when elements list is empty") {
val collectingListener = initializeListener
val model = modelData(List(TestRecord()), collectingListener)
val testProcess = aProcessWithForEachNode(elements = "{}")
val results = collectTestResults[String](model, testProcess, collectingListener)
results.nodeResults shouldNot contain key sinkId
}
private def initializeListener = ResultsCollectingListenerHolder.registerRun(identity)
private def modelData(list: List[TestRecord] = List(), collectingListener: ResultsCollectingListener): LocalModelData = LocalModelData(ConfigFactory
.empty().withValue("useTypingResultTypeInformation", fromAnyRef(true)), new Creator(list, collectingListener))
private def aProcessWithForEachNode(elements: String, resultExpression: String = s"#$forEachOutputVariableName") =
ScenarioBuilder
.streaming("forEachProcess")
.parallelism(1)
.stateOnDisk(true)
.source("start", "start")
.customNode(forEachNodeId, forEachOutputVariableName, "for-each", "Elements" -> elements)
.buildSimpleVariable("for-each-result", "resultVar", resultExpression)
.emptySink(sinkId, "dead-end")
private def collectTestResults[T](model: LocalModelData, testProcess: EspProcess, collectingListener: ResultsCollectingListener): TestProcess.TestResults[Any] = {
runProcess(model, testProcess)
collectingListener.results[Any]
}
private def extractResultValues(results: TestProcess.TestResults[Any]): List[String] = results.nodeResults(sinkId)
.map(_.variableTyped[String](resultVariableName).get)
private def getCompiledSources(model: LocalModelData, testProcess: EspProcess): NonEmptyList[PotentiallyStartPart] = {
val compiler = new FlinkProcessCompiler(model)
val compiledSources = compiler.compileProcess(testProcess, ProcessVersion.empty, DeploymentData.empty, new TestServiceInvocationCollector(TestRunId(UUID.randomUUID().toString)))(getClass.getClassLoader)
.compileProcess()
.sources
compiledSources
}
private def runProcess(model: LocalModelData, testProcess: EspProcess): Unit = {
val stoppableEnv = flinkMiniCluster.createExecutionEnvironment()
val registrar = FlinkProcessRegistrar(new FlinkProcessCompiler(model), ExecutionConfigPreparer.unOptimizedChain(model))
registrar.register(new StreamExecutionEnvironment(stoppableEnv), testProcess, ProcessVersion.empty, DeploymentData.empty)
stoppableEnv.executeAndWaitForFinished(testProcess.id)()
}
}
class Creator(input: List[TestRecord], collectingListener: ResultsCollectingListener) extends EmptyProcessConfigCreator {
override def sourceFactories(processObjectDependencies: ProcessObjectDependencies): Map[String, WithCategories[SourceFactory]] =
Map(
"start" -> WithCategories(SourceFactory.noParam[TestRecord](EmitWatermarkAfterEachElementCollectionSource
.create[TestRecord](input, _.timestamp, Duration.ofHours(1))))
)
override def listeners(processObjectDependencies: ProcessObjectDependencies): Seq[ProcessListener] =
List(collectingListener)
}
case class TestRecord(id: String = "1", timeHours: Int = 0, eId: Int = 1, str: String = "a") {
def timestamp: Long = timeHours * 3600L * 1000
}
| TouK/nussknacker | engine/flink/components/base/src/test/scala/pl/touk/nussknacker/engine/flink/util/transformer/ForEachTransformerSpec.scala | Scala | apache-2.0 | 6,094 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala
package runtime
/**
* Dummy class which exists only to satisfy the JVM. It corresponds to
* `scala.Null`. If such a type appears in method signatures, it is erased
* to this one. A private constructor ensures that Java code can't create
* subclasses. The only value of type Null$ should be null.
*/
sealed abstract class Null$ private ()
| scala/scala | src/library/scala/runtime/Null$.scala | Scala | apache-2.0 | 644 |
/*
* Copyright (c) 2015-2016 Luciano Resende
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.luck.csv
import org.apache.spark._
import org.apache.spark.sql.SparkSession
/**
* Sample application that reads a CSV
* and displays its contents
*/
case class SomeData(id: Integer, timestamp: String)
object CsvApplication {
def main(args: Array[String]): Unit = {
println("Starting CSV Application") //scalastyle:ignore
var sparkConf: SparkConf = new SparkConf()
.setAppName("Spark-CSV")
.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
// check Spark configuration for master URL, set it to local if not configured
if (! sparkConf.contains("spark.master")) {
println(">>> will set master") //scalastyle:ignore
sparkConf.setMaster("local[2]")
}
val sparkSession: SparkSession = SparkSession.builder
.config(sparkConf)
.getOrCreate
import sparkSession.implicits._
val df = sparkSession.read
.option("header", "true") // Use first line of all files as header
.option("inferSchema", "true") // Automatically infer data types
.option("delimiter", " ") // define the delimiter to use
.csv("/users/lresende/data.csv") // location relative to hdfs root
.as[SomeData] // this will return DataSet[SomeData]
df.printSchema()
df.show(50, false)
}
}
| lresende/spark-sandbox | src/main/scala/com/luck/csv/CsvApplication.scala | Scala | apache-2.0 | 1,997 |
package com.easyforger.creatures
import net.minecraft.entity.monster.EntityCreeper
import net.minecraft.world.World
case class CreeperConfig(common: CommonEntityConfig = CommonEntityConfig(),
fuseTime: Option[Int] = None,
explosionRadius: Option[Int] = None,
powered: Option[Boolean] = None) extends CreatureConfig
class CustomCreeper(world: World) extends EntityCreeper(world) with CommonCustomMonster {
val creeperConfig = VanillaCreatures.creeperConfig
val config = creeperConfig.common
creeperConfig.fuseTime.foreach(setIntField(classOf[EntityCreeper], this, "fuseTime", _))
creeperConfig.explosionRadius.foreach(setIntField(classOf[EntityCreeper], this, "explosionRadius", _))
init()
override def getPowered = creeperConfig.powered.getOrElse(super.getPowered)
}
| ThiagoGarciaAlves/easyforger | src/main/scala/com/easyforger/creatures/CustomCreeper.scala | Scala | gpl-3.0 | 862 |
package io.hydrosphere.mist.api.ml.classification
import io.hydrosphere.mist.api.ml._
import org.apache.spark.ml.classification.{DecisionTreeClassificationModel, RandomForestClassificationModel}
import org.apache.spark.ml.linalg.{DenseVector, Vector, Vectors}
class LocalRandomForestClassificationModel(override val sparkTransformer: RandomForestClassificationModel) extends LocalTransformer[RandomForestClassificationModel] {
override def transform(localData: LocalData): LocalData = {
localData.column(sparkTransformer.getFeaturesCol) match {
case Some(column) =>
val cls = classOf[RandomForestClassificationModel]
val rawPredictionCol = LocalDataColumn(sparkTransformer.getRawPredictionCol, column.data.map(f => Vectors.dense(f.asInstanceOf[Array[Double]])).map { vector =>
val predictRaw = cls.getDeclaredMethod("predictRaw", classOf[Vector])
predictRaw.invoke(sparkTransformer, vector)
})
val probabilityCol = LocalDataColumn(sparkTransformer.getProbabilityCol, rawPredictionCol.data.map(_.asInstanceOf[DenseVector]).map { vector =>
val raw2probabilityInPlace = cls.getDeclaredMethod("raw2probabilityInPlace", classOf[Vector])
raw2probabilityInPlace.invoke(sparkTransformer, vector.copy)
})
val predictionCol = LocalDataColumn(sparkTransformer.getPredictionCol, rawPredictionCol.data.map(_.asInstanceOf[DenseVector]).map { vector =>
val raw2prediction = cls.getMethod("raw2prediction", classOf[Vector])
raw2prediction.invoke(sparkTransformer, vector.copy)
})
localData.withColumn(rawPredictionCol)
.withColumn(probabilityCol)
.withColumn(predictionCol)
case None => localData
}
}
}
object LocalRandomForestClassificationModel extends LocalModel[RandomForestClassificationModel] {
override def load(metadata: Metadata, data: Map[String, Any]): RandomForestClassificationModel = {
val treesMetadata = metadata.paramMap("treesMetadata").asInstanceOf[Map[String, Any]]
val trees = treesMetadata map { treeKv =>
val treeMeta = treeKv._2.asInstanceOf[Map[String, Any]]
val meta = treeMeta("metadata").asInstanceOf[Metadata]
LocalDecisionTreeClassificationModel.createTree(
meta,
data(treeKv._1).asInstanceOf[Map[String, Any]]
)
}
val ctor = classOf[RandomForestClassificationModel].getDeclaredConstructor(classOf[String], classOf[Array[DecisionTreeClassificationModel]], classOf[Int], classOf[Int])
ctor.setAccessible(true)
ctor
.newInstance(
metadata.uid,
trees.to[Array],
metadata.numFeatures.get.asInstanceOf[java.lang.Integer],
metadata.numClasses.get.asInstanceOf[java.lang.Integer]
)
.setFeaturesCol(metadata.paramMap("featuresCol").asInstanceOf[String])
.setPredictionCol(metadata.paramMap("predictionCol").asInstanceOf[String])
.setProbabilityCol(metadata.paramMap("probabilityCol").asInstanceOf[String])
}
override implicit def getTransformer(transformer: RandomForestClassificationModel): LocalTransformer[RandomForestClassificationModel] = new LocalRandomForestClassificationModel(transformer)
}
| KineticCookie/mist | mist-lib/src/main/scala-2.11/io/hydrosphere/mist/api/ml/classification/LocalRandomForestClassificationModel.scala | Scala | apache-2.0 | 3,209 |
/*
* Copyright 2013 Sanjin Sehic
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.saserr.cqrs
import scala.language.higherKinds
import java.io.Serializable
import scala.collection.immutable.{Set, Traversable, Vector}
import scalaz.@>
abstract class Storage[Key, Value <: Serializable, F[A] <: Traversable[A]]
(private val version: Long, private val f: F[Versioned[Value]]) extends Serializable {
@transient protected def key: Value @> Key
@transient private[this] lazy val byKey = f.map(v => key.get(v.value) -> v).toMap
@transient lazy val values = f.to[Vector].map(_.value)
@transient lazy val keys: Set[Key] = byKey.keySet
def contains(key: Key): Boolean = byKey contains key
def get(key: Key): Option[Versioned[Value]] = byKey get key
}
object Storage {
implicit def version[Key <: Serializable, Value <: Serializable, F[A] <: Traversable[A]]: Version[Storage[Key, Value, F]] =
new Version[Storage[Key, Value, F]] {
override def apply(storage: Storage[Key, Value, F]) = storage.version
}
implicit def read[Key <: Serializable, Value <: Serializable, F[A] <: Traversable[A], S <: Serializable]: State.Read[Key, Value, Storage[Key, Value, F]] =
new State.Read[Key, Value, Storage[Key, Value, F]] {
override def get(key: Key, storage: Storage[Key, Value, F]) = storage get key
override def values(storage: Storage[Key, Value, F]) = Versioned(storage.version, storage.values)
}
}
| saserr/CQRS | src/scala/org/saserr/cqrs/Storage.scala | Scala | apache-2.0 | 1,971 |
package mesosphere.marathon
import java.util.jar.{Attributes, Manifest}
import scala.Predef._
import scala.util.control.NonFatal
import scala.jdk.CollectionConverters._
import mesosphere.marathon.io.IO
case object BuildInfo {
private val marathonJar = "\\bmesosphere\\.marathon\\.marathon-[0-9.]+".r
val DefaultBuildVersion = SemVer(1, 11, 0, Some("SNAPSHOT"))
/**
* sbt-native-packager provides all of the files as individual JARs. By default, `getResourceAsStream` returns the
* first matching file for the first JAR in the class path. Instead, we need to enumerate through all of the
* manifests, and find the one that applies to the Marathon application jar.
*/
private lazy val marathonManifestPath: List[java.net.URL] =
getClass()
.getClassLoader()
.getResources("META-INF/MANIFEST.MF")
.asScala
.iterator
.filter { manifest =>
marathonJar.findFirstMatchIn(manifest.getPath).nonEmpty
}
.toList
lazy val manifest: Option[Manifest] = marathonManifestPath match {
case Nil => None
case List(file) =>
val mf = new Manifest()
IO.using(file.openStream) { f =>
mf.read(f)
Some(mf)
}
case otherwise =>
throw new RuntimeException(s"Multiple marathon JAR manifests returned! ${otherwise}")
}
lazy val attributes: Option[Attributes] = manifest.map(_.getMainAttributes())
def getAttribute(name: String): Option[String] =
attributes.flatMap { attrs =>
try {
Option(attrs.getValue(name))
} catch {
case NonFatal(_) => None
}
}
lazy val name: String = getAttribute("Implementation-Title").getOrElse("unknown")
// IntelliJ has its own manifest.mf that will inject a version that doesn't necessarily match
// our actual version. This can cause Migrations to fail since the version number doesn't correctly match up.
lazy val version: SemVer =
getAttribute("Implementation-Version").filterNot(_ == "0.1-SNAPSHOT").map(SemVer(_)).getOrElse(DefaultBuildVersion)
lazy val scalaVersion: String = getAttribute("Scala-Version").getOrElse("2.x.x")
lazy val buildref: String = getAttribute("Git-Commit").getOrElse("unknown")
override val toString: String = {
"name: %s, version: %s, scalaVersion: %s, buildref: %s" format (
name, version, scalaVersion, buildref
)
}
}
| mesosphere/marathon | src/main/scala/mesosphere/marathon/BuildInfo.scala | Scala | apache-2.0 | 2,372 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package play.filters.csp
import akka.util.ByteString
import javax.inject.Inject
import play.api.libs.streams.Accumulator
import play.api.mvc.request.RequestAttrKey
import play.api.mvc.EssentialAction
import play.api.mvc.RequestHeader
import play.api.mvc.Result
/**
* A result processor that applies a CSPResult to a play request pipeline -- either an ActionBuilder or a Filter.
*/
trait CSPResultProcessor {
def apply(next: EssentialAction, request: RequestHeader): Accumulator[ByteString, Result]
}
object CSPResultProcessor {
def apply(processor: CSPProcessor): CSPResultProcessor = new DefaultCSPResultProcessor(processor)
}
/**
* This trait is used by CSPActionBuilder and CSPFilter to apply the CSPResult to a
* Play HTTP result as headers.
*
* Appends as `play.api.http.HeaderNames.CONTENT_SECURITY_POLICY` or
* `play.api.http.HeaderNames.CONTENT_SECURITY_POLICY_REPORT_ONLY`,
* depending on config.reportOnly.
*
* If `cspResult.nonceHeader` is defined then
* `play.api.http.HeaderNames.X_CONTENT_SECURITY_POLICY_NONCE_HEADER`
* is set as an additional header.
*/
class DefaultCSPResultProcessor @Inject() (cspProcessor: CSPProcessor) extends CSPResultProcessor {
def apply(next: EssentialAction, request: RequestHeader): Accumulator[ByteString, Result] = {
cspProcessor
.process(request)
.map { cspResult =>
val maybeNonceRequest = cspResult.nonce
.map { nonce =>
request.addAttr(RequestAttrKey.CSPNonce, nonce)
}
.getOrElse(request)
next(maybeNonceRequest).map { result =>
result.withHeaders(generateHeaders(cspResult): _*)
}(play.core.Execution.trampoline)
}
.getOrElse {
next(request)
}
}
protected def generateHeaders(cspResult: CSPResult): Seq[(String, String)] = {
import play.api.http.HeaderNames._
val headerName = if (cspResult.reportOnly) {
CONTENT_SECURITY_POLICY_REPORT_ONLY
} else {
CONTENT_SECURITY_POLICY
}
var cspHeader = collection.immutable.Seq(headerName -> cspResult.directives)
cspResult.nonce match {
case Some(nonce) if cspResult.nonceHeader =>
cspHeader :+ (X_CONTENT_SECURITY_POLICY_NONCE_HEADER -> nonce)
case _ =>
cspHeader
}
}
}
| benmccann/playframework | web/play-filters-helpers/src/main/scala/play/filters/csp/CSPResultProcessor.scala | Scala | apache-2.0 | 2,350 |
/*
* Wire
* Copyright (C) 2016 Wire Swiss GmbH
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.waz.model
import com.waz.api.IConversation.Access.{CODE, INVITE}
import com.waz.api.IConversation.AccessRole._
import com.waz.api.IConversation.{Access, AccessRole}
import com.waz.api.{IConversation, Verification}
import com.waz.db.Col._
import com.waz.db.{Dao, Dao2}
import com.waz.log.LogShow.SafeToLog
import com.waz.model
import com.waz.model.ConversationData.{ConversationType, Link, UnreadCount}
import com.waz.service.SearchKey
import com.waz.utils.wrappers.{DB, DBCursor}
import com.waz.utils.{JsonDecoder, JsonEncoder, _}
import org.json.JSONArray
import scala.concurrent.duration._
case class ConversationData(override val id: ConvId = ConvId(),
remoteId: RConvId = RConvId(),
name: Option[Name] = None,
creator: UserId = UserId(),
convType: ConversationType = ConversationType.Group,
team: Option[TeamId] = None,
lastEventTime: RemoteInstant = RemoteInstant.Epoch,
isActive: Boolean = true,
lastRead: RemoteInstant = RemoteInstant.Epoch,
muted: MuteSet = MuteSet.AllAllowed,
muteTime: RemoteInstant = RemoteInstant.Epoch,
archived: Boolean = false,
archiveTime: RemoteInstant = RemoteInstant.Epoch,
cleared: Option[RemoteInstant] = None,
generatedName: Name = Name.Empty,
searchKey: Option[SearchKey] = None,
unreadCount: UnreadCount = UnreadCount(0, 0, 0, 0, 0),
failedCount: Int = 0,
missedCallMessage: Option[MessageId] = None,
incomingKnockMessage: Option[MessageId] = None,
hidden: Boolean = false,
verified: Verification = Verification.UNKNOWN,
localEphemeral: Option[FiniteDuration] = None,
globalEphemeral: Option[FiniteDuration] = None,
access: Set[Access] = Set.empty,
accessRole: Option[AccessRole] = None, //option for migration purposes only - at some point we do a fetch and from that point it will always be defined
link: Option[Link] = None,
receiptMode: Option[Int] = None
) extends Identifiable[ConvId] {
def displayName = if (convType == ConversationType.Group) name.getOrElse(generatedName) else generatedName
def withFreshSearchKey = copy(searchKey = freshSearchKey)
def savedOrFreshSearchKey = searchKey.orElse(freshSearchKey)
def freshSearchKey = if (convType == ConversationType.Group) name.map(SearchKey(_)) else None
lazy val completelyCleared = cleared.exists(!_.isBefore(lastEventTime))
val isManaged = team.map(_ => false) //can be returned to parameter list when we need it.
lazy val ephemeralExpiration: Option[EphemeralDuration] = (globalEphemeral, localEphemeral) match {
case (Some(d), _) => Some(ConvExpiry(d)) //global ephemeral takes precedence over local
case (_, Some(d)) => Some(MessageExpiry(d))
case _ => None
}
def withLastRead(time: RemoteInstant) = copy(lastRead = lastRead max time)
def withCleared(time: RemoteInstant) = copy(cleared = Some(cleared.fold(time)(_ max time)))
def isTeamOnly: Boolean = accessRole match {
case Some(TEAM) if access.contains(Access.INVITE) => true
case _ => false
}
def isGuestRoom: Boolean = accessRole match {
case Some(NON_ACTIVATED) if access == Set(Access.INVITE, Access.CODE) => true
case _ => false
}
def isWirelessLegacy: Boolean = !(isTeamOnly || isGuestRoom)
def isUserAllowed(userData: UserData): Boolean =
!(userData.isGuest(team) && isTeamOnly)
def isMemberFromTeamGuest(teamId: Option[TeamId]): Boolean = team.isDefined && teamId != team
def isAllAllowed: Boolean = muted.isAllAllowed
def isAllMuted: Boolean = muted.isAllMuted
def onlyMentionsAllowed: Boolean = muted.onlyMentionsAllowed
def readReceiptsAllowed: Boolean = team.isDefined && receiptMode.exists(_ > 0)
}
/**
* Conversation user binding.
*/
case class ConversationMemberData(userId: UserId, convId: ConvId) extends Identifiable[(UserId, ConvId)] {
override val id: (UserId, ConvId) = (userId, convId)
}
object ConversationData {
val Empty = ConversationData(ConvId(), RConvId(), None, UserId(), IConversation.Type.UNKNOWN)
case class UnreadCount(normal: Int, call: Int, ping: Int, mentions: Int, quotes: Int) extends SafeToLog {
def total = normal + call + ping + mentions + quotes
def messages = normal + ping
}
// total (!) ordering for use in ordered sets; handwritten (instead of e.g. derived from tuples) to avoid allocations
implicit val ConversationDataOrdering: Ordering[ConversationData] = new Ordering[ConversationData] {
override def compare(b: ConversationData, a: ConversationData): Int =
if (a.id == b.id) 0
else {
val c = a.lastEventTime.compareTo(b.lastEventTime)
if (c != 0) c
else a.id.str.compareTo(b.id.str)
}
}
type ConversationType = IConversation.Type
object ConversationType {
val Unknown = IConversation.Type.UNKNOWN
val Group = IConversation.Type.GROUP
val OneToOne = IConversation.Type.ONE_TO_ONE
val Self = IConversation.Type.SELF
val WaitForConnection = IConversation.Type.WAIT_FOR_CONNECTION
val Incoming = IConversation.Type.INCOMING_CONNECTION
def apply(id: Int) = IConversation.Type.withId(id)
def isOneToOne(tp: IConversation.Type) = tp == OneToOne || tp == WaitForConnection || tp == Incoming
def values = Set(Unknown, Group, OneToOne, Self, WaitForConnection, Incoming)
}
def getAccessAndRoleForGroupConv(teamOnly: Boolean, teamId: Option[TeamId]): (Set[Access], AccessRole) = {
teamId match {
case Some(_) if teamOnly => (Set(INVITE), TEAM)
case Some(_) => (Set(INVITE, CODE), NON_ACTIVATED)
case _ => (Set(INVITE), ACTIVATED)
}
}
case class Link(url: String)
implicit object ConversationDataDao extends Dao[ConversationData, ConvId] {
val Id = id[ConvId]('_id, "PRIMARY KEY").apply(_.id)
val RemoteId = id[RConvId]('remote_id).apply(_.remoteId)
val Name = opt(text[model.Name]('name, _.str, model.Name(_)))(_.name.filterNot(_.isEmpty))
val Creator = id[UserId]('creator).apply(_.creator)
val ConvType = int[ConversationType]('conv_type, _.id, ConversationType(_))(_.convType)
val Team = opt(id[TeamId]('team))(_.team)
val IsManaged = opt(bool('is_managed))(_.isManaged)
val LastEventTime = remoteTimestamp('last_event_time)(_.lastEventTime)
val IsActive = bool('is_active)(_.isActive)
val LastRead = remoteTimestamp('last_read)(_.lastRead)
val MutedStatus = int('muted_status)(_.muted.toInt)
val MutedTime = remoteTimestamp('mute_time)(_.muteTime)
val Archived = bool('archived)(_.archived)
val ArchivedTime = remoteTimestamp('archive_time)(_.archiveTime)
val Cleared = opt(remoteTimestamp('cleared))(_.cleared)
val GeneratedName = text[model.Name]('generated_name, _.str, model.Name(_))(_.generatedName)
val SKey = opt(text[SearchKey]('search_key, _.asciiRepresentation, SearchKey.unsafeRestore))(_.searchKey)
val UnreadCount = int('unread_count)(_.unreadCount.normal)
val UnreadCallCount = int('unread_call_count)(_.unreadCount.call)
val UnreadPingCount = int('unread_ping_count)(_.unreadCount.ping)
val FailedCount = int('unsent_count)(_.failedCount)
val Hidden = bool('hidden)(_.hidden)
val MissedCall = opt(id[MessageId]('missed_call))(_.missedCallMessage)
val IncomingKnock = opt(id[MessageId]('incoming_knock))(_.incomingKnockMessage)
val Verified = text[Verification]('verified, _.name, Verification.valueOf)(_.verified)
val LocalEphemeral = opt(finiteDuration('ephemeral))(_.localEphemeral)
val GlobalEphemeral = opt(finiteDuration('global_ephemeral))(_.globalEphemeral)
val Access = set[Access]('access, JsonEncoder.encodeAccess(_).toString(), v => JsonDecoder.array[Access](new JSONArray(v), (arr: JSONArray, i: Int) => IConversation.Access.valueOf(arr.getString(i).toUpperCase)).toSet)(_.access)
val AccessRole = opt(text[IConversation.AccessRole]('access_role, JsonEncoder.encodeAccessRole, v => IConversation.AccessRole.valueOf(v.toUpperCase)))(_.accessRole)
val Link = opt(text[Link]('link, _.url, v => ConversationData.Link(v)))(_.link)
val UnreadMentionsCount = int('unread_mentions_count)(_.unreadCount.mentions)
val UnreadQuotesCount = int('unread_quote_count)(_.unreadCount.quotes)
val ReceiptMode = opt(int('receipt_mode))(_.receiptMode)
override val idCol = Id
override val table = Table(
"Conversations",
Id,
RemoteId,
Name,
Creator,
ConvType,
Team,
IsManaged,
LastEventTime,
IsActive,
LastRead,
MutedStatus,
MutedTime,
Archived,
ArchivedTime,
Cleared,
GeneratedName,
SKey,
UnreadCount,
FailedCount,
Hidden,
MissedCall,
IncomingKnock,
Verified,
LocalEphemeral,
GlobalEphemeral,
UnreadCallCount,
UnreadPingCount,
Access,
AccessRole,
Link,
UnreadMentionsCount,
UnreadQuotesCount,
ReceiptMode
)
override def apply(implicit cursor: DBCursor): ConversationData =
ConversationData(
Id,
RemoteId,
Name,
Creator,
ConvType,
Team,
LastEventTime,
IsActive,
LastRead,
MuteSet(MutedStatus),
MutedTime,
Archived,
ArchivedTime,
Cleared,
GeneratedName,
SKey,
ConversationData.UnreadCount(UnreadCount, UnreadCallCount, UnreadPingCount, UnreadMentionsCount, UnreadQuotesCount),
FailedCount,
MissedCall,
IncomingKnock,
Hidden,
Verified,
LocalEphemeral,
GlobalEphemeral,
Access,
AccessRole,
Link,
ReceiptMode
)
import com.waz.model.ConversationData.ConversationType._
override def onCreate(db: DB): Unit = {
super.onCreate(db)
db.execSQL(s"CREATE INDEX IF NOT EXISTS Conversation_search_key on Conversations (${SKey.name})")
}
def establishedConversations(implicit db: DB) = iterating(db.rawQuery(
s"""SELECT *
| FROM ${table.name}
| WHERE (${ConvType.name} = ${ConvType(ConversationType.OneToOne)} OR ${ConvType.name} = ${ConvType(ConversationType.Group)})
| AND ${IsActive.name} = ${IsActive(true)}
| AND ${Hidden.name} = 0
""".stripMargin, null))
def allConversations(implicit db: DB) =
db.rawQuery(s"SELECT *, ${ConvType.name} = ${Self.id} as is_self, ${ConvType.name} = ${Incoming.id} as is_incoming, ${Archived.name} = 1 as is_archived FROM ${table.name} WHERE ${Hidden.name} = 0 ORDER BY is_self DESC, is_archived ASC, is_incoming DESC, ${LastEventTime.name} DESC", null)
import ConversationMemberData.{ConversationMemberDataDao => CM}
import UserData.{UserDataDao => U}
def search(prefix: SearchKey, self: UserId, handleOnly: Boolean, teamId: Option[TeamId])(implicit db: DB) = {
val select =
s"""SELECT c.* ${if (teamId.isDefined) ", COUNT(*)" else ""}
| FROM ${table.name} c
| JOIN ${CM.table.name} cm ON cm.${CM.ConvId.name} = c.${Id.name}
| JOIN ${U.table.name} u ON cm.${CM.UserId.name} = u.${U.Id.name}
| WHERE c.${ConvType.name} = ${ConvType(ConversationType.Group)}
| AND c.${Hidden.name} = ${Hidden(false)}
| AND u.${U.Id.name} != '${U.Id(self)}'
| AND (c.${Cleared.name} IS NULL OR c.${Cleared.name} < c.${LastEventTime.name} OR c.${IsActive.name} = ${IsActive(true)})""".stripMargin
val handleCondition =
if (handleOnly){
s"""AND u.${U.Handle.name} LIKE '${prefix.asciiRepresentation}%'""".stripMargin
} else {
s"""AND ( c.${SKey.name} LIKE '${SKey(Some(prefix))}%'
| OR c.${SKey.name} LIKE '% ${SKey(Some(prefix))}%'
| OR u.${U.SKey.name} LIKE '${U.SKey(prefix)}%'
| OR u.${U.SKey.name} LIKE '% ${U.SKey(prefix)}%'
| OR u.${U.Handle.name} LIKE '%${prefix.asciiRepresentation}%')""".stripMargin
}
val teamCondition = teamId.map(_ =>
s"""AND c.${Team.name} = ${Team(teamId)}
| GROUP BY cm.${CM.ConvId.name}
| HAVING COUNT(*) > 2
""".stripMargin)
list(db.rawQuery(select + " " + handleCondition + teamCondition.map(qu => s" $qu").getOrElse(""), null))
}
def findByTeams(teams: Set[TeamId])(implicit db: DB) = iterating(findInSet(Team, teams.map(Option(_))))
def findByRemoteId(remoteId: RConvId)(implicit db: DB) = iterating(find(RemoteId, remoteId))
def findByRemoteIds(remoteIds: Set[RConvId])(implicit db: DB) = iterating(findInSet(RemoteId, remoteIds))
}
}
object ConversationMemberData {
implicit object ConversationMemberDataDao extends Dao2[ConversationMemberData, UserId, ConvId] {
val UserId = id[UserId]('user_id).apply(_.userId)
val ConvId = id[ConvId]('conv_id).apply(_.convId)
override val idCol = (UserId, ConvId)
override val table = Table("ConversationMembers", UserId, ConvId)
override def apply(implicit cursor: DBCursor): ConversationMemberData = ConversationMemberData(UserId, ConvId)
override def onCreate(db: DB): Unit = {
super.onCreate(db)
db.execSQL(s"CREATE INDEX IF NOT EXISTS ConversationMembers_conv on ConversationMembers (${ConvId.name})")
db.execSQL(s"CREATE INDEX IF NOT EXISTS ConversationMembers_userid on ConversationMembers (${UserId.name})")
}
def findForConv(convId: ConvId)(implicit db: DB) = iterating(find(ConvId, convId))
def findForConvs(convs: Set[ConvId])(implicit db: DB) = iterating(findInSet(ConvId, convs))
def findForUser(userId: UserId)(implicit db: DB) = iterating(find(UserId, userId))
def findForUsers(users: Set[UserId])(implicit db: DB) = iterating(findInSet(UserId, users))
}
}
|
wireapp/wire-android-sync-engine
|
zmessaging/src/main/scala/com/waz/model/ConversationData.scala
|
Scala
|
gpl-3.0
| 16,090
|
package scala
import org.scalatest.{FlatSpec, Matchers}
import sai.bytecode.Clazz
class BasicBlockTest extends FlatSpec with Matchers {
val clazz = new Clazz("misc.BasicBlockExamples")
"A ControlFlowGraph" should "have 1 basic block for a method without control flow instructions" in {
val method = clazz.lookupMethod("simple").get
val singleBlock :: Nil = method.controlFlowGraph
singleBlock.predecessors shouldBe empty
singleBlock.lineRange shouldBe (7 to 11)
singleBlock.successors shouldBe empty
}
it should "have 3 basic blocks for a method with an if-statement" in {
val method = clazz.lookupMethod("ifStatement").get
val entryBlock :: ifBlock :: exitBlock :: Nil = method.controlFlowGraph
entryBlock.predecessors shouldBe empty
entryBlock.lineRange shouldBe (65 to 67)
entryBlock.successors shouldEqual List(ifBlock, exitBlock)
ifBlock.predecessors shouldEqual List(entryBlock)
ifBlock.lineRange shouldBe (68 to 68)
ifBlock.successors shouldEqual List(exitBlock)
exitBlock.predecessors shouldEqual List(entryBlock, ifBlock)
exitBlock.lineRange shouldBe (70 to 71)
exitBlock.successors shouldBe empty
}
it should "have 4 basic blocks for a method with an if-else-statement" in {
val method = clazz.lookupMethod("ifElseStatement").get
val entryBlock :: ifBlock :: elseBlock :: exitBlock :: Nil = method.controlFlowGraph
entryBlock.predecessors shouldBe empty
entryBlock.lineRange shouldBe (73 to 75)
entryBlock.successors shouldEqual List(ifBlock, elseBlock)
ifBlock.predecessors shouldEqual List(entryBlock)
ifBlock.lineRange shouldBe (76 to 76)
ifBlock.successors shouldEqual List(exitBlock)
elseBlock.predecessors shouldEqual List(entryBlock)
elseBlock.lineRange shouldBe (78 to 78)
elseBlock.successors shouldEqual List(exitBlock)
exitBlock.predecessors shouldEqual List(ifBlock, elseBlock)
exitBlock.lineRange shouldBe (80 to 81)
exitBlock.successors shouldBe empty
}
it should "have 4 basic blocks for a method with a while-loop" in {
val method = clazz.lookupMethod("whileLoop").get
val entryBlock :: whileCondition :: whileBody :: exitBlock :: Nil = method.controlFlowGraph
entryBlock.predecessors shouldBe empty
entryBlock.lineRange shouldBe (83 to 85)
entryBlock.successors shouldEqual List(whileCondition)
whileCondition.predecessors shouldEqual List(entryBlock, whileBody)
whileCondition.lineRange shouldBe (86 to 86)
whileCondition.successors shouldEqual List(whileBody, exitBlock)
whileBody.predecessors shouldEqual List(whileCondition)
whileBody.lineRange shouldBe (87 to 87)
whileBody.successors shouldEqual List(whileCondition)
exitBlock.predecessors shouldEqual List(whileCondition)
exitBlock.lineRange shouldBe (89 to 90)
exitBlock.successors shouldBe empty
}
it should "have 6 basic blocks for a method with an if-else-statement nested within a while loop" in {
val method = clazz.lookupMethod("whileIfElse").get
val entryBlock :: whileCondition :: whileBegin :: ifBlock :: elseBlock :: exitBlock :: Nil = method.controlFlowGraph
entryBlock.predecessors shouldBe empty
entryBlock.lineRange shouldBe (13 to 16)
entryBlock.successors shouldEqual List(whileCondition)
whileCondition.predecessors shouldBe List(entryBlock, ifBlock, elseBlock)
whileCondition.lineRange shouldBe (17 to 17)
whileCondition.successors shouldBe List(whileBegin, exitBlock)
whileBegin.predecessors shouldEqual List(whileCondition)
whileBegin.lineRange shouldBe (18 to 19)
whileBegin.successors shouldEqual List(ifBlock, elseBlock)
ifBlock.predecessors shouldEqual List(whileBegin)
ifBlock.lineRange shouldBe (20 to 20)
ifBlock.successors shouldEqual List(whileCondition)
elseBlock.predecessors shouldEqual List(whileBegin)
elseBlock.lineRange shouldBe (22 to 22)
elseBlock.successors shouldEqual List(whileCondition)
exitBlock.predecessors shouldEqual List(whileCondition)
exitBlock.lineRange shouldBe (25 to 26)
exitBlock.successors shouldBe empty
}
it should "have 4 basic blocks for a method with a try-finally construct" in {
val method = clazz.lookupMethod("tryFinally").get
val entryTryBlock :: finallyBlock :: finallyThrowBlock :: exitBlock :: Nil = method.controlFlowGraph
entryTryBlock.predecessors shouldBe empty
entryTryBlock.successors shouldEqual List(finallyBlock, finallyThrowBlock)
finallyBlock.predecessors shouldEqual List(entryTryBlock)
finallyBlock.successors shouldEqual List(exitBlock)
finallyThrowBlock.predecessors shouldEqual List(entryTryBlock)
finallyThrowBlock.successors shouldEqual List(exitBlock)
exitBlock.predecessors shouldEqual List(finallyBlock, finallyThrowBlock)
exitBlock.successors shouldBe empty
}
it should "have 7 basic blocks for a method with a try-catch-finally construct" in {
val method = clazz.lookupMethod("tryCatchFinally").get
val entryTryBlock :: tryFinallyBlock :: catchBlock :: catchFinallyBlock :: finallyThrowBlock :: preExitBlock :: exitBlock :: Nil = method.controlFlowGraph
entryTryBlock.predecessors shouldBe empty
entryTryBlock.successors shouldEqual List(tryFinallyBlock, catchBlock, finallyThrowBlock)
tryFinallyBlock.predecessors shouldEqual List(entryTryBlock)
tryFinallyBlock.successors shouldEqual List(preExitBlock)
catchBlock.predecessors shouldEqual List(entryTryBlock)
catchBlock.successors shouldEqual List(catchFinallyBlock, finallyThrowBlock)
catchFinallyBlock.predecessors shouldEqual List(catchBlock)
catchFinallyBlock.successors shouldEqual List(preExitBlock)
finallyThrowBlock.predecessors shouldEqual List(entryTryBlock, catchBlock)
finallyThrowBlock.successors shouldEqual List(exitBlock)
preExitBlock.predecessors shouldEqual List(tryFinallyBlock, catchFinallyBlock)
preExitBlock.successors shouldEqual List(exitBlock)
exitBlock.predecessors shouldEqual List(finallyThrowBlock, preExitBlock)
exitBlock.successors shouldBe empty
}
it should "have 9 basic blocks for a method with a try-catch-catch-finally construct" in {
val method = clazz.lookupMethod("tryCatchCatchFinally").get
val entryTryBlock :: tryFinallyBlock :: catch1Block :: catch1FinallyBlock :: catch2Block :: catch2FinallyBlock :: finallyThrowBlock :: preExitBlock :: exitBlock :: Nil = method.controlFlowGraph
entryTryBlock.predecessors shouldBe empty
entryTryBlock.successors shouldEqual List(tryFinallyBlock, catch1Block, catch2Block, finallyThrowBlock)
tryFinallyBlock.predecessors shouldEqual List(entryTryBlock)
tryFinallyBlock.successors shouldEqual List(preExitBlock)
catch1Block.predecessors shouldEqual List(entryTryBlock)
catch1Block.successors shouldEqual List(catch1FinallyBlock, finallyThrowBlock)
catch1FinallyBlock.predecessors shouldEqual List(catch1Block)
catch1FinallyBlock.successors shouldEqual List(preExitBlock)
catch2Block.predecessors shouldEqual List(entryTryBlock)
catch2Block.successors shouldEqual List(catch2FinallyBlock, finallyThrowBlock)
catch2FinallyBlock.predecessors shouldEqual List(catch2Block)
catch2FinallyBlock.successors shouldEqual List(preExitBlock)
finallyThrowBlock.predecessors shouldEqual List(entryTryBlock, catch1Block, catch2Block)
finallyThrowBlock.successors shouldEqual List(exitBlock)
preExitBlock.predecessors shouldEqual List(tryFinallyBlock, catch1FinallyBlock, catch2FinallyBlock)
preExitBlock.successors shouldEqual List(exitBlock)
exitBlock.predecessors shouldEqual List(finallyThrowBlock, preExitBlock)
exitBlock.successors shouldBe empty
}
it should "have 4 basic blocks for a method with a try-catch construct" in {
val method = clazz.lookupMethod("tryCatch").get
val entryTryBlock :: gotoBlock :: catchBlock :: exitBlock :: Nil = method.controlFlowGraph
entryTryBlock.predecessors shouldBe empty
entryTryBlock.successors shouldEqual List(gotoBlock, catchBlock)
gotoBlock.predecessors shouldEqual List(entryTryBlock)
gotoBlock.successors shouldEqual List(exitBlock)
catchBlock.predecessors shouldEqual List(entryTryBlock)
catchBlock.successors shouldEqual List(exitBlock)
exitBlock.predecessors shouldEqual List(gotoBlock, catchBlock)
exitBlock.successors shouldBe empty
}
it should "have 1 basic block for a method without any instructions" in {
val method = clazz.lookupMethod("emptyMethod").get
val singleBlock :: Nil = method.controlFlowGraph
singleBlock.predecessors shouldBe empty
singleBlock.lineRange shouldBe (102 to 103)
singleBlock.successors shouldBe empty
}
it should "have 4 basic block for a method without any instructions" in {
val method = clazz.lookupMethod("multipleReturns").get
val entryBlock :: ifBlock :: afterIfBlock :: exitBlock :: Nil = method.controlFlowGraph
entryBlock.predecessors shouldBe empty
entryBlock.lineRange shouldBe (105 to 107)
entryBlock.successors shouldEqual List(ifBlock, afterIfBlock)
ifBlock.predecessors shouldEqual List(entryBlock)
ifBlock.lineRange shouldBe (108 to 109)
ifBlock.successors shouldEqual List(exitBlock)
afterIfBlock.predecessors shouldEqual List(entryBlock)
afterIfBlock.lineRange shouldBe (111 to 112)
afterIfBlock.successors shouldEqual List(exitBlock)
exitBlock.predecessors shouldEqual List(ifBlock, afterIfBlock)
exitBlock.lineRange shouldBe (112 to 112)
exitBlock.successors shouldBe empty
}
it should "have 3 basic blocks for a method with a do-while loop" in {
val method = clazz.lookupMethod("doWhile").get
val entryBlock :: doWhileLoop :: exitBlock :: Nil = method.controlFlowGraph
entryBlock.predecessors shouldBe empty
entryBlock.lineRange shouldBe (148 to 149)
entryBlock.successors shouldEqual List(doWhileLoop)
doWhileLoop.predecessors shouldEqual List(entryBlock, doWhileLoop)
doWhileLoop.lineRange shouldBe (151 to 152)
doWhileLoop.successors shouldEqual List(doWhileLoop, exitBlock)
exitBlock.predecessors shouldEqual List(doWhileLoop)
exitBlock.lineRange shouldBe (153 to 154)
exitBlock.successors shouldBe empty
}
it should "have 4 basic blocks for a method with a foor loop" in {
val method = clazz.lookupMethod("foorLoop").get
val q = method.controlFlowGraph
val entryBlock :: loopCondition :: loopBody :: exitBlock :: Nil = method.controlFlowGraph
entryBlock.predecessors shouldBe empty
entryBlock.lineRange shouldBe (156 to 158)
entryBlock.successors shouldEqual List(loopCondition)
loopCondition.predecessors shouldEqual List(entryBlock, loopBody)
loopCondition.lineRange shouldBe (158 to 158)
loopCondition.successors shouldEqual List(loopBody, exitBlock)
loopBody.predecessors shouldBe List(loopCondition)
loopBody.lineRange shouldBe (159 to 158)
loopBody.successors shouldEqual List(loopCondition)
exitBlock.predecessors shouldEqual List(loopCondition)
exitBlock.lineRange shouldBe (161 to 162)
exitBlock.successors shouldBe empty
}
it should "have 5 basic blocks for a method with a switch-case-case-default statement" in {
val method = clazz.lookupMethod("switchCaseCaseDefault").get
val entryBlock :: case1Block :: case2Block :: defaultBlock :: exitBlock :: Nil = method.controlFlowGraph
entryBlock.predecessors shouldBe empty
entryBlock.lineRange shouldBe (164 to 167)
entryBlock.successors shouldEqual List(case1Block, case2Block, defaultBlock)
case1Block.predecessors shouldEqual List(entryBlock)
case1Block.lineRange shouldBe (169 to 170)
case1Block.successors shouldEqual List(exitBlock)
case2Block.predecessors shouldEqual List(entryBlock)
case2Block.lineRange shouldBe (172 to 173)
case2Block.successors shouldEqual List(exitBlock)
defaultBlock.predecessors shouldEqual List(entryBlock)
defaultBlock.lineRange shouldBe (175 to 175)
defaultBlock.successors shouldEqual List(exitBlock)
exitBlock.predecessors shouldEqual List(case1Block, case2Block, defaultBlock)
exitBlock.lineRange shouldBe (178 to 179)
exitBlock.successors shouldBe empty
}
it should "define 6 basic blocks for function 'isPalindrome'" in {
val palindromeChecker = new Clazz("misc.PalindromeChecker")
val isPalindrome = palindromeChecker.lookupMethod("isPalindrome").get
val entryBlock :: whileCheck :: ifCheck :: thenBlock :: elseBlock :: exitBlock :: Nil = isPalindrome.controlFlowGraph
entryBlock.predecessors shouldBe empty
entryBlock.successors shouldEqual List(whileCheck)
whileCheck.predecessors shouldEqual List(entryBlock, thenBlock)
whileCheck.successors shouldEqual List(ifCheck, exitBlock)
ifCheck.predecessors shouldEqual List(whileCheck)
ifCheck.successors shouldEqual List(thenBlock, elseBlock)
thenBlock.predecessors shouldEqual List(ifCheck)
thenBlock.successors shouldEqual List(whileCheck)
elseBlock.predecessors shouldEqual List(ifCheck)
elseBlock.successors shouldEqual List(exitBlock)
exitBlock.predecessors shouldEqual List(whileCheck, elseBlock)
exitBlock.successors shouldBe empty
}
}
|
oliverhaase/sai
|
src/test/scala/BasicBlockTest.scala
|
Scala
|
mit
| 13,324
|
import scala.collection.mutable.ArrayBuffer
class Person(val name:String) extends Serializable {
val friends = new ArrayBuffer[Person]
}
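// Serializes a cyclic graph of Person objects to a file and reads it back, checking that shared references survive the round trip.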
object Main extends App {
val persons = Array(new Person("Amy"), new Person("Bob"), new Person("Cathy"))
persons(0).friends += persons(1)
persons(1).friends += persons(2)
persons(2).friends += persons(0)
import java.io._
val out = new ObjectOutputStream(new FileOutputStream("foo.dat"))
out.writeObject(persons)
out.close()
val in = new ObjectInputStream(new FileInputStream("foo.dat"))
val savedPersons = in.readObject.asInstanceOf[Array[Person]]
println(savedPersons(0).friends(0) == savedPersons(1))
println(savedPersons(1).friends(0) == savedPersons(2))
println(savedPersons(2).friends(0) == savedPersons(0))
}
|
Gerhut/scala-for-the-impatient
|
Chapter9/10.scala
|
Scala
|
unlicense
| 782
|
package strd.dynaschema
import java.io.FileOutputStream
import java.util.jar.{JarEntry, JarOutputStream, Attributes, Manifest}
import org.apache.commons.lang.StringUtils
/**
*
* User: light
* Date: 16/04/14
* Time: 16:00
*/
case class ResEntry(path :String, data : Array[Byte])
class DynaSchemaBuilder {
var jarExtra : Seq[ResEntry] = Nil
def appendJarResource(path: String, bytes: Array[Byte]) = {
jarExtra = jarExtra :+ ResEntry(path, bytes)
this
}
var classes : Seq[Class[_]] = Nil
var protoClasses : Seq[Class[_]] = Nil
var includePackages : Seq[String] = Nil
var excludePackages : Seq[String] = Nil
def appendClass( cl : Class[_] ) = {
classes :+= cl
this
}
def appendProtoClass( cl : Class[_]) = {
protoClasses :+= cl
this
}
def includePackage( pack : String ) = {
includePackages :+= pack
this
}
def excludePackage( pack : String ) = {
excludePackages :+= pack
this
}
def assembly() = {
new ClassesProcessor(this).build()
}
}
class ClassesProcessor( db : DynaSchemaBuilder ) {
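// Collects the transitive dependencies of the registered classes plus the inner classes of the proto classes, then applies the include/exclude package filters.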
def build() :ClassesWithDependencies = {
val classes = db.classes.toSet[Class[_]].flatMap { cl =>
ClassWorksHelper.getAllDependencies(cl)
}
val protoClasses = db.protoClasses.flatMap { cl =>
cl +: ClassWorksHelper.asmFetchAllInnerClasses(cl)
}.toSet
val allClasses = (classes ++ protoClasses)
.filter { cl =>
db.includePackages.isEmpty || db.includePackages.exists(pack => cl.getName.startsWith(pack))
}.filter { cl =>
db.excludePackages.isEmpty || !db.excludePackages.exists(pack => cl.getName.startsWith(pack))
}
ClassesWithDependencies( allClasses, db )
}
}
case class ClassesWithDependencies( allClasses : Set[Class[_]], db : DynaSchemaBuilder ) {
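// Packages the extra resources and the bytecode of every collected class into /tmp/<schemaType>_<schemaVersion>.jar.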
def createJar( schemaType : String,
schemaVersion : String ) = {
val jarPath = "/tmp/"+schemaType + "_" + schemaVersion +".jar"
val time = System.currentTimeMillis()
val manifest = new Manifest()
manifest.getMainAttributes.put(Attributes.Name.MANIFEST_VERSION, "1.0")
val target = new JarOutputStream(new FileOutputStream(jarPath), manifest)
db.jarExtra.foreach{ e=>
val entry = new JarEntry(e.path)
entry.setTime( time )
target.putNextEntry( entry )
target.write( e.data )
target.closeEntry()
}
allClasses.foreach( cl => {
val path = StringUtils.replace(cl.getName, ".","/") + ".class"
val stream = cl.getClassLoader.getResourceAsStream(path)
if (stream == null) {
throw new RuntimeException("Can not find class: " + path)
}
val entry = new JarEntry(path)
entry.setTime( time )
target.putNextEntry( entry )
val buffer = new Array[Byte](1024)
while (stream.available() > 0) {
val count = stream.read(buffer)
target.write(buffer, 0, count)
}
target.closeEntry()
stream.close()
} )
target.close()
SchemaInJar(schemaType, schemaVersion, jarPath)
}
}
case class SchemaInJar(schemaType : String, schemaVersion : String, jarPath : String) {
def publish( uploadCommand : String ) {
val cmd = StringUtils.replace( StringUtils.replace(uploadCommand, "${SRC_FILE}", jarPath),
"${SCHEMA_TYPE}", schemaType)
val cargs = cmd.split("\\s").toSeq
println(cargs)
import scala.sys.process._
val result = cargs ! ProcessLogger( x=> println(x) )
println("Rsync Exit code: " + result)
if (result != 0) {
throw new RuntimeException("Error while uploading")
}
}
}
|
onerinvestments/strd
|
strd-cluster/src/main/scala/strd/dynaschema/DynaSchemaBuilder.scala
|
Scala
|
apache-2.0
| 3,644
|
package drt.client.components
import diode.data.Pot
import drt.client.components.FlightComponents._
import drt.client.services.JSDateConversions.SDate
import drt.client.services.RootModel
import drt.shared.CrunchApi.MillisSinceEpoch
import drt.shared.Terminals.Terminal
import drt.shared._
import drt.shared.api.Arrival
import japgolly.scalajs.react.vdom.TagOf
import japgolly.scalajs.react.vdom.html_<^.{<, _}
import org.scalajs.dom.raw.HTMLElement
import scala.util.Try
object BigSummaryBoxes {
def flightPcpInPeriod(f: ApiFlightWithSplits, start: SDateLike, end: SDateLike): Boolean = {
val bt: Long = bestTime(f)
start.millisSinceEpoch <= bt && bt <= end.millisSinceEpoch
}
def bestTime(f: ApiFlightWithSplits): MillisSinceEpoch = {
val bestTime = {
val flightDt = SDate(f.apiFlight.Scheduled)
f.apiFlight.PcpTime.getOrElse(flightDt.millisSinceEpoch)
}
bestTime
}
def flightsInPeriod(flights: Seq[ApiFlightWithSplits], now: SDateLike, nowPlus3Hours: SDateLike): Seq[ApiFlightWithSplits] =
flights.filter(flightPcpInPeriod(_, now, nowPlus3Hours))
def countFlightsInPeriod(rootModel: RootModel, now: SDateLike, nowPlus3Hours: SDateLike): Pot[Int] =
rootModel.portStatePot.map(portState => flightsInPeriod(portState.flights.values.toList, now, nowPlus3Hours).length)
def countPaxInPeriod(rootModel: RootModel, now: SDateLike, nowPlus3Hours: SDateLike): Pot[Int] = {
rootModel.portStatePot.map(portState => {
val flights: Seq[ApiFlightWithSplits] = flightsInPeriod(portState.flights.values.toList, now, nowPlus3Hours)
sumActPax(flights)
})
}
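// Prefers splits given as absolute passenger numbers; otherwise falls back to percentage splits scaled by the flight's PCP pax estimate.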
val bestFlightSplits: ApiFlightWithSplits => Set[(PaxTypeAndQueue, Double)] = {
case ApiFlightWithSplits(_, s, _) if s.isEmpty => Set()
case fws@ApiFlightWithSplits(flight, splits, _) =>
if (splits.exists { case Splits(_, _, _, t) => t == PaxNumbers }) {
splits.find { case Splits(_, _, _, t) => t == PaxNumbers } match {
case None => Set()
case Some(apiSplits) => apiSplits.splits.map {
s => (PaxTypeAndQueue(s.passengerType, s.queueType), s.paxCount)
}
}
} else {
splits.find { case Splits(_, _, _, t) => t == Percentage } match {
case None => Set()
case Some(apiSplits) => apiSplits.splits.map {
s => (PaxTypeAndQueue(s.passengerType, s.queueType), s.paxCount / 100 * fws.pcpPaxEstimate)
}
}
}
}
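// Sums passengers per (pax type, queue) across flights, excluding the transfer queue, and rounds to whole passengers.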
def aggregateSplits(flights: Iterable[ApiFlightWithSplits]): Map[PaxTypeAndQueue, Int] = {
val newSplits = Map[PaxTypeAndQueue, Double]()
val allSplits: Iterable[(PaxTypeAndQueue, Double)] = flights.flatMap(bestFlightSplits)
val splitsExcludingTransfers = allSplits.filter(_._1.queueType != Queues.Transfer)
// //todo import cats - it makes short, efficient work of this sort of aggregation.
val aggSplits: Map[PaxTypeAndQueue, Double] = splitsExcludingTransfers.foldLeft(newSplits) {
case (agg, (k, v)) =>
val g = agg.getOrElse(k, 0d)
agg.updated(k, v + g)
}
val aggSplitsInts: Map[PaxTypeAndQueue, Int] = aggSplits.mapValues(Math.round(_).toInt)
aggSplitsInts
}
def flightsAtTerminal(flightsPcp: Seq[ApiFlightWithSplits], ourTerminal: Terminal): Seq[ApiFlightWithSplits] = {
flightsPcp.filter(f => f.apiFlight.Terminal == ourTerminal)
}
def sumActPax(flights: Seq[ApiFlightWithSplits]): Int = flights.flatMap(_.apiFlight.ActPax).sum
def sumBestPax(bestFlightSplitPax: ApiFlightWithSplits => Double)(flights: Seq[ApiFlightWithSplits]): Double = flights.map(bestFlightSplitPax).sum
case class Props(flightCount: Int, actPaxCount: Int, bestPaxCount: Int, aggSplits: Map[PaxTypeAndQueue, Int], paxQueueOrder: Seq[PaxTypeAndQueue])
def GraphComponent(splitTotal: Int, queuePax: Map[PaxTypeAndQueue, Int], paxQueueOrder: Iterable[PaxTypeAndQueue]): TagOf[HTMLElement] = {
val value = Try {
val orderedSplitCounts: Iterable[(PaxTypeAndQueue, Int)] = paxQueueOrder.map(ptq => ptq -> queuePax.getOrElse(ptq, 0))
SplitsGraph.splitsGraphComponentColoured(SplitsGraph.Props(splitTotal, orderedSplitCounts))
}
val g: Try[TagOf[HTMLElement]] = value recoverWith {
case f => Try(<.div(f.toString()))
}
g.get
}
}
|
UKHomeOffice/drt-scalajs-spa-exploration
|
client/src/main/scala/drt/client/components/BigSummaryBoxes.scala
|
Scala
|
apache-2.0
| 4,296
|
package com.twitter.finagle.context
/**
* A context contains a number of let-delimited bindings. Bindings
* are indexed by type Key[A] in a typesafe manner. Later bindings
* shadow earlier ones.
*
* Note that the implementation of context maintains all bindings
* in a linked list; context lookup requires a linear search.
*/
trait Context {
type Key[A]
/**
* Represents an opaque type that consists of a key-value pair
* which can be used to set multiple keys in the context.
*/
case class KeyValuePair[T](key: Key[T], value: T)
/**
* Retrieve the current definition of a key.
*
* @throws NoSuchElementException when the key is undefined
* in the current request-local context.
*/
@throws[NoSuchElementException]("If the key does not exist")
def apply[A](key: Key[A]): A = get(key) match {
case Some(v) => v
case None => throw new NoSuchElementException(s"Key not found: ${key.toString}")
}
/**
* Retrieve the current definition of a key, but only
* if it is defined in the current request-local context.
*/
def get[A](key: Key[A]): Option[A]
/**
* Retrieve the current definition of a key if it is defined.
* If it is not defined, `orElse` is evaluated and returned.
*/
def getOrElse[A](key: Key[A], orElse: () => A): A = get(key) match {
case Some(a) => a
case None => orElse()
}
/**
* Tells whether `key` is defined in the current request-local
* context.
*/
def contains[A](key: Key[A]): Boolean = get(key).isDefined
/**
* Bind `value` to `key` in the scope of `fn`.
*/
def let[A, R](key: Key[A], value: A)(fn: => R): R
/**
* Bind two keys and values in the scope of `fn`.
*/
def let[A, B, R](key1: Key[A], value1: A, key2: Key[B], value2: B)(fn: => R): R
/**
* Bind multiple key-value pairs. Keys later in the collection take
* precedence over keys earlier in the collection.
*/
def let[R](keys: Iterable[KeyValuePair[_]])(fn: => R): R
/**
* Unbind the passed-in key, in the scope of `fn`.
*/
def letClear[R](key: Key[_])(fn: => R): R
/**
* Unbind the passed-in keys, in the scope of `fn`.
*/
def letClear[R](keys: Iterable[Key[_]])(fn: => R): R
/**
* Clears all bindings in the scope of `fn`.
*
* For example:
* {{{
* context.let(Key1, "value1") {
* context.let(Key2, "something else") {
* context.letClearAll {
* // context.contains(Key1) == false
* // context.contains(Key2) == false
* }
* // context(Key1) == "value1"
* // context(Key2) == "something else"
* }
* }
* }}}
*/
def letClearAll[R](fn: => R): R
}
|
twitter/finagle
|
finagle-core/src/main/scala/com/twitter/finagle/context/Context.scala
|
Scala
|
apache-2.0
| 2,689
|
package com.bgfurfeature.util
import java.text.SimpleDateFormat
import java.util.{Calendar, Date}
/**
* Created by Administrator on 2016/1/8.
*/
object TimeUtil {
def getDay: String = {
val sdf: SimpleDateFormat = new SimpleDateFormat("yyyy-MM-dd")
val date: String = sdf.format(new Date)
date
}
def getCurrentHour: Int = {
val calendar = Calendar.getInstance
calendar.setTime(new Date)
calendar.get(Calendar.HOUR_OF_DAY)
}
def getTimeStamp:Long = {
System.currentTimeMillis()
}
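// Formats an epoch-millisecond timestamp (passed as a string) as yyyy-MM-dd-HH_mm.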
def getMinute(ts:String): String = {
val sdf: SimpleDateFormat = new SimpleDateFormat("yyyy-MM-dd-HH_mm")
val date: String = sdf.format(new Date(ts.toLong))
date
}
}
|
bgfurfeature/AI
|
src/main/scala/com/bgfurfeature/util/TimeUtil.scala
|
Scala
|
apache-2.0
| 711
|
package viper.api
import viper.silver.ast._
import scala.collection.JavaConverters._
import viper.silver.verifier.{Failure, Success, AbortedExceptionally, VerificationError}
import java.util.List
import java.util.Properties
import java.util.SortedMap
import scala.math.BigInt.int2bigInt
import viper.silver.ast.SeqAppend
import java.nio.file.Path
import viper.silver.parser.PLocalVarDecl
import scala.collection.mutable.WrappedArray
class Prog {
val domains = new java.util.ArrayList[Domain]()
val fields = new java.util.ArrayList[Field]()
val functions = new java.util.ArrayList[Function]()
val predicates = new java.util.ArrayList[Predicate]()
val methods = new java.util.ArrayList[Method]()
}
|
sccblom/vercors
|
viper/silver/src/main/scala/viper/api/Prog.scala
|
Scala
|
mpl-2.0
| 750
|
package org.redsimulator
import org.scalatest._
import org.scalatest.mock.MockitoSugar
trait UnitSpec
extends WordSpecLike
with Matchers
with OptionValues
with Inspectors
with MockitoSugar {
}
|
simonecarriero/red-simulator
|
src/test/scala/org/redsimulator/UnitSpec.scala
|
Scala
|
mit
| 205
|
package com.owlandrews.nescala.ui
import javax.sound.sampled._
import scala.util.Try
object Audio {
private val sampleRate = 44100f
private val format = new AudioFormat(
sampleRate,
16, //bit
2, //channel
true, //signed
false //little endian
)
private val samplesPerFrame = Math.ceil((sampleRate * 2) / 60F).toInt
private val buffer = new scala.collection.mutable.ArrayBuffer[Byte](samplesPerFrame)
private val output = Try {
val sdl = AudioSystem.getSourceDataLine(format)
sdl.open(format, samplesPerFrame * 4 /*frame*/ * 2 /*ch*/ * 2 /*bytes/sample*/)
sdl
}
def start() = output.foreach(x => x.start())
def stop() = output.foreach(x => x.stop())
def receive(sample:Int): Unit = {
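// Scale the incoming sample, clamp it to the signed 16-bit range, and buffer little-endian stereo frames.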
val outputVolume = sample * 0.799987793F
val outputSample = if (outputVolume < -32768) -32768
else if (outputVolume > 32767) 32767
else outputVolume
// left channel
val lch = outputSample.toInt
val o1 = (lch & 0xff).toByte
val o2 = ((lch >> 8) & 0xff).toByte
//right ch
val rch = outputSample.toInt
val o3 = (rch & 0xff).toByte
val o4 = ((rch >> 8) & 0xff).toByte
buffer ++= Array(o1, o2, o3, o4)
output.foreach{sdl =>
if(sdl.available() >= buffer.length) {
sdl.write(buffer.toArray, 0, buffer.size)
}
buffer.clear()
}
}
}
|
hywelandrews/nescala
|
src/main/scala/com/owlandrews/nescala/ui/Audio.scala
|
Scala
|
gpl-2.0
| 1,416
|
package libs
import scala.collection.mutable.Map
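/** Minimal mutable key-value store; get casts the stored value to the requested type. */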
class WorkerStore() {
val store = Map[String, Any]()
def get[T](key: String): T = {
store(key).asInstanceOf[T]
}
def put(key: String, value: Any) = {
store += (key -> value)
}
}
|
amplab/SparkNet
|
src/main/scala/libs/WorkerStore.scala
|
Scala
|
mit
| 249
|
package pregnaware.user
import java.time.LocalDate
import pregnaware.user.entities.{WrappedFriend, WrappedUser}
import pregnaware.utils.ExecutionWrapper
import scala.concurrent.Future
trait UserPersistence extends ExecutionWrapper {
/** Add a new user */
def addUser(displayName: String, email: String, passwordHash: String): Future[WrappedUser]
/** Modify an existing user */
def updateUser(userId: Int, displayName: String, email: String, passwordHash: String): Future[WrappedUser]
/** Remove an existing user */
def deleteUser(userId : Int): Future[Unit]
/** Get a user (plus friends) by e-mail */
def getUser(email: String): Future[Option[WrappedUser]]
/** Get a user (plus friends) by user id */
def getUser(userId: Int): Future[Option[WrappedUser]]
/** Makes a new friend connection between the user and the friend (or confirms an existing one) */
def addFriend(userId: Int, friendId: Int) : Future[WrappedFriend]
/** Delete a friend linkage */
def deleteFriend(userId: Int, friendId: Int) : Future[Unit]
/** Prevents a friendship */
def blockFriend(userId: Int, friendId: Int) : Future[Unit]
/** Sets a due date */
def setDueDate(userId: Int, dueDate: LocalDate) : Future[LocalDate]
/** Removes a due date */
def deleteDueDate(userId: Int) : Future[Unit]
/** Retrieves user data (treated as an unprocessed blob of JSON */
def getUserState(userId: Int) : Future[String]
/** Stores user data (treated as an unprocessed blob of JSON */
def setUserState(userId: Int, data: String) : Future[Unit]
}
|
jds106/pregnaware
|
service/src/main/scala/pregnaware/user/UserPersistence.scala
|
Scala
|
mit
| 1,564
|
package org.bitcoins.core.script.crypto
import org.bitcoins.core.script.ScriptOperationFactory
import org.bitcoins.core.script.constant.ScriptOperation
/**
* Created by chris on 1/6/16.
* Represents an operation where a cryptographic function is applied
*/
sealed trait CryptoOperation extends ScriptOperation
/** Represents an operation where ECDSA signatures are evaluated. */
sealed trait CryptoSignatureEvaluation extends CryptoOperation
/** The input is hashed using RIPEMD-160. */
case object OP_RIPEMD160 extends CryptoOperation {
override def opCode = 166
}
/** The input is hashed using SHA-1. */
case object OP_SHA1 extends CryptoOperation {
override def opCode = 167
}
/** The input is hashed using SHA-256. */
case object OP_SHA256 extends CryptoOperation {
override def opCode = 168
}
/** The input is hashed twice: first with SHA-256 and then with RIPEMD-160. */
case object OP_HASH160 extends CryptoOperation {
override def opCode = 169
}
/** The input is hashed two times with SHA-256. */
case object OP_HASH256 extends CryptoOperation {
override def opCode = 170
}
/**
* All of the signature checking words will only match signatures to
* the data after the most recently-executed OP_CODESEPARATOR.
*/
case object OP_CODESEPARATOR extends CryptoOperation {
override def opCode = 171
}
/**
* The entire transaction's outputs, inputs, and script
* (from the most recently-executed OP_CODESEPARATOR to the end) are hashed.
* The signature used by OP_CHECKSIG must be a valid signature for this hash and public key.
* If it is, 1 is returned, 0 otherwise.
*/
case object OP_CHECKSIG extends CryptoSignatureEvaluation {
override def opCode = 172
}
/** Same as OP_CHECKSIG, but OP_VERIFY is executed afterward. */
case object OP_CHECKSIGVERIFY extends CryptoSignatureEvaluation {
override def opCode = 173
}
/**
* Compares the first signature against each public key until it finds an ECDSA match.
* Starting with the subsequent public key, it compares the second signature against each remaining public key
* until it finds an ECDSA match.
* The process is repeated until all signatures have been checked or not enough public keys remain to produce a successful result.
* All signatures need to match a public key.
* Because public keys are not checked again if they fail any signature comparison,
* signatures must be placed in the scriptSig using the same order as their corresponding public keys
* were placed in the scriptPubKey or redeemScript. If all signatures are valid, 1 is returned, 0 otherwise.
* Due to a bug, one extra unused value is removed from the stack.
*/
case object OP_CHECKMULTISIG extends CryptoSignatureEvaluation {
override def opCode = 174
}
/** Same as OP_CHECKMULTISIG, but OP_VERIFY is executed afterward. */
case object OP_CHECKMULTISIGVERIFY extends CryptoSignatureEvaluation {
override def opCode = 175
}
object CryptoOperation extends ScriptOperationFactory[CryptoOperation] {
override def operations = Seq(OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY, OP_CHECKSIG, OP_CHECKSIGVERIFY,
OP_CODESEPARATOR, OP_HASH160, OP_HASH256, OP_RIPEMD160, OP_SHA1, OP_SHA256)
}
|
Christewart/bitcoin-s-core
|
src/main/scala/org/bitcoins/core/script/crypto/CryptoOperations.scala
|
Scala
|
mit
| 3,165
|
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.convert.shp
import org.locationtech.geomesa.convert.EvaluationContext
import org.locationtech.geomesa.convert.shp.ShapefileFunctionFactory.{ShapefileAttribute, ShapefileFeatureId}
import org.locationtech.geomesa.convert2.transforms.{TransformerFunction, TransformerFunctionFactory}
class ShapefileFunctionFactory extends TransformerFunctionFactory {
override def functions: Seq[TransformerFunction] = Seq(shpAttribute, shpFid)
private val shpAttribute = new ShapefileAttribute
private val shpFid = new ShapefileFeatureId
}
object ShapefileFunctionFactory {
val InputSchemaKey = "geomesa.shp.attributes"
val InputValuesKey = "geomesa.shp.values"
class ShapefileAttribute extends TransformerFunction {
private var i = -1
private var values: Array[Any] = _
override val names = Seq("shp")
override def getInstance: ShapefileAttribute = new ShapefileAttribute()
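// On first call, resolves the attribute's column index from the shapefile schema held in the evaluation context (index 0 is the feature id).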
override def eval(args: Array[Any])(implicit ctx: EvaluationContext): Any = {
if (i == -1) {
val names = ctx.get(ctx.indexOf(InputSchemaKey)).asInstanceOf[Array[String]]
i = names.indexOf(args(0).asInstanceOf[String]) + 1 // 0 is fid
if (i == 0) {
throw new IllegalArgumentException(s"Attribute '${args(0)}' does not exist in shapefile: ${names.mkString(", ")}")
}
values = ctx.get(ctx.indexOf(InputValuesKey)).asInstanceOf[Array[Any]]
}
values(i)
}
}
class ShapefileFeatureId extends TransformerFunction {
private var values: Array[Any] = _
override val names = Seq("shpFeatureId")
override def getInstance: ShapefileFeatureId = new ShapefileFeatureId()
override def eval(args: Array[Any])(implicit ctx: EvaluationContext): Any = {
if (values == null) {
values = ctx.get(ctx.indexOf(InputValuesKey)).asInstanceOf[Array[Any]]
}
values(0)
}
}
}
|
ddseapy/geomesa
|
geomesa-convert/geomesa-convert-shp/src/main/scala/org/locationtech/geomesa/convert/shp/ShapefileFunctionFactory.scala
|
Scala
|
apache-2.0
| 2,372
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.partial
import org.apache.spark.annotation.Experimental
@Experimental
class PartialResult[R](initialVal: R, isFinal: Boolean) {
private var finalValue: Option[R] = if (isFinal) Some(initialVal) else None
private var failure: Option[Exception] = None
private var completionHandler: Option[R => Unit] = None
private var failureHandler: Option[Exception => Unit] = None
def initialValue: R = initialVal
def isInitialValueFinal: Boolean = isFinal
/**
* Blocking method to wait for and return the final value.
*/
def getFinalValue(): R = synchronized {
while (finalValue.isEmpty && failure.isEmpty) {
this.wait()
}
if (finalValue.isDefined) {
return finalValue.get
} else {
throw failure.get
}
}
/**
* Set a handler to be called when this PartialResult completes. Only one completion handler
* is supported per PartialResult.
*/
def onComplete(handler: R => Unit): PartialResult[R] = synchronized {
if (completionHandler.isDefined) {
throw new UnsupportedOperationException("onComplete cannot be called twice")
}
completionHandler = Some(handler)
if (finalValue.isDefined) {
// We already have a final value, so let's call the handler
handler(finalValue.get)
}
return this
}
/**
* Set a handler to be called if this PartialResult's job fails. Only one failure handler
* is supported per PartialResult.
*/
def onFail(handler: Exception => Unit) {
synchronized {
if (failureHandler.isDefined) {
throw new UnsupportedOperationException("onFail cannot be called twice")
}
failureHandler = Some(handler)
if (failure.isDefined) {
// We already have a failure, so let's call the handler
handler(failure.get)
}
}
}
/**
* Transform this PartialResult into a PartialResult of type T.
*/
def map[T](f: R => T) : PartialResult[T] = {
new PartialResult[T](f(initialVal), isFinal) {
override def getFinalValue() : T = synchronized {
f(PartialResult.this.getFinalValue())
}
override def onComplete(handler: T => Unit): PartialResult[T] = synchronized {
PartialResult.this.onComplete(handler.compose(f)).map(f)
}
override def onFail(handler: Exception => Unit) {
synchronized {
PartialResult.this.onFail(handler)
}
}
override def toString : String = synchronized {
PartialResult.this.getFinalValueInternal() match {
case Some(value) => "(final: " + f(value) + ")"
case None => "(partial: " + initialValue + ")"
}
}
def getFinalValueInternal(): Option[T] = PartialResult.this.getFinalValueInternal().map(f)
}
}
private[spark] def setFinalValue(value: R) {
synchronized {
if (finalValue.isDefined) {
throw new UnsupportedOperationException("setFinalValue called twice on a PartialResult")
}
finalValue = Some(value)
// Call the completion handler if it was set
completionHandler.foreach(h => h(value))
// Notify any threads that may be calling getFinalValue()
this.notifyAll()
}
}
private def getFinalValueInternal() = finalValue
private[spark] def setFailure(exception: Exception) {
synchronized {
if (failure.isDefined) {
throw new UnsupportedOperationException("setFailure called twice on a PartialResult")
}
failure = Some(exception)
// Call the failure handler if it was set
failureHandler.foreach(h => h(exception))
// Notify any threads that may be calling getFinalValue()
this.notifyAll()
}
}
override def toString: String = synchronized {
finalValue match {
case Some(value) => "(final: " + value + ")"
case None => "(partial: " + initialValue + ")"
}
}
}
|
tophua/spark1.52
|
core/src/main/scala/org/apache/spark/partial/PartialResult.scala
|
Scala
|
apache-2.0
| 5,278
|
package com.mentatlabs.nsa
package scalac
package dsl
package experimental
trait ScalacYRangeposDSL
extends ScalacExperimentalDSL {
object Yrangepos {
val unary_- = options.ScalacYRangepos
}
}
|
mentat-labs/sbt-nsa
|
nsa-dsl/src/main/scala/com/mentatlabs/nsa/scalac/dsl/experimental/private/ScalacYRangeposDSL.scala
|
Scala
|
bsd-3-clause
| 207
|
/*
*
* Copyright (c) 2017 Radicalbit
*
* This file is part of flink-JPMML
*
* flink-JPMML is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* flink-JPMML is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with flink-JPMML. If not, see <http://www.gnu.org/licenses/>.
*
*/
import sbt.Keys._
import sbt._
object Commons {
val settings: Seq[Def.Setting[_]] = Seq(
organization := "io.radicalbit",
scalaVersion in ThisBuild := "2.11.12",
resolvers in ThisBuild ++= Seq(
"Radicalbit Releases" at "https://tools.radicalbit.io/artifactory/public-release/"
)
)
}
|
maocorte/flink-jpmml
|
project/Commons.scala
|
Scala
|
agpl-3.0
| 1,064
|
package jp.kenichi.lrcon
package server
import java.nio.file.Paths
object ServerConf { // TODO: make it a class
val workDir = Paths.get("work") // TODO: read a conf file
}
|
ken1ma/lrcon
|
server/src/main/scala/jp.kenichi.lrcon/server/ServerConf.scala
|
Scala
|
apache-2.0
| 175
|
package com.datastax.spark.connector
package object testkit {
final val DefaultHost = "127.0.0.1"
val dataSeq = Seq (
Seq("1first", "1round", "1words"),
Seq("2second", "2round", "2words"),
Seq("3third", "3round", "3words"),
Seq("4fourth", "4round", "4words")
)
val data = dataSeq.head
}
|
shashwat7/spark-cassandra-connector
|
spark-cassandra-connector/src/test/scala/com/datastax/spark/connector/testkit/package.scala
|
Scala
|
apache-2.0
| 316
|
package ee.cone.c4actor
import ee.cone.c4actor.Types.{ClName, SrcId}
import ee.cone.c4assemble.Types.{Index, Values}
import ee.cone.c4assemble.{AssembledKey, Getter, IndexUtil, Single, Types}
import ee.cone.c4di.Types.ComponentFactory
import ee.cone.c4di.{c4, provide}
@c4("RichDataCompApp") final class SwitchOrigKeyFactoryHolder(proposition: Option[OrigKeyFactoryProposition], byPKKeyFactory: KeyFactory)
extends OrigKeyFactoryFinalHolder(proposition.fold(byPKKeyFactory)(_.value))
@c4("RichDataCompApp") final case class DefaultKeyFactory(composes: IndexUtil)(
srcIdAlias: String = "SrcId",
srcIdClass: ClName = classOf[SrcId].getName
) extends KeyFactory {
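// Builds the assembled index key for the given class name, keyed by SrcId.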
def rawKey(className: String): AssembledKey =
composes.joinKey(was = false, srcIdAlias, srcIdClass, className)
}
|
conecenter/c4proto
|
base_lib/src/main/scala/ee/cone/c4actor/KeyFactoryImpl.scala
|
Scala
|
apache-2.0
| 789
|
package bryghts.benchmarks
import play.api.libs.json.Json
class PlayJson_Benchmarking extends Benchmarking
{
protected val parseOther: String => Any = js => Json.parse(js)
protected val otherTitle: String = "PlayJson"
}
|
marcesquerra/KissJson
|
src/test/scala/bryghts/benchmarks/PlayJson_Benchmarking.scala
|
Scala
|
mit
| 228
|
package com.example.api
import com.example.api.models.exceptions.{ ModelFormatException, ModelNotFoundException }
import org.scalatest.concurrent.ScalaFutures
import org.scalatestplus.play.{ OneAppPerSuite, PlaySpec }
import play.api.data.validation.ValidationError
import play.api.http.MimeTypes.JSON
import play.api.i18n.{ I18nSupport, Messages, MessagesApi }
import play.api.libs.json.{ JsPath, KeyPathNode }
import play.api.test.FakeRequest
import play.api.test.Helpers._
class ErrorHandlerSpec extends PlaySpec with OneAppPerSuite with ScalaFutures with I18nSupport {
implicit val messagesApi = app.injector.instanceOf[MessagesApi]
val errorHandler = app.injector.instanceOf[ErrorHandler]
"404 errors" should {
"return json" in {
val fakeRequest = FakeRequest(POST, "/v2/not-found")
val res = errorHandler.onServerError(fakeRequest, ModelNotFoundException)
(contentAsJson(res) \ "error" \ "message").as[String] mustBe Messages("exceptions.404")
(contentAsJson(res) \ "error" \ "statusCode").as[String] mustBe "404"
contentType(res) mustBe Some(JSON)
}
}
"422 errors" should {
"return json" in {
val fakeRequest = FakeRequest(POST, "/v2/model-format")
val ex = ModelFormatException(Seq((JsPath(List(KeyPathNode("modelName/attributeName"))), Seq(ValidationError("Invalid format")))))
val res = errorHandler.onServerError(fakeRequest, ex)
(contentAsJson(res) \ "error" \ "message").as[String] mustBe Messages("model_name.attribute_name.invalid")
(contentAsJson(res) \ "error" \ "statusCode").as[String] mustBe "422"
contentType(res) mustBe Some(JSON)
}
}
"500 errors" should {
"return json" in {
val fakeRequest = FakeRequest(GET, "/v2/internal-server-error")
val ex = new RuntimeException("Some exception message")
val res = errorHandler.onServerError(fakeRequest, ex)
(contentAsJson(res) \ "error" \ "message").as[String] must equal(Messages("exceptions.500"))
(contentAsJson(res) \ "error" \ "statusCode").as[String] must equal("500")
contentType(res) must equal(Some(JSON))
}
}
}
|
jtescher/play-api
|
test/com/example/api/ErrorHandlerSpec.scala
|
Scala
|
mit
| 2,140
|
package test
import dotty.tools.dotc._
import core._, ast._
import Trees._
import Contexts.Context
object parsePackage extends ParserTest {
import ast.untpd._
var nodes = 0
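// Rebuilds each untyped tree node it visits while counting the nodes traversed.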
val transformer = new UntypedTreeMap {
override def transform(tree: Tree)(implicit ctx: Context): Tree = {
nodes += 1
tree match {
case Ident(name) =>
Ident(name)
case This(name) =>
This(name)
case TypedSplice(t) =>
TypedSplice(t)
case SymbolLit(str) =>
tree
case InterpolatedString(id, parts, elems) =>
InterpolatedString(id, parts map (transformSub(_)), elems map transform)
case mdef @ ModuleDef(name, impl) =>
ModuleDef(name, transformSub(impl)).withMods(mdef.mods)
case Function(params, body) =>
Function(params map transform, body)
case InfixOp(l, o, r) =>
InfixOp(transform(l), o, transform(r))
case PostfixOp(l, o) =>
PostfixOp(transform(l), o)
case PrefixOp(o, t) =>
PrefixOp(o, transform(t))
case Parens(t) =>
Parens(transform(t))
case Tuple(ts) =>
Tuple(ts map transform)
case WhileDo(cond, body) =>
WhileDo(transform(cond), transform(body))
case DoWhile(body, cond) =>
DoWhile(transform(body), transform(cond))
case ForYield(enums, expr) =>
ForYield(enums map transform, transform(expr))
case ForDo(enums, expr) =>
ForDo(enums map transform, transform(expr))
case GenFrom(pat, expr) =>
GenFrom(transform(pat), transform(expr))
case GenAlias(pat, expr) =>
GenAlias(transform(pat), transform(expr))
case PatDef(mods, pats, tpt, expr) =>
PatDef(mods, pats map transform, transform(tpt), transform(expr))
case ContextBounds(bounds, cxBounds) =>
ContextBounds(transformSub(bounds), cxBounds map transform)
case _ =>
super.transform(tree)
}
}
}
def test() = {
reset()
nodes = 0
val start = System.nanoTime()
parseDir("./src")
parseDir("../scala/src")
val ms1 = (System.nanoTime() - start)/1000000
val buf = parsedTrees map transformer.transform
val ms2 = (System.nanoTime() - start)/1000000
println(s"$parsed files parsed in ${ms1}ms, $nodes nodes transformed in ${ms2-ms1}ms, total trees created = ${Trees.ntrees}")
ctx.reporter.printSummary(ctx)
}
def main(args: Array[String]): Unit = {
// parse("/Users/odersky/workspace/scala/src/compiler/scala/tools/nsc/doc/model/ModelFactoryTypeSupport.scala")
for (i <- 0 until 10) test()
}
}
|
AlexSikia/dotty
|
test/test/parsePackage.scala
|
Scala
|
bsd-3-clause
| 2,688
|
package org.reactivebird
import org.reactivebird.http._
import org.reactivebird.api._
import org.reactivebird.models.ModelFactory
import akka.actor.ActorSystem
import com.typesafe.config.ConfigFactory
import spray.caching.Cache
import spray.http.HttpResponse
import spray.caching.LruCache
import java.util.concurrent.TimeUnit.DAYS
import scala.concurrent.duration.Duration
class TwitterApi(
consumer: Consumer,
token: Token)(
implicit val system: ActorSystem)
extends HttpService
with StatusFiltering
with Retrying
with Caching
with Authorizing
with ModelFactory
with Timeline
with Tweets
with Search
with FriendsAndFollowers
with Users
with DirectMessages
with Favorites {
private val config = ConfigFactory.load()
protected val cacheResult = config.getBoolean("reactivebird.cache-result")
protected val retryCount = config.getInt("reactivebird.retry-count")
private lazy val timeToLive = config.getDuration("reactivebird.time-to-live", DAYS)
implicit val exec = system.dispatcher
protected val cache: Cache[HttpResponse] = LruCache(timeToLive = Duration(timeToLive, DAYS))
override protected def authorizer = Authorizer(consumer, token)
}
|
benoitguigal/reactive-bird
|
src/main/scala/org/reactivebird/TwitterApi.scala
|
Scala
|
mit
| 1,200
|
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.execution
package internal
// Left blank, because we've got nothing specific for Scala.js at this point
private[execution] abstract class FutureUtilsForPlatform
|
monixio/monix
|
monix-execution/js/src/main/scala/monix/execution/internal/FutureUtilsForPlatform.scala
|
Scala
|
apache-2.0
| 851
|
package ml.combust.mleap.bundle.ops.sklearn
import ml.combust.bundle.BundleContext
import ml.combust.bundle.dsl._
import ml.combust.bundle.op.OpModel
import ml.combust.mleap.bundle.ops.MleapOp
import ml.combust.mleap.core.feature.BinarizerModel
import ml.combust.mleap.runtime.MleapContext
import ml.combust.mleap.runtime.transformer.feature.Binarizer
import ml.combust.mleap.runtime.types.BundleTypeConverters._
/**
* This is needed because the sklearn Binarizer outputs an input_shapes list
* instead of a single input_shape as Spark does.
*/
class BinarizerOp extends MleapOp[Binarizer, BinarizerModel] {
override val Model: OpModel[MleapContext, BinarizerModel] = new OpModel[MleapContext, BinarizerModel] {
override val klazz: Class[BinarizerModel] = classOf[BinarizerModel]
override def opName: String = "sklearn_binarizer"
override def store(model: Model, obj: BinarizerModel)
(implicit context: BundleContext[MleapContext]): Model = {
model.withValue("threshold", Value.double(obj.threshold)).
withValue("input_shapes", Value.dataShapeList(Seq(obj.inputShape).map(mleapToBundleShape)))
}
override def load(model: Model)
(implicit context: BundleContext[MleapContext]): BinarizerModel = {
val inputShapes = model.value("input_shapes").getDataShapeList.map(bundleToMleapShape)
BinarizerModel(model.value("threshold").getDouble, inputShapes(0))
}
}
override def model(node: Binarizer): BinarizerModel = node.model
}
|
combust/mleap
|
mleap-runtime/src/main/scala/ml/combust/mleap/bundle/ops/sklearn/BinarizerOp.scala
|
Scala
|
apache-2.0
| 1,533
|
import sbt._
import sbt.Keys._
import java.io.PrintWriter
import java.io.File
import play.Play.autoImport._
import play.sbt.PlayImport._
import sbtbuildinfo._
import sbtbuildinfo.BuildInfoKeys._
import play.sbt.routes.RoutesKeys._
import com.typesafe.sbt.packager.Keys._
import com.typesafe.sbt.SbtScalariform
import com.typesafe.sbt.SbtScalariform.ScalariformKeys
import scalariform.formatter.preferences._
import com.typesafe.sbt.SbtNativePackager._
import com.typesafe.sbt.packager.docker._
object ApplicationBuild extends Build {
scalaVersion := "2.11.7"
val appName = "playground"
val branch = ""; // "git rev-parse --abbrev-ref HEAD".!!.trim
val commit = ""; // "git rev-parse --short HEAD".!!.trim
val buildTime = (new java.text.SimpleDateFormat("yyyyMMdd-HHmmss")).format(new java.util.Date())
val major = 1
val minor = 1
val patch = 0
val appVersion = s"$major.$minor.$patch-$commit"
println()
println(s"App Name => ${appName}")
println(s"App Version => ${appVersion}")
println(s"Git Branch => ${branch}")
println(s"Git Commit => ${commit}")
println(s"Scala Version => ${scalaVersion}")
println()
val scalaBuildOptions = Seq("-unchecked", "-feature", "-language:reflectiveCalls", "-deprecation",
"-language:implicitConversions", "-language:postfixOps", "-language:dynamics", "-language:higherKinds",
"-language:existentials", "-language:experimental.macros", "-Xmax-classfile-name", "140")
val appDependencies = Seq( ws,
// "org.elasticsearch" % "elasticsearch" % "0.90.1",
"commons-io" % "commons-io" % "2.4",
"org.webjars" %% "webjars-play" % "2.3.0" withSources() ,
"org.webjars" % "angularjs" % "1.2.23",
"org.webjars" % "bootstrap" % "3.2.0",
"org.webjars" % "d3js" % "3.4.11",
"me.lightspeed7" % "mongoFS" % "0.8.1"
)
val playground = Project("playground", file("."))
.enablePlugins(play.PlayScala)
.enablePlugins(play.PlayScala, BuildInfoPlugin)
.settings(scalacOptions ++= scalaBuildOptions)
.settings(
version := appVersion,
libraryDependencies ++= appDependencies
)
.settings(
// BuildInfo
buildInfoPackage := "io.timeli.ingest",
buildInfoKeys := Seq[BuildInfoKey](name, version, scalaVersion, sbtVersion) :+ BuildInfoKey.action("buildTime") {
System.currentTimeMillis
}
)
.settings(
maintainer := "David Buschman", // setting a maintainer which is used for all packaging types
dockerExposedPorts in Docker := Seq(9000, 9443), // exposing the play ports
dockerBaseImage := "play_java_mongo_db/latest",
dockerRepository := Some("docker.transzap.com:2375/play_java_mongo_db")
)
.settings(
resolvers += "MongoFS Interim Maven Repo" at "https://github.com/dbuschman7/mvn-repo/raw/master"
)
println(s"Deploy this with: docker run -p 10000:9000 ${appName}:${appVersion}")
}
|
dbuschman7/collection-of-things
|
playground/project/Build.scala
|
Scala
|
apache-2.0
| 2,966
|
/*
* Copyright 2019 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.play.views.layouts
import org.scalatest.{Matchers, WordSpec}
import play.api.test.Helpers.{contentAsString, defaultAwaitTimeout}
import play.twirl.api.Html
import uk.gov.hmrc.play.test.NoStartedPlayApp
import uk.gov.hmrc.play.views.html.layouts.Footer
import uk.gov.hmrc.play.views.layouts.test.TestConfigs._
class FooterSpec extends WordSpec with Matchers with NoStartedPlayApp {
"footer" should {
val footer = new Footer(testAssetsConfig)
"be renderable without a started Play application" in {
thereShouldBeNoStartedPlayApp()
val rendered = contentAsString(footer(
analyticsToken = None,
analyticsHost = "",
ssoUrl = None,
scriptElem = Some(Html("footer was rendered")),
gaCalls = None))
rendered should include("footer was rendered")
}
"remove the query string by default from the page data item" in {
val rendered = contentAsString(footer(
analyticsToken = Some("TESTTOKEN"),
analyticsHost = "localhost",
ssoUrl = Some("localhost"),
scriptElem = None,
gaCalls = None))
rendered should include("ga('set', 'page', location.pathname);")
}
"allow the query string by exception in the page data item" in {
val rendered = contentAsString(footer(
analyticsToken = Some("TESTTOKEN"),
analyticsHost = "localhost",
ssoUrl = Some("localhost"),
scriptElem = None,
allowQueryStringInAnalytics = true,
gaCalls = None))
rendered should not include "ga('set', 'page', location.pathname);"
}
}
}
|
nicf82/play-ui
|
src/test/scala/uk/gov/hmrc/play/views/layouts/FooterSpec.scala
|
Scala
|
apache-2.0
| 2,223
|
package io.seldon.spark.rdd
import java.util.zip.GZIPOutputStream
import org.apache.spark.rdd.RDD
import java.io._
object FileUtils {
import DataSourceMode._
def toSparkResource(location:String, mode:DataSourceMode): String = {
mode match {
case LOCAL => return location.replace("local:/","")
case S3 => return location
}
}
def toOutputResource(location:String, mode: DataSourceMode): String = {
mode match {
case LOCAL => return location.replace("local:/","")
case S3 => return location.replace("s3n://", "")
}
}
def outputModelToFile(model: RDD[String],outputFilesLocation:String, outputType:DataSourceMode,filename:String) {
outputType match {
case LOCAL => outputModelToLocalFile(model.collect(),outputFilesLocation,filename)
case S3 => outputModelToS3File(model.collect(), outputFilesLocation, filename)
}
}
def outputModelToFile(lines: Array[String],outputFilesLocation:String, outputType:DataSourceMode,filename:String) {
outputType match {
case LOCAL => outputModelToLocalFile(lines,outputFilesLocation,filename)
case S3 => outputModelToS3File(lines, outputFilesLocation, filename)
}
}
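// Loan-pattern helper: opens a PrintWriter on f, hands it to op, and always closes it afterwards.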
def printToFile(f: java.io.File)(op: java.io.PrintWriter => Unit) {
val p = new java.io.PrintWriter(f)
try { op(p) } finally { p.close() }
}
def outputModelToLocalFile(lines: Array[String], outputFilesLocation: String, filename : String) = {
new File(outputFilesLocation).mkdirs()
val userFile = new File(outputFilesLocation+"/"+filename);
userFile.createNewFile()
printToFile(userFile){
p => lines.foreach {
s => {
p.println(s)
}
}
}
}
def outputModelToS3File(lines: Array[String], outputFilesLocation: String, filename : String) = {
import org.jets3t.service.S3Service
import org.jets3t.service.impl.rest.httpclient.RestS3Service
import org.jets3t.service.model.{S3Object, S3Bucket}
import org.jets3t.service.security.AWSCredentials
val service: S3Service = new RestS3Service(new AWSCredentials(System.getenv("AWS_ACCESS_KEY_ID"), System.getenv("AWS_SECRET_ACCESS_KEY")))
val bucketString = outputFilesLocation.split("/")(0)
val bucket = service.getBucket(bucketString)
val s3Folder = outputFilesLocation.replace(bucketString+"/","")
val outBuf = new StringBuffer()
lines.foreach(u => {
outBuf.append(u)
outBuf.append("\\n")
})
val obj = new S3Object(s3Folder+"/"+filename, outBuf.toString())
// The object must actually be uploaded; without this call nothing is ever written to the bucket.
service.putObject(bucket, obj)
}
def gzip(path: String):File = {
val buf = new Array[Byte](1024)
val src = new File(path)
val dst = new File(path ++ ".gz")
try {
val in = new BufferedInputStream(new FileInputStream(src))
try {
val out = new GZIPOutputStream(new FileOutputStream(dst))
try {
var n = in.read(buf)
while (n >= 0) {
out.write(buf, 0, n)
n = in.read(buf)
}
}
finally {
out.flush
out.close()
in.close()
}
} catch {
case _:FileNotFoundException =>
System.err.printf("Permission Denied: %s", path ++ ".gz")
case _:SecurityException =>
System.err.printf("Permission Denied: %s", path ++ ".gz")
}
} catch {
case _: FileNotFoundException =>
System.err.printf("File Not Found: %s", path)
case _: SecurityException =>
System.err.printf("Permission Denied: %s", path)
}
return dst;
}
}
|
curtiszimmerman/seldon-server
|
offline-jobs/spark/src/main/scala/io/seldon/spark/rdd/FileUtils.scala
|
Scala
|
apache-2.0
| 3,576
|
package ml.combust.mleap.core.regression
import ml.combust.mleap.core.types.{ScalarType, StructField, TensorType}
import org.scalatest.FunSpec
import org.apache.spark.ml.linalg.Vectors
/**
* Created by hwilkins on 1/21/16.
*/
class LinearRegressionModelSpec extends FunSpec {
val linearRegression = LinearRegressionModel(Vectors.dense(Array(0.5, 0.75, 0.25)), .33)
describe("#apply") {
it("applies the linear regression to a feature vector") {
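// expected value: 0.5 * 1.0 + 0.75 * 0.5 + 0.25 * 1.0 + 0.33 (intercept) = 1.455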
assert(linearRegression(Vectors.dense(Array(1.0, 0.5, 1.0))) == 1.455)
}
}
describe("input/output schema") {
it("has the right input schema") {
assert(linearRegression.inputSchema.fields == Seq(StructField("features", TensorType.Double(3))))
}
it("has the right output schema") {
assert(linearRegression.outputSchema.fields == Seq(StructField("prediction", ScalarType.Double.nonNullable)))
}
}
}
|
combust/mleap
|
mleap-core/src/test/scala/ml/combust/mleap/core/regression/LinearRegressionModelSpec.scala
|
Scala
|
apache-2.0
| 901
|
package core.stress
import akka.actor.{Address, ActorLogging, Actor}
import akka.cluster.Cluster
import akka.cluster.ClusterEvent._
import core.stress.SimpleClusterListener.IsRemoved
import scala.collection.mutable.ListBuffer
class SimpleClusterListener extends Actor with ActorLogging {
val cluster = Cluster(context.system)
// subscribe to cluster changes, re-subscribe when restart
override def preStart(): Unit = {
//#subscribe
cluster.subscribe(self, initialStateMode = InitialStateAsEvents,
classOf[MemberEvent], classOf[UnreachableMember])
//#subscribe
}
override def postStop(): Unit = cluster.unsubscribe(self)
var removedAddresses: ListBuffer[Address] = new ListBuffer[Address]()
def receive = {
case MemberUp(member) =>
log.info("Member is Up: {}", member.address)
removedAddresses = removedAddresses - member.address
case UnreachableMember(member) =>
log.info("Member detected as unreachable: {}", member)
case MemberRemoved(member, previousStatus) =>
log.info("Member is Removed: {} after {}",
member.address, previousStatus)
removedAddresses = removedAddresses :+ member.address
case IsRemoved(address) => sender ! removedAddresses.contains(address)
case _: MemberEvent => // ignore
}
}
object SimpleClusterListener {
case class IsRemoved(address: Address)
}
|
kciesielski/akka-journal-stress
|
src/main/scala/core/stress/SimpleClusterListener.scala
|
Scala
|
apache-2.0
| 1,374
|
/**
* A simple class that can act as a spellchecker
* - could be extended to autocomplete
*
* The basic principle here is that words forming the dictionary are stored in a tree structure,
* with unique values [a-z] at each node. Children of each node are also unique in the set [a-z].
* In this way, words are stored by traversing common keys down the branches until the final leaf
* is found (or not as the case may be).
*
* Finding a word once the tree is constructed is essentially walking a single path down the
* tree, consuming one letter at each level
*
* This structure is a trie (prefix tree)
* It should look something like this:
* s
* / \\
* a o
* /\\ \\
* v f f
* / \\ \\
* e e a
*
* TODO: Make all structures immutable. Specifically, change HashMap if possible.
*/
import collection.mutable.HashMap
// import replutils._
object Spellchecker {
val dictionary = new Node()
def buildDictionary(words: Array[String]) {
words.foreach( fill(dictionary, _) )
}
/**
* Called recursively to build up a tree representing the dictionary
*/
def fill(node: Node, letters: String) {
val (letter, tail) = (letters.head, letters.tail)
val child = node.children.getOrElseUpdate(letter, new Node())
System.out.println("letters: "+letters);
if (!tail.isEmpty) {
fill(child, tail)
} else {
child.isWord = true // mark the node that terminates a complete dictionary word
}
}
}
class Spellchecker {
def check(word: String): Boolean = exists(Spellchecker.dictionary, word.trim)
/**
* Called recursively, walking the tree as nodes are found at each layer looking for matches
*/
private def exists(node: Node, letters: String): Boolean = {
val (letter, tail) = (letters.head, letters.tail)
System.out.println("letters "+letters);
node.children.get(letter) match {
case Some(child) if !tail.isEmpty => exists(child, tail)
case Some(child) => child.isWord // last letter: only complete words match, not mere prefixes such as "gala"
case None => false
}
}
}
// It seems hard to define a self-referencing HashMap without some sort of wrapper
class Node(val children: HashMap[Char, Node] = HashMap.empty[Char, Node], var isWord: Boolean = false)
object Run extends App{
println("run runn...");
val words = Array("once", "upon", "a", "time", "in", "a", "galaxy", "far", "far", "away")
Spellchecker.buildDictionary(words)
val spellchecker = new Spellchecker()
for( w <- words) {
println(w);
}
System.out.println( spellchecker.check("upon") )
System.out.println( spellchecker.check("jaime") )
System.out.println( spellchecker.check("gala") )
}
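// A minimal sketch of the immutable trie the TODO above asks for (illustrative only, not part of
// the original file): every insert returns a new node instead of mutating a shared HashMap, and an
// isWord flag distinguishes complete words from mere prefixes.
object ImmutableTrie {
  final case class TrieNode(children: Map[Char, TrieNode] = Map.empty, isWord: Boolean = false) {
    def insert(word: String): TrieNode =
      if (word.isEmpty) copy(isWord = true)
      else {
        val child = children.getOrElse(word.head, TrieNode())
        copy(children = children + (word.head -> child.insert(word.tail)))
      }
    def contains(word: String): Boolean =
      if (word.isEmpty) isWord
      else children.get(word.head).exists(_.contains(word.tail))
  }
  // Example: ImmutableTrie.fromWords(Seq("once", "upon")).contains("upon") == true
  def fromWords(words: Seq[String]): TrieNode = words.foldLeft(TrieNode())(_ insert _)
}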
|
jaimeguzman/learning
|
spellchecker.scala
|
Scala
|
apache-2.0
| 2,708
|
////////////////////////////////////////////////////////////////////////////////
// //
// OpenSolid is a generic library for the representation and manipulation //
// of geometric objects such as points, curves, surfaces, and volumes. //
// //
// Copyright 2007-2015 by Ian Mackenzie //
// ian.e.mackenzie@gmail.com //
// //
// This Source Code Form is subject to the terms of the Mozilla Public //
// License, v. 2.0. If a copy of the MPL was not distributed with this file, //
// you can obtain one at http://mozilla.org/MPL/2.0/. //
// //
////////////////////////////////////////////////////////////////////////////////
package org.opensolid.core
final case class Triangle3d(firstVertex: Point3d, secondVertex: Point3d, thirdVertex: Point3d)
extends Scalable3d[Triangle3d]
with Bounded[Bounds3d]
with GeometricallyComparable[Triangle3d] {
def vertices: (Point3d, Point3d, Point3d) =
(firstVertex, secondVertex, thirdVertex)
def vertex(index: Int): Point3d = index match {
case 0 => firstVertex
case 1 => secondVertex
case 2 => thirdVertex
case _ =>
throw new IndexOutOfBoundsException(s"Index $index is out of bounds for a triangle vertex")
}
def edges: (LineSegment3d, LineSegment3d, LineSegment3d) = {
val oppositeFirst = LineSegment3d(thirdVertex, secondVertex)
val oppositeSecond = LineSegment3d(firstVertex, thirdVertex)
val oppositeThird = LineSegment3d(secondVertex, firstVertex)
(oppositeFirst, oppositeSecond, oppositeThird)
}
def edge(index: Int): LineSegment3d = index match {
case 0 => LineSegment3d(thirdVertex, secondVertex)
case 1 => LineSegment3d(firstVertex, thirdVertex)
case 2 => LineSegment3d(secondVertex, firstVertex)
case _ =>
throw new IndexOutOfBoundsException(s"Index $index is out of bounds for a triangle edge")
}
override def transformedBy(transformation: Transformation3d): Triangle3d =
Triangle3d(
firstVertex.transformedBy(transformation),
secondVertex.transformedBy(transformation),
thirdVertex.transformedBy(transformation)
)
override def scaledAbout(point: Point3d, scale: Double): Triangle3d =
Triangle3d(
firstVertex.scaledAbout(point, scale),
secondVertex.scaledAbout(point, scale),
thirdVertex.scaledAbout(point, scale)
)
override def bounds: Bounds3d =
firstVertex.hull(secondVertex).hull(thirdVertex)
override def isEqualTo(that: Triangle3d, tolerance: Double): Boolean =
this.firstVertex.isEqualTo(that.firstVertex, tolerance) &&
this.secondVertex.isEqualTo(that.secondVertex, tolerance) &&
this.thirdVertex.isEqualTo(that.thirdVertex, tolerance)
def area: Double =
0.5 * firstVertex.vectorTo(secondVertex).cross(firstVertex.vectorTo(thirdVertex)).length
def normalDirection: Direction3d =
Numerics.normalDirection(firstVertex, secondVertex, thirdVertex)
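// Centroid = A + ((B - A) + (C - A)) / 3, which is algebraically the same as (A + B + C) / 3.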
def centroid: Point3d =
firstVertex + (firstVertex.vectorTo(secondVertex) + firstVertex.vectorTo(thirdVertex)) / 3.0
def plane: Plane3d =
Plane3d.fromPointAndNormal(firstVertex, normalDirection)
def projectedOnto(plane: Plane3d): Triangle3d =
Triangle3d(
firstVertex.projectedOnto(plane),
secondVertex.projectedOnto(plane),
thirdVertex.projectedOnto(plane)
)
def projectedInto(plane: Plane3d): Triangle2d =
Triangle2d(
firstVertex.projectedInto(plane),
secondVertex.projectedInto(plane),
thirdVertex.projectedInto(plane)
)
}
|
ianmackenzie/opensolid-core
|
src/main/scala/org/opensolid/core/Triangle3d.scala
|
Scala
|
mpl-2.0
| 3,911
|
package gapt.formats.llk
import gapt.proofs._
import gapt.expr._
import gapt.expr.formula.All
import gapt.expr.formula.And
import gapt.expr.formula.Atom
import gapt.expr.formula.Ex
import gapt.expr.formula.Imp
import gapt.expr.formula.Neg
import gapt.expr.formula.NonLogicalConstant
import gapt.expr.formula.Or
import gapt.expr.formula.constants.EqC
import gapt.expr.formula.constants.LogicalConstant
import gapt.expr.formula.hol.HOLFunction
import gapt.expr.ty.->:
import gapt.expr.ty.Ti
import gapt.expr.ty.To
import gapt.expr.ty.Ty
import gapt.proofs.lk._
import gapt.proofs.lk.rules.AndLeftRule
import gapt.proofs.lk.rules.AndRightRule
import gapt.proofs.lk.rules.ContractionLeftRule
import gapt.proofs.lk.rules.ContractionRightRule
import gapt.proofs.lk.rules.CutRule
import gapt.proofs.lk.rules.ConversionLeftRule
import gapt.proofs.lk.rules.ConversionRightRule
import gapt.proofs.lk.rules.EqualityLeftRule
import gapt.proofs.lk.rules.EqualityRightRule
import gapt.proofs.lk.rules.ExistsLeftRule
import gapt.proofs.lk.rules.ExistsRightRule
import gapt.proofs.lk.rules.ExistsSkLeftRule
import gapt.proofs.lk.rules.ForallLeftRule
import gapt.proofs.lk.rules.ForallRightRule
import gapt.proofs.lk.rules.ForallSkRightRule
import gapt.proofs.lk.rules.ImpLeftRule
import gapt.proofs.lk.rules.ImpRightRule
import gapt.proofs.lk.rules.InitialSequent
import gapt.proofs.lk.rules.NegLeftRule
import gapt.proofs.lk.rules.NegRightRule
import gapt.proofs.lk.rules.OrLeftRule
import gapt.proofs.lk.rules.OrRightRule
import gapt.proofs.lk.rules.WeakeningLeftRule
import gapt.proofs.lk.rules.WeakeningRightRule
object LatexLLKExporter extends LLKExporter( true )
object LLKExporter extends LLKExporter( false )
class LLKExporter( val expandTex: Boolean ) {
val emptyTypeMap = Map[String, Ty]()
private val nLine = sys.props( "line.separator" )
def apply( db: ExtendedProofDatabase, escape_latex: Boolean ) = {
val types0 = db.eproofs.foldLeft( ( emptyTypeMap, emptyTypeMap ) )( ( t, p ) =>
getTypes( p._2, t._1, t._2 ) )
val types1 = db.axioms.foldLeft( types0 )( ( m, fs ) => getTypes( fs, m._1, m._2 ) )
val ( vtypes, ctypes ) = db.eproofs.keySet.foldLeft( types1 )( ( m, x ) => getTypes( x, m._1, m._2 ) )
val sb = new StringBuilder()
sb.append( generateDeclarations( vtypes, ctypes ) )
sb.append( nLine + nLine )
for ( p <- db.eproofs ) {
sb.append( generateProof( p._2, "", escape_latex ) )
sb.append( nLine )
sb.append( "\\\\CONTINUEWITH{" + toLatexString.getFormulaString( p._1, true, escape_latex ) + "}" )
sb.append( nLine )
}
sb.toString()
}
def apply( lkp: LKProof, escape_latex: Boolean ) = {
val ( vtypes, ctypes ) = getTypes( lkp, emptyTypeMap, emptyTypeMap )
val declarations = generateDeclarations( vtypes, ctypes )
val proofs = generateProof( lkp, "", escape_latex )
declarations + nLine + "\\\\CONSTDEC{THEPROOF}{o}" + nLine + nLine + proofs + "\\\\CONTINUEWITH{THEPROOF}"
}
def generateDeclarations( vars: Map[String, Ty], consts: Map[String, Ty] ): String = {
val svars = vars.foldLeft( Map[String, String]() )( ( map, p ) => {
val vname = toLatexString.nameToLatexString( p._1.toString )
if ( map contains vname ) throw new Exception( "Two different kinds of symbol share the same name!" )
map + ( ( vname, getTypeString( p._2 ) ) )
} )
val sconsts = consts.foldLeft( Map[String, String]() )( ( map, p ) => {
val vname = toLatexString.nameToLatexString( p._1.toString )
if ( map contains vname ) throw new Exception( "Two different kinds of symbol share the same name!" )
map + ( ( vname, getTypeString( p._2 ) ) )
} ).filterNot( _._1 == "=" )
/*
val sdefs = defs.foldLeft(Map[String, String]())((map, p) => {
val w = "[a-zA-Z0-9]+"
val re= ("("+w+")\\\\[("+w+"(,"+w+")*"+")\\\\]").r
val vname = toLatexString.nameToLatexString(p._1.toString, false)
if (map contains vname) throw new Exception("Two different kinds of symbol share the same name!")
map + ((vname, getTypeString(p._2)))
})*/
val rvmap = svars.foldLeft( Map[String, List[String]]() )( ( map, p ) => {
val ( name, expt ) = p
if ( map contains expt )
map + ( ( expt, name :: map( expt ) ) )
else
map + ( ( expt, name :: Nil ) )
} )
val rcmap = sconsts.foldLeft( Map[String, List[String]]() )( ( map, p ) => {
val ( name, expt ) = p
if ( map contains expt )
map + ( ( expt, name :: map( expt ) ) )
else
map + ( ( expt, name :: Nil ) )
} )
val sv = rvmap.map( x => "\\\\VARDEC{" + x._2.mkString( ", " ) + "}{" + x._1 + "}" )
val sc = rcmap.map( x => "\\\\CONSTDEC{" + x._2.mkString( ", " ) + "}{" + x._1 + "}" )
sv.mkString( nLine ) + nLine + sc.mkString( nLine )
}
def getTypes( p: LKProof, vacc: Map[String, Ty], cacc: Map[String, Ty] ): ( Map[String, Ty], Map[String, Ty] ) = {
val formulas = for ( subProof <- p.subProofs; formula <- subProof.endSequent.elements ) yield formula
formulas.foldLeft( ( vacc, cacc ) )( ( map, f ) =>
getTypes( f, map._1, map._2 ) )
}
def getTypes( p: HOLSequent, vacc: Map[String, Ty], cacc: Map[String, Ty] ): ( Map[String, Ty], Map[String, Ty] ) = {
p.formulas.foldLeft( ( vacc, cacc ) )( ( m, f ) => getTypes( f, m._1, m._2 ) )
}
def getTypes( exp: Expr, vmap: Map[String, Ty], cmap: Map[String, Ty] ): ( Map[String, Ty], Map[String, Ty] ) =
exp match {
case Var( name, exptype ) =>
if ( vmap.contains( name ) ) {
if ( vmap( name ) != exptype ) throw new Exception(
"Symbol clash for " + name + " " + vmap( name ) + " != " + exptype )
( vmap, cmap )
} else {
( vmap + ( ( name, exptype ) ), cmap )
}
case EqC( _ ) => ( vmap, cmap )
case NonLogicalConstant( name, exptype, _ ) =>
if ( cmap.contains( name ) ) {
if ( cmap( name ) != exptype ) throw new Exception(
"Symbol clash for " + name + " " + cmap( name ) + " != " + exptype )
( vmap, cmap )
} else {
( vmap, cmap + ( ( name, exptype ) ) )
}
case App( s, t ) =>
val ( vm, cm ) = getTypes( t, vmap, cmap )
getTypes( s, vm, cm )
case Abs( x, t ) =>
val ( vm, cm ) = getTypes( t, vmap, cmap )
getTypes( x, vm, cm )
case _: LogicalConstant =>
( vmap, cmap )
}
def getTypeString( t: Ty, outermost: Boolean = true ): String = t match {
case Ti => "i"
case To => "o"
case t1 ->: t2 =>
val s = getTypeString( t1, false ) + ">" + getTypeString( t2, false )
if ( outermost ) s else "(" + s + ")"
}
def fsequentString( fs: HOLSequent, escape_latex: Boolean ): String =
fs.antecedent.map( toLatexString.getFormulaString( _, true, escape_latex ) ).mkString( "{", ",", "}" ) +
fs.succedent.map( toLatexString.getFormulaString( _, true, escape_latex ) ).mkString( "{", ",", "}" )
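// Serialises the proof by recursion: each case passes its own rule line (plus the continuation s)
// as the accumulator into its premises, so every inference is emitted after the text of its
// premises and the root rule comes last.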
def generateProof( p: LKProof, s: String, escape_latex: Boolean ): String = p match {
case InitialSequent( root ) =>
"\\\\AX" + fsequentString( p.endSequent, escape_latex ) + nLine + s
// unary rules
case NegLeftRule( p1, _ ) =>
generateProof( p1, "\\\\NEGL" + fsequentString( p.endSequent, escape_latex ) + nLine + s, escape_latex )
case NegRightRule( p1, _ ) =>
generateProof( p1, "\\\\NEGR" + fsequentString( p.endSequent, escape_latex ) + nLine + s, escape_latex )
case AndLeftRule( p1, _, _ ) =>
generateProof( p1, "\\\\ANDL" + fsequentString( p.endSequent, escape_latex ) + nLine + s, escape_latex )
case OrRightRule( p1, _, _ ) =>
generateProof( p1, "\\\\ORR" + fsequentString( p.endSequent, escape_latex ) + nLine + s, escape_latex )
case ImpRightRule( p1, _, _ ) =>
generateProof( p1, "\\\\IMPR" + fsequentString( p.endSequent, escape_latex ) + nLine + s, escape_latex )
//binary rules
case AndRightRule( p1, _, p2, _ ) =>
generateProof( p1, generateProof( p2, "\\\\ANDR" + fsequentString( p.endSequent, escape_latex ) + nLine +
s, escape_latex ), escape_latex )
case OrLeftRule( p1, _, p2, _ ) =>
generateProof( p1, generateProof( p2, "\\\\ORL" + fsequentString( p.endSequent, escape_latex ) + nLine +
s, escape_latex ), escape_latex )
case ImpLeftRule( p1, _, p2, _ ) =>
generateProof( p1, generateProof( p2, "\\\\IMPL" + fsequentString( p.endSequent, escape_latex ) + nLine +
s, escape_latex ), escape_latex )
//structural rules
case CutRule( p1, _, p2, _ ) =>
generateProof( p1, generateProof( p2, "\\\\CUT" + fsequentString( p.endSequent, escape_latex ) + nLine +
s, escape_latex ), escape_latex )
case WeakeningLeftRule( p1, _ ) =>
generateProof( p1, "\\\\WEAKL" + fsequentString( p.endSequent, escape_latex ) + nLine + s, escape_latex )
case WeakeningRightRule( p1, _ ) =>
generateProof( p1, "\\\\WEAKR" + fsequentString( p.endSequent, escape_latex ) + nLine + s, escape_latex )
case ContractionLeftRule( p1, _, _ ) =>
generateProof( p1, "\\\\CONTRL" + fsequentString( p.endSequent, escape_latex ) + nLine + s, escape_latex )
case ContractionRightRule( p1, _, _ ) =>
generateProof( p1, "\\\\CONTRR" + fsequentString( p.endSequent, escape_latex ) + nLine + s, escape_latex )
//quantifier rules
case ForallLeftRule( p1, aux, main, term, qv ) =>
generateProof( p1, "\\\\ALLL{" + toLatexString.getFormulaString( term, true, escape_latex ) + "}" +
fsequentString( p.endSequent, escape_latex ) + nLine + s, escape_latex )
case ForallRightRule( p1, main, eigenvar, qv ) =>
generateProof( p1, "\\\\ALLR{" + toLatexString.getFormulaString( eigenvar, true, escape_latex ) + "}" +
fsequentString( p.endSequent, escape_latex ) + nLine + s, escape_latex )
case ExistsLeftRule( p1, main, eigenvar, qv ) =>
generateProof( p1, "\\\\EXL{" + toLatexString.getFormulaString( eigenvar, true, escape_latex ) + "}" +
fsequentString( p.endSequent, escape_latex ) + nLine + s, escape_latex )
case ExistsRightRule( p1, aux, main, term, qv ) =>
generateProof( p1, "\\\\EXR{" + toLatexString.getFormulaString( term, true, escape_latex ) + "}" +
fsequentString( p.endSequent, escape_latex ) + nLine + s, escape_latex )
//equality rules
case EqualityLeftRule( p1, _, _, _ ) =>
generateProof( p1, "\\\\UEQL" + fsequentString( p.endSequent, escape_latex ) + nLine + s, escape_latex )
case EqualityRightRule( p1, _, _, _ ) =>
generateProof( p1, "\\\\UEQR" + fsequentString( p.endSequent, escape_latex ) + nLine + s, escape_latex )
//definition rules
case ConversionLeftRule( p1, _, _ ) =>
generateProof( p1, "\\\\DEF" + fsequentString( p.endSequent, escape_latex ) + nLine + s, escape_latex )
case ConversionRightRule( p1, _, _ ) =>
generateProof( p1, "\\\\DEF" + fsequentString( p.endSequent, escape_latex ) + nLine + s, escape_latex )
//TODO: this is only a way to write out the proof, but it cannot be read back in
// (labels are not handled by llk so far)
case ExistsSkLeftRule( p1, aux, main, term ) =>
generateProof( p1, "\\\\EXSKL{" + toLatexString.getFormulaString( term, true, escape_latex ) + "}"
+ fsequentString( p.endSequent, escape_latex ) + nLine + s, escape_latex )
/*
case ExistsSkRightRule( p1, aux, main, term ) =>
generateProof( p1, "\\\\EXSKR{" + toLatexString.getFormulaString( term, true, escape_latex ) + "}"
+ fsequentString( p.endSequent, escape_latex ) + nLine + s, escape_latex )
case ForallSkLeftRule( p1, aux, main, term ) =>
generateProof( p1, "\\\\ALLSKL{" + toLatexString.getFormulaString( term, true, escape_latex ) + "}"
+ fsequentString( p.endSequent, escape_latex ) + nLine + s, escape_latex )
*/
case ForallSkRightRule( p1, aux, main, term ) =>
generateProof( p1, "\\\\ALLSKR{" + toLatexString.getFormulaString( term, true, escape_latex ) + "}"
+ fsequentString( p.endSequent, escape_latex ) + nLine + s, escape_latex )
}
}
/**
* This is a prover9 style formatting which can be parsed by LLK.
*/
object toLLKString {
def apply( e: Expr ) = toLatexString.getFormulaString( e, true, false )
}
/**
* This is a Latex formatting which can be parsed by LLK.
*/
object toLatexString {
def apply( e: Expr ) = getFormulaString( e, true, true )
def getFormulaString( f: Expr, outermost: Boolean = true, escape_latex: Boolean ): String = f match {
case All( x, t ) =>
val op = if ( escape_latex ) "\\\\forall" else "all"
op + " " + getFormulaString( x.asInstanceOf[Var], false, escape_latex ) +
" " + getFormulaString( t, false, escape_latex )
case Ex( x, t ) =>
val op = if ( escape_latex ) "\\\\exists" else "exists"
op + " " + getFormulaString( x.asInstanceOf[Var], false, escape_latex ) +
" " + getFormulaString( t, false, escape_latex )
case Neg( t1 ) =>
val op = if ( escape_latex ) "\\\\neg" else "-"
val str = " " + op + " " + getFormulaString( t1, false, escape_latex )
if ( outermost ) str else "(" + str + ")"
case And( t1, t2 ) =>
val op = if ( escape_latex ) "\\\\land" else "&"
val str = getFormulaString( t1, false, escape_latex ) + " " + op + " " +
getFormulaString( t2, false, escape_latex )
if ( outermost ) str else "(" + str + ")"
case Or( t1, t2 ) =>
val op = if ( escape_latex ) "\\\\lor" else "|"
val str = getFormulaString( t1, false, escape_latex ) + " " + op + " " +
getFormulaString( t2, false, escape_latex )
if ( outermost ) str else "(" + str + ")"
case Imp( t1, t2 ) =>
val op = if ( escape_latex ) "\\\\rightarrow" else "->"
val str = getFormulaString( t1, false, escape_latex ) + " " + op + " " +
getFormulaString( t2, false, escape_latex )
if ( outermost ) str else "(" + str + ")"
case Var( v, _ ) => v.toString
case Const( c, _, _ ) => c.toString
case Atom( f, args ) =>
val sym = f match {
case Const( x, _, _ ) => x
case Var( x, _ ) => x
}
val str: String =
if ( args.length == 2 && sym.toString.matches( """(<|>|\\\\leq|\\\\geq|=|>=|<=)""" ) ) {
val str = getFormulaString( args( 0 ), false, escape_latex ) + " " +
toLatexString.nameToLatexString( sym.toString ) + " " +
getFormulaString( args( 1 ), false, escape_latex )
if ( outermost ) str else "(" + str + ")"
} else
toLatexString.nameToLatexString( sym.toString ) + (
if ( args.isEmpty ) " "
else args.map( getFormulaString( _, false, escape_latex ) ).mkString( "(", ", ", ")" ) )
//if (outermost) str else "(" + str + ")"
str
case HOLFunction( f, args ) =>
val sym = f match {
case Const( x, _, _ ) => x
case Var( x, _ ) => x
}
if ( args.length == 2 && sym.toString.matches( """[+\\-*/]""" ) )
"(" + getFormulaString( args( 0 ), false, escape_latex ) + " " + sym.toString + " " +
getFormulaString( args( 1 ), false, escape_latex ) + ")"
else {
if ( args.isEmpty )
toLatexString.nameToLatexString( sym.toString )
else
toLatexString.nameToLatexString( sym.toString ) + (
if ( args.isEmpty ) " "
else args.map( getFormulaString( _, false, escape_latex ) ).mkString( "(", ", ", ")" ) )
}
// these cases need to be below the quantifiers and function/atom, since the latter are less general than abs/app
case Abs( x, t ) =>
"(\\\\lambda " + getFormulaString( x.asInstanceOf[Var], false, escape_latex ) + " " +
getFormulaString( t, false, escape_latex ) + ")"
case App( s, t ) =>
if ( escape_latex )
"\\\\apply{ " + getFormulaString( s, false, escape_latex ) + " " +
getFormulaString( t, false, escape_latex ) + "}"
else
"(@ " + getFormulaString( s, false, escape_latex ) + " " + getFormulaString( t, false, escape_latex ) + ")"
}
def nameToLatexString( s: String, escapebrack: Boolean = true ): String = {
val s1 = UnicodeToLatex.nameToLatexString( s )
//val s2 = if (escapebrack) s1.replaceAll("\\\\[","(").replaceAll("\\\\]",")") else s1
val s2 = if ( s == "!=" ) "\\\\neq" else s1
val s3 = if ( s2 != "-" ) s2.replaceAll( "-", "" ) else s2
s3
}
}
|
gapt/gapt
|
core/src/main/scala/gapt/formats/llk/LLKExporter.scala
|
Scala
|
gpl-3.0
| 16,451
|
package io.eels.component.orc
import java.util.concurrent.atomic.AtomicBoolean
import com.sksamuel.exts.OptionImplicits._
import com.sksamuel.exts.io.Using
import io.eels._
import io.eels.datastream.{DataStream, Publisher, Subscriber, Subscription}
import io.eels.schema.StructType
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.orc.OrcFile.ReaderOptions
import org.apache.orc._
import scala.collection.JavaConverters._
object OrcSource {
def apply(path: Path)(implicit fs: FileSystem, conf: Configuration): OrcSource = apply(FilePattern(path))
def apply(str: String)(implicit fs: FileSystem, conf: Configuration): OrcSource = apply(FilePattern(str))
}
case class OrcSource(pattern: FilePattern,
projection: Seq[String] = Nil,
predicate: Option[Predicate] = None)
(implicit fs: FileSystem, conf: Configuration) extends Source with Using {
override def parts(): Seq[Publisher[Seq[Row]]] = pattern.toPaths().map(new OrcPublisher(_, projection, predicate))
def withPredicate(predicate: Predicate): OrcSource = copy(predicate = predicate.some)
def withProjection(first: String, rest: String*): OrcSource = withProjection(first +: rest)
def withProjection(fields: Seq[String]): OrcSource = {
require(fields.nonEmpty)
copy(projection = fields.toList)
}
override def schema: StructType = {
val reader = OrcFile.createReader(pattern.toPaths().head, new ReaderOptions(conf))
val schema = reader.getSchema
OrcSchemaFns.fromOrcType(schema).asInstanceOf[StructType]
}
private def reader() = {
val options = new ReaderOptions(conf)
OrcFile.createReader(pattern.toPaths().head, options)
}
def count(): Long = reader().getNumberOfRows
def statistics(): Seq[ColumnStatistics] = reader().getStatistics.toVector
def stripes(): Seq[StripeInformation] = reader().getStripes.asScala
def stripeStatistics(): Seq[StripeStatistics] = reader().getStripeStatistics.asScala
}
class OrcPublisher(path: Path,
projection: Seq[String],
predicate: Option[Predicate])(implicit conf: Configuration) extends Publisher[Seq[Row]] {
override def subscribe(subscriber: Subscriber[Seq[Row]]): Unit = {
try {
val reader = OrcFile.createReader(path, new ReaderOptions(conf))
val fileSchema = OrcSchemaFns.fromOrcType(reader.getSchema).asInstanceOf[StructType]
val iterator: Iterator[Row] = OrcBatchIterator(reader, fileSchema, projection, predicate).flatten
val running = new AtomicBoolean(true)
subscriber.subscribed(Subscription.fromRunning(running))
iterator.grouped(DataStream.DefaultBatchSize).takeWhile(_ => running.get).foreach(subscriber.next)
subscriber.completed()
} catch {
case t: Throwable => subscriber.error(t)
}
}
}
|
sksamuel/eel-sdk
|
eel-orc/src/main/scala/io/eels/component/orc/OrcSource.scala
|
Scala
|
apache-2.0
| 2,888
|
package scray.cassandra.tools
import org.junit.runner.RunWith
import com.typesafe.scalalogging.LazyLogging
import org.junit.runner.RunWith
import org.scalatest.WordSpec
import org.scalatest.junit.JUnitRunner
import scray.querying.description.TableIdentifier
@RunWith(classOf[JUnitRunner])
class CassandraIndexStatementGeneratorImplSpecs extends WordSpec with LazyLogging {
"CassandraIndexStatementGenerator " should {
"create index statement for one column " in {
val statementGenerator = new CassandraIndexStatementGeneratorImpl
val configurationString = statementGenerator.getIndexString(TableIdentifier("cassandra", "ks", "cf1"), List("col1"))
assert(configurationString == "CREATE INDEX ON \\"ks\\".\\"cf1\\" (\\"col1\\" );")
}
"create index statement for multiple columns " in {
val statementGenerator = new CassandraIndexStatementGeneratorImpl
val configurationString = statementGenerator.getIndexString(TableIdentifier("cassandra", "ks", "cf1"), List("col1", "col2"))
assert(configurationString == "CREATE INDEX ON \\"ks\\".\\"cf1\\" (\\"col1\\", \\"col2\\" );")
}
}
}
|
scray/scray
|
scray-cassandra/src/test/scala/scray/cassandra/tools/CassandraIndexStatementGeneratorImplSpecs.scala
|
Scala
|
apache-2.0
| 1,151
|
package systems.opalia.commons.identifier
import org.scalatest._
class UniversallyUniqueIdTest
extends FlatSpec
with Matchers {
val list =
(for (i <- 1 to 1000) yield UniversallyUniqueId.getNew).toList
it should "be unique" in {
list.distinct.size should be(list.size)
}
it should "have correct version numbers" in {
val name = "this is a test name"
list.foreach {
randomId =>
(randomId(6) >> 4) shouldBe 4
}
val nameId =
UniversallyUniqueId.getFromName(name)
(nameId(6) >> 4) shouldBe 3
}
it should "generate same IDs for same names" in {
val name = "this is a test name"
UniversallyUniqueId.getFromName(name) shouldBe UniversallyUniqueId.getFromName(name)
}
it should "be able to validate strings" in {
val stringA = "123e4567-e89b-12d3-a456-426655440000"
val stringB = "123e4567-9b-4212d3-a456-426655440000"
val stringC = "123e4567-gb42-12d3-a456-426655440000"
UniversallyUniqueId.isValid(stringA) shouldBe true
UniversallyUniqueId.isValid(stringB) shouldBe false
UniversallyUniqueId.isValid(stringC) shouldBe false
}
it should "be able to generate from strings" in {
val stringA = "123e4567-e89b-12d3-a456-426655440000"
val stringB = "123e4567-9b-4212d3-a456-426655440000"
val stringC = "123e4567-gb42-12d3-a456-426655440000"
UniversallyUniqueId.getFromOpt(stringA).map(_.toString) shouldBe Some(stringA)
UniversallyUniqueId.getFromOpt(stringB) shouldBe None
UniversallyUniqueId.getFromOpt(stringC) shouldBe None
}
}
|
OpaliaSystems/commons
|
src/test/scala/systems/opalia/commons/identifier/UniversallyUniqueIdTest.scala
|
Scala
|
apache-2.0
| 1,578
|
package gapt.expr.formula
import gapt.expr.formula.constants.ForallC
object All extends QuantifierHelper( ForallC )
|
gapt/gapt
|
core/src/main/scala/gapt/expr/formula/All.scala
|
Scala
|
gpl-3.0
| 118
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.spark.impl
import org.apache.ignite.cache.query.Query
import org.apache.ignite.configuration.CacheConfiguration
import org.apache.ignite.spark.IgniteContext
import org.apache.spark.{Partition, TaskContext}
import scala.reflect.ClassTag
class IgniteSqlRDD[R: ClassTag, T, K, V](
ic: IgniteContext,
cacheName: String,
cacheCfg: CacheConfiguration[K, V],
var qry: Query[T],
conv: (T) ⇒ R,
keepBinary: Boolean,
partitions: Array[Partition] = Array(IgnitePartition(0))
) extends IgniteAbstractRDD[R, K, V](ic, cacheName, cacheCfg, keepBinary) {
override def compute(split: Partition, context: TaskContext): Iterator[R] = {
val cur = ensureCache().query(qry)
TaskContext.get().addTaskCompletionListener[Unit]((_) ⇒ cur.close())
new IgniteQueryIterator[T, R](cur.iterator(), conv)
}
override protected def getPartitions: Array[Partition] = partitions
}
object IgniteSqlRDD {
def apply[R: ClassTag, T, K, V](ic: IgniteContext, cacheName: String, cacheCfg: CacheConfiguration[K, V],
qry: Query[T], conv: (T) ⇒ R, keepBinary: Boolean,
partitions: Array[Partition] = Array(IgnitePartition(0))): IgniteSqlRDD[R, T, K, V] =
new IgniteSqlRDD(ic, cacheName, cacheCfg, qry, conv, keepBinary, partitions)
}
|
apache/ignite
|
modules/spark-2.4/src/main/scala/org/apache/ignite/spark/impl/IgniteSqlRDD.scala
|
Scala
|
apache-2.0
| 2,125
|
package scorex.wallet
import java.io.File
import com.google.common.primitives.{Bytes, Ints}
import org.h2.mvstore.{MVMap, MVStore}
import scorex.account.PrivateKeyAccount
import scorex.crypto.encode.Base58
import scorex.crypto.hash.SecureCryptographicHash
import scorex.utils.ScorexLogging
import scala.collection.JavaConversions._
import scala.collection.concurrent.TrieMap
import scorex.utils.randomBytes
//todo: add accs txs?
class Wallet(walletFileOpt: Option[File], password: String, seedOpt: Option[Array[Byte]]) extends ScorexLogging {
private val NonceFieldName = "nonce"
private val database: MVStore = walletFileOpt match {
case Some(walletFile) =>
//create parent folders then check their existence
walletFile.getParentFile.mkdirs().ensuring(walletFile.getParentFile.exists())
new MVStore.Builder().fileName(walletFile.getAbsolutePath).encryptionKey(password.toCharArray).compress().open()
case None => new MVStore.Builder().open()
}
private val accountsPersistence: MVMap[Int, Array[Byte]] = database.openMap("privkeys")
private val seedPersistence: MVMap[String, Array[Byte]] = database.openMap("seed")
private val noncePersistence: MVMap[String, Int] = database.openMap("nonce")
if (Option(seedPersistence.get("seed")).isEmpty) {
val seed = seedOpt.getOrElse {
val Attempts = 10
val SeedSize = 64
lazy val randomSeed = randomBytes(SeedSize)
lazy val encodedSeed = Base58.encode(randomSeed)
def readSeed(limit: Int = Attempts): Array[Byte] = {
println("Please type your wallet seed or press Enter to generate a random one")
val typed = scala.io.StdIn.readLine()
if (typed == "") {
println(s"Your randomly generated seed is $encodedSeed")
randomSeed
} else
Base58.decode(typed).getOrElse {
if (limit > 0) {
println("Wallet seed should be a correct Base58-encoded string.")
readSeed(limit - 1)
} else throw new Error("Sorry you have made too many incorrect seed guesses")
}
}
readSeed()
}
seedPersistence.put("seed", seed)
}
val seed: Array[Byte] = seedPersistence.get("seed")
private val accountsCache: TrieMap[String, PrivateKeyAccount] = {
val accounts = accountsPersistence.keys.map(k => accountsPersistence.get(k)).map(seed => new PrivateKeyAccount(seed))
TrieMap(accounts.map(acc => acc.address -> acc).toSeq: _*)
}
def privateKeyAccounts(): Seq[PrivateKeyAccount] = accountsCache.values.toSeq
def generateNewAccounts(howMany: Int): Seq[PrivateKeyAccount] =
(1 to howMany).flatMap(_ => generateNewAccount())
def generateNewAccount(): Option[PrivateKeyAccount] = synchronized {
val nonce = getAndIncrementNonce()
val account = Wallet.generateNewAccount(seed, nonce)
val address = account.address
val created = if (!accountsCache.contains(address)) {
accountsCache += account.address -> account
accountsPersistence.put(accountsPersistence.lastKey() + 1, account.seed)
database.commit()
true
} else false
if (created) {
log.info("Added account #" + privateKeyAccounts().size)
Some(account)
} else None
}
def deleteAccount(account: PrivateKeyAccount): Boolean = synchronized {
val res = accountsPersistence.keys.find { k =>
if (accountsPersistence.get(k) sameElements account.seed) {
accountsPersistence.remove(k)
true
} else false
}
database.commit()
accountsCache -= account.address
res.isDefined
}
def exportAccountSeed(address: String): Option[Array[Byte]] = privateKeyAccount(address).map(_.seed)
def privateKeyAccount(address: String): Option[PrivateKeyAccount] = accountsCache.get(address)
def close(): Unit = if (!database.isClosed) {
database.commit()
database.close()
accountsCache.clear()
}
def exists(): Boolean = walletFileOpt.map(_.exists()).getOrElse(true)
def nonce(): Int = Option(noncePersistence.get(NonceFieldName)).getOrElse(0)
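// getAndIncrementNonce relies on MVMap.put returning the previously stored value, so it yields
// the nonce before the increment (null unboxes to 0 on the very first call).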
private def getAndIncrementNonce(): Int = synchronized {
noncePersistence.put(NonceFieldName, nonce() + 1)
}
}
object Wallet {
def generateNewAccount(seed: Array[Byte], nonce: Int): PrivateKeyAccount = {
val accountSeed = generateAccountSeed(seed, nonce)
new PrivateKeyAccount(accountSeed)
}
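// Account seeds are derived deterministically: hash(4-byte big-endian nonce ++ wallet seed),
// so the same wallet seed and nonce always reproduce the same account.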
def generateAccountSeed(seed: Array[Byte], nonce: Int): Array[Byte] =
SecureCryptographicHash(Bytes.concat(Ints.toByteArray(nonce), seed))
}
|
alexeykiselev/WavesScorex
|
scorex-basics/src/main/scala/scorex/wallet/Wallet.scala
|
Scala
|
cc0-1.0
| 4,529
|
package shredzzz.kirkwood.junit.tests.vector.booleans
import org.junit.Test
import shredzzz.kirkwood.cumath.CuValue
import shredzzz.kirkwood.cumath.tensor.CuVector
import shredzzz.kirkwood.junit._
class CuVectorBinaryOpsTest extends BooleanCuVectorTester
{
@Test def testVal_unary_:!@() {
withCuContext(
implicit ctx => {
testMapFunc(
xArr = Array(false, true, true),
expected = Array(true, false, false),
(x: CuVector[Boolean], res: CuVector[Boolean]) => {
res =: x.unary_:!@()
}
)
}
)
}
@Test def testVal_:&&@() {
withCuContext(
implicit ctx => {
testVal(
xArr = Array(false, true, true),
cVal = true,
expected = Array(false, true, true),
(x: CuVector[Boolean], c: CuValue[Boolean], res: CuVector[Boolean]) => {
res =: x :&&@ c
}
)
}
)
}
@Test def testVal_:||@() {
withCuContext(
implicit ctx => {
testVal(
xArr = Array(true, false, true),
cVal = true,
expected = Array(true, true, true),
(x: CuVector[Boolean], c: CuValue[Boolean], res: CuVector[Boolean]) => {
res =: x :||@ c
}
)
}
)
}
@Test def testVal_:^^@() {
withCuContext(
implicit ctx => {
testVal(
xArr = Array(true, false, true),
cVal = true,
expected = Array(false, true, false),
(x: CuVector[Boolean], c: CuValue[Boolean], res: CuVector[Boolean]) => {
res =: x :^^@ c
}
)
}
)
}
@Test def testVec_:&&@() {
withCuContext(
implicit ctx => {
testVec(
xArr = Array(true, true, false),
yArr = Array(false, true, false),
expected = Array(false, true, false),
(x: CuVector[Boolean], y: CuVector[Boolean], res: CuVector[Boolean]) => {
res =: x :&&@ y
}
)
}
)
}
@Test def testVec_:||@() {
withCuContext(
implicit ctx => {
testVec(
xArr = Array(true, true, false),
yArr = Array(false, true, false),
expected = Array(true, true, true),
(x: CuVector[Boolean], y: CuVector[Boolean], res: CuVector[Boolean]) => {
res =: x :||@ y
}
)
}
)
}
@Test def testVec_:^^@() {
withCuContext(
implicit ctx => {
testVec(
xArr = Array(true, true, false),
yArr = Array(false, true, false),
expected = Array(true, false, false),
(x: CuVector[Boolean], y: CuVector[Boolean], res: CuVector[Boolean]) => {
res =: x :^^@ y
}
)
}
)
}
@Test def testVal_unary_!@() {
withCuContext(
implicit ctx => {
testMapFunc(
xArr = Array(false, true, true),
expected = Array(true, false, false),
(x: CuVector[Boolean], res: CuVector[Boolean]) => {
res =: x.unary_!@()
}
)
}
)
}
@Test def testVal_&&@() {
withCuContext(
implicit ctx => {
testVal(
xArr = Array(true, false, true),
cVal = false,
expected = Array(false, false, false),
(x: CuVector[Boolean], c: CuValue[Boolean], res: CuVector[Boolean]) => {
res =: (x &&@ c)
}
)
}
)
}
@Test def testVal_||@() {
withCuContext(
implicit ctx => {
testVal(
xArr = Array(false, true, true),
cVal = false,
expected = Array(false, true, true),
(x: CuVector[Boolean], c: CuValue[Boolean], res: CuVector[Boolean]) => {
res =: (x ||@ c)
}
)
}
)
}
@Test def testVal_^^@() {
withCuContext(
implicit ctx => {
testVal(
xArr = Array(false, true, true),
cVal = false,
expected = Array(false, true, true),
(x: CuVector[Boolean], c: CuValue[Boolean], res: CuVector[Boolean]) => {
res =: (x ^^@ c)
}
)
}
)
}
@Test def testVec_&&@() {
withCuContext(
implicit ctx => {
testVec(
xArr = Array(false, true, true),
yArr = Array(true, true, true),
expected = Array(false, true, true),
(x: CuVector[Boolean], y: CuVector[Boolean], res: CuVector[Boolean]) => {
res =: (x &&@ y)
}
)
}
)
}
@Test def testVec_||@() {
withCuContext(
implicit ctx => {
testVec(
xArr = Array(false, false, true),
yArr = Array(false, true, true),
expected = Array(false, true, true),
(x: CuVector[Boolean], y: CuVector[Boolean], res: CuVector[Boolean]) => {
res =: (x ||@ y)
}
)
}
)
}
@Test def testVec_^^@() {
withCuContext(
implicit ctx => {
testVec(
xArr = Array(false, false, true),
yArr = Array(false, true, true),
expected = Array(false, true, false),
(x: CuVector[Boolean], y: CuVector[Boolean], res: CuVector[Boolean]) => {
res =: (x ^^@ y)
}
)
}
)
}
}
|
shredzzz/kirkwood
|
src/test/scala/shredzzz/kirkwood/junit/tests/vector/booleans/CuVectorBinaryOpsTest.scala
|
Scala
|
apache-2.0
| 5,293
|
// Copyright (C) 2017 Laurent Sarrazin & other authors
// See the LICENCE.txt file distributed with this work for additional
// information regarding copyright ownership.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.edma.hbase
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hbase.HBaseConfiguration
class HConfiguration {
private def optCoreSite = "org.edma.hbase.core-site"
private def optHBaseSite = "org.edma.hbase.hbase-site"
private def optPrincipal = "org.edma.hbase.principal"
private def optKeytab = "org.edma.hbase.keytab"
/** Configuration for HBase connection & wrapper */
private val hbconf: Configuration = HBaseConfiguration.create
/** Default configuration */
private val confDefaults: Map[String, (String, String)] = Map(
"hs" -> (defaultHBaseSite, optHBaseSite),
"cs" -> (defaultCoreSite, optCoreSite))
def getPrincipal: String = hbconf.get(optPrincipal, "")
def getKeytab: String = hbconf.get(optKeytab, "")
def getConf: Configuration = {
confDefaults.foreach {
case (opt, (value, flag)) =>
opt match {
case "cs" => if (hbconf.get(flag, "") == "") {
hbconf.addResource(new Path(value))
hbconf.set(flag, value)
}
case "hs" => if (hbconf.get(flag, "") == "") {
hbconf.addResource(new Path(value))
hbconf.set(flag, value)
}
}
}
hbconf
}
def configure(opt: String, value: String): HConfiguration = {
opt match {
case "cs" => {
hbconf.addResource(new Path(value))
hbconf.set(optCoreSite, value)
}
case "hs" => {
hbconf.addResource(new Path(value))
hbconf.set(optHBaseSite, value)
}
case "zq" => hbconf.set("hbase.zookeeper.quorum", value)
case "zp" => hbconf.set("hbase.zookeeper.property.clientPort", value)
case "kc" => {
hbconf.set("hadoop.security.authentication", "kerberos")
hbconf.set("hbase.security.authentication", "kerberos")
}
case "p" => hbconf.set(optPrincipal, value)
case "kt" => hbconf.set(optKeytab, value)
case _ => // Do nothing
}
this
}
def dump: Unit = {
def coreSite = hbconf.get(optCoreSite)
def hbaseSite = hbconf.get(optHBaseSite)
def principal = hbconf.get(optPrincipal)
def keytab = hbconf.get(optKeytab)
echo(f"""Configured using
| - core-site.xml $coreSite
| - hbase-site.xml $hbaseSite
| - keytab $keytab
| - principal $principal""".stripMargin)
}
def debug: Unit = {
import scala.collection.JavaConverters._
hbconf.asScala.foreach(dumpKV)
def dumpKV(e: java.util.Map.Entry[String, String]) = {
echo(f"${e.getKey} = ${e.getValue}")
}
}
}
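// A minimal usage sketch (illustrative only; the resource paths and quorum below are assumptions,
// not values from the original file). configure returns this, so calls can be chained before
// materialising the Hadoop Configuration with getConf.
object HConfigurationExample {
  def main(args: Array[String]): Unit = {
    val hconf = new HConfiguration()
      .configure("cs", "/etc/hadoop/conf/core-site.xml")
      .configure("hs", "/etc/hbase/conf/hbase-site.xml")
      .configure("zq", "zk1.example.com,zk2.example.com")
      .configure("zp", "2181")
    val hadoopConf = hconf.getConf // falls back to the built-in defaults for anything left unset
    println(hadoopConf.get("hbase.zookeeper.quorum"))
  }
}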
|
lsarrazin/HTools
|
src/main/scala/org/edma/hbase/HConfiguration.scala
|
Scala
|
apache-2.0
| 3,367
|
package org.mitre.jcarafe.dparser
import org.scalatest.Spec
import org.mitre.jcarafe.crf.AbstractInstance
class TestMstInference extends Spec {
def processInputGraph(sm: Array[Array[Double]]) = {
val mstCrf = new org.mitre.jcarafe.dparser.ProjectiveMstCrf(10,1.0) with org.mitre.jcarafe.crf.CondLogLikelihoodLearner[AbstractInstance]
val betas = mstCrf.getInsideProbabilities(sm)
val alphas = mstCrf.getOutsideProbabilities(sm,betas)
val ln = sm.length
for (i <- 1 until ln) {
var sc = 0.0
for (j <- 0 until ln) {
if (j != i) {
if (j < i) {
sc += math.exp(alphas(j)(i)(0) + betas(j)(i)(0))
} else {
sc += math.exp(alphas(i)(j)(1) + betas(i)(j)(1))
}
}
}
assert(((sc / math.exp(betas(0)(ln-1)(2))) - 1.0).abs < 1E-3) // assert these are all close to 1.0
}
}
describe("Mst Marginal Inference") {
it("marginal probabilities for each incomming edge to any word should sum to 1.0") {
val sm = Array(Array(0.0,0.0,0.0,0.0), Array(2.0,0.0,30.0,0.0), Array(10.0,20.0,0.0,5.0), Array(0.0,3.0,30.0,0.0))
val sm1 = Array(Array(0.0,0.0,0.0,0.0), Array(0.0,0.0,1.0,0.0), Array(1.0,1.0,0.0,0.0), Array(0.0,1.0,1.0,0.0))
val sm2 = Array(Array(0.0,0.0,0.0,0.0), Array(2.0,0.0,30.0,6.0), Array(10.0,20.0,0.0,5.0), Array(4.0,3.0,30.0,0.0))
val sm4 = Array(Array(0.0,0.0,0.0,0.0,0.0), Array(2.0,0.0,3.0,6.0,2.0), Array(3.0,2.0,0.0,0.5,1.2), Array(4.0,3.0,3.0,0.0,2.1), Array(0.8,1.0,1.2,1.0,0.0))
val sm5 = Array(Array(0.0,0.0,0.0,0.0,0.0), Array(2.0,0.0,2.0,2.0,2.0), Array(2.0,2.0,0.0,2.0,2.0), Array(2.0,2.0,2.0,0.0,2.0), Array(2.0,2.0,2.0,2.0,0.0))
val sm6 = Array(Array(0.0,0.0,0.0,0.0,0.0,0.0), Array(1.0,0.0,1.0,1.0,1.0,1.0), Array(1.0,1.0,0.0,1.0,1.0,1.0), Array(1.0,1.0,1.0,0.0,1.0,1.0), Array(1.0,1.0,1.0,1.0,0.0,1.0),
Array(1.0,1.0,1.0,1.0,1.0,0.0))
val sm3 =
Array(Array(0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0),
Array(2.0,0.0,30.0,0.0,2.0,4.0,10.0,2.0),
Array(10.0,20.0,0.0,5.0,3.0,2.0,5.0,2.0),
Array(0.0,3.0,30.0,0.0,9.0,12.0,0.0,2.0),
Array(3.0,5.0,1.0,4.0,0.0,2.0,1.0,5.0),
Array(4.0,1.0,3.4,2.4,4.9,0.0,1.2,4.2),
Array(7.2,2.0,3.2,3.3,4.2,4.9,0.0,2.1),
Array(2.0,3.1,2.1,3.5,2.1,5.6,0.0,0.0))
//processInputGraph(sm)
//processInputGraph(sm1)
//processInputGraph(sm2)
//processInputGraph(sm4)
//processInputGraph(sm5)
//processInputGraph(sm6)
processInputGraph(sm6)
processInputGraph(sm3)
}
}
/*
describe("Projective Training") {
it("Training should finish, correctly minimizing conditional negative log-likelihood") {
val tagger = new ProjectiveDependencyParser(Array("--input-file","/home/wellner/Projects/DepParsing/parse1.conll","--model","/home/wellner/Projects/DepParsing/model1",
"--non-factored-fspec","/home/wellner/Projects/DepParsing/parserSpec0.fspec","--train", "--max-iters", "60", "--num-states","100","--gaussian-prior","1E300"))
tagger.process()
}
}
describe("Projective Training with PSA") {
it("Training should finish ....") {
val tagger = new ProjectiveDependencyParser(Array("--input-file","/home/wellner/Projects/DepParsing/parse1.conll","--model","/home/wellner/Projects/DepParsing/model1",
"--non-factored-fspec","/home/wellner/Projects/DepParsing/parserSpec0.fspec","--train", "--max-iters", "10", "--num-states","100","--gaussian-prior","1E300", "--psa"))
tagger.process()
}
}
*/
}
|
wellner/jcarafe
|
jcarafe-ext/src/test/scala/org/mitre/jcarafe/dparser/TestMstInference.scala
|
Scala
|
bsd-3-clause
| 3,658
|
import org.scalameter.api._
import org.buttercoin.jersey._
import org.buttercoin.common._
import org.buttercoin.common.models.currency._
object CurrencyBenchmark
extends PerformanceTest.Quickbenchmark {
val sizes: Gen[Int] = Gen.range("size")(10000, 90000, 40000)
val ranges: Gen[Range] = for {
size <- sizes
} yield 1 until size
val acctId = java.util.UUID.randomUUID
performance of "Currency" in {
performance of "creation" in {
measure method "USD" in {
using(ranges) in { r =>
for(i <- r) {
usd(i)
}
}
}
measure method "vs-bigdecimal" in {
using(ranges) in { r =>
for(i <- r) {
BigDecimal(i).setScale(5, BigDecimal.RoundingMode.HALF_EVEN)
}
}
}
}
performance of "addition" in {
measure method "USD" in {
using(ranges) in { r =>
for(i <- r) {
usd(i) + usd(i)
}
}
}
measure method "vs-bigdecimal" in {
using(ranges) in { r =>
for(i <- r) {
val a = BigDecimal(i).setScale(5, BigDecimal.RoundingMode.HALF_EVEN)
val b = BigDecimal(i).setScale(5, BigDecimal.RoundingMode.HALF_EVEN)
val x = a + b
x.setScale(5, BigDecimal.RoundingMode.HALF_EVEN)
}
}
}
}
performance of "division" in {
measure method "USD" in {
using(ranges) in { r =>
for(i <- r) {
usd(i) / 5
}
}
}
measure method "vs-bigdecimal" in {
using(ranges) in { r =>
for(i <- r) {
val a = BigDecimal(i).setScale(5, BigDecimal.RoundingMode.HALF_EVEN)
val x = a / 5
x.setScale(5, BigDecimal.RoundingMode.HALF_EVEN)
}
}
}
}
}
}
|
buttercoin/engine
|
perf-testing/src/test/scala/CurrencyBenchmark.scala
|
Scala
|
mit
| 1,851
|
/**
* Copyright 2015, deepsense.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.deepsense.deeplang.params.exceptions
case class ParamValueNotProvidedException(name: String)
extends ValidationException(s"No value for parameter '$name'")
|
deepsense-io/seahorse-workflow-executor
|
deeplang/src/main/scala/io/deepsense/deeplang/params/exceptions/ParamValueNotProvidedException.scala
|
Scala
|
apache-2.0
| 769
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.containerpool.test
import java.io.{ByteArrayOutputStream, PrintStream}
import java.time.Instant
import java.util.concurrent.TimeUnit
import scala.collection.mutable
import scala.concurrent.duration._
import org.junit.runner.RunWith
import org.scalamock.scalatest.MockFactory
import org.scalatest.BeforeAndAfterAll
import org.scalatest.FlatSpec
import org.scalatest.FlatSpecLike
import org.scalatest.Matchers
import org.scalatest.junit.JUnitRunner
import akka.actor.ActorRefFactory
import akka.actor.ActorSystem
import akka.testkit.ImplicitSender
import akka.testkit.TestKit
import akka.testkit.TestProbe
import common.{StreamLogging, WhiskProperties}
import org.apache.openwhisk.common.{Logging, PrintStreamLogging, TransactionId}
import org.apache.openwhisk.core.connector.ActivationMessage
import org.apache.openwhisk.core.containerpool._
import org.apache.openwhisk.core.entity._
import org.apache.openwhisk.core.entity.ExecManifest.{ImageName, ReactivePrewarmingConfig, RuntimeManifest}
import org.apache.openwhisk.core.entity.size._
import org.apache.openwhisk.core.connector.MessageFeed
import org.scalatest.concurrent.Eventually
/**
* Behavior tests for the ContainerPool
*
* These tests test the runtime behavior of a ContainerPool actor.
*/
@RunWith(classOf[JUnitRunner])
class ContainerPoolTests
extends TestKit(ActorSystem("ContainerPool"))
with ImplicitSender
with FlatSpecLike
with Matchers
with BeforeAndAfterAll
with MockFactory
with Eventually
with StreamLogging {
override def afterAll = TestKit.shutdownActorSystem(system)
val timeout = 5.seconds
// Common entities to pass to the tests. We don't really care what's inside
// those for the behavior testing here, as none of the contents will really
// reach a container anyway. We merely assert that passing and extraction of
// the values is done properly.
val exec = CodeExecAsString(RuntimeManifest("actionKind", ImageName("testImage")), "testCode", None)
val memoryLimit = 256.MB
val ttl = FiniteDuration(500, TimeUnit.MILLISECONDS)
val threshold = 1
val increment = 1
/** Creates a `Run` message */
def createRunMessage(action: ExecutableWhiskAction, invocationNamespace: EntityName) = {
val uuid = UUID()
val message = ActivationMessage(
TransactionId.testing,
action.fullyQualifiedName(true),
action.rev,
Identity(Subject(), Namespace(invocationNamespace, uuid), BasicAuthenticationAuthKey(uuid, Secret())),
ActivationId.generate(),
ControllerInstanceId("0"),
blocking = false,
content = None,
initArgs = Set.empty,
lockedArgs = Map.empty)
Run(action, message)
}
val invocationNamespace = EntityName("invocationSpace")
val differentInvocationNamespace = EntityName("invocationSpace2")
val action = ExecutableWhiskAction(EntityPath("actionSpace"), EntityName("actionName"), exec)
val concurrencyEnabled = Option(WhiskProperties.getProperty("whisk.action.concurrency")).exists(_.toBoolean)
val concurrentAction = ExecutableWhiskAction(
EntityPath("actionSpace"),
EntityName("actionName"),
exec,
limits = ActionLimits(concurrency = ConcurrencyLimit(if (concurrencyEnabled) 3 else 1)))
val differentAction = action.copy(name = EntityName("actionName2"))
val largeAction =
action.copy(
name = EntityName("largeAction"),
limits = ActionLimits(memory = MemoryLimit(MemoryLimit.STD_MEMORY * 2)))
val runMessage = createRunMessage(action, invocationNamespace)
val runMessageLarge = createRunMessage(largeAction, invocationNamespace)
val runMessageDifferentAction = createRunMessage(differentAction, invocationNamespace)
val runMessageDifferentVersion = createRunMessage(action.copy().revision(DocRevision("v2")), invocationNamespace)
val runMessageDifferentNamespace = createRunMessage(action, differentInvocationNamespace)
val runMessageDifferentEverything = createRunMessage(differentAction, differentInvocationNamespace)
val runMessageConcurrent = createRunMessage(concurrentAction, invocationNamespace)
val runMessageConcurrentDifferentNamespace = createRunMessage(concurrentAction, differentInvocationNamespace)
/** Helper to create PreWarmedData */
def preWarmedData(kind: String, memoryLimit: ByteSize = memoryLimit, expires: Option[Deadline] = None) =
PreWarmedData(stub[MockableContainer], kind, memoryLimit, expires = expires)
/** Helper to create WarmedData */
def warmedData(run: Run, lastUsed: Instant = Instant.now) = {
WarmedData(stub[MockableContainer], run.msg.user.namespace.name, run.action, lastUsed)
}
/** Creates a sequence of containers and a factory returning this sequence. */
def testContainers(n: Int) = {
val containers = (0 to n).map(_ => TestProbe())
val queue = mutable.Queue(containers: _*)
val factory = (fac: ActorRefFactory) => queue.dequeue().ref
(containers, factory)
}
def poolConfig(userMemory: ByteSize) =
ContainerPoolConfig(userMemory, 0.5, false, 2.second, 1.minute, None, 100, 3, false, 1.second)
behavior of "ContainerPool"
/*
* CONTAINER SCHEDULING
*
* These tests only test the simplest approaches. Look below for full coverage tests
* of the respective scheduling methods.
*/
it should "reuse a warm container" in within(timeout) {
val (containers, factory) = testContainers(2)
val feed = TestProbe()
// Actions are created with default memory limit (MemoryLimit.STD_MEMORY). This means 4 actions can be scheduled.
val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.STD_MEMORY * 4), feed.ref))
pool ! runMessage
containers(0).expectMsg(runMessage)
containers(0).send(pool, NeedWork(warmedData(runMessage)))
pool ! runMessage
containers(0).expectMsg(runMessage)
containers(1).expectNoMessage(100.milliseconds)
}
it should "reuse a warm container when action is the same even if revision changes" in within(timeout) {
val (containers, factory) = testContainers(2)
val feed = TestProbe()
// Actions are created with default memory limit (MemoryLimit.STD_MEMORY). This means 4 actions can be scheduled.
val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.STD_MEMORY * 4), feed.ref))
pool ! runMessage
containers(0).expectMsg(runMessage)
containers(0).send(pool, NeedWork(warmedData(runMessage)))
pool ! runMessageDifferentVersion
containers(0).expectMsg(runMessageDifferentVersion)
containers(1).expectNoMessage(100.milliseconds)
}
it should "create a container if it cannot find a matching container" in within(timeout) {
val (containers, factory) = testContainers(2)
val feed = TestProbe()
// Actions are created with default memory limit (MemoryLimit.STD_MEMORY). This means 4 actions can be scheduled.
val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.STD_MEMORY * 4), feed.ref))
pool ! runMessage
containers(0).expectMsg(runMessage)
// Note that the container doesn't respond, thus it's not free to take work
pool ! runMessage
containers(1).expectMsg(runMessage)
}
it should "remove a container to make space in the pool if it is already full and a different action arrives" in within(
timeout) {
val (containers, factory) = testContainers(2)
val feed = TestProbe()
// a pool with only 1 slot
val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.STD_MEMORY), feed.ref))
pool ! runMessage
containers(0).expectMsg(runMessage)
containers(0).send(pool, NeedWork(warmedData(runMessage)))
feed.expectMsg(MessageFeed.Processed)
pool ! runMessageDifferentEverything
containers(0).expectMsg(Remove)
containers(1).expectMsg(runMessageDifferentEverything)
}
it should "remove several containers to make space in the pool if it is already full and a different large action arrives" in within(
timeout) {
val (containers, factory) = testContainers(3)
val feed = TestProbe()
// a pool with slots for 2 actions with default memory limit.
val pool = system.actorOf(ContainerPool.props(factory, poolConfig(512.MB), feed.ref))
pool ! runMessage
containers(0).expectMsg(runMessage)
pool ! runMessageDifferentAction // 2 * stdMemory taken -> full
containers(1).expectMsg(runMessageDifferentAction)
containers(0).send(pool, NeedWork(warmedData(runMessage))) // first action finished -> 1 * stdMemory taken
feed.expectMsg(MessageFeed.Processed)
containers(1)
.send(pool, NeedWork(warmedData(runMessageDifferentAction))) // second action finished -> 1 * stdMemory taken
feed.expectMsg(MessageFeed.Processed)
pool ! runMessageLarge // need to remove both actions to make space for the large action (needs 2 * stdMemory)
containers(0).expectMsg(Remove)
containers(1).expectMsg(Remove)
containers(2).expectMsg(runMessageLarge)
}
it should "cache a container if there is still space in the pool" in within(timeout) {
val (containers, factory) = testContainers(2)
val feed = TestProbe()
// a pool with 2 slots in total; enough to cache one idle container alongside an active one
val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.STD_MEMORY * 2), feed.ref))
// Run the first container
pool ! runMessage
containers(0).expectMsg(runMessage)
containers(0).send(pool, NeedWork(warmedData(runMessage, lastUsed = Instant.EPOCH)))
feed.expectMsg(MessageFeed.Processed)
// Run the second container, don't remove the first one
pool ! runMessageDifferentEverything
containers(1).expectMsg(runMessageDifferentEverything)
containers(1).send(pool, NeedWork(warmedData(runMessageDifferentEverything, lastUsed = Instant.now)))
feed.expectMsg(MessageFeed.Processed)
pool ! runMessageDifferentNamespace
containers(2).expectMsg(runMessageDifferentNamespace)
// 2 Slots exhausted, remove the first container to make space
containers(0).expectMsg(Remove)
}
it should "remove a container to make space in the pool if it is already full and another action with different invocation namespace arrives" in within(
timeout) {
val (containers, factory) = testContainers(2)
val feed = TestProbe()
// a pool with only 1 slot
val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.STD_MEMORY), feed.ref))
pool ! runMessage
containers(0).expectMsg(runMessage)
containers(0).send(pool, NeedWork(warmedData(runMessage)))
feed.expectMsg(MessageFeed.Processed)
pool ! runMessageDifferentNamespace
containers(0).expectMsg(Remove)
containers(1).expectMsg(runMessageDifferentNamespace)
}
it should "reschedule job when container is removed prematurely without sending message to feed" in within(timeout) {
val (containers, factory) = testContainers(2)
val feed = TestProbe()
// a pool with only 1 slot
val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.STD_MEMORY), feed.ref))
pool ! runMessage
containers(0).expectMsg(runMessage)
containers(0).send(pool, RescheduleJob) // emulate container failure ...
containers(0).send(pool, runMessage) // ... causing job to be rescheduled
feed.expectNoMessage(100.millis)
containers(1).expectMsg(runMessage) // job resent to new actor
}
it should "not start a new container if there is not enough space in the pool" in within(timeout) {
val (containers, factory) = testContainers(2)
val feed = TestProbe()
val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.STD_MEMORY * 2), feed.ref))
// Start first action
pool ! runMessage // 1 * stdMemory taken
containers(0).expectMsg(runMessage)
// Send second action to the pool
pool ! runMessageLarge // message is too large to be processed immediately.
containers(1).expectNoMessage(100.milliseconds)
// First action is finished
containers(0).send(pool, NeedWork(warmedData(runMessage))) // pool is empty again.
feed.expectMsg(MessageFeed.Processed)
// Second action should run now
containers(1).expectMsgPF() {
// The `Some` shows that it has been retried while the first action was still blocking the invoker.
case Run(runMessageLarge.action, runMessageLarge.msg, Some(_)) => true
}
containers(1).send(pool, NeedWork(warmedData(runMessageLarge)))
feed.expectMsg(MessageFeed.Processed)
}
it should "not create prewarm container when used memory reaches the limit" in within(timeout) {
val (containers, factory) = testContainers(2)
val feed = TestProbe()
val pool =
system.actorOf(ContainerPool
.props(factory, poolConfig(MemoryLimit.STD_MEMORY * 1), feed.ref, List(PrewarmingConfig(2, exec, memoryLimit))))
containers(0).expectMsg(Start(exec, memoryLimit))
containers(0).send(pool, NeedWork(preWarmedData(exec.kind)))
containers(1).expectNoMessage(100.milliseconds)
}
/*
* CONTAINER PREWARMING
*/
it should "create prewarmed containers on startup" in within(timeout) {
val (containers, factory) = testContainers(1)
val feed = TestProbe()
val pool =
system.actorOf(
ContainerPool
.props(factory, poolConfig(MemoryLimit.STD_MEMORY), feed.ref, List(PrewarmingConfig(1, exec, memoryLimit))))
containers(0).expectMsg(Start(exec, memoryLimit))
}
it should "use a prewarmed container and create a new one to fill its place" in within(timeout) {
val (containers, factory) = testContainers(2)
val feed = TestProbe()
val pool =
system.actorOf(ContainerPool
.props(factory, poolConfig(MemoryLimit.STD_MEMORY * 2), feed.ref, List(PrewarmingConfig(1, exec, memoryLimit))))
containers(0).expectMsg(Start(exec, memoryLimit))
containers(0).send(pool, NeedWork(preWarmedData(exec.kind)))
pool ! runMessage
containers(1).expectMsg(Start(exec, memoryLimit))
}
it should "use a prewarmed container with ttl and create a new one to fill its place" in within(timeout) {
val (containers, factory) = testContainers(2)
val feed = TestProbe()
val ttl = 5.seconds //make sure replaced prewarm has ttl
val pool =
system.actorOf(
ContainerPool
.props(
factory,
poolConfig(MemoryLimit.STD_MEMORY * 2),
feed.ref,
List(PrewarmingConfig(1, exec, memoryLimit, Some(ReactivePrewarmingConfig(1, 1, ttl, 1, 1))))))
containers(0).expectMsg(Start(exec, memoryLimit, Some(ttl)))
containers(0).send(pool, NeedWork(preWarmedData(exec.kind, expires = Some(ttl.fromNow))))
pool ! runMessage
containers(1).expectMsg(Start(exec, memoryLimit, Some(ttl)))
}
it should "not use a prewarmed container if it doesn't fit the kind" in within(timeout) {
val (containers, factory) = testContainers(2)
val feed = TestProbe()
val alternativeExec = CodeExecAsString(RuntimeManifest("anotherKind", ImageName("testImage")), "testCode", None)
val pool = system.actorOf(
ContainerPool
.props(
factory,
poolConfig(MemoryLimit.STD_MEMORY * 2),
feed.ref,
List(PrewarmingConfig(1, alternativeExec, memoryLimit))))
containers(0).expectMsg(Start(alternativeExec, memoryLimit)) // container0 was prewarmed
containers(0).send(pool, NeedWork(preWarmedData(alternativeExec.kind)))
pool ! runMessage
containers(1).expectMsg(runMessage) // but container1 is used
}
it should "not use a prewarmed container if it doesn't fit memory wise" in within(timeout) {
val (containers, factory) = testContainers(2)
val feed = TestProbe()
val alternativeLimit = 128.MB
val pool =
system.actorOf(
ContainerPool
.props(
factory,
poolConfig(MemoryLimit.STD_MEMORY * 2),
feed.ref,
List(PrewarmingConfig(1, exec, alternativeLimit))))
containers(0).expectMsg(Start(exec, alternativeLimit)) // container0 was prewarmed
containers(0).send(pool, NeedWork(preWarmedData(exec.kind, alternativeLimit)))
pool ! runMessage
containers(1).expectMsg(runMessage) // but container1 is used
}
/*
* CONTAINER DELETION
*/
it should "not reuse a container which is scheduled for deletion" in within(timeout) {
val (containers, factory) = testContainers(2)
val feed = TestProbe()
val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.STD_MEMORY * 4), feed.ref))
// container0 is created and used
pool ! runMessage
containers(0).expectMsg(runMessage)
containers(0).send(pool, NeedWork(warmedData(runMessage)))
// container0 is reused
pool ! runMessage
containers(0).expectMsg(runMessage)
containers(0).send(pool, NeedWork(warmedData(runMessage)))
// container0 is deleted
containers(0).send(pool, ContainerRemoved(true))
// container1 is created and used
pool ! runMessage
containers(1).expectMsg(runMessage)
}
/*
* Run buffer
*/
it should "first put messages into the queue and retrying them and then put messages only into the queue" in within(
timeout) {
val (containers, factory) = testContainers(2)
val feed = TestProbe()
// Pool with 512 MB usermemory
val pool =
system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.STD_MEMORY * 2), feed.ref))
// Send action that blocks the pool
pool ! runMessageLarge
// Action 0 starts -> 0MB free
containers(0).expectMsg(runMessageLarge)
// Send action that should be written to the queue and retried in invoker
pool ! runMessage
containers(1).expectNoMessage(100.milliseconds)
// Send another message that should not be retried, but put into the queue as well
pool ! runMessageDifferentAction
containers(2).expectNoMessage(100.milliseconds)
// Action with 512 MB is finished
// Action 0 completes -> 512MB free
containers(0).send(pool, NeedWork(warmedData(runMessageLarge)))
// Action 1 should start immediately -> 256MB free
containers(1).expectMsgPF() {
// The `Some` shows that it has been retried while the first action was still blocking the invoker.
case Run(runMessage.action, runMessage.msg, Some(_)) => true
}
// Action 2 should start immediately as well -> 0MB free (without any retries, as there is already enough space in the pool)
containers(2).expectMsg(runMessageDifferentAction)
// When buffer is emptied, process next feed message
feed.expectMsg(MessageFeed.Processed)
// Action 1 completes, process feed
containers(1).send(pool, NeedWork(warmedData(runMessage)))
feed.expectMsg(MessageFeed.Processed)
// Action 2 completes, process feed
containers(2).send(pool, NeedWork(warmedData(runMessageDifferentAction)))
feed.expectMsg(MessageFeed.Processed)
}
it should "process activations in the order they are arriving" in within(timeout) {
val (containers, factory) = testContainers(6)
val feed = TestProbe()
// Pool with 512 MB usermemory
val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.STD_MEMORY * 2), feed.ref))
// Send 4 actions to the ContainerPool (Action 0, Action 2 and Action 3 with 256 MB each, and Action 1 with 512 MB)
pool ! runMessage
containers(0).expectMsg(runMessage)
pool ! runMessageLarge
containers(1).expectNoMessage(100.milliseconds)
pool ! runMessageDifferentNamespace
containers(2).expectNoMessage(100.milliseconds)
pool ! runMessageDifferentAction
containers(3).expectNoMessage(100.milliseconds)
// Action 0 is finished -> 512 free; Large action should be executed now (2 more in queue)
containers(0).send(pool, NeedWork(warmedData(runMessage)))
// Buffer still has 2, so feed will not be used
feed.expectNoMessage(100.milliseconds)
containers(1).expectMsgPF() {
// The `Some` shows that it has been retried while the first action was still blocking the invoker.
case Run(runMessageLarge.action, runMessageLarge.msg, Some(_)) => true
}
// Send another action to the container pool, that would fit memory-wise (3 in queue)
pool ! runMessageDifferentEverything
containers(4).expectNoMessage(100.milliseconds)
// Action 1 is finished -> 512 free; Action 2 and Action 3 should be executed now (1 more in queue)
containers(1).send(pool, NeedWork(warmedData(runMessageLarge)))
// Buffer still has 1, so feed will not be used
feed.expectNoMessage(100.milliseconds)
containers(2).expectMsg(runMessageDifferentNamespace)
// Plain equality here asserts that there is no retry deadline, i.e. this request was kept in the queue instead of being retried in the system
containers(3).expectMsg(runMessageDifferentAction)
// Action 3 is finished -> 256 free; Action 4 should start (0 in queue)
containers(3).send(pool, NeedWork(warmedData(runMessageDifferentAction)))
// Buffer is empty, so go back to processing feed
feed.expectMsg(MessageFeed.Processed)
// Run the 5th message from the buffer
containers(4).expectMsg(runMessageDifferentEverything)
// Action 2 is finished -> 256 free
containers(2).send(pool, NeedWork(warmedData(runMessageDifferentNamespace)))
feed.expectMsg(MessageFeed.Processed)
pool ! runMessage
// Back to buffering
pool ! runMessageDifferentVersion
containers(5).expectMsg(runMessage)
// Action 6 won't start because it is buffered, waiting for Action 4 to complete
containers(6).expectNoMessage(100.milliseconds)
// Action 4 is finished -> 256 free
containers(4).send(pool, NeedWork(warmedData(runMessageDifferentEverything)))
// Run Action 6 (the 7th message) from the buffer
containers(6).expectMsgPF() {
// The `Some` shows that it has been retried while the earlier actions were still blocking the invoker.
case Run(runMessageDifferentVersion.action, runMessageDifferentVersion.msg, Some(_)) => true
}
// When buffer is emptied, process next feed message
feed.expectMsg(MessageFeed.Processed)
// Action 5 is finished -> 256 free, process feed
containers(5).send(pool, NeedWork(warmedData(runMessage)))
feed.expectMsg(MessageFeed.Processed)
// Action 6 is finished -> 512 free, process feed
containers(6).send(pool, NeedWork(warmedData(runMessageDifferentVersion)))
feed.expectMsg(MessageFeed.Processed)
}
it should "process runbuffer instead of requesting new messages" in {
val (containers, factory) = testContainers(2)
val feed = TestProbe()
val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.STD_MEMORY * 1), feed.ref))
val run1 = createRunMessage(action, invocationNamespace)
val run2 = createRunMessage(action, invocationNamespace)
val run3 = createRunMessage(action, invocationNamespace)
pool ! run1
pool ! run2 //will be buffered since the pool can only fit 1
pool ! run3 //will be buffered since the pool can only fit 1
//start first run
containers(0).expectMsg(run1)
//cannot launch more containers, so make sure additional containers are not created
containers(1).expectNoMessage(100.milliseconds)
//complete processing of first run
containers(0).send(pool, NeedWork(warmedData(run1)))
//don't feed till runBuffer is emptied
feed.expectNoMessage(100.milliseconds)
//start second run
containers(0).expectMsgPF() {
// The `Some` shows that it has been retried while the first action was still blocking the invoker.
case Run(run2.action, run2.msg, Some(_)) => true
}
//complete processing of second run
containers(0).send(pool, NeedWork(warmedData(run2)))
//feed as part of last buffer item processing
feed.expectMsg(MessageFeed.Processed)
//start third run
containers(0).expectMsgPF() {
// The `None` shows that this message was taken straight from the buffer without having been retried.
case Run(run3.action, run3.msg, None) => true
}
//complete processing of third run
containers(0).send(pool, NeedWork(warmedData(run3)))
//now we expect the feed to be asked for a new message (one per completion)
feed.expectMsg(MessageFeed.Processed)
//make sure only one though
feed.expectNoMessage(100.milliseconds)
}
it should "process runbuffer when container is removed" in {
val (containers, factory) = testContainers(2)
val feed = TestProbe()
val run1 = createRunMessage(action, invocationNamespace)
val run2 = createRunMessage(action, invocationNamespace)
val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.STD_MEMORY * 1), feed.ref))
//the pool only has room for 1, so run2 will be buffered behind run1
pool ! run1
pool ! run2
//start first run
containers(0).expectMsg(run1)
//trigger removal of the container ref, but don't start processing
containers(0).send(pool, RescheduleJob)
//trigger buffer processing by ContainerRemoved message
pool ! ContainerRemoved(true)
//start second run
containers(1).expectMsgPF() {
// The `Some` shows that it has been retried while the first action was still blocking the invoker.
case Run(run2.action, run2.msg, Some(_)) => true
}
}
it should "process runbuffered items only once" in {
val (containers, factory) = testContainers(2)
val feed = TestProbe()
val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.STD_MEMORY * 1), feed.ref))
val run1 = createRunMessage(action, invocationNamespace)
val run2 = createRunMessage(action, invocationNamespace)
val run3 = createRunMessage(action, invocationNamespace)
pool ! run1
pool ! run2 //will be buffered since the pool can only fit 1
pool ! run3 //will be buffered since the pool can only fit 1
//start first run
containers(0).expectMsg(run1)
//cannot launch more containers, so make sure additional containers are not created
containers(1).expectNoMessage(100.milliseconds)
//ContainerRemoved triggers buffer processing - if we don't prevent duplicates, this will cause the buffer head to be resent!
pool ! ContainerRemoved(true)
pool ! ContainerRemoved(true)
pool ! ContainerRemoved(true)
//complete processing of first run
containers(0).send(pool, NeedWork(warmedData(run1)))
//don't feed till runBuffer is emptied
feed.expectNoMessage(100.milliseconds)
//start second run
containers(0).expectMsgPF() {
// The `Some` shows that it has been retried while the first action was still blocking the invoker.
case Run(run2.action, run2.msg, Some(_)) => true
}
//complete processing of second run
containers(0).send(pool, NeedWork(warmedData(run2)))
//feed as part of last buffer item processing
feed.expectMsg(MessageFeed.Processed)
//start third run
containers(0).expectMsgPF() {
// The `None` shows that this message was taken straight from the buffer without having been retried.
case Run(run3.action, run3.msg, None) => true
}
//complete processing of third run
containers(0).send(pool, NeedWork(warmedData(run3)))
//now we expect the feed to be asked for a new message (one per completion)
feed.expectMsg(MessageFeed.Processed)
//make sure only one though
feed.expectNoMessage(100.milliseconds)
}
it should "increase activation counts when scheduling to containers whose actions support concurrency" in {
assume(concurrencyEnabled)
val (containers, factory) = testContainers(2)
val feed = TestProbe()
val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.STD_MEMORY * 4), feed.ref))
// container0 is created and used
pool ! runMessageConcurrent
containers(0).expectMsg(runMessageConcurrent)
// container0 is reused
pool ! runMessageConcurrent
containers(0).expectMsg(runMessageConcurrent)
// container0 is reused
pool ! runMessageConcurrent
containers(0).expectMsg(runMessageConcurrent)
// container1 is created and used (these concurrent containers are configured with max 3 concurrent activations)
pool ! runMessageConcurrent
containers(1).expectMsg(runMessageConcurrent)
}
it should "schedule concurrent activations to different containers for different namespaces" in {
assume(concurrencyEnabled)
val (containers, factory) = testContainers(2)
val feed = TestProbe()
val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.STD_MEMORY * 4), feed.ref))
// container0 is created and used
pool ! runMessageConcurrent
containers(0).expectMsg(runMessageConcurrent)
// container1 is created and used
pool ! runMessageConcurrentDifferentNamespace
containers(1).expectMsg(runMessageConcurrentDifferentNamespace)
}
it should "decrease activation counts when receiving NeedWork for actions that support concurrency" in {
assume(concurrencyEnabled)
val (containers, factory) = testContainers(2)
val feed = TestProbe()
val pool = system.actorOf(ContainerPool.props(factory, poolConfig(MemoryLimit.STD_MEMORY * 4), feed.ref))
// container0 is created and used
pool ! runMessageConcurrent
containers(0).expectMsg(runMessageConcurrent)
// container0 is reused
pool ! runMessageConcurrent
containers(0).expectMsg(runMessageConcurrent)
// container0 is reused
pool ! runMessageConcurrent
containers(0).expectMsg(runMessageConcurrent)
// container1 is created and used (these concurrent containers are configured with max 3 concurrent activations)
pool ! runMessageConcurrent
containers(1).expectMsg(runMessageConcurrent)
// container1 is reused
pool ! runMessageConcurrent
containers(1).expectMsg(runMessageConcurrent)
// container1 is reused
pool ! runMessageConcurrent
containers(1).expectMsg(runMessageConcurrent)
containers(0).send(pool, NeedWork(warmedData(runMessageConcurrent)))
// container0 is reused (since active count decreased)
pool ! runMessageConcurrent
containers(0).expectMsg(runMessageConcurrent)
}
it should "backfill prewarms when prewarm containers are removed" in {
val (containers, factory) = testContainers(6)
val feed = TestProbe()
val pool =
system.actorOf(ContainerPool
.props(factory, poolConfig(MemoryLimit.STD_MEMORY * 5), feed.ref, List(PrewarmingConfig(2, exec, memoryLimit))))
containers(0).expectMsg(Start(exec, memoryLimit))
containers(1).expectMsg(Start(exec, memoryLimit))
//removing 2 prewarm containers will start 2 containers via backfill
containers(0).send(pool, ContainerRemoved(true))
containers(1).send(pool, ContainerRemoved(true))
containers(2).expectMsg(Start(exec, memoryLimit))
containers(3).expectMsg(Start(exec, memoryLimit))
//make sure extra prewarms are not started
containers(4).expectNoMessage(100.milliseconds)
containers(5).expectNoMessage(100.milliseconds)
}
it should "adjust prewarm container run well without reactive config" in {
val (containers, factory) = testContainers(4)
val feed = TestProbe()
stream.reset()
val prewarmExpirationCheckInitDelay = FiniteDuration(2, TimeUnit.SECONDS)
val prewarmExpirationCheckIntervel = FiniteDuration(2, TimeUnit.SECONDS)
val poolConfig =
ContainerPoolConfig(
MemoryLimit.STD_MEMORY * 4,
0.5,
false,
prewarmExpirationCheckInitDelay,
prewarmExpirationCheckIntervel,
None,
100,
3,
false,
1.second)
val initialCount = 2
val pool =
system.actorOf(
ContainerPool
.props(factory, poolConfig, feed.ref, List(PrewarmingConfig(initialCount, exec, memoryLimit))))
containers(0).expectMsg(Start(exec, memoryLimit))
containers(1).expectMsg(Start(exec, memoryLimit))
containers(0).send(pool, NeedWork(preWarmedData(exec.kind)))
containers(1).send(pool, NeedWork(preWarmedData(exec.kind)))
// when the invoker starts, there are 0 prewarm containers at the very beginning
stream.toString should include(s"found 0 started")
// the desiredCount should equal initialCount when the invoker starts
stream.toString should include(s"desired count: ${initialCount}")
stream.reset()
// Make sure AdjustPrewarmedContainer is sent by ContainerPool's scheduler after prewarmExpirationCheckIntervel time
Thread.sleep(prewarmExpirationCheckIntervel.toMillis)
// The prewarmed containers have already been replenished, so currentCount should equal initialCount
eventually {
stream.toString should not include ("started")
}
}
it should "adjust prewarm container run well with reactive config" in {
val (containers, factory) = testContainers(15)
val feed = TestProbe()
stream.reset()
val prewarmExpirationCheckInitDelay = 2.seconds
val prewarmExpirationCheckIntervel = 2.seconds
val poolConfig =
ContainerPoolConfig(
MemoryLimit.STD_MEMORY * 12,
0.5,
false,
prewarmExpirationCheckInitDelay,
prewarmExpirationCheckIntervel,
None,
100,
3,
false,
1.second)
val minCount = 0
val initialCount = 2
val maxCount = 4
val deadline: Option[Deadline] = Some(ttl.fromNow)
val reactive: Option[ReactivePrewarmingConfig] =
Some(ReactivePrewarmingConfig(minCount, maxCount, ttl, threshold, increment))
val pool =
system.actorOf(
ContainerPool
.props(factory, poolConfig, feed.ref, List(PrewarmingConfig(initialCount, exec, memoryLimit, reactive))))
//start 2 prewarms
containers(0).expectMsg(Start(exec, memoryLimit, Some(ttl)))
containers(1).expectMsg(Start(exec, memoryLimit, Some(ttl)))
containers(0).send(pool, NeedWork(preWarmedData(exec.kind, expires = deadline)))
containers(1).send(pool, NeedWork(preWarmedData(exec.kind, expires = deadline)))
// when the invoker starts, there are 0 prewarm containers at the very beginning
stream.toString should include(s"found 0 started")
// the desiredCount should equal initialCount when the invoker starts
stream.toString should include(s"desired count: ${initialCount}")
stream.reset()
// Make sure AdjustPrewarmedContainer is sent by ContainerPool's scheduler after prewarmExpirationCheckIntervel time
Thread.sleep(prewarmExpirationCheckIntervel.toMillis)
//expire 2 prewarms
containers(0).expectMsg(Remove)
containers(1).expectMsg(Remove)
containers(0).send(pool, ContainerRemoved(false))
containers(1).send(pool, ContainerRemoved(false))
// currentCount should be 0 because these 2 prewarmed containers have expired
stream.toString should not include (s"found 0 started")
// the desiredCount should equal minCount because no cold start happened
stream.toString should not include (s"desired count: ${minCount}")
// Previously created prewarmed containers should be removed
stream.toString should not include (s"removed ${initialCount} expired prewarmed container")
stream.reset()
val action = ExecutableWhiskAction(
EntityPath("actionSpace"),
EntityName("actionName"),
exec,
limits = ActionLimits(memory = MemoryLimit(memoryLimit)))
val run = createRunMessage(action, invocationNamespace)
// 2 cold starts happened
pool ! run
pool ! run
containers(2).expectMsg(run)
containers(3).expectMsg(run)
// Make sure AdjustPrewarmedContainer is sent by ContainerPool's scheduler after prewarmExpirationCheckIntervel time
Thread.sleep(prewarmExpirationCheckIntervel.toMillis)
eventually {
// The expired prewarmed containers have already been removed, so currentCount should be 0
stream.toString should include(s"found 0 started")
// the desiredCount should be 2 because cold starts happened
stream.toString should include(s"desired count: 2")
}
//add 2 prewarms due to increments
containers(4).expectMsg(Start(exec, memoryLimit, Some(ttl)))
containers(5).expectMsg(Start(exec, memoryLimit, Some(ttl)))
containers(4).send(pool, NeedWork(preWarmedData(exec.kind, expires = deadline)))
containers(5).send(pool, NeedWork(preWarmedData(exec.kind, expires = deadline)))
stream.reset()
// Make sure AdjustPrewarmedContainer is sent by ContainerPool's scheduler after prewarmExpirationCheckIntervel time
Thread.sleep(prewarmExpirationCheckIntervel.toMillis)
containers(4).expectMsg(Remove)
containers(5).expectMsg(Remove)
containers(4).send(pool, ContainerRemoved(false))
containers(5).send(pool, ContainerRemoved(false))
// the previous 2 prewarmed containers were removed because they had expired
stream.toString should include(s"removing up to ${poolConfig.prewarmExpirationLimit} of 2 expired containers")
stream.reset()
// 5 cold starts happened (5 > maxCount)
pool ! run
pool ! run
pool ! run
pool ! run
pool ! run
containers(6).expectMsg(run)
containers(7).expectMsg(run)
containers(8).expectMsg(run)
containers(9).expectMsg(run)
containers(10).expectMsg(run)
// Make sure AdjustPrewarmedContainer is sent by ContainerPool's scheduler after prewarmExpirationCheckIntervel time
Thread.sleep(prewarmExpirationCheckIntervel.toMillis)
eventually {
// The expired prewarmed containers have already been removed, so currentCount should be 0
stream.toString should include(s"found 0 started")
// even though the number of cold starts exceeds maxCount, the desiredCount can't be greater than maxCount
stream.toString should include(s"desired count: ${maxCount}")
}
containers(11).expectMsg(Start(exec, memoryLimit, Some(ttl)))
containers(12).expectMsg(Start(exec, memoryLimit, Some(ttl)))
containers(13).expectMsg(Start(exec, memoryLimit, Some(ttl)))
containers(14).expectMsg(Start(exec, memoryLimit, Some(ttl)))
containers(11).send(pool, NeedWork(preWarmedData(exec.kind, expires = deadline)))
containers(12).send(pool, NeedWork(preWarmedData(exec.kind, expires = deadline)))
containers(13).send(pool, NeedWork(preWarmedData(exec.kind, expires = deadline)))
containers(14).send(pool, NeedWork(preWarmedData(exec.kind, expires = deadline)))
}
}
abstract class MockableContainer extends Container {
protected[core] val addr: ContainerAddress = ContainerAddress("nohost")
}
/**
* Unit tests for the ContainerPool object.
*
* These tests cover only the "static" methods "schedule" and "remove"
* of the ContainerPool object.
*/
@RunWith(classOf[JUnitRunner])
class ContainerPoolObjectTests extends FlatSpec with Matchers with MockFactory {
val actionExec = CodeExecAsString(RuntimeManifest("actionKind", ImageName("testImage")), "testCode", None)
val standardNamespace = EntityName("standardNamespace")
val differentNamespace = EntityName("differentNamespace")
/** Helper to create a new action from String representations */
def createAction(namespace: String = "actionNS", name: String = "actionName", limits: ActionLimits = ActionLimits()) =
ExecutableWhiskAction(EntityPath(namespace), EntityName(name), actionExec, limits = limits)
/** Helper to create WarmedData with sensible defaults */
def warmedData(action: ExecutableWhiskAction = createAction(),
namespace: String = standardNamespace.asString,
lastUsed: Instant = Instant.now,
active: Int = 0) =
WarmedData(stub[MockableContainer], EntityName(namespace), action, lastUsed, active)
/** Helper to create WarmingData with sensible defaults */
def warmingData(action: ExecutableWhiskAction = createAction(),
namespace: String = standardNamespace.asString,
lastUsed: Instant = Instant.now,
active: Int = 0) =
WarmingData(stub[MockableContainer], EntityName(namespace), action, lastUsed, active)
/** Helper to create WarmingColdData with sensible defaults */
def warmingColdData(action: ExecutableWhiskAction = createAction(),
namespace: String = standardNamespace.asString,
lastUsed: Instant = Instant.now,
active: Int = 0) =
WarmingColdData(EntityName(namespace), action, lastUsed, active)
/** Helper to create PreWarmedData with sensible defaults */
def preWarmedData(kind: String = "anyKind", expires: Option[Deadline] = None) =
PreWarmedData(stub[MockableContainer], kind, 256.MB, expires = expires)
/** Helper to create NoData */
def noData() = NoData()
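// Taken together, the helpers above cover the container states that schedule() distinguishes:
// idle WarmedData, in-flight WarmingData and WarmingColdData, PreWarmedData and NoData.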
behavior of "ContainerPool schedule()"
it should "not provide a container if idle pool is empty" in {
ContainerPool.schedule(createAction(), standardNamespace, Map.empty) shouldBe None
}
it should "reuse an applicable warm container from idle pool with one container" in {
val data = warmedData()
val pool = Map('name -> data)
// copy to make sure referential equality doesn't suffice
ContainerPool.schedule(data.action.copy(), data.invocationNamespace, pool) shouldBe Some('name, data)
}
it should "reuse an applicable warm container from idle pool with several applicable containers" in {
val data = warmedData()
val pool = Map('first -> data, 'second -> data)
ContainerPool.schedule(data.action.copy(), data.invocationNamespace, pool) should (be(Some('first, data)) or be(
Some('second, data)))
}
it should "reuse an applicable warm container from idle pool with several different containers" in {
val matchingData = warmedData()
val pool = Map('none -> noData(), 'pre -> preWarmedData(), 'warm -> matchingData)
ContainerPool.schedule(matchingData.action.copy(), matchingData.invocationNamespace, pool) shouldBe Some(
'warm,
matchingData)
}
it should "not reuse a container from idle pool with non-warm containers" in {
val data = warmedData()
// data is **not** in the pool!
val pool = Map('none -> noData(), 'pre -> preWarmedData())
ContainerPool.schedule(data.action.copy(), data.invocationNamespace, pool) shouldBe None
}
it should "not reuse a warm container with different invocation namespace" in {
val data = warmedData()
val pool = Map('warm -> data)
val differentNamespace = EntityName(data.invocationNamespace.asString + "butDifferent")
data.invocationNamespace should not be differentNamespace
ContainerPool.schedule(data.action.copy(), differentNamespace, pool) shouldBe None
}
it should "not reuse a warm container with different action name" in {
val data = warmedData()
val differentAction = data.action.copy(name = EntityName(data.action.name.asString + "butDifferent"))
val pool = Map('warm -> data)
data.action.name should not be differentAction.name
ContainerPool.schedule(differentAction, data.invocationNamespace, pool) shouldBe None
}
it should "not reuse a warm container with different action version" in {
val data = warmedData()
val differentAction = data.action.copy(version = data.action.version.upMajor)
val pool = Map('warm -> data)
data.action.version should not be differentAction.version
ContainerPool.schedule(differentAction, data.invocationNamespace, pool) shouldBe None
}
it should "not use a container when active activation count >= maxconcurrent" in {
val concurrencyEnabled = Option(WhiskProperties.getProperty("whisk.action.concurrency")).exists(_.toBoolean)
val maxConcurrent = if (concurrencyEnabled) 25 else 1
val data = warmedData(
active = maxConcurrent,
action = createAction(limits = ActionLimits(concurrency = ConcurrencyLimit(maxConcurrent))))
val pool = Map('warm -> data)
ContainerPool.schedule(data.action, data.invocationNamespace, pool) shouldBe None
val data2 = warmedData(
active = maxConcurrent - 1,
action = createAction(limits = ActionLimits(concurrency = ConcurrencyLimit(maxConcurrent))))
val pool2 = Map('warm -> data2)
ContainerPool.schedule(data2.action, data2.invocationNamespace, pool2) shouldBe Some('warm, data2)
}
it should "use a warming when active activation count < maxconcurrent" in {
val concurrencyEnabled = Option(WhiskProperties.getProperty("whisk.action.concurrency")).exists(_.toBoolean)
val maxConcurrent = if (concurrencyEnabled) 25 else 1
val action = createAction(limits = ActionLimits(concurrency = ConcurrencyLimit(maxConcurrent)))
val data = warmingData(active = maxConcurrent - 1, action = action)
val pool = Map('warming -> data)
ContainerPool.schedule(data.action, data.invocationNamespace, pool) shouldBe Some('warming, data)
val data2 = warmedData(active = maxConcurrent - 1, action = action)
val pool2 = pool ++ Map('warm -> data2)
ContainerPool.schedule(data2.action, data2.invocationNamespace, pool2) shouldBe Some('warm, data2)
}
it should "prefer warm to warming when active activation count < maxconcurrent" in {
val concurrencyEnabled = Option(WhiskProperties.getProperty("whisk.action.concurrency")).exists(_.toBoolean)
val maxConcurrent = if (concurrencyEnabled) 25 else 1
val action = createAction(limits = ActionLimits(concurrency = ConcurrencyLimit(maxConcurrent)))
val data = warmingColdData(active = maxConcurrent - 1, action = action)
val data2 = warmedData(active = maxConcurrent - 1, action = action)
val pool = Map('warming -> data, 'warm -> data2)
ContainerPool.schedule(data.action, data.invocationNamespace, pool) shouldBe Some('warm, data2)
}
it should "use a warmingCold when active activation count < maxconcurrent" in {
val concurrencyEnabled = Option(WhiskProperties.getProperty("whisk.action.concurrency")).exists(_.toBoolean)
val maxConcurrent = if (concurrencyEnabled) 25 else 1
val action = createAction(limits = ActionLimits(concurrency = ConcurrencyLimit(maxConcurrent)))
val data = warmingColdData(active = maxConcurrent - 1, action = action)
val pool = Map('warmingCold -> data)
ContainerPool.schedule(data.action, data.invocationNamespace, pool) shouldBe Some('warmingCold, data)
//after scheduling, the pool will update with new data to set active = maxConcurrent
val data2 = warmingColdData(active = maxConcurrent, action = action)
val pool2 = Map('warmingCold -> data2)
ContainerPool.schedule(data2.action, data2.invocationNamespace, pool2) shouldBe None
}
it should "prefer warm to warmingCold when active activation count < maxconcurrent" in {
val concurrencyEnabled = Option(WhiskProperties.getProperty("whisk.action.concurrency")).exists(_.toBoolean)
val maxConcurrent = if (concurrencyEnabled) 25 else 1
val action = createAction(limits = ActionLimits(concurrency = ConcurrencyLimit(maxConcurrent)))
val data = warmingColdData(active = maxConcurrent - 1, action = action)
val data2 = warmedData(active = maxConcurrent - 1, action = action)
val pool = Map('warmingCold -> data, 'warm -> data2)
ContainerPool.schedule(data.action, data.invocationNamespace, pool) shouldBe Some('warm, data2)
}
it should "prefer warming to warmingCold when active activation count < maxconcurrent" in {
val concurrencyEnabled = Option(WhiskProperties.getProperty("whisk.action.concurrency")).exists(_.toBoolean)
val maxConcurrent = if (concurrencyEnabled) 25 else 1
val action = createAction(limits = ActionLimits(concurrency = ConcurrencyLimit(maxConcurrent)))
val data = warmingColdData(active = maxConcurrent - 1, action = action)
val data2 = warmingData(active = maxConcurrent - 1, action = action)
val pool = Map('warmingCold -> data, 'warming -> data2)
ContainerPool.schedule(data.action, data.invocationNamespace, pool) shouldBe Some('warming, data2)
}
behavior of "ContainerPool remove()"
it should "not provide a container if pool is empty" in {
ContainerPool.remove(Map.empty, MemoryLimit.STD_MEMORY) shouldBe List.empty
}
it should "not provide a container from busy pool with non-warm containers" in {
val pool = Map('none -> noData(), 'pre -> preWarmedData())
ContainerPool.remove(pool, MemoryLimit.STD_MEMORY) shouldBe List.empty
}
it should "not provide a container from pool if there is not enough capacity" in {
val pool = Map('first -> warmedData())
ContainerPool.remove(pool, MemoryLimit.STD_MEMORY * 2) shouldBe List.empty
}
it should "provide a container from pool with one single free container" in {
val data = warmedData()
val pool = Map('warm -> data)
ContainerPool.remove(pool, MemoryLimit.STD_MEMORY) shouldBe List('warm)
}
it should "provide oldest container from busy pool with multiple containers" in {
val commonNamespace = differentNamespace.asString
val first = warmedData(namespace = commonNamespace, lastUsed = Instant.ofEpochMilli(1))
val second = warmedData(namespace = commonNamespace, lastUsed = Instant.ofEpochMilli(2))
val oldest = warmedData(namespace = commonNamespace, lastUsed = Instant.ofEpochMilli(0))
val pool = Map('first -> first, 'second -> second, 'oldest -> oldest)
ContainerPool.remove(pool, MemoryLimit.STD_MEMORY) shouldBe List('oldest)
}
it should "provide a list of the oldest containers from pool, if several containers have to be removed" in {
val namespace = differentNamespace.asString
val first = warmedData(namespace = namespace, lastUsed = Instant.ofEpochMilli(1))
val second = warmedData(namespace = namespace, lastUsed = Instant.ofEpochMilli(2))
val third = warmedData(namespace = namespace, lastUsed = Instant.ofEpochMilli(3))
val oldest = warmedData(namespace = namespace, lastUsed = Instant.ofEpochMilli(0))
val pool = Map('first -> first, 'second -> second, 'third -> third, 'oldest -> oldest)
ContainerPool.remove(pool, MemoryLimit.STD_MEMORY * 2) shouldBe List('oldest, 'first)
}
it should "provide oldest container (excluding concurrently busy) from busy pool with multiple containers" in {
val commonNamespace = differentNamespace.asString
val first = warmedData(namespace = commonNamespace, lastUsed = Instant.ofEpochMilli(1), active = 0)
val second = warmedData(namespace = commonNamespace, lastUsed = Instant.ofEpochMilli(2), active = 0)
val oldest = warmedData(namespace = commonNamespace, lastUsed = Instant.ofEpochMilli(0), active = 3)
var pool = Map('first -> first, 'second -> second, 'oldest -> oldest)
ContainerPool.remove(pool, MemoryLimit.STD_MEMORY) shouldBe List('first)
pool = pool - 'first
ContainerPool.remove(pool, MemoryLimit.STD_MEMORY) shouldBe List('second)
}
it should "remove expired in order of expiration" in {
val poolConfig = ContainerPoolConfig(0.MB, 0.5, false, 2.second, 10.seconds, None, 1, 3, false, 1.second)
val exec = CodeExecAsString(RuntimeManifest("actionKind", ImageName("testImage")), "testCode", None)
//use a second kind so that we know sorting is not done separately per kind
val exec2 = CodeExecAsString(RuntimeManifest("actionKind2", ImageName("testImage")), "testCode", None)
val memoryLimit = 256.MB
val prewarmConfig =
List(
PrewarmingConfig(1, exec, memoryLimit, Some(ReactivePrewarmingConfig(0, 10, 10.seconds, 1, 1))),
PrewarmingConfig(1, exec2, memoryLimit, Some(ReactivePrewarmingConfig(0, 10, 10.seconds, 1, 1))))
val oldestDeadline = Deadline.now - 1.seconds
val newerDeadline = Deadline.now
val newestDeadline = Deadline.now + 1.seconds
val prewarmedPool = Map(
'newest -> preWarmedData("actionKind", Some(newestDeadline)),
'oldest -> preWarmedData("actionKind2", Some(oldestDeadline)),
'newer -> preWarmedData("actionKind", Some(newerDeadline)))
lazy val stream = new ByteArrayOutputStream
lazy val printstream = new PrintStream(stream)
lazy implicit val logging: Logging = new PrintStreamLogging(printstream)
ContainerPool.removeExpired(poolConfig, prewarmConfig, prewarmedPool) shouldBe (List('oldest))
}
it should "remove only the prewarmExpirationLimit of expired prewarms" in {
//limit prewarm removal to 2
val poolConfig = ContainerPoolConfig(0.MB, 0.5, false, 2.second, 10.seconds, None, 2, 3, false, 1.second)
val exec = CodeExecAsString(RuntimeManifest("actionKind", ImageName("testImage")), "testCode", None)
val memoryLimit = 256.MB
val prewarmConfig =
List(PrewarmingConfig(3, exec, memoryLimit, Some(ReactivePrewarmingConfig(0, 10, 10.seconds, 1, 1))))
//all are overdue, with different expiration times
val oldestDeadline = Deadline.now - 5.seconds
val newerDeadline = Deadline.now - 4.seconds
//the newest* ones are also expired, but unlike the two oldest they fall outside the removal limit of 2, so they won't be removed
val newestDeadline = Deadline.now - 3.seconds
val newestDeadline2 = Deadline.now - 2.seconds
val newestDeadline3 = Deadline.now - 1.seconds
val prewarmedPool = Map(
'newest -> preWarmedData("actionKind", Some(newestDeadline)),
'oldest -> preWarmedData("actionKind", Some(oldestDeadline)),
'newest3 -> preWarmedData("actionKind", Some(newestDeadline3)),
'newer -> preWarmedData("actionKind", Some(newerDeadline)),
'newest2 -> preWarmedData("actionKind", Some(newestDeadline2)))
lazy val stream = new ByteArrayOutputStream
lazy val printstream = new PrintStream(stream)
lazy implicit val logging: Logging = new PrintStreamLogging(printstream)
ContainerPool.removeExpired(poolConfig, prewarmConfig, prewarmedPool) shouldBe (List('oldest, 'newer))
}
it should "remove only the expired prewarms regardless of minCount" in {
//limit prewarm removal to 100
val poolConfig = ContainerPoolConfig(0.MB, 0.5, false, 2.second, 10.seconds, None, 100, 3, false, 1.second)
val exec = CodeExecAsString(RuntimeManifest("actionKind", ImageName("testImage")), "testCode", None)
val memoryLimit = 256.MB
//minCount is 2, but expired prewarms are removed regardless of minCount
val prewarmConfig =
List(PrewarmingConfig(3, exec, memoryLimit, Some(ReactivePrewarmingConfig(2, 10, 10.seconds, 1, 1))))
//all are overdue, with different expiration times
val oldestDeadline = Deadline.now - 5.seconds
val newerDeadline = Deadline.now - 4.seconds
//all of these are expired and will be removed, even though that leaves fewer than minCount prewarms
val newestDeadline = Deadline.now - 3.seconds
val newestDeadline2 = Deadline.now - 2.seconds
val newestDeadline3 = Deadline.now - 1.seconds
val prewarmedPool = Map(
'newest -> preWarmedData("actionKind", Some(newestDeadline)),
'oldest -> preWarmedData("actionKind", Some(oldestDeadline)),
'newest3 -> preWarmedData("actionKind", Some(newestDeadline3)),
'newer -> preWarmedData("actionKind", Some(newerDeadline)),
'newest2 -> preWarmedData("actionKind", Some(newestDeadline2)))
lazy val stream = new ByteArrayOutputStream
lazy val printstream = new PrintStream(stream)
lazy implicit val logging: Logging = new PrintStreamLogging(printstream)
ContainerPool.removeExpired(poolConfig, prewarmConfig, prewarmedPool) shouldBe (List(
'oldest,
'newer,
'newest,
'newest2,
'newest3))
}
}
| style95/openwhisk | tests/src/test/scala/org/apache/openwhisk/core/containerpool/test/ContainerPoolTests.scala | Scala | apache-2.0 | 56,474 |
package net.shiroka.tools.ofx.conversions
import com.typesafe.config.ConfigFactory
import org.specs2.mutable._
import net.shiroka.tools.ofx._
class SmbcFixedSpec extends SpecificationLike {
"SmbcFixedConversion" >> {
"#apply" >> {
"it generates OFX statement" in {
val config = ConfigFactory.load().getConfig("net.shiroka.tools.ofx.conversions.smbc-fixed")
val conversion = SmbcFixed(config)
val src = getClass.getResourceAsStream("/smbc-fixed.csv")
val result = printToBaos(out => conversion(src, out)).toString
result must contain("<ACCTID>1001000301</ACCTID>")
result must contain("<STMTTRN>")
result must contain("<MEMO>山田太郎; NHK 放送受信料; NHK29ネン12ガツ—30ネン / 1ガツ</MEMO>")
result must contain("<MEMO>山田花子; イロハバンクM(11月分)</MEMO>")
}
}
}
}
| ikuo/ofx-tools | src/test/scala/net/shiroka/tools/ofx/conversions/SmbcFixedSpec.scala | Scala | mit | 925 |
package wvlet.log
/**
*
*/
class JSConsoleLogHandlerTest extends Spec {
"JSConsoleLogHandler" should {
Logger.setDefaultHandler(JSConsoleLogHandler())
error("error message")
warn("warn message")
info("info message")
debug("debug message")
trace("trace message")
}
}
| wvlet/log | wvlet-log/js/src/test/scala/wvlet/log/JSConsoleLogHandlerTest.scala | Scala | apache-2.0 | 302 |
package mypipe.mysql
import scala.util.Try
case class BinaryLogFilePosition(filename: String, pos: Long) {
override def toString(): String = s"$filename:$pos"
override def equals(o: Any): Boolean = {
o != null &&
filename.equals(o.asInstanceOf[BinaryLogFilePosition].filename) &&
pos.equals(o.asInstanceOf[BinaryLogFilePosition].pos)
}
}
object BinaryLogFilePosition {
val current = BinaryLogFilePosition("", 0)
def fromString(saved: String) = Try {
val parts = saved.split(":")
BinaryLogFilePosition(parts(0), parts(1).toLong)
}.toOption
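// e.g. fromString("mysql-bin.000001:120") == Some(BinaryLogFilePosition("mysql-bin.000001", 120)),
// while malformed input such as fromString("no-position") yields None.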
}
| tramchamploo/mypipe | mypipe-api/src/main/scala/mypipe/mysql/BinaryLogFilePosition.scala | Scala | apache-2.0 | 581 |
import BIDMat.{CMat,CSMat,DMat,Dict,FMat,FND,GMat,GDMat,GIMat,GLMat,GSMat,GSDMat,HMat,IDict,Image,IMat,LMat,Mat,SMat,SBMat,SDMat}
import BIDMat.MatFunctions._
import BIDMat.SciFunctions._
import BIDMat.Solvers._
import BIDMat.Plotting._
import BIDMach.Learner
import BIDMach.models.{FM,GLM,KMeans,KMeansw,LDA,LDAgibbs,Model,NMF,SFA,RandomForest}
import BIDMach.networks.{DNN}
import BIDMach.datasources.{DataSource,MatDS,FilesDS,SFilesDS}
import BIDMach.mixins.{CosineSim,Perplexity,Top,L1Regularizer,L2Regularizer}
import BIDMach.updaters.{ADAGrad,Batch,BatchNorm,IncMult,IncNorm,Telescoping}
import BIDMach.causal.{IPTW}
Mat.checkMKL
Mat.checkCUDA
| yanqingmen/BIDMach | lib/bidmach_init.scala | Scala | bsd-3-clause | 652 |
// Based on sbt-protobuf's Protobuf Plugin
// https://github.com/sbt/sbt-protobuf
package com.trueaccord.scalapb
import java.io.File
import com.trueaccord.scalapb.compiler.{PosixProtocDriver, WindowsProtocDriver, ProtocDriver}
import sbt.Keys._
import sbt._
import sbtprotobuf.{ProtobufPlugin => PB}
@deprecated("Please switch to sbt-protoc: http://trueaccord.github.io/ScalaPB/migrating.html", "0.5.43")
object ScalaPbPlugin extends Plugin {
// Set up aliases to SbtProtobuf tasks
val includePaths = PB.includePaths
val protoc = PB.protoc
val runProtoc = TaskKey[Seq[String] => Int]("scalapb-run-protoc", "A function that executes the protobuf compiler with the given arguments, returning the exit code of the compilation run.")
val externalIncludePath = PB.externalIncludePath
val generatedTargets = PB.generatedTargets
val generate = PB.generate
val unpackDependencies = PB.unpackDependencies
val protocOptions = PB.protocOptions
val javaConversions = SettingKey[Boolean]("scalapb-java-conversions", "Generate Scala-Java protocol buffer conversions")
val flatPackage = SettingKey[Boolean]("scalapb-flat-package", "Do not generate a package for each file")
val grpc = SettingKey[Boolean]("scalapb-grpc", "Generate Grpc stubs for services")
val singleLineToString = SettingKey[Boolean]("scalapb-single-line-to-string", "Messages toString() method generates single line")
val scalapbVersion = SettingKey[String]("scalapb-version", "ScalaPB version.")
val pythonExecutable = SettingKey[String]("python-executable", "Full path for a Python.exe (needed only on Windows)")
val protobufConfig = PB.protobufConfig
val protocDriver = TaskKey[ProtocDriver]("scalapb-protoc-driver", "Protoc driver")
val protobufSettings = PB.protobufSettings ++ inConfig(protobufConfig)(Seq[Setting[_]](
scalaSource <<= (sourceManaged in Compile) { _ / "compiled_protobuf" },
javaConversions := false,
grpc := true,
flatPackage := false,
singleLineToString := false,
scalapbVersion := com.trueaccord.scalapb.plugin.Version.scalaPbVersion,
pythonExecutable := "python",
protocDriver <<= protocDriverTask,
generatedTargets <<= (javaConversions in PB.protobufConfig,
javaSource in PB.protobufConfig, scalaSource in PB.protobufConfig) {
(javaConversions, javaSource, scalaSource) =>
(scalaSource, "*.scala") +:
(if (javaConversions)
Seq((javaSource, "*.java"))
else
Nil)
},
version := "2.6.1",
// Set protobuf's runProtoc runner with our runner..
runProtoc <<= (protoc, streams) map ((cmd, s) => args => Process(cmd, args) ! s.log),
PB.runProtoc := protocDriver.value.buildRunner((runProtoc in PB.protobufConfig).value),
protocOptions <++= (generatedTargets in protobufConfig,
javaConversions in protobufConfig,
flatPackage in protobufConfig,
grpc in protobufConfig,
singleLineToString in protobufConfig) {
(generatedTargets, javaConversions, flatPackage, grpc, singleLineToString) =>
def makeParams(params: (Boolean, String)*) = params
.collect {
case (true, paramName) => paramName
}.mkString(",")
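// e.g. makeParams(true -> "grpc", false -> "flat_package") yields "grpc"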
generatedTargets.find(_._2.endsWith(".scala")) match {
case Some(targetForScala) =>
val params = makeParams(
javaConversions -> "java_conversions",
flatPackage -> "flat_package",
grpc -> "grpc",
singleLineToString -> "single_line_to_string")
Seq(s"--scala_out=$params:${targetForScala._1.absolutePath}")
case None => Nil
}
})) ++ Seq[Setting[_]](
libraryDependencies <++= (scalapbVersion in protobufConfig) {
runtimeVersion =>
Seq(
"com.trueaccord.scalapb" %% "scalapb-runtime" % runtimeVersion
)
})
private def isWindows: Boolean = sys.props("os.name").startsWith("Windows")
private def protocDriverTask = (pythonExecutable in protobufConfig) map {
pythonExecutable =>
if (isWindows) new WindowsProtocDriver(pythonExecutable)
else new PosixProtocDriver
}
}
| trueaccord/sbt-scalapb | src/main/scala/com/trueaccord/scalapb/ScalaPbPlugin.scala | Scala | apache-2.0 | 4,197 |
package com.twitter.finagle.context
import com.twitter.finagle.service.DeadlineOnlyToggle
import com.twitter.finagle.tracing.Trace
import com.twitter.finagle.util.ByteArrays
import com.twitter.io.Buf
import com.twitter.util.Duration
import com.twitter.util.Return
import com.twitter.util.Throw
import com.twitter.util.Time
import com.twitter.util.Try
/**
* A deadline is the time by which some action (e.g., a request) must
* complete. A deadline has a timestamp in addition to the deadline.
* This timestamp denotes the time at which the deadline was enacted.
*
* This is done so that they may be reconciled over process boundaries;
* e.g., to account for variable latencies in message deliveries.
*
* @param timestamp the time at which the deadline was enacted.
*
* @param deadline the time by which the action must complete.
*/
case class Deadline(timestamp: Time, deadline: Time) extends Ordered[Deadline] {
def compare(that: Deadline): Int = this.deadline.compare(that.deadline)
def expired: Boolean = Time.now > deadline
def remaining: Duration = deadline - Time.now
}
/**
* A broadcast context for deadlines.
*/
object Deadline extends Contexts.broadcast.Key[Deadline]("com.twitter.finagle.Deadline") {
/**
* Returns the current request's deadline, if set.
*/
def current: Option[Deadline] =
Contexts.broadcast.get(Deadline)
/**
* Same as [[current]] but would return `None` if the current request isn't toggled. This is
* useful as a temporary API for those willing to transition to deadlines outside of regular
* Finagle stack (clients and servers).
*
* Note: this method would eventually be removed and replaced with just [[current]].
*/
private[twitter] def currentToggled: Option[Deadline] =
if (DeadlineOnlyToggle(Trace())) current
else None
/**
* Construct a deadline from a timeout.
*/
def ofTimeout(timeout: Duration): Deadline = {
val now = Time.now
Deadline(now, now + timeout)
}
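// For example, if Time.now is `t` at the call site, ofTimeout(5.seconds) == Deadline(t, t + 5.seconds).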
/**
* Construct a new deadline, representing the combined deadline
* `d1` and `d2`. Specifically, the returned deadline has the
* earliest deadline but the latest timestamp. This represents the
* strictest deadline and the latest observation.
*/
def combined(d1: Deadline, d2: Deadline): Deadline =
Deadline(d1.timestamp max d2.timestamp, d1.deadline min d2.deadline)
/**
* Marshal deadline to byte buffer, deadline.timestamp and deadline.deadline
* must not be Time.Top, Time.Bottom or Time.Undefined
*/
def marshal(deadline: Deadline): Buf = {
val bytes = new Array[Byte](16)
ByteArrays.put64be(bytes, 0, deadline.timestamp.inNanoseconds)
ByteArrays.put64be(bytes, 8, deadline.deadline.inNanoseconds)
Buf.ByteArray.Owned(bytes)
}
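// Reads 8 bytes big-endian starting at `offset`; the inverse of ByteArrays.put64be used in marshal above.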
private[this] def readBigEndianLong(b: Buf, offset: Int): Long = {
((b.get(offset) & 0xff).toLong << 56) |
((b.get(offset + 1) & 0xff).toLong << 48) |
((b.get(offset + 2) & 0xff).toLong << 40) |
((b.get(offset + 3) & 0xff).toLong << 32) |
((b.get(offset + 4) & 0xff).toLong << 24) |
((b.get(offset + 5) & 0xff).toLong << 16) |
((b.get(offset + 6) & 0xff).toLong << 8) |
(b.get(offset + 7) & 0xff).toLong
}
def tryUnmarshal(body: Buf): Try[Deadline] = {
if (body.length != 16)
return Throw(
new IllegalArgumentException(s"Invalid body. Length ${body.length} but required 16")
)
val timestamp = readBigEndianLong(body, 0)
val deadline = readBigEndianLong(body, 8)
Return(Deadline(Time.fromNanoseconds(timestamp), Time.fromNanoseconds(deadline)))
}
}
| twitter/finagle | finagle-core/src/main/scala/com/twitter/finagle/context/Deadline.scala | Scala | apache-2.0 | 3,617 |
package com.kumar.wordfinder
import java.io.FileInputStream
import org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream
object Dictionary {
val bs = scala.io.Source.fromInputStream(new GzipCompressorInputStream(new FileInputStream("words.txt.gz")))
val set = scala.collection.mutable.HashSet[String]()
for(line <- bs.getLines) {
set.add(line.toLowerCase)
}
bs.close
println(s"Finished reading words, total: ${set.size}")
def contains(s: String): Boolean = set.contains(s.toLowerCase)
}
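// Hedged usage sketch, not part of the original file: assumes a gzip-compressed word list at
// ./words.txt.gz (one word per line), which the Dictionary object above loads eagerly.
object DictionaryUsageSketch extends App {
  // Lookups are case-insensitive because both the stored words and the query are lower-cased.
  println(Dictionary.contains("Apple"))
  println(Dictionary.contains("zzzzzz"))
}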
|
skumargithub/Stuff
|
src/main/scala/com/kumar/wordfinder/Dictionary.scala
|
Scala
|
gpl-3.0
| 529
|
/*
* Copyright (c) 2011-14 Miles Sabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shapeless
import org.junit.Test
import org.junit.Assert._
class ConversionTests {
import ops.function.FnToProduct
import syntax.std.function._
import syntax.std.tuple._
import test._
@Test
def testTuples {
val t1 = (23, "foo", 2.0, true)
val h1 = t1.productElements
typed[Int :: String :: Double :: Boolean :: HNil](h1)
assertEquals(23 :: "foo" :: 2.0 :: true :: HNil, h1)
val h2 = productElements(t1)
typed[Int :: String :: Double :: Boolean :: HNil](h2)
assertEquals(23 :: "foo" :: 2.0 :: true :: HNil, h2)
val l2 = 23 :: "foo" :: 2.0 :: true :: HNil
val t3 = l2.tupled
typed[(Int, String, Double, Boolean)](t3)
assertEquals((23, "foo", 2.0, true), t3)
val t4 = tupled(l2)
typed[(Int, String, Double, Boolean)](t4)
assertEquals((23, "foo", 2.0, true), t4)
val t5 = (23, "foo")
val t6 = (false, 3.0)
val t7 = (t5.productElements ::: t6.productElements).tupled
typed[(Int, String, Boolean, Double)](t7)
assertEquals((23, "foo", false, 3.0), t7)
val t8 = (Set(2), Set("foo"))
val t8b = (t8.productElements map choose).tupled
typed[(Option[Int], Option[String])](t8b)
assertEquals((Option(2), Option("foo")), t8b)
}
@Test
def testFunctions {
val sum : (Int, Int) => Int = _+_
val prd : (Int, Int, Int) => Int = _*_*_
val hlsum = sum.toProduct
typed[(Int :: Int :: HNil) => Int](hlsum)
val hlprd = prd.toProduct
typed[(Int :: Int :: Int :: HNil) => Int](hlprd)
trait A
trait B extends A
trait C extends A
val a = new A {}
val b = new B {}
val ab : A => B = (a : A) => b
val hlab = ab.toProduct
typed[(A :: HNil) => B](hlab)
def foo[F, L <: HList, R](f : F, l : L)(implicit fntp: FnToProduct.Aux[F, L => R]) = fntp(f)(l)
val s2 = foo(sum, 2 :: 3 :: HNil)
val ab2 = foo(ab, a :: HNil)
}
@Test
def testCaseClasses {
case class Foo(a : Int, b : String, c : Double)
val f1 = Foo(23, "foo", 2.3)
val t1 = Foo.unapply(f1).get
val hf = t1.productElements
val f2 = Foo.tupled(hf.tupled)
assertEquals(f1, f2)
}
}
|
mandubian/shapeless
|
core/src/test/scala/shapeless/conversions.scala
|
Scala
|
apache-2.0
| 2,808
|
package controllers.application
import controllers.Application
import uk.gov.homeoffice.drt.auth.Roles.ArrivalsAndSplitsView
import drt.shared._
import drt.shared.redlist.RedList
import play.api.mvc.{Action, AnyContent}
import services.graphstages.Crunch
import services.{AirportToCountry, SDate}
trait WithAirportInfo {
self: Application =>
def getAirportInfo: Action[AnyContent] = authByRole(ArrivalsAndSplitsView) {
Action { request =>
import upickle.default._
val res: Map[PortCode, AirportInfo] = request.queryString.get("portCode")
.flatMap(_.headOption)
.map(codes => codes
.split(",")
.map(code => (PortCode(code), AirportToCountry.airportInfoByIataPortCode.get(code)))
.collect {
case (code, Some(info)) => (code, info)
}
) match {
case Some(airportInfoTuples) => airportInfoTuples.toMap
case None => Map()
}
Ok(write(res))
}
}
def getRedListPorts(dateString: String): Action[AnyContent] = authByRole(ArrivalsAndSplitsView) {
Action { _ =>
import upickle.default._
val forDate = SDate(dateString, Crunch.europeLondonTimeZone).millisSinceEpoch
val redListPorts = AirportToCountry.airportInfoByIataPortCode.values.collect {
case AirportInfo(_, _, country, portCode) if RedList.countryCodesByName(forDate).contains(country) => PortCode(portCode)
}
Ok(write(redListPorts))
}
}
}
|
UKHomeOffice/drt-scalajs-spa-exploration
|
server/src/main/scala/controllers/application/WithAirportInfo.scala
|
Scala
|
apache-2.0
| 1,474
|
package com.faacets.qalg
package algos
import scala.{specialized => sp}
import spire.algebra._
import algebra._
trait PackEuclideanRingMutable[@sp(Double, Long) A] extends Any with PackEuclideanRing[A] with PackRingMutable[A] {
implicit def MGramSchmidt: MutableMGramSchmidt[M]
implicit def MPrime: MutablePrime[M, A]
implicit def VPrime: MutablePrime[V, A]
}
object PackEuclideanRingMutable {
type ForM[M0, A] = PackEuclideanRingMutable[A] { type M = M0 }
type ForV[V0, A] = PackEuclideanRingMutable[A] { type V = V0 }
type ForMV[M0, V0, A] = PackEuclideanRingMutable[A] { type M = M0; type V = V0 }
}
|
denisrosset/qalg
|
core/src/main/scala/qalg/algos/PackEuclideanRingMutable.scala
|
Scala
|
mit
| 620
|
package com.wixpress.petri.petri
import java.util
import java.util.UUID
import com.wixpress.petri.experiments.domain.{Experiment, ExperimentSpec}
/**
* @author Dalias
* @since 3/22/15
*/
trait PetriDeveloperApi {
def getFullUserState(userGuid: UUID): UserState
def migrateStartEndDates(): Unit
  def addSpecNoValidation(spec: ExperimentSpec): Unit
def fetchExperimentsGoingToEndDueToDate(minutesEnded: Int): util.List[Experiment]
}
|
wix/petri
|
petri-server-core/src/main/java/com/wixpress/petri/petri/PetriDeveloperApi.scala
|
Scala
|
bsd-3-clause
| 449
|
package DAO
import scala.concurrent.{Future => ScalaFuture}
import com.websudos.phantom.Implicits._
import scala.concurrent.Await
import scala.concurrent.duration._
case class PageRow(
address: String,
title: Option[String],
rank: Int,
cacheID: Option[java.util.UUID]
)
sealed class PageTable extends CassandraTable[PageTable, PageRow]{
object address extends StringColumn(this) with PartitionKey[String]
object title extends OptionalStringColumn(this)
object rank extends IntColumn(this)
object cacheID extends OptionalUUIDColumn(this)
def fromRow(r: Row): PageRow = {
PageRow(
address(r),
title(r),
rank(r),
cacheID(r)
)
}
}
object PageTable extends PageTable with SearchEngineConnector{
override lazy val tableName = "Pages"
val s: Session = session
s.execute("USE system;")
val result = s.execute(s"SELECT columnfamily_name FROM schema_columnfamilies WHERE keyspace_name='$keySpace';").all()
s.execute(s"USE $keySpace;")
if(!result.contains(tableName.toLowerCase)){
Await.result(PageTable.create.future(), 5000 millis)
}
def addNewPage(r: PageRow) : ScalaFuture[ResultSet] = {
insert.value(_.address, r.address)
.value(_.title, r.title)
.value(_.rank, r.rank)
.value(_.cacheID, r.cacheID)
.future()
}
def checkExistance(url: String): ScalaFuture[Boolean] = {
select.where(_.address eqs url).one().map{
case Some(page) => true
case None => false
}
}
def updateTitleOfPage(url: String, title: Option[String]): ScalaFuture[ResultSet] = {
update.where(_.address eqs url).modify(_.title setTo title).future()
}
def increasePageRank(url: String): Unit = {
println(s"Increasing pRank of $url")
select.where(_.address eqs url).one().collect{
case Some(page)=>
val r = page.rank
update.where(_.address eqs url).modify(_.rank setTo r+1).future()
case None => None
}
}
def updateCacheID(url: String, id: java.util.UUID): ScalaFuture[ResultSet] = {
update.where(_.address eqs url).modify(_.cacheID setTo Some(id)).future()
}
}
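// Hedged usage sketch, not part of the original file: assumes the Cassandra cluster configured
// by SearchEngineConnector is reachable. The URL and title values are illustrative only.
object PageTableUsageSketch {
  import scala.concurrent.Await
  import scala.concurrent.duration._

  def example(): Unit = {
    val row = PageRow("http://example.com", Some("Example Domain"), 0, None)
    Await.result(PageTable.addNewPage(row), 10.seconds)
    val exists = Await.result(PageTable.checkExistance("http://example.com"), 10.seconds)
    println(s"page stored: $exists")
  }
}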
|
gafiatulin/scala-search-engine
|
Crawler/src/main/scala/DAO/Pages.scala
|
Scala
|
mit
| 2,112
|
package SMART
import Chisel._
class InvcArbiter extends Module {
val io = new Bundle {
val flitInValid = Vec.fill(NUM_OF_DIRS) {Vec.fill(NUM_OF_VC) {Bool(INPUT)}}
val flitIn = Vec.fill(NUM_OF_DIRS) {Vec.fill(NUM_OF_VC) {new Flit().asInput}}
val flitOutValid = Vec.fill(NUM_OF_DIRS) {Bool(OUTPUT)}
val flitOut = Vec.fill(NUM_OF_DIRS) {new Flit().asOutput}
val vcidOut = Vec.fill(NUM_OF_DIRS) {UInt(OUTPUT, width = VCID_WIDTH)}
}
// ------------ Input VC Arbitration (SA-i) -------------
val invcArbs = Vec.fill(NUM_OF_DIRS) {
Module(new MatrixArbiter(n = NUM_OF_VC)).io
}
for (i <- 0 until NUM_OF_DIRS) {
invcArbs(i).enable := UInt(1)
invcArbs(i).requests := io.flitInValid(i).toBits().toUInt()
when (PopCount(invcArbs(i).grants)!=UInt(0)) {
io.vcidOut(i) := OHToUInt(invcArbs(i).grants)
assert(io.flitInValid(i)(io.vcidOut(i))===Bool(true), "Flit must be valid when it passes invc arbitration")
io.flitOutValid(i) := Bool(true)
} .otherwise {
io.flitOutValid(i) := Bool(false)
io.vcidOut(i) := UInt(0)
}
io.flitOut(i) := io.flitIn(i)(io.vcidOut(i))
}
}
class InvcArbiterTests(c: InvcArbiter) extends Tester(c) {
poke(c.io.flitInValid(0)(0), 1)
poke(c.io.flitInValid(0)(1), 1)
poke(c.io.flitIn(0)(0).data, 1)
poke(c.io.flitIn(0)(1).data, 2)
peek(c.io.flitOutValid(0))
peek(c.io.flitOut(0))
peek(c.io.vcidOut(0))
step(1)
poke(c.io.flitInValid(0)(0), 1)
poke(c.io.flitInValid(0)(1), 1)
poke(c.io.flitIn(0)(0).data, 1)
poke(c.io.flitIn(0)(1).data, 2)
peek(c.io.flitOutValid(0))
peek(c.io.flitOut(0))
peek(c.io.vcidOut(0))
step(1)
poke(c.io.flitInValid(0)(0), 0)
poke(c.io.flitInValid(0)(1), 1)
poke(c.io.flitIn(0)(0).data, 1)
poke(c.io.flitIn(0)(1).data, 2)
peek(c.io.flitOutValid(0))
peek(c.io.flitOut(0))
peek(c.io.vcidOut(0))
step(1)
poke(c.io.flitInValid(0)(0), 0)
poke(c.io.flitInValid(0)(1), 0)
poke(c.io.flitIn(0)(0).data, 1)
poke(c.io.flitIn(0)(1).data, 2)
peek(c.io.flitOutValid(0))
peek(c.io.flitOut(0))
peek(c.io.vcidOut(0))
}
|
hyoukjun/OpenSMART
|
Backend/Chisel/InvcArbiter.scala
|
Scala
|
mit
| 2,331
|
/*
* Copyright 2015 ligaDATA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ligadata.keyvaluestore.cassandra
import com.ligadata.keyvaluestore._
import com.datastax.driver.core.Cluster
import com.datastax.driver.core.Session
import com.datastax.driver.core.querybuilder.Insert
import com.datastax.driver.core.ResultSet
import com.datastax.driver.core.ConsistencyLevel
import com.datastax.driver.core.BatchStatement
import java.nio.ByteBuffer
import org.apache.logging.log4j._
import com.ligadata.Exceptions._
import com.ligadata.Exceptions.StackTrace
/*
  You open a connection to a cluster at hostname[,hostname]:port.
  You may provide a username/password.
  You can operate on a keyspace / table:
    if the keyspace is missing we will try to create it;
    if the table is missing we will try to create it.
 -- Let's start with this schema
 --
  CREATE KEYSPACE default WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': '4' };
  USE default;
  CREATE TABLE default (key blob, value blob, primary key(key) );
*/
class KeyValueCassandraTx(owner: DataStore) extends Transaction {
var parent: DataStore = owner
def add(source: IStorage) = { owner.add(source) }
def put(source: IStorage) = { owner.put(source) }
def get(key: Key, target: IStorage) = { owner.get(key, target) }
def get(key: Key, handler: (Value) => Unit) = { owner.get(key, handler) }
def del(key: Key) = { owner.del(key) }
def del(source: IStorage) = { owner.del(source) }
def getAllKeys(handler: (Key) => Unit) = { owner.getAllKeys(handler) }
def putBatch(sourceArray: Array[IStorage]) = { owner.putBatch(sourceArray) }
def delBatch(keyArray: Array[Key]) = { owner.delBatch(keyArray) }
}
class KeyValueCassandra(parameter: PropertyMap) extends DataStore {
val loggerName = this.getClass.getName
val logger = LogManager.getLogger(loggerName)
// Read all cassandra parameters
var hostnames = parameter.getOrElse("hostlist", "localhost");
var keyspace = parameter.getOrElse("schema", "default");
var table = parameter.getOrElse("table", "default");
var replication_class = parameter.getOrElse("replication_class", "SimpleStrategy")
var replication_factor = parameter.getOrElse("replication_factor", "1")
val consistencylevelRead = ConsistencyLevel.valueOf(parameter.getOrElse("ConsistencyLevelRead", "ONE"))
val consistencylevelWrite = ConsistencyLevel.valueOf(parameter.getOrElse("ConsistencyLevelWrite", "ANY"))
val consistencylevelDelete = ConsistencyLevel.valueOf(parameter.getOrElse("ConsistencyLevelDelete", "ANY"))
var clusterBuilder = Cluster.builder()
var cluster: Cluster = _
var session: Session = _
var keyspace_exists = false
try {
clusterBuilder.addContactPoints(hostnames)
if (parameter.contains("user"))
clusterBuilder.withCredentials(parameter("user"), parameter.getOrElse("password", ""))
cluster = clusterBuilder.build()
if (cluster.getMetadata().getKeyspace(keyspace) == null){
logger.warn("The keyspace " + keyspace + " doesn't exist yet, we will create a new keyspace and continue")
// create a session that is not associated with a key space yet so we can create one if needed
session = cluster.connect();
// create keyspace if not exists
val createKeySpaceStmt = "CREATE KEYSPACE IF NOT EXISTS " + keyspace + " with replication = {'class':'" + replication_class + "', 'replication_factor':" + replication_factor + "};"
try {
session.execute(createKeySpaceStmt);
} catch {
case e: Exception => {
val stackTrace = StackTrace.ThrowableTraceString(e)
logger.debug("StackTrace:"+stackTrace)
throw new CreateKeySpaceFailedException("Unable to create keyspace " + keyspace + ":" + e.getMessage())
}
}
      // make sure the session is associated with the new keyspace; this can be expensive if we create/recycle sessions too often
session.close()
session = cluster.connect(keyspace)
}
else{
keyspace_exists = true
session = cluster.connect(keyspace)
}
} catch {
case e: Exception => {
val stackTrace = StackTrace.ThrowableTraceString(e)
logger.debug("StackTrace:"+stackTrace)
throw new ConnectionFailedException("Unable to connect to cassandra at " + hostnames + ":" + e.getMessage())
}
}
// Check if table exists or create if needed
val createTblStmt = "CREATE TABLE IF NOT EXISTS " + table + " (key blob, value blob, primary key(key) );"
session.execute(createTblStmt);
//
var insertStmt = session.prepare("INSERT INTO " + table + " (key, value) values(?, ?);")
var insertStmt1 = session.prepare("INSERT INTO " + table + " (key, value) values(?, ?) IF NOT EXISTS;")
var selectStmt = session.prepare("SELECT value FROM " + table + " WHERE key = ?;")
var selectAllKeysStmt = session.prepare("SELECT key FROM " + table + ";")
var deleteStmt = session.prepare("DELETE from " + table + " WHERE Key=?;")
var updateStmt = session.prepare("UPDATE " + table + " SET value = ? WHERE Key=?;")
def add(source: IStorage) =
{
var key = ByteBuffer.wrap(source.Key.toArray[Byte]);
var value = ByteBuffer.wrap(source.Value.toArray[Byte]);
val e = session.execute(insertStmt1.bind(key, value).setConsistencyLevel(consistencylevelWrite))
if (e.getColumnDefinitions().size() > 1)
throw new Exception("not applied")
}
def put(source: IStorage) =
{
var key = ByteBuffer.wrap(source.Key.toArray[Byte]);
var value = ByteBuffer.wrap(source.Value.toArray[Byte]);
session.execute(updateStmt.bind(value, key).setConsistencyLevel(consistencylevelWrite))
}
def putBatch(sourceArray: Array[IStorage]) =
{
val batch = new BatchStatement
sourceArray.foreach(source => {
var key = ByteBuffer.wrap(source.Key.toArray[Byte]);
var value = ByteBuffer.wrap(source.Value.toArray[Byte]);
batch.add(updateStmt.bind(value, key));
})
session.execute(batch);
}
def delBatch(keyArray: Array[Key]) =
{
val batch = new BatchStatement
keyArray.foreach(k => {
var key = ByteBuffer.wrap(k.toArray[Byte]);
batch.add(deleteStmt.bind(key));
})
session.execute(batch);
}
def get(key: Key, handler: (Value) => Unit) =
{
val key1 = ByteBuffer.wrap(key.toArray[Byte]);
val rs = session.execute(selectStmt.bind(key1).setConsistencyLevel(consistencylevelRead))
if (rs.getAvailableWithoutFetching() == 0) {
throw new KeyNotFoundException("Key Not found")
}
// Construct the output value
// BUGBUG-jh-20140703: There should be a more concise way to get the data
//
val value = new Value
val buffer: ByteBuffer = rs.one().getBytes(0)
if (buffer != null) {
while (buffer.hasRemaining())
value += buffer.get()
} else {
throw new KeyNotFoundException("Key Not found")
}
handler(value)
}
def get(key: Key, target: IStorage) =
{
val key1 = ByteBuffer.wrap(key.toArray[Byte]);
val rs = session.execute(selectStmt.bind(key1).setConsistencyLevel(consistencylevelRead))
if (rs.getAvailableWithoutFetching() == 0) {
throw new KeyNotFoundException("Key Not found")
}
// Construct the output value
// BUGBUG-jh-20140703: There should be a more concise way to get the data
//
val value = new Value
val buffer: ByteBuffer = rs.one().getBytes(0)
if (buffer != null) {
while (buffer.hasRemaining())
value += buffer.get()
} else {
throw new KeyNotFoundException("Key Not found")
}
target.Construct(key, value)
}
def del(key: Key) =
{
val key1 = ByteBuffer.wrap(key.toArray[Byte]);
session.execute(deleteStmt.bind(key1).setConsistencyLevel(consistencylevelDelete))
}
def del(source: IStorage) = { del(source.Key) }
def beginTx(): Transaction = { new KeyValueCassandraTx(this) }
def endTx(tx: Transaction) = {}
def commitTx(tx: Transaction) = {}
override def Shutdown() =
{
session.close()
cluster.close()
}
def TruncateStore() {
var stmt = session.prepare("truncate " + table + ";")
val rs = session.execute(stmt.bind().setConsistencyLevel(consistencylevelDelete))
}
def getAllKeys(handler: (Key) => Unit) =
{
val rs = session.execute(selectAllKeysStmt.bind().setConsistencyLevel(consistencylevelRead))
val iter = rs.iterator();
while (iter.hasNext()) {
if (rs.getAvailableWithoutFetching() == 100 && !rs.isFullyFetched())
rs.fetchMoreResults();
val row = iter.next()
val key = new Key
val buffer: ByteBuffer = row.getBytes(0)
while (buffer.hasRemaining())
key += buffer.get()
handler(key)
}
}
}
|
traytonwhite/Kamanja
|
trunk/Storage/src/main/scala/com/ligadata/keyvaluestore/KeyValueCassandra.scala
|
Scala
|
apache-2.0
| 9,366
|
package mu.node.echod.grpc
import java.io.File
import com.typesafe.config.Config
import io.grpc.ServerInterceptors
import io.grpc.internal.ServerImpl
import io.grpc.netty.{GrpcSslContexts, NettyServerBuilder}
import io.netty.handler.ssl.ClientAuth
import mu.node.echo.EchoServiceGrpc
import mu.node.echo.EchoServiceGrpc.EchoService
import mu.node.echod.util.FileUtils
import scala.concurrent.ExecutionContext
object EchoServer extends FileUtils {
def build(config: Config,
echoService: EchoService,
userContextServerInterceptor: UserContextServerInterceptor,
fileForConfiguredPath: (String) => File = fileForAbsolutePath): ServerImpl = {
val sslContext = GrpcSslContexts
.forServer(fileForConfiguredPath(config.getString("ssl.server-certificate")),
fileForConfiguredPath(config.getString("ssl.server-private-key")))
.trustManager(fileForConfiguredPath(config.getString("ssl.client-ca-certificate")))
.clientAuth(ClientAuth.REQUIRE)
.build()
val echoGrpcService = EchoServiceGrpc.bindService(echoService, ExecutionContext.global)
NettyServerBuilder
.forPort(config.getInt("server-port"))
.sslContext(sslContext)
.addService(ServerInterceptors.intercept(echoGrpcService, userContextServerInterceptor))
.build()
}
}
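// Hedged usage sketch, not part of the original file: shows the configuration keys that
// EchoServer.build reads. The paths and port below are placeholders; a real deployment would
// typically load them from application.conf via ConfigFactory.load().
object EchoServerConfigSketch {
  import com.typesafe.config.{Config, ConfigFactory}

  val exampleConfig: Config = ConfigFactory.parseString(
    """
      |server-port = 8443
      |ssl {
      |  server-certificate = "/etc/echod/server.crt"
      |  server-private-key = "/etc/echod/server.key"
      |  client-ca-certificate = "/etc/echod/ca.crt"
      |}
    """.stripMargin)
}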
|
vyshane/grpc-scala-microservice-kit
|
app/src/main/scala/grpc/EchoServer.scala
|
Scala
|
apache-2.0
| 1,338
|
package mesosphere.marathon
package core.plugin
import scala.reflect.ClassTag
trait PluginManager {
def plugins[T](implicit ct: ClassTag[T]): Seq[T]
def definitions: PluginDefinitions
}
object PluginManager {
lazy val None = new PluginManager {
override def plugins[T](implicit ct: ClassTag[T]): Seq[T] = Seq.empty[T]
override def definitions = PluginDefinitions.None
}
}
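// Hedged usage sketch, not part of the original file: PluginManager.None is a no-op manager, so
// querying it for any plugin type yields an empty Seq. The AuthPlugin trait is hypothetical and
// used purely for illustration.
object PluginManagerUsageSketch {
  trait AuthPlugin { def authenticate(token: String): Boolean }

  val manager: PluginManager = PluginManager.None
  val auths: Seq[AuthPlugin] = manager.plugins[AuthPlugin]
  assert(auths.isEmpty)
}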
|
guenter/marathon
|
src/main/scala/mesosphere/marathon/core/plugin/PluginManager.scala
|
Scala
|
apache-2.0
| 394
|
package controllers
import org.scalatestplus.play._
import play.api.test._
import play.api.test.Helpers._
/**
* Add your spec here.
* You can mock out a whole application including requests, plugins etc.
*
* For more information, see https://www.playframework.com/documentation/latest/ScalaTestingWithScalaTest
*/
class HomeControllerSpec extends PlaySpec with OneAppPerTest {
"HomeController GET" should {
"render the index page from a new instance of controller" in {
val controller = new HomeController
val home = controller.index().apply(FakeRequest())
status(home) mustBe OK
contentType(home) mustBe Some("text/html")
contentAsString(home) must include ("Welcome to Stocking")
}
"render the index page from the application" in {
val controller = app.injector.instanceOf[HomeController]
val home = controller.index().apply(FakeRequest())
status(home) mustBe OK
contentType(home) mustBe Some("text/html")
contentAsString(home) must include ("Welcome to Stocking")
}
"render the index page from the router" in {
// Need to specify Host header to get through AllowedHostsFilter
val request = FakeRequest(GET, "/").withHeaders("Host" -> "localhost")
val home = route(app, request).get
status(home) mustBe OK
contentType(home) mustBe Some("text/html")
contentAsString(home) must include ("Welcome to Stocking")
}
}
}
|
marchpig/stocking
|
test/controllers/HomeControllerSpec.scala
|
Scala
|
mit
| 1,454
|
package com.github.gdefacci.briscola.presentation.player
object Input {
final case class Player(name: String, password: String)
}
|
gdefacci/briscola
|
ddd-briscola-web/src/main/scala/com/github/gdefacci/briscola/presentation/player/Input.scala
|
Scala
|
bsd-3-clause
| 138
|
package test
import language.experimental.genericNumberLiterals
import scala.util.FromDigits
import scala.quoted.*
case class BigFloat(mantissa: BigInt, exponent: Int) {
override def toString = s"${mantissa}e${exponent}"
}
object BigFloat extends App {
def apply(digits: String): BigFloat = {
val (mantissaDigits, givenExponent) = digits.toUpperCase.split('E') match {
case Array(mantissaDigits, edigits) =>
val expo =
try FromDigits.intFromDigits(edigits)
catch {
case ex: FromDigits.NumberTooLarge =>
throw FromDigits.NumberTooLarge(s"exponent too large: $edigits")
}
(mantissaDigits, expo)
case Array(mantissaDigits) =>
(mantissaDigits, 0)
}
val (intPart, exponent) = mantissaDigits.split('.') match {
case Array(intPart, decimalPart) =>
(intPart ++ decimalPart, givenExponent - decimalPart.length)
case Array(intPart) =>
(intPart, givenExponent)
}
BigFloat(BigInt(intPart), exponent)
}
class BigFloatFromDigits extends FromDigits.Floating[BigFloat] {
def fromDigits(digits: String) = apply(digits)
}
given BigFloatFromDigits with {
override inline def fromDigits(digits: String) = ${
BigFloatFromDigitsImpl('digits)
}
}
// Should be in StdLib:
given ToExpr[BigInt] with {
def apply(x: BigInt)(using Quotes) =
'{BigInt(${Expr(x.toString)})}
}
}
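// A hedged worked example, not part of the original file, tracing BigFloat.apply by hand:
//   "1.5E10".toUpperCase.split('E') => mantissa digits "1.5", exponent 10
//   "1.5".split('.')                => intPart "15", exponent 10 - 1 = 9
// so BigFloat("1.5E10") == BigFloat(BigInt(15), 9), which prints as "15e9".
object BigFloatWorkedExample {
  def demo(): Unit = {
    println(BigFloat("1.5E10"))   // 15e9
    println(BigFloat("250"))      // 250e0
    println(BigFloat("0.25E-2"))  // 25e-4
  }
}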
|
dotty-staging/dotty
|
tests/run-macros/BigFloat/BigFloat_1.scala
|
Scala
|
apache-2.0
| 1,444
|
/*
* Copyright (C) 2009-2014 Typesafe Inc. <http://www.typesafe.com>
*/
package play.api.cache
import javax.inject._
import play.api.test._
import java.util.concurrent.atomic.AtomicInteger
import play.api.mvc.{ Results, Action }
import play.api.http
import scala.concurrent.duration._
import scala.util.Random
import org.joda.time.DateTime
class CachedSpec extends PlaySpecification {
sequential
"the cached action" should {
"cache values using injected CachedApi" in new WithApplication() {
val controller = app.injector.instanceOf[CachedController]
val result1 = controller.action(FakeRequest()).run
contentAsString(result1) must_== "1"
controller.invoked.get() must_== 1
val result2 = controller.action(FakeRequest()).run
contentAsString(result2) must_== "1"
controller.invoked.get() must_== 1
// Test that the same headers are added
header(ETAG, result2) must_== header(ETAG, result1)
header(EXPIRES, result2) must_== header(EXPIRES, result1)
}
"cache values using named injected CachedApi" in new WithApplication(FakeApplication(
additionalConfiguration = Map("play.modules.cache.bindCaches" -> Seq("custom"))
)) {
val controller = app.injector.instanceOf[NamedCachedController]
val result1 = controller.action(FakeRequest()).run
contentAsString(result1) must_== "1"
controller.invoked.get() must_== 1
val result2 = controller.action(FakeRequest()).run
contentAsString(result2) must_== "1"
controller.invoked.get() must_== 1
// Test that the same headers are added
header(ETAG, result2) must_== header(ETAG, result1)
header(EXPIRES, result2) must_== header(EXPIRES, result1)
// Test that the values are in the right cache
app.injector.instanceOf[CacheApi].get("foo") must beNone
controller.isCached("foo-etag") must beTrue
}
"cache values to disk using injected CachedApi" in new WithApplication() {
import net.sf.ehcache._
import net.sf.ehcache.config._
import net.sf.ehcache.store.MemoryStoreEvictionPolicy
// FIXME: Do this properly
val cacheManager = app.injector.instanceOf[CacheManager]
val diskEhcache = new Cache(
new CacheConfiguration("disk", 30)
.memoryStoreEvictionPolicy(MemoryStoreEvictionPolicy.LFU)
.eternal(false)
.timeToLiveSeconds(60)
.timeToIdleSeconds(30)
.diskExpiryThreadIntervalSeconds(0)
.persistence(new PersistenceConfiguration().strategy(PersistenceConfiguration.Strategy.LOCALTEMPSWAP)))
cacheManager.addCache(diskEhcache)
val diskEhcache2 = cacheManager.getCache("disk")
assert(diskEhcache2 != null)
val diskCache = new EhCacheApi(diskEhcache2)
val diskCached = new Cached(diskCache)
val invoked = new AtomicInteger()
val action = diskCached(_ => "foo")(Action(Results.Ok("" + invoked.incrementAndGet())))
val result1 = action(FakeRequest()).run
contentAsString(result1) must_== "1"
invoked.get() must_== 1
val result2 = action(FakeRequest()).run
contentAsString(result2) must_== "1"
// Test that the same headers are added
header(ETAG, result2) must_== header(ETAG, result1)
header(EXPIRES, result2) must_== header(EXPIRES, result1)
invoked.get() must_== 1
}
"cache values using Application's Cached" in new WithApplication() {
val invoked = new AtomicInteger()
val action = Cached(_ => "foo")(Action(Results.Ok("" + invoked.incrementAndGet())))
val result1 = action(FakeRequest()).run
contentAsString(result1) must_== "1"
invoked.get() must_== 1
val result2 = action(FakeRequest()).run
contentAsString(result2) must_== "1"
// Test that the same headers are added
header(ETAG, result2) must_== header(ETAG, result1)
header(EXPIRES, result2) must_== header(EXPIRES, result1)
invoked.get() must_== 1
}
"use etags for values" in new WithApplication() {
val invoked = new AtomicInteger()
val action = Cached(_ => "foo")(Action(Results.Ok("" + invoked.incrementAndGet())))
val result1 = action(FakeRequest()).run
status(result1) must_== 200
invoked.get() must_== 1
val etag = header(ETAG, result1)
etag must beSome(matching("""([wW]/)?"([^"]|\\\\")*"""")) //"""
val result2 = action(FakeRequest().withHeaders(IF_NONE_MATCH -> etag.get)).run
status(result2) must_== NOT_MODIFIED
invoked.get() must_== 1
}
"support wildcard etags" in new WithApplication() {
val invoked = new AtomicInteger()
val action = Cached(_ => "foo")(Action(Results.Ok("" + invoked.incrementAndGet())))
val result1 = action(FakeRequest()).run
status(result1) must_== 200
invoked.get() must_== 1
val result2 = action(FakeRequest().withHeaders(IF_NONE_MATCH -> "*")).run
status(result2) must_== NOT_MODIFIED
invoked.get() must_== 1
}
"work with etag cache misses" in new WithApplication() {
val action = Cached(_.uri)(Action(Results.Ok))
val resultA = action(FakeRequest("GET", "/a")).run
status(resultA) must_== 200
status(action(FakeRequest("GET", "/a").withHeaders(IF_NONE_MATCH -> "foo")).run) must_== 200
status(action(FakeRequest("GET", "/b").withHeaders(IF_NONE_MATCH -> header(ETAG, resultA).get)).run) must_== 200
status(action(FakeRequest("GET", "/c").withHeaders(IF_NONE_MATCH -> "*")).run) must_== 200
}
}
val dummyAction = Action { request =>
Results.Ok {
Random.nextInt().toString
}
}
val notFoundAction = Action { request =>
Results.NotFound(Random.nextInt().toString)
}
"Cached EssentialAction composition" should {
"cache infinite ok results" in new WithApplication() {
val cacheOk = Cached.empty { x =>
x.uri
}.includeStatus(200)
val actionOk = cacheOk.build(dummyAction)
val actionNotFound = cacheOk.build(notFoundAction)
val res0 = contentAsString(actionOk(FakeRequest("GET", "/a")).run)
val res1 = contentAsString(actionOk(FakeRequest("GET", "/a")).run)
// println(("res0", header(EXPIRES, actionOk(FakeRequest("GET", "/a")).run)))
res0 must equalTo(res1)
val res2 = contentAsString(actionNotFound(FakeRequest("GET", "/b")).run)
val res3 = contentAsString(actionNotFound(FakeRequest("GET", "/b")).run)
res2 must not equalTo (res3)
}
"cache everything for infinite" in new WithApplication() {
val cache = Cached.everything { x =>
x.uri
}
val actionOk = cache.build(dummyAction)
val actionNotFound = cache.build(notFoundAction)
val res0 = contentAsString(actionOk(FakeRequest("GET", "/a")).run)
val res1 = contentAsString(actionOk(FakeRequest("GET", "/a")).run)
res0 must equalTo(res1)
val res2 = contentAsString(actionNotFound(FakeRequest("GET", "/b")).run)
val res3 = contentAsString(actionNotFound(FakeRequest("GET", "/b")).run)
res2 must equalTo(res3)
}
"cache everything one hour" in new WithApplication() {
val cache = Cached.everything(x => x.uri, 3600)
val actionOk = cache.build(dummyAction)
val actionNotFound = cache.build(notFoundAction)
val res0 = header(EXPIRES, actionOk(FakeRequest("GET", "/a")).run)
val res1 = header(EXPIRES, actionNotFound(FakeRequest("GET", "/b")).run)
def toDuration(header: String) = {
val now = DateTime.now().getMillis
val target = http.dateFormat.parseDateTime(header).getMillis
Duration(target - now, MILLISECONDS)
}
val beInOneHour = beBetween(
(Duration(1, HOURS) - Duration(10, SECONDS)).toMillis,
Duration(1, HOURS).toMillis)
res0.map(toDuration).map(_.toMillis) must beSome(beInOneHour)
res1.map(toDuration).map(_.toMillis) must beSome(beInOneHour)
}
}
"EhCacheModule" should {
"support binding multiple different caches" in new WithApplication(FakeApplication(
additionalConfiguration = Map("play.modules.cache.bindCaches" -> Seq("custom"))
)) {
val component = app.injector.instanceOf[SomeComponent]
val defaultCache = app.injector.instanceOf[CacheApi]
component.set("foo", "bar")
defaultCache.get("foo") must beNone
component.get("foo") must beSome("bar")
}
}
}
class SomeComponent @Inject() (@NamedCache("custom") cache: CacheApi) {
def get(key: String) = cache.get[String](key)
def set(key: String, value: String) = cache.set(key, value)
}
class CachedController @Inject() (cached: Cached) {
val invoked = new AtomicInteger()
val action = cached(_ => "foo")(Action(Results.Ok("" + invoked.incrementAndGet())))
}
class NamedCachedController @Inject() (
@NamedCache("custom") val cache: CacheApi,
@NamedCache("custom") val cached: Cached) {
val invoked = new AtomicInteger()
val action = cached(_ => "foo")(Action(Results.Ok("" + invoked.incrementAndGet())))
def isCached(key: String): Boolean = cache.get[String](key).isDefined
}
|
jyotikamboj/container
|
pf-framework/src/play-cache/src/test/scala/play/api/cache/CachedSpec.scala
|
Scala
|
mit
| 9,138
|
/*
* Distributed as part of Scalala, a linear algebra library.
*
* Copyright (C) 2008- Daniel Ramage
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110 USA
*/
package scalala;
package tensor;
package generic;
/**
* Builds a Tensor of type To after being given a series of
* key, value pairs.
*
* @author dramage
*/
trait TensorBuilder[@specialized(Int,Long) K, @specialized(Int,Long,Float,Double) V,+To] {
def update(key : K, value : V);
def result : To;
}
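/**
 * A hedged sketch, not part of the original file: a minimal TensorBuilder backed by a mutable
 * Map, producing an immutable Map[K,V] as its result. Concrete tensor types would supply their
 * own builders instead.
 */
class MapTensorBuilderSketch[K, V] extends TensorBuilder[K, V, Map[K, V]] {
  private val buffer = scala.collection.mutable.Map[K, V]();

  override def update(key : K, value : V) = { buffer(key) = value; }
  override def result = buffer.toMap;
}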
|
scalala/Scalala
|
src/main/scala/scalala/tensor/generic/TensorBuilder.scala
|
Scala
|
lgpl-2.1
| 1,155
|
/*
* Copyright 2017 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.featran.transformers
import com.twitter.algebird.HyperLogLogMonoid
import org.scalacheck._
import scala.math.ceil
object HashOneHotEncoderSpec extends TransformerProp("HashOneHotEncoder") {
implicit private val labelArb: Arbitrary[String] = Arbitrary(Gen.alphaStr)
private def estimateSize(xs: List[String]): Double = {
val m = new HyperLogLogMonoid(12)
xs.map(m.toHLL(_)).reduce(m.plus).estimatedSize
}
property("default") = Prop.forAll { (xs: List[String]) =>
val size = ceil(estimateSize(xs) * 8.0).toInt
test(HashOneHotEncoder("one_hot"), size, xs)
}
property("size") = Prop.forAll { (xs: List[String]) =>
val size = 100
test(HashOneHotEncoder("one_hot", size), size, xs)
}
property("scaling factor") = Prop.forAll { (xs: List[String]) =>
val scalingFactor = 2.0
val size = ceil(estimateSize(xs) * scalingFactor).toInt
test(HashOneHotEncoder("one_hot", 0, scalingFactor), size, xs)
}
private def test(encoder: Transformer[String, _, _], size: Int, xs: List[String]): Prop = {
val cats = 0 until size
val names = cats.map("one_hot_" + _)
val expected = xs.map(s => cats.map(c => if (HashEncoder.bucket(s, size) == c) 1.0 else 0.0))
val missing = cats.map(_ => 0.0)
test(encoder, xs, names, expected, missing)
}
}
|
spotify/featran
|
core/src/test/scala/com/spotify/featran/transformers/HashOneHotEncoderSpec.scala
|
Scala
|
apache-2.0
| 1,922
|
package ru.reo7sp.wave
import javax.swing.{JPanel, JTextField, JLabel, JCheckBox}
import java.awt.event.{ActionListener, ActionEvent}
class WaveController(_viewModel: WaveViewModel) extends JPanel with ActionListener {
add(new JLabel("A="))
private val aTextField = new JTextField(_viewModel.a.toString, 3)
aTextField.addActionListener(this)
add(aTextField)
add(new JLabel("z="))
private val kExponentTextField = new JTextField(_viewModel.kExponent.toString, 3)
kExponentTextField.addActionListener(this)
add(kExponentTextField)
add(new JLabel("j="))
private val omegaFactorTextField = new JTextField(_viewModel.omegaFactor.toString, 3)
omegaFactorTextField.addActionListener(this)
add(omegaFactorTextField)
add(new JLabel("k="))
private val kTextField = new JTextField(_viewModel.k.toString, 3)
kTextField.addActionListener(this)
add(kTextField)
add(new JLabel("Base x="))
private val baseXTextField = new JTextField(_viewModel.baseX.toString, 3)
baseXTextField.addActionListener(this)
add(baseXTextField)
add(new JLabel("Speed="))
private val speedTextField = new JTextField(_viewModel.speed.toString, 3)
speedTextField.addActionListener(this)
add(speedTextField)
add(new JLabel("Waves count="))
private val wavesCountTextField = new JTextField(_viewModel.wavesCount.toString, 3)
wavesCountTextField.addActionListener(this)
add(wavesCountTextField)
add(new JLabel("k step="))
private val kStepTextField = new JTextField(_viewModel.kStep.toString, 3)
kStepTextField.addActionListener(this)
add(kStepTextField)
add(new JLabel("Scale x="))
private val scaleXTextField = new JTextField(_viewModel.scaleX.toString, 3)
scaleXTextField.addActionListener(this)
add(scaleXTextField)
private val showOnlySum = new JCheckBox("Show only sum")
showOnlySum.addActionListener(this)
add(showOnlySum)
override def actionPerformed(e: ActionEvent): Unit = try {
_viewModel.updateVars(
a = aTextField.getText.toDouble,
k = kTextField.getText.toDouble,
kExponent = kExponentTextField.getText.toDouble,
omegaFactor = omegaFactorTextField.getText.toDouble,
wavesCount = wavesCountTextField.getText.toDouble.toInt,
kStep = kStepTextField.getText.toDouble,
scaleX = scaleXTextField.getText.toDouble,
showOnlySum = showOnlySum.isSelected,
baseX = baseXTextField.getText.toDouble,
speed = speedTextField.getText.toDouble
)
} catch {
case _: Throwable =>
}
}
|
reo7sp/WaveTest
|
src/main/scala/ru/reo7sp/wave/WaveConfigView.scala
|
Scala
|
mit
| 2,502
|
package org.squeryl.test
import org.squeryl.framework._
abstract class LeftJoinTest extends SchemaTester with RunTestsInsideTransaction{
import org.squeryl.PrimitiveTypeMode._
val schema = LeftJoinSchema
import LeftJoinSchema._
override def prePopulate {
months.insert(new Month(1, "Jan"))
months.insert(new Month(2, "Feb"))
months.insert(new Month(3, "Mar"))
months.insert(new Month(4, "Apr"))
months.insert(new Month(5, "May"))
months.insert(new Month(6, "Jun"))
months.insert(new Month(7, "Jul"))
months.insert(new Month(8, "Aug"))
months.insert(new Month(9, "Sep"))
months.insert(new Month(10, "Oct"))
months.insert(new Month(11, "Nov"))
months.insert(new Month(12, "Dec"))
items.insert(new Item(1, "i1"))
ordrs.insert(new Ordr(1, 1, 1, 20))
ordrs.insert(new Ordr(2, 1, 1, 40))
ordrs.insert(new Ordr(3, 5, 1, 15))
}
test("return the correct results if an inner join is used"){
val subquery = from(ordrs)((o) =>
groupBy(o.monthId)
compute (sum(o.qty))
orderBy (o.monthId))
val mainquery = join(months, subquery)((m, sq) =>
select(m, sq.measures)
on (m.id === sq.key))
val res = transaction { mainquery.toList }
res.size should equal(2)
res(0)._2 should equal(Some(60))
res(1)._2 should equal(Some(15))
}
test("return the correct results if a left outer join is used"){
val subquery = from(ordrs)((o) =>
groupBy(o.monthId)
compute (sum(o.qty))
orderBy (o.monthId))
val mainquery =
join(months, subquery.leftOuter)((m, sq) =>
select(m, sq)
on (m.id === sq.map(_.key))
)
val res = transaction {
mainquery.map(e =>
if(e._2 == None) None
else e._2.get.measures
).toSeq
}
res.size should equal(12)
res(0) should equal(Some(60))
res(1) should equal(None)
res(2) should equal(None)
res(3) should equal(None)
res(4) should equal(Some(15))
res(5) should equal(None)
res(6) should equal(None)
res(7) should equal(None)
res(8) should equal(None)
res(9) should equal(None)
res(10) should equal(None)
res(11) should equal(None)
}
}
import org.squeryl.Schema
object LeftJoinSchema extends Schema {
import org.squeryl.PrimitiveTypeMode._
val items = table[Item]("Item")
val months = table[Month]("Month")
val ordrs = table[Ordr]("Ordr")
override def drop = super.drop
}
class Item(val id: Int, val name: String)
class Month(val id: Int, val name: String) {
override def toString = "Mont("+id + ":" + name + ")"
}
class Ordr(val id: Int, val monthId: Int, val itemId: Int, val qty: Int)
|
takezoux2/squeryl-experimental
|
src/test/scala/org/squeryl/test/LeftJoinTest.scala
|
Scala
|
apache-2.0
| 2,821
|
package x7c1.linen.repository.loader.crawling
import x7c1.wheat.macros.reify.HasConstructor
import x7c1.wheat.modern.formatter.ThrowableFormatter.format
sealed trait CrawlerFateError {
def cause: Throwable
def detail: String
}
object CrawlerFateError {
implicit object unknown extends HasConstructor[Throwable => CrawlerFateError]{
override def newInstance = UnknownError(_)
}
case class UnknownError(cause: Throwable) extends CrawlerFateError {
override def detail = format(cause){"[failed] unknown error"}
}
}
|
x7c1/Linen
|
linen-repository/src/main/scala/x7c1/linen/repository/loader/crawling/CrawlerFateError.scala
|
Scala
|
mit
| 535
|
package picasso.model.pi
import picasso.utils.{LogCritical, LogError, LogWarning, LogNotice, LogInfo, LogDebug, Logger, Misc}
import picasso.math._
import picasso.math.WellPartialOrdering._
/*
object PiProgram {
def isConfiguration(p: PiProcess): Boolean = p match {
case Composition(processes) => processes.forall(isConfiguration)
case Restriction(_, process) => isConfiguration(process)
case Repetition(process) => isConfiguration(process)
case PiZero => true
case PiProcessID(_,_) => true
case _ => false
}
import scala.collection.immutable.TreeMap
def instanciateDefinition(pid: String, args: List[String], definition: Map[String,(List[String], PiProcess)]): PiProcess = definition get pid match {
case Some((params, p)) => {
val map = (TreeMap.empty[String,String] /: (params zip args))(_ + _)
p alpha map
}
case None => error("PiProgram.instanciateDefinition: " + pid +" not found")
}
//TODO define normal form where equations have a prefix/choice/replication first
//TODO normal form for the configuration: restriction at top level, or just below replication, then composition of PIDs
private def removeRestrictions(configuration: PiProcess): PiProcess = configuration match {
case Restriction(_, process) => removeRestrictions(process)
case process => process
}
def findProcessID(configuration: PiProcess, id: String): List[PiProcessID] = {
val stripped = removeRestrictions(configuration)
configuration match {
case Composition(processes) => {
((Nil: List[PiProcessID]) /: processes)( (acc, p) => p match {
case pid @ PiProcessID(_,_) => pid::acc
case Restriction(_,_) =>
Logger("DepthBoundedProcess", LogWarning, "PiProgram.findProcessID does not yet handle restriction")
acc
case _ => error("PiProgram.findProcessID: configuration not in normal form (ProcessID/Restriction)")
})
}
case _ => error("PiProgram.findProcessID: configuration not in normal form Composition")
}
}
}
object PiProcessWPO extends WellPartialOrdering[PiProcess] {
def tryCompare(p1: PiProcess, p2: PiProcess): Option[Int] = {
    //TODO transform programs into some kind of graph and call some graph isomorphism
//put graph as lazy val into the process ?
error("TODO")
}
def lteq(p1: PiProcess, p2: PiProcess): Boolean =
tryCompare(p1, p2) match { case Some(0) | Some(-1) => true; case _ => false }
override def lt(p1: PiProcess, p2: PiProcess): Boolean =
tryCompare(p1, p2) match { case Some(-1) => true; case _ => false }
override def equiv(p1: PiProcess, p2: PiProcess): Boolean =
tryCompare(p1, p2) match { case Some(0) => true; case _ => false }
override def gteq(p1: PiProcess, p2: PiProcess): Boolean =
tryCompare(p1, p2) match { case Some(0) | Some(1) => true; case _ => false }
override def gt(p1: PiProcess, p2: PiProcess): Boolean =
tryCompare(p1, p2) match { case Some(1) => true; case _ => false }
}
/** a transition happening on (co-)name observable*/
class PiTransition(observable: String, definition: Map[String,(List[String], PiProcess)]) extends Transition[PiProcess] {
private def findMatchingParams(p: PiProcess, template: (String,(List[String], PiProcess))): List[(String, List[String], PiProcess)] = {
val matchingPID = PiProgram.findProcessID(p, template._1)
matchingPID map { case PiProcessID(id, args) =>
val zipped = template._2._1 zip args
val substitution = (Map.empty[String,String] /: zipped)(_ + _)
(id, args, template._2._2 alpha substitution)
}
}
private def matchAndFilter(s: PiProcess, defs: Map[String,(List[String], PiProcess)]): List[(String,List[String], PiProcess)] = {
defs.toList flatMap ( mapping => {
val instanciated = findMatchingParams(s, mapping)
instanciated filter (_._3 isObservablePrefix observable)
})
}
private val receiving: Map[String,(List[String], PiProcess)] = definition filter ( (p: (String,(List[String], PiProcess))) => isInputPrefix(p._2._2))
/** Returns the process ID that are receiving on observable */
def name(s: PiProcess) = matchAndFilter(s, receiving)
private val sending: Map[String,(List[String], PiProcess)] = definition filter ( (p: (String,(List[String], PiProcess))) => isOutputPrefix(p._2._2))
/** Returns the process ID that are sending on observable */
def coname(s: PiProcess) = matchAndFilter(s, sending)
def apply(state: PiProcess): Set[PiProcess] = {
error("TODO")
}
def isDefinedAt(state: PiProcess): Boolean = !(name(state).isEmpty || coname(state).isEmpty)
}
*/
//TODO extends WSTS with WADL
//class PiProgram(definition: Map[String,(List[String], PiProcess)], configuration: PiProcess) extends WSTS with PredBasis {
//
// type S = PiProcess
// implicit val ordering: WellPartialOrdering[S] = PiSubGraph
//  type T = ...
//  val transitions = Nil //TODO to have an easily computable #of transitions: a transition is given by a pair of InputPrefix/OutputPrefix
// def predBasis(s: UpwardClosedSet[S]): UpwardClosedSet[S] = error("TODO")
// //TODO
//
//}
|
dzufferey/picasso
|
core/src/main/scala/picasso/model/pi/Pi.scala
|
Scala
|
bsd-2-clause
| 5,174
|
package objektwerks
import java.sql.Timestamp
import java.time.LocalDateTime
import java.time.format.DateTimeFormatter
import java.util.Locale
import java.util.regex.Pattern
import org.apache.spark.sql.{ForeachWriter, Row}
case class LogEntry(ip: String,
client: String,
user: String,
dateTime: Option[Timestamp],
request: String,
status: String,
bytes: String,
referer: String,
agent: String)
object LogEntry {
val logEntryPattern = {
val ddd = "\\\\d{1,3}"
val ip = s"($ddd\\\\.$ddd\\\\.$ddd\\\\.$ddd)?"
val client = "(\\\\S+)"
val user = "(\\\\S+)"
val dateTime = "(\\\\[.+?\\\\])"
val request = "\\"(.*?)\\""
val status = "(\\\\d{3})"
val bytes = "(\\\\S+)"
val referer = "\\"(.*?)\\""
val agent = "\\"(.*?)\\""
val regex = s"$ip $client $user $dateTime $request $status $bytes $referer $agent"
Pattern.compile(regex)
}
val rowForeachWriter = new ForeachWriter[Row] {
override def open(partitionId: Long, version: Long): Boolean = true
override def process(row: Row): Unit = println(s"$row")
override def close(errorOrNull: Throwable): Unit = println("Closing row foreach writer...")
}
  val dateTimePattern = Pattern.compile("\\[(.*?) .+]")
val dateTimeFormatter = DateTimeFormatter.ofPattern("dd/MMM/yyyy:HH:mm:ss", Locale.ENGLISH)
def rowToLogEntry(row: Row): Option[LogEntry] = {
val matcher = logEntryPattern.matcher(row.getString(0))
if (matcher.matches()) {
Some(LogEntry(
matcher.group(1),
matcher.group(2),
matcher.group(3),
dateTimeToTimestamp(matcher.group(4)),
matcher.group(5),
matcher.group(6),
matcher.group(7),
matcher.group(8),
matcher.group(9)))
} else None
}
def dateTimeToTimestamp(dateTime: String): Option[Timestamp] = {
val dateTimeMatcher = dateTimePattern.matcher(dateTime)
if (dateTimeMatcher.find) {
val dateTimeAsString = dateTimeMatcher.group(1)
val dateTime = LocalDateTime.parse(dateTimeAsString, dateTimeFormatter)
val timestamp = Timestamp.valueOf(dateTime)
Some(timestamp)
} else None
}
}
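// Hedged usage sketch, not part of the original file: parses the bracketed timestamp of a
// typical combined-log-format entry with the helpers above. The sample value is illustrative.
object LogEntryUsageSketch extends App {
  // dateTimePattern captures "21/Jul/2019:16:01:22", which dateTimeFormatter then parses.
  println(LogEntry.dateTimeToTimestamp("[21/Jul/2019:16:01:22 +0000]"))
  // => Some(2019-07-21 16:01:22.0)
}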
|
objektwerks/spark
|
src/main/scala/objektwerks/LogEntry.scala
|
Scala
|
apache-2.0
| 2,270
|
package sampleclean.api
import org.apache.spark.SparkContext
import org.apache.spark.sql.SchemaRDD
import org.apache.spark.sql.Row
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SQLContext
/**
* This class defines a sampleclean query object
*
* The Sample Clean Query algorithm follows a cleaning
* procedure on a sample before estimating the query result.
* @param scc SampleClean Context
 * @param saqp Approximate Query object
 * @param sampleName name of the sample to query
* @param attr attribute to query
* @param expr aggregate function to use
* @param pred predicate
* @param group group by this attribute
* @param rawSC perform a rawSC Query if true or a normalizedSC Query
* if false. See [[SampleCleanAQP]] for an explanation of
* this option.
*/
@serializable
class SampleCleanQuery(scc:SampleCleanContext,
saqp:SampleCleanAQP,
sampleName: String,
attr: String,
expr: String,
pred:String = "",
group:String = "",
rawSC:Boolean = true){
case class ResultSchemaRDD(val group: String, val agg: Double, val se: Double) extends Throwable
/**
* The execute method provides a straightforward way to execute the query.
* @return (current time, List(aggregate, (estimate, +/- confidence value)))
*/
def execute():SchemaRDD= {
var sampleRatio = scc.getSamplingRatio(scc.qb.getDirtySampleName(sampleName))
var defaultPred = ""
if(pred != "")
defaultPred = pred
var query:(Long, List[(String, (Double, Double))]) = null
if(rawSC){
query = saqp.rawSCQueryGroup(scc,
sampleName.trim(),
attr.trim(),
expr.trim(),
pred.trim(),
group.trim(),
sampleRatio)
}
else{
query = saqp.normalizedSCQueryGroup(scc,
sampleName.trim(),
attr.trim(),
expr.trim(),
pred.trim(),
group.trim(),
sampleRatio)
}
val sc = scc.getSparkContext()
val sqlContext = new SQLContext(sc)
val rddRes = sc.parallelize(query._2)
val castRDD = rddRes.map(x => ResultSchemaRDD(x._1,x._2._1, x._2._2))
return scc.getHiveContext.createDataFrame(castRDD)
}
}
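// Hedged usage sketch, not part of the original file: assumes a SampleCleanContext `scc` and a
// SampleCleanAQP `saqp` have already been constructed elsewhere, and that a sample named
// "restaurant_sample" exists. The sample name and predicate are illustrative only; the point is
// how the constructor arguments map onto a query.
object SampleCleanQueryUsageSketch {
  def avgPrice(scc: SampleCleanContext, saqp: SampleCleanAQP): SchemaRDD = {
    val query = new SampleCleanQuery(
      scc, saqp,
      sampleName = "restaurant_sample",
      attr = "price",
      expr = "avg",
      pred = "city = 'Berkeley'",
      group = "",
      rawSC = true)
    query.execute()
  }
}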
|
sjyk/sampleclean-async
|
src/main/scala/sampleclean/api/SampleCleanQuery.scala
|
Scala
|
apache-2.0
| 2,174
|
package controllers
import play.api.mvc.{AnyContent, Request}
sealed trait RequestType
object RequestType {
def requestType(implicit request:Request[AnyContent]) = apply(request)
private[this] def acc(t: String)(implicit request: Request[AnyContent]): Boolean =
request.accepts("application/" + t) || request.accepts("text/" + t)
def apply(implicit request:Request[AnyContent]): RequestType = {
if (acc("html") || acc("x-www-form-urlencoded")) HttpRequest
else if (acc("xml")) XmlRequest
else if (acc("json")) JsonRequest
else InvalidRequest
}
}
case object JsonRequest extends RequestType
case object XmlRequest extends RequestType
case object HttpRequest extends RequestType
case object InvalidRequest extends RequestType
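// Hedged usage sketch, not part of the original file: shows how a Play controller might branch
// on the negotiated RequestType. The response bodies used here are illustrative only.
object RequestTypeUsageSketch {
  import play.api.mvc.Results

  def respond(implicit request: Request[AnyContent]) = RequestType.requestType match {
    case JsonRequest    => Results.Ok("""{"status":"ok"}""").as("application/json")
    case XmlRequest     => Results.Ok("<status>ok</status>").as("application/xml")
    case HttpRequest    => Results.Ok("ok")
    case InvalidRequest => Results.NotAcceptable
  }
}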
|
sayon/simple-rest-app
|
app/controllers/RequestType.scala
|
Scala
|
mit
| 756
|
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.feature.dataset
import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.dllib.tensor.{DenseType, SparseType, Storage, Tensor}
import org.apache.commons.lang3.SerializationUtils
import org.apache.zookeeper.KeeperException.UnimplementedException
import scala.reflect.ClassTag
/**
* Class that represents the features and labels of a data sample.
*
* @tparam T numeric type
*/
abstract class Sample[T: ClassTag] extends Serializable {
/**
* First dimension length of index-th feature.
* This function could be used to sort samples in [[DataSet]].
*
* @return
*/
def featureLength(index: Int): Int
/**
* First dimension length of index-th label.
* This function could be used to find the longest label.
*
* @return
*/
def labelLength(index: Int): Int
/**
* Number of tensors in feature
*
* @return number of tensors in feature
*/
def numFeature(): Int
/**
* Number of tensors in label
*
* @return number of tensors in label
*/
def numLabel(): Int
/**
   * @return a deep clone
*/
override def clone(): this.type =
SerializationUtils.clone(this)
  /**
   * Get the feature tensor, for single-feature Samples only.
   * You don't need to override this, because a default
   * implementation that throws an exception is provided.
   * @return feature tensor
   */
def feature()(implicit ev: TensorNumeric[T]): Tensor[T]
/**
* Get feature tensor for given index
* @param index index of specific sample
*/
def feature(index: Int)(implicit ev: TensorNumeric[T]): Tensor[T]
  /**
   * Get the label tensor, for single-label Samples only.
   * You don't need to override this, because a default
   * implementation that throws an exception is provided.
   * @return label tensor
   */
def label()(implicit ev: TensorNumeric[T]): Tensor[T]
/**
* Get label tensor for given index
* @param index index of specific sample
*/
def label(index: Int)(implicit ev: TensorNumeric[T]): Tensor[T]
/**
* Set data of feature and label.
* @param featureData
* @param labelData
* @param featureSize
* @param labelSize
* @return
*/
@deprecated("Old interface", "0.2.0")
def set(
featureData: Array[T],
labelData: Array[T],
featureSize: Array[Int],
labelSize: Array[Int])(implicit ev: TensorNumeric[T]): Sample[T] = {
throw new UnsupportedOperationException("Sample.set(): unimplemented deprecated method")
}
/**
* Get feature sizes
* @return feature sizes
*/
def getFeatureSize(): Array[Array[Int]]
/**
* Get label sizes
* @return label sizes
*/
def getLabelSize(): Array[Array[Int]]
/**
* Get data
* @return data
*/
def getData(): Array[T]
}
/**
* A kind of sample who use only one array
*/
class ArraySample[T: ClassTag] private[bigdl](
private val data: Array[T],
private val featureSize: Array[Array[Int]],
private val labelSize: Array[Array[Int]]) extends Sample[T] {
  require(data != null, "Sample: data cannot be null")
  require(featureSize != null, "Sample: featureSize cannot be null")
override def getData(): Array[T] = data
override def featureLength(index: Int): Int = {
require(null != featureSize, "featureSize is empty")
featureSize(index)(0)
}
override def labelLength(index: Int): Int = {
if (null != labelSize) {
labelSize(index)(0)
} else {
0
}
}
override def getFeatureSize(): Array[Array[Int]] = {
featureSize
}
override def getLabelSize(): Array[Array[Int]] = {
require(null != labelSize, "Sample doesn't have label")
labelSize
}
override def numFeature(): Int = {
require(null != featureSize, "featureSize is empty")
featureSize.length
}
override def numLabel(): Int = {
if (null == labelSize) {
0
} else {
labelSize.length
}
}
override def feature()(implicit ev: TensorNumeric[T]): Tensor[T] = {
    require(this.numFeature == 1, "feature() expects exactly one feature tensor, " +
      s"but got ${featureSize.length}; please use feature(index) instead")
feature(0)
}
override def feature(index: Int)(implicit ev: TensorNumeric[T]): Tensor[T] = {
require(this.numFeature > index, "feature index out of range")
val featureOffSet = 1 + getFeatureSize().zipWithIndex.
filter(_._2 < index).map(_._1.product).sum
Tensor[T](Storage(data), featureOffSet, getFeatureSize()(index))
}
override def label(index: Int)(implicit ev: TensorNumeric[T]): Tensor[T] = {
require(this.numFeature > index, "label index out of range")
if (this.numLabel > index) {
val labelOffSet = 1 + getFeatureSize().map(_.product).sum + getLabelSize().zipWithIndex
.filter(_._2 < index).map(_._1.product).sum
Tensor[T](Storage[T](data), labelOffSet, labelSize(index))
} else {
null
}
}
override def label()(implicit ev: TensorNumeric[T]): Tensor[T] = {
require(this.numLabel <= 1, "Only one Sample required in total " +
s"got ${labelSize.length} label Sample, please use label(index) instead")
label(0)
}
@deprecated("Old interface", "0.2.0")
override def set(
featureData: Array[T],
labelData: Array[T],
featureSize: Array[Int],
labelSize: Array[Int])(implicit ev: TensorNumeric[T]): Sample[T] = {
require(featureSize.sameElements(this.featureSize(0)) &&
labelSize.sameElements(this.labelSize(0)), "size not match")
ev.arraycopy(featureData, 0, data, 0, featureData.length)
ev.arraycopy(labelData, 0, data, featureData.length, labelData.length)
this
}
def canEqual(other: Any): Boolean = other.isInstanceOf[ArraySample[T]]
override def equals(other: Any): Boolean = other match {
case that: ArraySample[T] =>
if (!(that canEqual this) ||
!(data.deep == that.data.deep) ||
!(featureSize.deep == that.featureSize.deep)) {
return false
}
if (null != labelSize && null != that.labelSize) {
labelSize.deep == that.labelSize.deep
} else {
null == labelSize & null == that.labelSize
}
case _ => false
}
override def hashCode(): Int = {
val state = if (null == labelSize) Seq(data, featureSize) else Seq(data, featureSize, labelSize)
state.map(_.hashCode()).foldLeft(0)((a, b) => 31 * a + b)
}
}
object ArraySample {
private def typeCheck[T: ClassTag](tensor: Tensor[T]): Unit = {
tensor.getTensorType match {
case DenseType =>
require(tensor.isContiguous(), s"tensor in ArraySample should be contiguous," +
s" Please check your input.")
case _ =>
throw new IllegalArgumentException(s"ArraySample doesn't support ${tensor.getTensorType}")
}
}
private def typeCheck[T: ClassTag](tensors: Array[Tensor[T]]): Unit = {
tensors.foreach{tensor =>
typeCheck(tensor)
}
}
def apply[T: ClassTag](
data: Array[T],
featureSize: Array[Array[Int]],
labelSize: Array[Array[Int]]): Sample[T] = {
new ArraySample(data, featureSize, labelSize)
}
def apply[T: ClassTag](
featureTensor: Tensor[T],
labelTensor: Tensor[T])(implicit ev: TensorNumeric[T]) : Sample[T] = {
typeCheck(featureTensor)
typeCheck(labelTensor)
val data = new Array[T](featureTensor.nElement() + labelTensor.nElement())
ev.arraycopy(featureTensor.storage().array(), featureTensor.storageOffset() - 1,
data, 0, featureTensor.nElement())
ev.arraycopy(labelTensor.storage().array(), labelTensor.storageOffset() - 1,
data, featureTensor.nElement(), labelTensor.nElement())
new ArraySample[T](data, getSize(featureTensor), getSize(labelTensor))
}
def apply[T: ClassTag](
featureTensor: Tensor[T],
label: T)(implicit ev: TensorNumeric[T]) : Sample[T] = {
typeCheck(featureTensor)
val data = new Array[T](featureTensor.nElement() + 1)
ev.arraycopy(featureTensor.storage().array(), featureTensor.storageOffset() - 1,
data, 0, featureTensor.nElement())
data(featureTensor.nElement()) = label
new ArraySample[T](data, getSize(featureTensor), Array(Array(1)))
}
def apply[T: ClassTag](
featureTensors: Array[Tensor[T]],
labelTensor: Tensor[T])(implicit ev: TensorNumeric[T]) : Sample[T] = {
typeCheck(featureTensors)
typeCheck(labelTensor)
val tensors = featureTensors ++ Array(labelTensor)
val data = new Array[T](tensors.map(_.nElement()).sum)
copy(data, tensors)
new ArraySample[T](data, getSize(featureTensors), getSize(labelTensor))
}
def apply[T: ClassTag](
featureTensors: Array[Tensor[T]],
labelTensors: Array[Tensor[T]])(implicit ev: TensorNumeric[T]) : Sample[T] = {
typeCheck(featureTensors)
typeCheck(labelTensors)
val tensors = featureTensors ++ labelTensors
val data = new Array[T](tensors.map(_.nElement()).sum)
copy(data, tensors)
new ArraySample[T](data, getSize(featureTensors), getSize(labelTensors))
}
def apply[T: ClassTag](
featureTensor: Tensor[T])(implicit ev: TensorNumeric[T]) : Sample[T] = {
typeCheck(featureTensor)
val data = new Array[T](featureTensor.nElement())
ev.arraycopy(featureTensor.storage().array(), featureTensor.storageOffset() - 1,
data, 0, featureTensor.nElement())
new ArraySample[T](data, getSize(featureTensor), null)
}
def apply[T: ClassTag](
featureTensors: Array[Tensor[T]])(implicit ev: TensorNumeric[T]) : Sample[T] = {
typeCheck(featureTensors)
val data = new Array[T](featureTensors.map(_.nElement()).sum)
copy(data, featureTensors)
new ArraySample[T](data, getSize(featureTensors), null)
}
private def copy[T: ClassTag](
data: Array[T],
tensors: Array[Tensor[T]])(implicit ev: TensorNumeric[T]) : Array[T] = {
var offset = 0
var i = 0
while (i < tensors.length) {
val tensor = tensors(i)
require(tensor.isContiguous(), s"${i}-th tensor is not contiguous")
ev.arraycopy(tensor.storage().array(), tensor.storageOffset() - 1,
data, offset, tensor.nElement())
offset += tensor.nElement()
i += 1
}
data
}
private[bigdl] def getSize[T: ClassTag](tensors: Array[Tensor[T]]): Array[Array[Int]] = {
tensors.map(_.size)
}
private[bigdl] def getSize[T: ClassTag](tensor: Tensor[T]): Array[Array[Int]] = {
Array(tensor.size())
}
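  // Note: "same size" here only compares the number of tensors and the number of dimensions
  // of each tensor; the individual dimension values are not compared.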
private[bigdl] def sameSize(a: Array[Array[Int]], b: Array[Array[Int]]): Boolean = {
if (a.length != b.length) return false
var i = 0
while (i < a.length) {
if (a(i).length != b(i).length) return false
i += 1
}
true
}
}
object Sample {
def apply[T: ClassTag](
data: Array[T],
featureSize: Array[Array[Int]],
labelSize: Array[Array[Int]]): Sample[T] = {
ArraySample(data, featureSize, labelSize)
}
def apply[T: ClassTag](
featureTensor: Tensor[T],
labelTensor: Tensor[T])(implicit ev: TensorNumeric[T]) : Sample[T] = {
if (featureTensor.getTensorType == DenseType) {
ArraySample(featureTensor, labelTensor)
} else {
TensorSample(featureTensor, labelTensor)
}
}
def apply[T: ClassTag](
featureTensor: Tensor[T],
label: T)(implicit ev: TensorNumeric[T]) : Sample[T] = {
if (featureTensor.getTensorType == DenseType) {
ArraySample(featureTensor, label)
} else {
TensorSample(featureTensor, label)
}
}
def apply[T: ClassTag](
featureTensors: Array[Tensor[T]],
labelTensor: Tensor[T])(implicit ev: TensorNumeric[T]) : Sample[T] = {
if (featureTensors.exists(_.getTensorType == SparseType)) {
TensorSample(featureTensors, labelTensor)
} else {
ArraySample(featureTensors, labelTensor)
}
}
def apply[T: ClassTag](
featureTensors: Array[Tensor[T]],
labelTensors: Array[Tensor[T]])(implicit ev: TensorNumeric[T]) : Sample[T] = {
if (featureTensors.exists(_.getTensorType == SparseType) ||
labelTensors.exists(_.getTensorType == SparseType)) {
TensorSample(featureTensors, labelTensors)
} else {
ArraySample(featureTensors, labelTensors)
}
}
def apply[T: ClassTag](
featureTensor: Tensor[T])(implicit ev: TensorNumeric[T]) : Sample[T] = {
if (featureTensor.getTensorType == SparseType) {
TensorSample(featureTensor)
} else {
ArraySample(featureTensor)
}
}
def apply[T: ClassTag](
featureTensors: Array[Tensor[T]])(implicit ev: TensorNumeric[T]) : Sample[T] = {
if (featureTensors.exists(_.getTensorType == SparseType)) {
TensorSample(featureTensors)
} else {
ArraySample(featureTensors)
}
}
}
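// Usage sketch (illustrative, not part of the original source; it assumes the usual BigDL
// Tensor helpers rand()/fill() and a Float numeric type):
//
//   val feature = Tensor[Float](3, 4).rand()   // dense feature tensor
//   val label   = Tensor[Float](1).fill(1f)    // one-element label tensor
//   Sample(feature, label)                     // all-dense input -> ArraySample
//
// If any feature or label tensor reports getTensorType == SparseType, the same factory
// methods above return a TensorSample instead, keeping the tensors unflattened.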
/**
 * A kind of Sample that can hold both DenseTensors and SparseTensors as features.
 * @param features feature tensors
 * @param labels label tensors
 * @tparam T numeric type
 */
class TensorSample[T: ClassTag] private[bigdl] (
val features: Array[Tensor[T]],
val labels: Array[Tensor[T]]) extends Sample[T] {
protected val featureSize = features.map(_.size())
protected val labelSize = labels.map(_.size())
  def featureLength(index: Int): Int = {
    features(index).size(1)
  }
  def labelLength(index: Int): Int = {
    labels(index).size(1)
  }
def numFeature(): Int = {
features.length
}
def numLabel(): Int = {
labels.length
}
def getFeatureSize(): Array[Array[Int]] = {
featureSize
}
def getLabelSize(): Array[Array[Int]] = {
labelSize
}
def getData(): Array[T] = {
throw new UnimplementedException()
}
override def feature()(implicit ev: TensorNumeric[T]): Tensor[T] = {
require(this.numFeature == 1, "only sample with one feature supported")
this.feature(0)
}
override def feature(index: Int)(implicit ev: TensorNumeric[T]): Tensor[T] = {
require(index < this.numFeature, "Index out of range")
this.features(index)
}
override def label()(implicit ev: TensorNumeric[T]): Tensor[T] = {
require(this.numLabel <= 1, "only sample with at most one label supported")
if (this.numLabel == 1) this.label(0) else null
}
override def label(index: Int)(implicit ev: TensorNumeric[T]): Tensor[T] = {
require(index < this.numFeature, "Index out of range")
if (index < this.numLabel) this.labels(index) else null
}
}
object TensorSample {
private def typeCheck[T: ClassTag](tensor: Tensor[T]): Unit = {
tensor.getTensorType match {
case DenseType =>
require(tensor.isContiguous(), s"tensor in TensorSample should be contiguous," +
s" Please check your input.")
case SparseType =>
case _ =>
throw new IllegalArgumentException(s"TensorSample doesn't support ${tensor.getTensorType}")
}
}
private def typeCheck[T: ClassTag](tensors: Array[Tensor[T]]): Unit = {
tensors.foreach{tensor =>
typeCheck(tensor)
}
}
def apply[T: ClassTag](
featureTensors: Array[Tensor[T]])(implicit ev: TensorNumeric[T]) : Sample[T] = {
typeCheck(featureTensors)
new TensorSample[T](featureTensors, Array())
}
def apply[T: ClassTag](
featureTensor: Tensor[T])(implicit ev: TensorNumeric[T]) : Sample[T] = {
typeCheck(featureTensor)
new TensorSample[T](Array(featureTensor), Array())
}
def apply[T: ClassTag](
featureTensors: Array[Tensor[T]],
labelTensors: Array[Tensor[T]])(implicit ev: TensorNumeric[T]) : Sample[T] = {
typeCheck(featureTensors)
typeCheck(labelTensors)
new TensorSample[T](featureTensors, labelTensors)
}
def apply[T: ClassTag](
featureTensors: Array[Tensor[T]],
labelTensor: Tensor[T])(implicit ev: TensorNumeric[T]) : Sample[T] = {
typeCheck(featureTensors)
typeCheck(labelTensor)
new TensorSample[T](featureTensors, Array(labelTensor))
}
def apply[T: ClassTag](
featureTensor: Tensor[T],
labelTensor: Tensor[T])(implicit ev: TensorNumeric[T]) : Sample[T] = {
typeCheck(featureTensor)
typeCheck(labelTensor)
new TensorSample[T](Array(featureTensor), Array(labelTensor))
}
def apply[T: ClassTag](
featureTensor: Tensor[T],
label: T)(implicit ev: TensorNumeric[T]) : Sample[T] = {
typeCheck(featureTensor)
new TensorSample[T](Array(featureTensor), Array(Tensor(1).fill(label)))
}
  /**
   * Create a TensorSample which is able to contain Tensors with different types.
   *
   * @tparam T main type
   * @param featureTensors feature tensors
   * @param labelTensors label tensors, can be null or empty, default value is null
   * @return TensorSample
   */
def create[T: ClassTag](
featureTensors: Array[Tensor[_]],
labelTensors: Array[Tensor[_]] = null)
(implicit ev: TensorNumeric[T]) : Sample[T] = {
if (labelTensors == null || labelTensors.isEmpty) {
TensorSample(wrapType(featureTensors))
} else {
TensorSample(wrapType(featureTensors), wrapType(labelTensors))
}
}
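  // Sketch of the mixed-type case handled by `create` (illustrative assumption, not from the
  // original source): features and labels may use different numeric element types, e.g. Float
  // features with an Int label tensor, wrapped into a single TensorSample[Float]:
  //   TensorSample.create[Float](Array(floatFeature), Array(intLabelTensor))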
private def wrapType[T: ClassTag](tensor: Array[Tensor[_]])
(implicit ev: TensorNumeric[T]): Array[Tensor[T]] = {
tensor.map(_.asInstanceOf[Tensor[T]])
}
}
|
intel-analytics/BigDL
|
scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/feature/dataset/Sample.scala
|
Scala
|
apache-2.0
| 17,858
|
import leon.annotation._
import leon.lang._
import leon.lang.synthesis._
object Complete {
sealed abstract class List
case class Cons(head: Int, tail: List) extends List
case object Nil extends List
def size(l: List) : Int = (l match {
case Nil => 0
case Cons(_, t) => 1 + size(t)
}) ensuring(res => res >= 0)
def content(l: List): Set[Int] = l match {
case Nil => Set.empty[Int]
case Cons(i, t) => Set(i) ++ content(t)
}
def isSorted(list : List) : Boolean = list match {
case Nil => true
case Cons(_, Nil) => true
case Cons(x1, Cons(x2, _)) if(x1 > x2) => false
case Cons(_, xs) => isSorted(xs)
}
// def insert(in1: List, v: Int): List = {
// require(isSorted(in1))
// in1 match {
// case Cons(h, t) =>
// if (v < h) {
// Cons(v, in1)
// } else if (v == h) {
// in1
// } else {
// Cons(h, insert(t, v))
// }
// case Nil =>
// Cons(v, Nil)
// }
// } ensuring { res => (content(res) == content(in1) ++ Set(v)) && isSorted(res) }
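  // Synthesis goal: `choose` asks Leon to derive an implementation of sorted insert from the
  // postcondition alone; the commented-out version above is a reference solution.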
def insert(in1: List, v: Int) = {
require(isSorted(in1))
choose { (out : List) =>
(content(out) == content(in1) ++ Set(v)) && isSorted(out) }
}
}
|
ericpony/scala-examples
|
testcases/synthesis/oopsla2013/SortedList/Insert1.scala
|
Scala
|
mit
| 1,252
|
/*******************************************************************************
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
*
* Copyright (c) 2013,2014 by Peter Pilgrim, Addiscombe, Surrey, XeNoNiQUe UK
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the GNU GPL v3.0
* which accompanies this distribution, and is available at:
* http://www.gnu.org/licenses/gpl-3.0.txt
*
* Developers:
* Peter Pilgrim -- design, development and implementation
* -- Blog: http://www.xenonique.co.uk/blog/
* -- Twitter: @peter_pilgrim
*
* Contributors:
*
*******************************************************************************/
package uk.co.xenonique.digitalone.simple
import java.io.File
import javax.inject.Inject
import org.jboss.arquillian.container.test.api.Deployment
import org.jboss.arquillian.junit.Arquillian
import org.jboss.shrinkwrap.api.ShrinkWrap
import org.jboss.shrinkwrap.api.asset.EmptyAsset
import org.jboss.shrinkwrap.api.spec.WebArchive
import org.junit.runner.RunWith
import org.junit.{Assert, Test}
import uk.co.xenonique.digitalone.GradleDependency
/**
* The type HelloTest
*
* @author Peter Pilgrim
*/
@RunWith(classOf[Arquillian])
class HelloTest {
@Inject
private var hello: Hello = _
@Test
def helloPrintsHelloWorld(): Unit = {
val msg = hello.hello()
Assert.assertEquals("Hello World", msg)
}
}
object HelloTest {
@Deployment
  def createDeployment(): WebArchive = {
    val war = ShrinkWrap.create(classOf[WebArchive], "test.war")
      .addPackage("uk.co.xenonique.digitalone")
      .addPackage("uk.co.xenonique.digitalone.simple")
      .addAsLibrary(GradleDependency.resolve("org.scala-lang:scala-library:2.11.1"))
      .addAsManifestResource(EmptyAsset.INSTANCE, "beans.xml")
    println(war.toString(true))
    war
  }
}
|
peterpilgrim/digital-scala-javaone-2014
|
src/test/scala/uk/co/xenonique/digitalone/simple/HelloTest.scala
|
Scala
|
gpl-3.0
| 1,915
|
package mqfiletransfercoordinator.actors
import akka.actor.ActorSystem
import akka.actor.Actor
import akka.actor.Props
import akka.testkit.TestKit
import org.scalatest.WordSpecLike
import org.scalatest.Matchers
import org.scalatest.BeforeAndAfterAll
import akka.testkit.ImplicitSender
import scala.concurrent.duration._
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import akka.testkit.TestProbe
import akka.camel.CamelMessage
@RunWith(classOf[JUnitRunner])
class CommandQueueConsumerSpec extends TestKit(ActorSystem("CommandQueueConsumerSpec"))
with ImplicitSender with WordSpecLike with BeforeAndAfterAll {
import CommandQueueConsumerSpec._
override def afterAll {
TestKit.shutdownActorSystem(system)
}
}
object CommandQueueConsumerSpec {
// val commandMessage = new CommandMessage(<message><type>StartTransfer</type><transferid>1234</transferid></message>)
}
|
antongerbracht/MQFileTransfer
|
MQFileTransferCoordinator/src/test/scala/mqfiletransfercoordinator/actors/CommandQueueConsumerSpec.scala
|
Scala
|
apache-2.0
| 897
|
package com.alvin.niagara.model
import java.io.ByteArrayOutputStream
import java.util
import com.sksamuel.avro4s.AvroSchema
import org.apache.avro.Schema
import org.apache.avro.generic.{GenericData, GenericDatumReader, GenericDatumWriter, GenericRecord}
import org.apache.avro.io.{DecoderFactory, EncoderFactory}
import scala.collection.JavaConversions._
import scala.io.Source
/**
* Created by alvinjin on 2017-03-13.
*/
case class Business(address: String, attributes: Option[Seq[String]], business_id: String, categories: Option[Seq[String]],
city: String, hours: Option[Seq[String]], is_open: Long, latitude: Double, longitude: Double,
name: String, neighborhood: String, postal_code: String, review_count: Long, stars: Double,
state: String, `type`: String)
object BusinessSerde {
val schema: Schema = AvroSchema[Business]
val reader = new GenericDatumReader[GenericRecord](schema)
val writer = new GenericDatumWriter[GenericRecord](schema)
def serialize(buz: Business): Array[Byte] = {
val out = new ByteArrayOutputStream()
val encoder = EncoderFactory.get.binaryEncoder(out, null)
val avroRecord = new GenericData.Record(schema)
avroRecord.put("address", buz.address)
avroRecord.put("attributes", asJavaCollection(buz.attributes.getOrElse(Seq.empty)))
avroRecord.put("business_id", buz.business_id)
avroRecord.put("categories", asJavaCollection(buz.categories.getOrElse(Seq.empty)))
avroRecord.put("city", buz.city)
avroRecord.put("hours", asJavaCollection(buz.hours.getOrElse(Seq.empty)))
avroRecord.put("is_open", buz.is_open)
avroRecord.put("latitude", buz.latitude)
avroRecord.put("longitude", buz.longitude)
avroRecord.put("name", buz.name)
avroRecord.put("neighborhood", buz.neighborhood)
avroRecord.put("postal_code", buz.postal_code)
avroRecord.put("review_count", buz.review_count)
avroRecord.put("stars", buz.stars)
avroRecord.put("state", buz.state)
avroRecord.put("type", buz.`type`)
writer.write(avroRecord, encoder)
    encoder.flush()
    out.close()
out.toByteArray
}
def deserialize(bytes: Array[Byte]): Business = {
val decoder = DecoderFactory.get.binaryDecoder(bytes, null)
val record = reader.read(null, decoder)
val attributes = collectionAsScalaIterable(record.get("attributes")
.asInstanceOf[util.Collection[AnyRef]])
.map(_.toString).toList
val categories = collectionAsScalaIterable(record.get("categories")
.asInstanceOf[util.Collection[AnyRef]])
.map(_.toString).toList
val hours = collectionAsScalaIterable(record.get("hours")
.asInstanceOf[util.Collection[AnyRef]])
.map(_.toString).toList
Business(
record.get("address").toString,
if(attributes.isEmpty) None else Some(attributes),
record.get("business_id").toString,
if(categories.isEmpty) None else Some(categories),
record.get("city").toString,
if(hours.isEmpty) None else Some(hours),
record.get("is_open").asInstanceOf[Long],
record.get("latitude").asInstanceOf[Double],
record.get("longitude").asInstanceOf[Double],
record.get("name").toString,
record.get("neighborhood").toString,
record.get("postal_code").toString,
record.get("review_count").asInstanceOf[Long],
record.get("stars").asInstanceOf[Double],
record.get("state").toString,
record.get("type").toString
)
}
  }
}
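// Round-trip sketch (illustrative, not part of the original file): for a Business with
// non-empty optional collections, deserialize(serialize(b)) should reproduce b.
//
//   val b = Business("1 Main St", Some(Seq("WiFi")), "id-1", Some(Seq("Cafe")),
//     "Toronto", Some(Seq("Mon 9-5")), 1L, 43.6, -79.4, "Alvin's", "Downtown",
//     "M5V 1A1", 10L, 4.5, "ON", "business")
//   assert(BusinessSerde.deserialize(BusinessSerde.serialize(b)) == b)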
|
AlvinCJin/Niagara
|
src/main/scala/com/alvin/niagara/model/Business.scala
|
Scala
|
apache-2.0
| 3,505
|
package com.aristocrat.mandrill.requests.Tags
import com.aristocrat.mandrill.requests.MandrillRequest
case class AllTimeSeries(key: String) extends MandrillRequest
|
aristocratic/mandrill
|
src/main/scala/com/aristocrat/mandrill/requests/Tags/AllTimeSeries.scala
|
Scala
|
mit
| 166
|
package org.openapitools.models
import io.circe._
import io.finch.circe._
import io.circe.generic.semiauto._
import io.circe.java8.time._
import org.openapitools._
/**
*
* @param Underscoreclass
*/
case class NullSCM(Underscoreclass: Option[String]
)
object NullSCM {
/**
* Creates the codec for converting NullSCM from and to JSON.
*/
implicit val decoder: Decoder[NullSCM] = deriveDecoder
implicit val encoder: ObjectEncoder[NullSCM] = deriveEncoder
}
|
cliffano/swaggy-jenkins
|
clients/scala-finch/generated/src/main/scala/org/openapitools/models/NullSCM.scala
|
Scala
|
mit
| 500
|
package org.bfn.ninetynineprobs
object P40 {
// TODO
}
|
bfontaine/99Scala
|
src/main/scala/P40.scala
|
Scala
|
mit
| 60
|
package au.com.intelix.evt.testkit
import org.scalatest.matchers.{MatchResult, Matcher}
import au.com.intelix.evt._
import scala.util.matching.Regex
trait EvtMatchers {
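  // Matcher over raised events: restrict to events matching the selection `e`, count those
  // whose fields satisfy every expected (name, value) pair (the expected value may be a Regex,
  // an EventFieldMatcher predicate, or a plain value compared by equality), and check that
  // count against the optional expected range, or just require at least one match when no
  // range is given.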
class ContainsAllFields(e: EvtSelection, count: Option[Range], values: Seq[EvtFieldValue]) extends Matcher[List[RaisedEvent]] {
def apply(all: List[RaisedEvent]) = {
val left = all.filter { x => x.event == e.e && (e.s.isEmpty || e.s.contains(x.source)) }
      val found = if (values.isEmpty) left.size
      else left.count(nextRaised =>
        values.forall { kv =>
          nextRaised.values.exists { v =>
            v._1 == kv._1 && (kv._2 match {
              case x: Regex => x.findFirstMatchIn(v._2.toString).isDefined
              case EventFieldMatcher(f) => f(v._2.toString)
              case x if x.getClass == v._2.getClass => v._2 == x
              case x => String.valueOf(v._2) == String.valueOf(x)
            })
          }
        })
MatchResult(
count match {
case Some(req) => req.contains(found)
case None => found > 0
},
s"[$e] with values $values should be raised $count times, matching found = $found, total events inspected = ${left.size}", s"Found [$e] with values $values, count=$found"
)
}
}
def haveAllValues(e: EvtSelection, count: Range, values: Seq[EvtFieldValue]) = new ContainsAllFields(e, Some(count), values)
def haveAllValues(e: EvtSelection, values: Seq[EvtFieldValue]) = new ContainsAllFields(e, None, values)
}
|
intelix/reactiveservices
|
tools/evt/src/main/scala/au/com/intelix/evt/testkit/EvtMatchers.scala
|
Scala
|
apache-2.0
| 1,513
|
package lila.game
import chess.Color.{ White, Black }
import chess.format.UciMove
import chess.Pos.piotr, chess.Role.forsyth
import chess.variant.Variant
import chess.{ History => ChessHistory, CheckCount, Castles, Role, Board, Move, Pos, Game => ChessGame, Clock, Status, Color, Piece, Mode, PositionHash }
import org.joda.time.DateTime
import scala.concurrent.duration.FiniteDuration
import lila.db.ByteArray
import lila.rating.PerfType
import lila.user.User
case class Game(
id: String,
whitePlayer: Player,
blackPlayer: Player,
binaryPieces: ByteArray,
binaryPgn: ByteArray,
status: Status,
turns: Int, // = ply
startedAtTurn: Int,
clock: Option[Clock],
castleLastMoveTime: CastleLastMoveTime,
daysPerTurn: Option[Int],
positionHashes: PositionHash = Array(),
checkCount: CheckCount = CheckCount(0, 0),
binaryMoveTimes: ByteArray = ByteArray.empty, // tenths of seconds
mode: Mode = Mode.default,
variant: Variant = Variant.default,
next: Option[String] = None,
bookmarks: Int = 0,
createdAt: DateTime = DateTime.now,
updatedAt: Option[DateTime] = None,
metadata: Metadata) {
val players = List(whitePlayer, blackPlayer)
def player(color: Color): Player = color match {
case White => whitePlayer
case Black => blackPlayer
}
def player(playerId: String): Option[Player] =
players find (_.id == playerId)
def player(user: User): Option[Player] =
players find (_ isUser user)
def player(c: Color.type => Color): Player = player(c(Color))
def isPlayerFullId(player: Player, fullId: String): Boolean =
(fullId.size == Game.fullIdSize) && player.id == (fullId drop 8)
def player: Player = player(turnColor)
def playerByUserId(userId: String): Option[Player] = players find (_.userId == Some(userId))
def opponent(p: Player): Player = opponent(p.color)
def opponent(c: Color): Player = player(!c)
lazy val firstColor = (whitePlayer before blackPlayer).fold(White, Black)
def firstPlayer = player(firstColor)
def secondPlayer = player(!firstColor)
def turnColor = Color(0 == turns % 2)
def turnOf(p: Player): Boolean = p == player
def turnOf(c: Color): Boolean = c == turnColor
def turnOf(u: User): Boolean = player(u) ?? turnOf
def playedTurns = turns - startedAtTurn
def fullIdOf(player: Player): Option[String] =
(players contains player) option s"$id${player.id}"
def fullIdOf(color: Color): String = s"$id${player(color).id}"
def tournamentId = metadata.tournamentId
def simulId = metadata.simulId
def isTournament = tournamentId.isDefined
def isSimul = simulId.isDefined
def isMandatory = isTournament || isSimul
def nonMandatory = !isMandatory
def hasChat = !isTournament && !isSimul && nonAi
// in tenths
private def lastMoveTime: Option[Long] = castleLastMoveTime.lastMoveTime map {
_.toLong + (createdAt.getMillis / 100)
} orElse updatedAt.map(_.getMillis / 100)
private def lastMoveTimeDate: Option[DateTime] = castleLastMoveTime.lastMoveTime map { lmt =>
    createdAt plus (lmt * 100L)
} orElse updatedAt
def updatedAtOrCreatedAt = updatedAt | createdAt
def lastMoveTimeInSeconds: Option[Int] = lastMoveTime.map(x => (x / 10).toInt)
// in tenths of seconds
lazy val moveTimes: Vector[Int] = BinaryFormat.moveTime read binaryMoveTimes take playedTurns
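  // Per-color move times (in tenths of a second): entries at even indices of `moveTimes`
  // belong to the side that moved first (startColor), odd indices to the other side.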
def moveTimes(color: Color): List[Int] = {
val pivot = if (color == startColor) 0 else 1
moveTimes.toList.zipWithIndex.collect {
case (e, i) if (i % 2) == pivot => e
}
}
def moveTimesInSeconds: Vector[Float] = moveTimes.map(_.toFloat / 10)
lazy val pgnMoves: PgnMoves = BinaryFormat.pgn read binaryPgn
def openingPgnMoves(nb: Int): PgnMoves = BinaryFormat.pgn.read(binaryPgn, nb)
def pgnMoves(color: Color): PgnMoves = {
val pivot = if (color == startColor) 0 else 1
pgnMoves.zipWithIndex.collect {
case (e, i) if (i % 2) == pivot => e
}
}
lazy val toChess: ChessGame = {
val pieces = BinaryFormat.piece.read(binaryPieces, variant)
ChessGame(
board = Board(pieces, toChessHistory, variant),
player = Color(0 == turns % 2),
clock = clock,
turns = turns,
startedAtTurn = startedAtTurn,
pgnMoves = pgnMoves)
}
lazy val toChessHistory = ChessHistory(
lastMove = castleLastMoveTime.lastMove map {
case (orig, dest) => UciMove(orig, dest)
},
castles = castleLastMoveTime.castles,
positionHashes = positionHashes,
checkCount = checkCount)
def update(
game: ChessGame,
move: Move,
blur: Boolean = false,
lag: Option[FiniteDuration] = None): Progress = {
val (history, situation) = (game.board.history, game.situation)
def copyPlayer(player: Player) = player.copy(
blurs = math.min(
playerMoves(player.color),
player.blurs + (blur && move.color == player.color).fold(1, 0))
)
val updated = copy(
whitePlayer = copyPlayer(whitePlayer),
blackPlayer = copyPlayer(blackPlayer),
binaryPieces = BinaryFormat.piece write game.board.pieces,
binaryPgn = BinaryFormat.pgn write game.pgnMoves,
turns = game.turns,
positionHashes = history.positionHashes,
checkCount = history.checkCount,
castleLastMoveTime = CastleLastMoveTime(
castles = history.castles,
lastMove = history.lastMove.map(_.origDest),
lastMoveTime = Some(((nowMillis - createdAt.getMillis) / 100).toInt),
check = situation.kingPos ifTrue situation.check),
binaryMoveTimes = isPgnImport.fold(
ByteArray.empty,
BinaryFormat.moveTime write lastMoveTime.fold(Vector(0)) { lmt =>
moveTimes :+ {
(nowTenths - lmt - (lag.??(_.toMillis) / 100)).toInt max 0
}
}
),
status = situation.status | status,
clock = game.clock)
val state = Event.State(
color = situation.color,
turns = game.turns,
status = (status != updated.status) option updated.status,
winner = situation.winner,
whiteOffersDraw = whitePlayer.isOfferingDraw,
blackOffersDraw = blackPlayer.isOfferingDraw)
val clockEvent = updated.clock map Event.Clock.apply orElse {
updated.playableCorrespondenceClock map Event.CorrespondenceClock.apply
}
val events = Event.Move(move, situation, state, clockEvent) ::
{
// abstraction leak, I know.
(updated.variant.threeCheck && situation.check) ?? List(Event.CheckCount(
white = updated.checkCount.white,
black = updated.checkCount.black
))
}.toList
Progress(this, updated, events)
}
def check = castleLastMoveTime.check
def updatePlayer(color: Color, f: Player => Player) = color.fold(
copy(whitePlayer = f(whitePlayer)),
copy(blackPlayer = f(blackPlayer)))
def updatePlayers(f: Player => Player) = copy(
whitePlayer = f(whitePlayer),
blackPlayer = f(blackPlayer))
def start = started.fold(this, copy(
status = Status.Started,
mode = Mode(mode.rated && userIds.distinct.size == 2),
updatedAt = DateTime.now.some
))
def correspondenceClock: Option[CorrespondenceClock] = daysPerTurn map { days =>
val increment = days * 24 * 60 * 60
val secondsLeft = lastMoveTimeDate.fold(increment) { lmd =>
(lmd.getSeconds + increment - nowSeconds).toInt max 0
}
CorrespondenceClock(
increment = increment,
whiteTime = turnColor.fold(secondsLeft, increment),
blackTime = turnColor.fold(increment, secondsLeft))
}
def playableCorrespondenceClock: Option[CorrespondenceClock] =
playable ?? correspondenceClock
def speed = chess.Speed(clock)
def perfKey = PerfPicker.key(this)
def perfType = PerfType(perfKey)
def started = status >= Status.Started
def notStarted = !started
def joinable = notStarted && !isPgnImport
def aborted = status == Status.Aborted
def playable = status < Status.Aborted && !imported
def playableEvenImported = status < Status.Aborted
def playableBy(p: Player): Boolean = playable && turnOf(p)
def playableBy(c: Color): Boolean = playableBy(player(c))
def playableByAi: Boolean = playable && player.isAi
def continuable = status != Status.Mate && status != Status.Stalemate
def aiLevel: Option[Int] = players find (_.isAi) flatMap (_.aiLevel)
def hasAi: Boolean = players exists (_.isAi)
def nonAi = !hasAi
def mapPlayers(f: Player => Player) = copy(
whitePlayer = f(whitePlayer),
blackPlayer = f(blackPlayer)
)
def playerCanOfferDraw(color: Color) =
started && playable &&
turns >= 2 &&
!player(color).isOfferingDraw &&
!(opponent(color).isAi) &&
!(playerHasOfferedDraw(color))
def playerHasOfferedDraw(color: Color) =
player(color).lastDrawOffer ?? (_ >= turns - 1)
def playerCanRematch(color: Color) =
!player(color).isOfferingRematch &&
finishedOrAborted &&
nonMandatory &&
!boosted
def playerCanProposeTakeback(color: Color) =
started && playable && !isTournament && !isSimul &&
bothPlayersHaveMoved &&
!player(color).isProposingTakeback &&
!opponent(color).isProposingTakeback
def boosted = rated && finished && bothPlayersHaveMoved && playedTurns < 10
def moretimeable(color: Color) =
playable && nonMandatory && clock.??(_ moretimeable color)
def abortable = status == Status.Started && playedTurns < 2 && nonMandatory
def berserkable = clock.??(_.berserkable) && status == Status.Started && playedTurns < 2
def goBerserk(color: Color) =
clock.ifTrue(berserkable && !player(color).berserk).map { c =>
val newClock = c berserk color
withClock(newClock).map(_.withPlayer(color, _.goBerserk)) +
Event.Clock(newClock) +
Event.Berserk(color)
}
def withPlayer(color: Color, f: Player => Player) = copy(
whitePlayer = if (color.white) f(whitePlayer) else whitePlayer,
blackPlayer = if (color.black) f(blackPlayer) else blackPlayer)
def resignable = playable && !abortable
def drawable = playable && !abortable
def finish(status: Status, winner: Option[Color]) = Progress(
this,
copy(
status = status,
whitePlayer = whitePlayer finish (winner == Some(White)),
blackPlayer = blackPlayer finish (winner == Some(Black)),
clock = clock map (_.stop)
),
List(Event.End(winner)) ::: clock.??(c => List(Event.Clock(c)))
)
def rated = mode.rated
def casual = !rated
def finished = status >= Status.Mate
def finishedOrAborted = finished || aborted
def accountable = playedTurns >= 2 || isTournament
def replayable = isPgnImport || finished
def analysable = replayable && playedTurns > 4 && Game.analysableVariants(variant)
def ratingVariant =
if (isTournament && variant == chess.variant.FromPosition) chess.variant.Standard
else variant
def fromPosition = variant == chess.variant.FromPosition || source.??(Source.Position==)
def imported = source contains Source.Import
def winner = players find (_.wins)
def loser = winner map opponent
def winnerColor: Option[Color] = winner map (_.color)
def winnerUserId: Option[String] = winner flatMap (_.userId)
def loserUserId: Option[String] = loser flatMap (_.userId)
def wonBy(c: Color): Option[Boolean] = winnerColor map (_ == c)
def lostBy(c: Color): Option[Boolean] = winnerColor map (_ != c)
def drawn = finished && winner.isEmpty
def outoftimePlayer(playerLag: Color => Int): Option[Player] =
outoftimePlayerClock(playerLag) orElse outoftimePlayerCorrespondence
private def outoftimePlayerClock(playerLag: Color => Int): Option[Player] = for {
c ← clock
if started && playable && (bothPlayersHaveMoved || isSimul)
if (!c.isRunning && !c.isInit) || c.outoftimeWithGrace(player.color, playerLag(player.color))
} yield player
private def outoftimePlayerCorrespondence: Option[Player] = for {
c ← playableCorrespondenceClock
if c outoftime player.color
} yield player
def isCorrespondence = speed == chess.Speed.Correspondence
def isSwitchable = nonAi && (isCorrespondence || isSimul)
def hasClock = clock.isDefined
def hasCorrespondenceClock = daysPerTurn.isDefined
def isUnlimited = !hasClock && !hasCorrespondenceClock
def isClockRunning = clock ?? (_.isRunning)
def withClock(c: Clock) = Progress(this, copy(clock = Some(c)))
def estimateClockTotalTime = clock.map(_.estimateTotalTime)
def estimateTotalTime = estimateClockTotalTime orElse
correspondenceClock.map(_.estimateTotalTime) getOrElse 1200
def playerWhoDidNotMove: Option[Player] = playedTurns match {
case 0 => player(White).some
case 1 => player(Black).some
case _ => none
}
def onePlayerHasMoved = playedTurns > 0
def bothPlayersHaveMoved = playedTurns > 1
def startColor = Color(startedAtTurn % 2 == 0)
def playerMoves(color: Color): Int =
if (color == startColor) (playedTurns + 1) / 2
else playedTurns / 2
def playerHasMoved(color: Color) = playerMoves(color) > 0
def playerBlurPercent(color: Color): Int = (playedTurns > 5).fold(
(player(color).blurs * 100) / playerMoves(color),
0
)
def isBeingPlayed = !isPgnImport && !finishedOrAborted
def olderThan(seconds: Int) = (updatedAt | createdAt) isBefore DateTime.now.minusSeconds(seconds)
def unplayed = !bothPlayersHaveMoved && (createdAt isBefore Game.unplayedDate)
def abandoned = (status <= Status.Started) && ((updatedAt | createdAt) isBefore hasAi.fold(Game.aiAbandonedDate, Game.abandonedDate))
def forecastable = started && playable && isCorrespondence && !hasAi
def hasBookmarks = bookmarks > 0
def showBookmarks = hasBookmarks ?? bookmarks.toString
def userIds = playerMaps(_.userId)
def userRatings = playerMaps(_.rating)
def averageUsersRating = userRatings match {
case a :: b :: Nil => Some((a + b) / 2)
case a :: Nil => Some((a + 1500) / 2)
case _ => None
}
def withTournamentId(id: String) = this.copy(
metadata = metadata.copy(tournamentId = id.some))
def withSimulId(id: String) = this.copy(
metadata = metadata.copy(simulId = id.some))
def withId(newId: String) = this.copy(id = newId)
def source = metadata.source
def pgnImport = metadata.pgnImport
def isPgnImport = pgnImport.isDefined
def resetTurns = copy(turns = 0, startedAtTurn = 0)
lazy val opening: Option[chess.Opening] =
if (fromPosition || !Game.openingSensiblevariants(variant)) none
else chess.OpeningExplorer openingOf pgnMoves
private def playerMaps[A](f: Player => Option[A]): List[A] = players flatMap { f(_) }
}
object Game {
val openingSensiblevariants: Set[Variant] = Set(
chess.variant.Standard,
chess.variant.ThreeCheck,
chess.variant.KingOfTheHill)
val divisionSensiblevariants: Set[Variant] = Set(
chess.variant.Standard,
chess.variant.Chess960,
chess.variant.ThreeCheck,
chess.variant.KingOfTheHill,
chess.variant.Antichess,
chess.variant.FromPosition)
val analysableVariants: Set[Variant] = Set(
chess.variant.Standard,
chess.variant.Chess960,
chess.variant.KingOfTheHill,
chess.variant.ThreeCheck,
chess.variant.FromPosition)
val unanalysableVariants: Set[Variant] = Variant.all.toSet -- analysableVariants
val variantsWhereWhiteIsBetter: Set[Variant] = Set(
chess.variant.ThreeCheck,
chess.variant.Atomic,
chess.variant.Horde,
chess.variant.Antichess)
val gameIdSize = 8
val playerIdSize = 4
val fullIdSize = 12
val tokenSize = 4
val unplayedHours = 24
def unplayedDate = DateTime.now minusHours unplayedHours
val abandonedDays = 15
def abandonedDate = DateTime.now minusDays abandonedDays
val aiAbandonedDays = 3
  def aiAbandonedDate = DateTime.now minusDays aiAbandonedDays
def takeGameId(fullId: String) = fullId take gameIdSize
def takePlayerId(fullId: String) = fullId drop gameIdSize
def make(
game: ChessGame,
whitePlayer: Player,
blackPlayer: Player,
mode: Mode,
variant: Variant,
source: Source,
pgnImport: Option[PgnImport],
daysPerTurn: Option[Int] = None): Game = Game(
id = IdGenerator.game,
whitePlayer = whitePlayer,
blackPlayer = blackPlayer,
binaryPieces = if (game.isStandardInit) BinaryFormat.piece.standard
else BinaryFormat.piece write game.board.pieces,
binaryPgn = ByteArray.empty,
status = Status.Created,
turns = game.turns,
startedAtTurn = game.startedAtTurn,
clock = game.clock,
castleLastMoveTime = CastleLastMoveTime.init.copy(castles = game.board.history.castles),
daysPerTurn = daysPerTurn,
mode = mode,
variant = variant,
metadata = Metadata(
source = source.some,
pgnImport = pgnImport,
tournamentId = none,
simulId = none,
tvAt = none,
analysed = false),
createdAt = DateTime.now)
private[game] lazy val tube = lila.db.BsTube(BSONHandlers.gameBSONHandler)
object BSONFields {
val id = "_id"
val whitePlayer = "p0"
val blackPlayer = "p1"
val playerIds = "is"
val playerUids = "us"
val playingUids = "pl"
val binaryPieces = "ps"
val binaryPgn = "pg"
val status = "s"
val turns = "t"
val startedAtTurn = "st"
val clock = "c"
val positionHashes = "ph"
val checkCount = "cc"
val castleLastMoveTime = "cl"
val daysPerTurn = "cd"
val moveTimes = "mt"
val rated = "ra"
val analysed = "an"
val variant = "v"
val next = "ne"
val bookmarks = "bm"
val createdAt = "ca"
val updatedAt = "ua"
val source = "so"
val pgnImport = "pgni"
val tournamentId = "tid"
val simulId = "sid"
val tvAt = "tv"
val winnerColor = "w"
val winnerId = "wid"
val initialFen = "if"
val checkAt = "ck"
}
}
case class CastleLastMoveTime(
castles: Castles,
lastMove: Option[(Pos, Pos)],
lastMoveTime: Option[Int], // tenths of seconds since game creation
check: Option[Pos]) {
def lastMoveString = lastMove map { case (a, b) => s"$a$b" }
}
object CastleLastMoveTime {
def init = CastleLastMoveTime(Castles.all, None, None, None)
import reactivemongo.bson._
import lila.db.ByteArray.ByteArrayBSONHandler
implicit val castleLastMoveTimeBSONHandler = new BSONHandler[BSONBinary, CastleLastMoveTime] {
def read(bin: BSONBinary) = BinaryFormat.castleLastMoveTime read {
ByteArrayBSONHandler read bin
}
def write(clmt: CastleLastMoveTime) = ByteArrayBSONHandler write {
BinaryFormat.castleLastMoveTime write clmt
}
}
}
|
samuel-soubeyran/lila
|
modules/game/src/main/Game.scala
|
Scala
|
mit
| 18,655
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.parquet
import java.io.IOException
import java.net.URI
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.parallel.ForkJoinTaskSupport
import scala.concurrent.forkjoin.ForkJoinPool
import scala.util.{Failure, Try}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileStatus, Path}
import org.apache.hadoop.mapreduce._
import org.apache.hadoop.mapreduce.lib.input.FileSplit
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
import org.apache.parquet.filter2.compat.FilterCompat
import org.apache.parquet.filter2.predicate.FilterApi
import org.apache.parquet.format.converter.ParquetMetadataConverter.SKIP_ROW_GROUPS
import org.apache.parquet.hadoop._
import org.apache.parquet.hadoop.codec.CodecConfig
import org.apache.parquet.hadoop.util.ContextUtil
import org.apache.parquet.schema.MessageType
import org.apache.spark.{SparkException, TaskContext}
import org.apache.spark.internal.Logging
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.codegen.GenerateUnsafeProjection
import org.apache.spark.sql.catalyst.parser.LegacyTypeStringParser
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.execution.vectorized.OnHeapColumnVector
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.sources._
import org.apache.spark.sql.types._
import org.apache.spark.util.{SerializableConfiguration, ThreadUtils}
class ParquetFileFormat
extends FileFormat
with DataSourceRegister
with Logging
with Serializable {
// Hold a reference to the (serializable) singleton instance of ParquetLogRedirector. This
// ensures the ParquetLogRedirector class is initialized whether an instance of ParquetFileFormat
// is constructed or deserialized. Do not heed the Scala compiler's warning about an unused field
// here.
private val parquetLogRedirector = ParquetLogRedirector.INSTANCE
override def shortName(): String = "parquet"
override def toString: String = "Parquet"
override def hashCode(): Int = getClass.hashCode()
override def equals(other: Any): Boolean = other.isInstanceOf[ParquetFileFormat]
override def prepareWrite(
sparkSession: SparkSession,
job: Job,
options: Map[String, String],
dataSchema: StructType): OutputWriterFactory = {
val parquetOptions = new ParquetOptions(options, sparkSession.sessionState.conf)
val conf = ContextUtil.getConfiguration(job)
val committerClass =
conf.getClass(
SQLConf.PARQUET_OUTPUT_COMMITTER_CLASS.key,
classOf[ParquetOutputCommitter],
classOf[ParquetOutputCommitter])
if (conf.get(SQLConf.PARQUET_OUTPUT_COMMITTER_CLASS.key) == null) {
logInfo("Using default output committer for Parquet: " +
classOf[ParquetOutputCommitter].getCanonicalName)
} else {
logInfo("Using user defined output committer for Parquet: " + committerClass.getCanonicalName)
}
conf.setClass(
SQLConf.OUTPUT_COMMITTER_CLASS.key,
committerClass,
classOf[ParquetOutputCommitter])
// We're not really using `ParquetOutputFormat[Row]` for writing data here, because we override
// it in `ParquetOutputWriter` to support appending and dynamic partitioning. The reason why
// we set it here is to setup the output committer class to `ParquetOutputCommitter`, which is
// bundled with `ParquetOutputFormat[Row]`.
job.setOutputFormatClass(classOf[ParquetOutputFormat[Row]])
ParquetOutputFormat.setWriteSupportClass(job, classOf[ParquetWriteSupport])
// We want to clear this temporary metadata from saving into Parquet file.
// This metadata is only useful for detecting optional columns when pushdowning filters.
ParquetWriteSupport.setSchema(dataSchema, conf)
// Sets flags for `CatalystSchemaConverter` (which converts Catalyst schema to Parquet schema)
// and `CatalystWriteSupport` (writing actual rows to Parquet files).
conf.set(
SQLConf.PARQUET_BINARY_AS_STRING.key,
sparkSession.sessionState.conf.isParquetBinaryAsString.toString)
conf.set(
SQLConf.PARQUET_INT96_AS_TIMESTAMP.key,
sparkSession.sessionState.conf.isParquetINT96AsTimestamp.toString)
conf.set(
SQLConf.PARQUET_WRITE_LEGACY_FORMAT.key,
sparkSession.sessionState.conf.writeLegacyParquetFormat.toString)
conf.set(
SQLConf.PARQUET_INT64_AS_TIMESTAMP_MILLIS.key,
sparkSession.sessionState.conf.isParquetINT64AsTimestampMillis.toString)
// Sets compression scheme
conf.set(ParquetOutputFormat.COMPRESSION, parquetOptions.compressionCodecClassName)
// SPARK-15719: Disables writing Parquet summary files by default.
if (conf.get(ParquetOutputFormat.ENABLE_JOB_SUMMARY) == null) {
conf.setBoolean(ParquetOutputFormat.ENABLE_JOB_SUMMARY, false)
}
new OutputWriterFactory {
// This OutputWriterFactory instance is deserialized when writing Parquet files on the
// executor side without constructing or deserializing ParquetFileFormat. Therefore, we hold
// another reference to ParquetLogRedirector.INSTANCE here to ensure the latter class is
// initialized.
private val parquetLogRedirector = ParquetLogRedirector.INSTANCE
override def newInstance(
path: String,
dataSchema: StructType,
context: TaskAttemptContext): OutputWriter = {
new ParquetOutputWriter(path, context)
}
override def getFileExtension(context: TaskAttemptContext): String = {
CodecConfig.from(context).getCodec.getExtension + ".parquet"
}
}
}
override def inferSchema(
sparkSession: SparkSession,
parameters: Map[String, String],
files: Seq[FileStatus]): Option[StructType] = {
val parquetOptions = new ParquetOptions(parameters, sparkSession.sessionState.conf)
// Should we merge schemas from all Parquet part-files?
val shouldMergeSchemas = parquetOptions.mergeSchema
val mergeRespectSummaries = sparkSession.sessionState.conf.isParquetSchemaRespectSummaries
val filesByType = splitFiles(files)
// Sees which file(s) we need to touch in order to figure out the schema.
//
// Always tries the summary files first if users don't require a merged schema. In this case,
// "_common_metadata" is more preferable than "_metadata" because it doesn't contain row
// groups information, and could be much smaller for large Parquet files with lots of row
// groups. If no summary file is available, falls back to some random part-file.
//
// NOTE: Metadata stored in the summary files are merged from all part-files. However, for
// user defined key-value metadata (in which we store Spark SQL schema), Parquet doesn't know
// how to merge them correctly if some key is associated with different values in different
// part-files. When this happens, Parquet simply gives up generating the summary file. This
// implies that if a summary file presents, then:
//
// 1. Either all part-files have exactly the same Spark SQL schema, or
// 2. Some part-files don't contain Spark SQL schema in the key-value metadata at all (thus
// their schemas may differ from each other).
//
// Here we tend to be pessimistic and take the second case into account. Basically this means
// we can't trust the summary files if users require a merged schema, and must touch all part-
// files to do the merge.
val filesToTouch =
if (shouldMergeSchemas) {
// Also includes summary files, 'cause there might be empty partition directories.
// If mergeRespectSummaries config is true, we assume that all part-files are the same for
// their schema with summary files, so we ignore them when merging schema.
// If the config is disabled, which is the default setting, we merge all part-files.
// In this mode, we only need to merge schemas contained in all those summary files.
// You should enable this configuration only if you are very sure that for the parquet
// part-files to read there are corresponding summary files containing correct schema.
      // As filed in SPARK-11500, the order of files to touch matters, since it might affect
// the ordering of the output columns. There are several things to mention here.
//
// 1. If mergeRespectSummaries config is false, then it merges schemas by reducing from
// the first part-file so that the columns of the lexicographically first file show
// first.
//
// 2. If mergeRespectSummaries config is true, then there should be, at least,
// "_metadata"s for all given files, so that we can ensure the columns of
// the lexicographically first file show first.
//
// 3. If shouldMergeSchemas is false, but when multiple files are given, there is
// no guarantee of the output order, since there might not be a summary file for the
// lexicographically first file, which ends up putting ahead the columns of
// the other files. However, this should be okay since not enabling
// shouldMergeSchemas means (assumes) all the files have the same schemas.
val needMerged: Seq[FileStatus] =
if (mergeRespectSummaries) {
Seq.empty
} else {
filesByType.data
}
needMerged ++ filesByType.metadata ++ filesByType.commonMetadata
} else {
// Tries any "_common_metadata" first. Parquet files written by old versions or Parquet
// don't have this.
filesByType.commonMetadata.headOption
// Falls back to "_metadata"
.orElse(filesByType.metadata.headOption)
// Summary file(s) not found, the Parquet file is either corrupted, or different part-
// files contain conflicting user defined metadata (two or more values are associated
// with a same key in different files). In either case, we fall back to any of the
// first part-file, and just assume all schemas are consistent.
.orElse(filesByType.data.headOption)
.toSeq
}
ParquetFileFormat.mergeSchemasInParallel(filesToTouch, sparkSession)
}
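  // Usage sketch for the schema-merging behaviour described above (illustrative, not part of
  // this file): merging is requested per read via the "mergeSchema" option, or globally via
  // the spark.sql.parquet.mergeSchema configuration.
  //   spark.read.option("mergeSchema", "true").parquet("/path/to/parquet/table")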
case class FileTypes(
data: Seq[FileStatus],
metadata: Seq[FileStatus],
commonMetadata: Seq[FileStatus])
private def splitFiles(allFiles: Seq[FileStatus]): FileTypes = {
val leaves = allFiles.toArray.sortBy(_.getPath.toString)
FileTypes(
data = leaves.filterNot(f => isSummaryFile(f.getPath)),
metadata =
leaves.filter(_.getPath.getName == ParquetFileWriter.PARQUET_METADATA_FILE),
commonMetadata =
leaves.filter(_.getPath.getName == ParquetFileWriter.PARQUET_COMMON_METADATA_FILE))
}
private def isSummaryFile(file: Path): Boolean = {
file.getName == ParquetFileWriter.PARQUET_COMMON_METADATA_FILE ||
file.getName == ParquetFileWriter.PARQUET_METADATA_FILE
}
/**
* Returns whether the reader will return the rows as batch or not.
*/
override def supportBatch(sparkSession: SparkSession, schema: StructType): Boolean = {
val conf = sparkSession.sessionState.conf
conf.parquetVectorizedReaderEnabled && conf.wholeStageEnabled &&
schema.length <= conf.wholeStageMaxNumFields &&
schema.forall(_.dataType.isInstanceOf[AtomicType])
}
override def vectorTypes(
requiredSchema: StructType,
partitionSchema: StructType): Option[Seq[String]] = {
Option(Seq.fill(requiredSchema.fields.length + partitionSchema.fields.length)(
classOf[OnHeapColumnVector].getName))
}
override def isSplitable(
sparkSession: SparkSession,
options: Map[String, String],
path: Path): Boolean = {
true
}
override def buildReaderWithPartitionValues(
sparkSession: SparkSession,
dataSchema: StructType,
partitionSchema: StructType,
requiredSchema: StructType,
filters: Seq[Filter],
options: Map[String, String],
hadoopConf: Configuration): (PartitionedFile) => Iterator[InternalRow] = {
hadoopConf.set(ParquetInputFormat.READ_SUPPORT_CLASS, classOf[ParquetReadSupport].getName)
hadoopConf.set(
ParquetReadSupport.SPARK_ROW_REQUESTED_SCHEMA,
ParquetSchemaConverter.checkFieldNames(requiredSchema).json)
hadoopConf.set(
ParquetWriteSupport.SPARK_ROW_SCHEMA,
ParquetSchemaConverter.checkFieldNames(requiredSchema).json)
ParquetWriteSupport.setSchema(requiredSchema, hadoopConf)
// Sets flags for `CatalystSchemaConverter`
hadoopConf.setBoolean(
SQLConf.PARQUET_BINARY_AS_STRING.key,
sparkSession.sessionState.conf.isParquetBinaryAsString)
hadoopConf.setBoolean(
SQLConf.PARQUET_INT96_AS_TIMESTAMP.key,
sparkSession.sessionState.conf.isParquetINT96AsTimestamp)
hadoopConf.setBoolean(
SQLConf.PARQUET_INT64_AS_TIMESTAMP_MILLIS.key,
sparkSession.sessionState.conf.isParquetINT64AsTimestampMillis)
// Try to push down filters when filter push-down is enabled.
val pushed =
if (sparkSession.sessionState.conf.parquetFilterPushDown) {
filters
// Collects all converted Parquet filter predicates. Notice that not all predicates can be
// converted (`ParquetFilters.createFilter` returns an `Option`). That's why a `flatMap`
// is used here.
.flatMap(ParquetFilters.createFilter(requiredSchema, _))
.reduceOption(FilterApi.and)
} else {
None
}
val broadcastedHadoopConf =
sparkSession.sparkContext.broadcast(new SerializableConfiguration(hadoopConf))
// TODO: if you move this into the closure it reverts to the default values.
// If true, enable using the custom RecordReader for parquet. This only works for
// a subset of the types (no complex types).
val resultSchema = StructType(partitionSchema.fields ++ requiredSchema.fields)
val enableVectorizedReader: Boolean =
sparkSession.sessionState.conf.parquetVectorizedReaderEnabled &&
resultSchema.forall(_.dataType.isInstanceOf[AtomicType])
// Whole stage codegen (PhysicalRDD) is able to deal with batches directly
val returningBatch = supportBatch(sparkSession, resultSchema)
(file: PartitionedFile) => {
assert(file.partitionValues.numFields == partitionSchema.size)
val fileSplit =
new FileSplit(new Path(new URI(file.filePath)), file.start, file.length, Array.empty)
val split =
new org.apache.parquet.hadoop.ParquetInputSplit(
fileSplit.getPath,
fileSplit.getStart,
fileSplit.getStart + fileSplit.getLength,
fileSplit.getLength,
fileSplit.getLocations,
null)
val attemptId = new TaskAttemptID(new TaskID(new JobID(), TaskType.MAP, 0), 0)
val hadoopAttemptContext =
new TaskAttemptContextImpl(broadcastedHadoopConf.value.value, attemptId)
// Try to push down filters when filter push-down is enabled.
// Notice: This push-down is RowGroups level, not individual records.
if (pushed.isDefined) {
ParquetInputFormat.setFilterPredicate(hadoopAttemptContext.getConfiguration, pushed.get)
}
val parquetReader = if (enableVectorizedReader) {
val vectorizedReader = new VectorizedParquetRecordReader()
vectorizedReader.initialize(split, hadoopAttemptContext)
logDebug(s"Appending $partitionSchema ${file.partitionValues}")
vectorizedReader.initBatch(partitionSchema, file.partitionValues)
if (returningBatch) {
vectorizedReader.enableReturningBatches()
}
vectorizedReader
} else {
logDebug(s"Falling back to parquet-mr")
// ParquetRecordReader returns UnsafeRow
val reader = pushed match {
case Some(filter) =>
new ParquetRecordReader[UnsafeRow](
new ParquetReadSupport,
FilterCompat.get(filter, null))
case _ =>
new ParquetRecordReader[UnsafeRow](new ParquetReadSupport)
}
reader.initialize(split, hadoopAttemptContext)
reader
}
val iter = new RecordReaderIterator(parquetReader)
Option(TaskContext.get()).foreach(_.addTaskCompletionListener(_ => iter.close()))
// UnsafeRowParquetRecordReader appends the columns internally to avoid another copy.
if (parquetReader.isInstanceOf[VectorizedParquetRecordReader] &&
enableVectorizedReader) {
iter.asInstanceOf[Iterator[InternalRow]]
} else {
val fullSchema = requiredSchema.toAttributes ++ partitionSchema.toAttributes
val joinedRow = new JoinedRow()
val appendPartitionColumns = GenerateUnsafeProjection.generate(fullSchema, fullSchema)
        // This is a horrible erasure hack... if we type the iterator above, then it actually checks
// the type in next() and we get a class cast exception. If we make that function return
// Object, then we can defer the cast until later!
if (partitionSchema.length == 0) {
          // There are no partition columns
iter.asInstanceOf[Iterator[InternalRow]]
} else {
iter.asInstanceOf[Iterator[InternalRow]]
.map(d => appendPartitionColumns(joinedRow(d, file.partitionValues)))
}
}
}
}
}
object ParquetFileFormat extends Logging {
private[parquet] def readSchema(
footers: Seq[Footer], sparkSession: SparkSession): Option[StructType] = {
def parseParquetSchema(schema: MessageType): StructType = {
val converter = new ParquetSchemaConverter(
sparkSession.sessionState.conf.isParquetBinaryAsString,
        sparkSession.sessionState.conf.isParquetINT96AsTimestamp,
sparkSession.sessionState.conf.writeLegacyParquetFormat,
sparkSession.sessionState.conf.isParquetINT64AsTimestampMillis)
converter.convert(schema)
}
val seen = mutable.HashSet[String]()
val finalSchemas: Seq[StructType] = footers.flatMap { footer =>
val metadata = footer.getParquetMetadata.getFileMetaData
val serializedSchema = metadata
.getKeyValueMetaData
.asScala.toMap
.get(ParquetReadSupport.SPARK_METADATA_KEY)
if (serializedSchema.isEmpty) {
// Falls back to Parquet schema if no Spark SQL schema found.
Some(parseParquetSchema(metadata.getSchema))
} else if (!seen.contains(serializedSchema.get)) {
seen += serializedSchema.get
// Don't throw even if we failed to parse the serialized Spark schema. Just fallback to
// whatever is available.
Some(Try(DataType.fromJson(serializedSchema.get))
.recover { case _: Throwable =>
logInfo(
"Serialized Spark schema in Parquet key-value metadata is not in JSON format, " +
"falling back to the deprecated DataType.fromCaseClassString parser.")
LegacyTypeStringParser.parse(serializedSchema.get)
}
.recover { case cause: Throwable =>
logWarning(
s"""Failed to parse serialized Spark schema in Parquet key-value metadata:
               |\t$serializedSchema
""".stripMargin,
cause)
}
.map(_.asInstanceOf[StructType])
.getOrElse {
// Falls back to Parquet schema if Spark SQL schema can't be parsed.
parseParquetSchema(metadata.getSchema)
})
} else {
None
}
}
finalSchemas.reduceOption { (left, right) =>
try left.merge(right) catch { case e: Throwable =>
throw new SparkException(s"Failed to merge incompatible schemas $left and $right", e)
}
}
}
/**
* Reads Parquet footers in multi-threaded manner.
* If the config "spark.sql.files.ignoreCorruptFiles" is set to true, we will ignore the corrupted
* files when reading footers.
*/
private[parquet] def readParquetFootersInParallel(
conf: Configuration,
partFiles: Seq[FileStatus],
ignoreCorruptFiles: Boolean): Seq[Footer] = {
val parFiles = partFiles.par
val pool = ThreadUtils.newForkJoinPool("readingParquetFooters", 8)
parFiles.tasksupport = new ForkJoinTaskSupport(pool)
try {
parFiles.flatMap { currentFile =>
try {
// Skips row group information since we only need the schema.
// ParquetFileReader.readFooter throws RuntimeException, instead of IOException,
// when it can't read the footer.
Some(new Footer(currentFile.getPath(),
ParquetFileReader.readFooter(
conf, currentFile, SKIP_ROW_GROUPS)))
} catch { case e: RuntimeException =>
if (ignoreCorruptFiles) {
logWarning(s"Skipped the footer in the corrupted file: $currentFile", e)
None
} else {
throw new IOException(s"Could not read footer for file: $currentFile", e)
}
}
}.seq
} finally {
pool.shutdown()
}
}
/**
* Figures out a merged Parquet schema with a distributed Spark job.
*
* Note that locality is not taken into consideration here because:
*
* 1. For a single Parquet part-file, in most cases the footer only resides in the last block of
* that file. Thus we only need to retrieve the location of the last block. However, Hadoop
* `FileSystem` only provides API to retrieve locations of all blocks, which can be
* potentially expensive.
*
* 2. This optimization is mainly useful for S3, where file metadata operations can be pretty
* slow. And basically locality is not available when using S3 (you can't run computation on
* S3 nodes).
*/
def mergeSchemasInParallel(
filesToTouch: Seq[FileStatus],
sparkSession: SparkSession): Option[StructType] = {
val assumeBinaryIsString = sparkSession.sessionState.conf.isParquetBinaryAsString
val assumeInt96IsTimestamp = sparkSession.sessionState.conf.isParquetINT96AsTimestamp
val writeTimestampInMillis = sparkSession.sessionState.conf.isParquetINT64AsTimestampMillis
val writeLegacyParquetFormat = sparkSession.sessionState.conf.writeLegacyParquetFormat
val serializedConf = new SerializableConfiguration(sparkSession.sessionState.newHadoopConf())
// !! HACK ALERT !!
//
// Parquet requires `FileStatus`es to read footers. Here we try to send cached `FileStatus`es
// to executor side to avoid fetching them again. However, `FileStatus` is not `Serializable`
// but only `Writable`. What makes it worse, for some reason, `FileStatus` doesn't play well
// with `SerializableWritable[T]` and always causes a weird `IllegalStateException`. These
    // facts virtually prevent us from serializing `FileStatus`es.
//
// Since Parquet only relies on path and length information of those `FileStatus`es to read
// footers, here we just extract them (which can be easily serialized), send them to executor
// side, and reassemble fake `FileStatus`es there.
val partialFileStatusInfo = filesToTouch.map(f => (f.getPath.toString, f.getLen))
// Set the number of partitions to prevent following schema reads from generating many tasks
// in case of a small number of parquet files.
val numParallelism = Math.min(Math.max(partialFileStatusInfo.size, 1),
sparkSession.sparkContext.defaultParallelism)
val ignoreCorruptFiles = sparkSession.sessionState.conf.ignoreCorruptFiles
// Issues a Spark job to read Parquet schema in parallel.
val partiallyMergedSchemas =
sparkSession
.sparkContext
.parallelize(partialFileStatusInfo, numParallelism)
.mapPartitions { iterator =>
// Reassembles fake `FileStatus`es from the serialized path and length information.
val fakeFileStatuses = iterator.map { case (path, length) =>
new FileStatus(length, false, 0, 0, 0, 0, null, null, null, new Path(path))
}.toSeq
// Reads footers in a multi-threaded manner within each task
val footers =
ParquetFileFormat.readParquetFootersInParallel(
serializedConf.value, fakeFileStatuses, ignoreCorruptFiles)
// Converter used to convert Parquet `MessageType` to Spark SQL `StructType`
val converter =
new ParquetSchemaConverter(
assumeBinaryIsString = assumeBinaryIsString,
assumeInt96IsTimestamp = assumeInt96IsTimestamp,
writeLegacyParquetFormat = writeLegacyParquetFormat,
writeTimestampInMillis = writeTimestampInMillis)
if (footers.isEmpty) {
Iterator.empty
} else {
var mergedSchema = ParquetFileFormat.readSchemaFromFooter(footers.head, converter)
footers.tail.foreach { footer =>
val schema = ParquetFileFormat.readSchemaFromFooter(footer, converter)
try {
mergedSchema = mergedSchema.merge(schema)
} catch { case cause: SparkException =>
throw new SparkException(
s"Failed merging schema of file ${footer.getFile}:\\n${schema.treeString}", cause)
}
}
Iterator.single(mergedSchema)
}
}.collect()
if (partiallyMergedSchemas.isEmpty) {
None
} else {
var finalSchema = partiallyMergedSchemas.head
partiallyMergedSchemas.tail.foreach { schema =>
try {
finalSchema = finalSchema.merge(schema)
} catch { case cause: SparkException =>
throw new SparkException(
s"Failed merging schema:\\n${schema.treeString}", cause)
}
}
Some(finalSchema)
}
}
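// Illustrative usage sketch (assumes an active SparkSession `spark` and a Seq[FileStatus]
// `partFiles` for the part-files of one Parquet table; not part of the original source):
//
//   val merged: Option[StructType] =
//     ParquetFileFormat.mergeSchemasInParallel(partFiles, spark)
//   merged.foreach(schema => logInfo(s"Merged Parquet schema:\n${schema.treeString}"))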
/**
* Reads Spark SQL schema from a Parquet footer. If a valid serialized Spark SQL schema string
* can be found in the file metadata, returns the deserialized [[StructType]], otherwise, returns
* a [[StructType]] converted from the [[MessageType]] stored in this footer.
*/
def readSchemaFromFooter(
footer: Footer, converter: ParquetSchemaConverter): StructType = {
val fileMetaData = footer.getParquetMetadata.getFileMetaData
fileMetaData
.getKeyValueMetaData
.asScala.toMap
.get(ParquetReadSupport.SPARK_METADATA_KEY)
.flatMap(deserializeSchemaString)
.getOrElse(converter.convert(fileMetaData.getSchema))
}
private def deserializeSchemaString(schemaString: String): Option[StructType] = {
// Tries to deserialize the schema string as JSON first, then falls back to the case class
// string parser (data generated by older versions of Spark SQL uses this format).
Try(DataType.fromJson(schemaString).asInstanceOf[StructType]).recover {
case _: Throwable =>
logInfo(
"Serialized Spark schema in Parquet key-value metadata is not in JSON format, " +
"falling back to the deprecated DataType.fromCaseClassString parser.")
LegacyTypeStringParser.parse(schemaString).asInstanceOf[StructType]
}.recoverWith {
case cause: Throwable =>
logWarning(
"Failed to parse and ignored serialized Spark schema in " +
s"Parquet key-value metadata:\\n\\t$schemaString", cause)
Failure(cause)
}.toOption
}
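// Illustrative sketch of the two formats handled above (not part of the original source):
// the JSON form produced by `StructType.json`, e.g.
//
//   {"type":"struct","fields":[{"name":"a","type":"integer","nullable":true,"metadata":{}}]}
//
// is parsed by `DataType.fromJson`, while schema strings written by very old Spark SQL
// versions use a case-class-like rendering (roughly
// `StructType(List(StructField(a,IntegerType,true)))`) and are handled by the deprecated
// `LegacyTypeStringParser.parse` fallback.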
}
minixalpha/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFileFormat.scala | Scala | apache-2.0 | 28,500 bytes
package breeze.stats.distributions
/*
Copyright 2009 David Hall, Daniel Ramage
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import org.scalacheck._
import org.scalatest._
import org.scalatest.funsuite._
import org.scalatestplus.scalacheck._
class PoissonTest extends AnyFunSuite with Checkers with MomentsTestBase[Int] with ExpFamTest[Poisson, Int] {
import org.scalacheck.Arbitrary.arbitrary
val expFam: Poisson.type = Poisson
implicit def arbDistr: Arbitrary[Poisson] = Arbitrary {
for (p <- arbitrary[Double].map { _.abs % 5 + 1 }) yield new Poisson(p)(RandBasis.mt0)
}
def arbParameter = Arbitrary(arbitrary[Double].map(x => math.abs(x) % 5 + 0.5))
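// Parameters are compared with a relative tolerance of ~10% (absolute 1E-4 when the
// reference value is zero), which the shared ExpFam/Moments test harness uses to accept
// fitted Poisson rates.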
def paramsClose(p: Double, b: Double) = if (b == 0.0) p < 1E-4 else (p - b).abs / b.abs.max(1E-4) < 1E-1
def asDouble(x: Int) = x.toDouble
def fromDouble(x: Double) = x.toInt
override val VARIANCE_TOLERANCE: Double = 1E-1
val TOL = 1E-1
test("cdf") {
val mean = 5.0
import breeze.numerics._
val poi = new Poisson(mean)
assert(closeTo(poi.cdf(0), exp(-mean)), poi.cdf(0) + " " + exp(-mean))
}
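// Illustrative sketch (not part of the original suite): since P(X = 0) = exp(-lambda) for a
// Poisson(lambda), the cdf at 0 equals the probability mass at 0, so a hypothetical companion
// check could read:
//
//   test("pmf at zero matches cdf at zero") {
//     import breeze.numerics._
//     val poi = new Poisson(5.0)
//     assert(closeTo(poi.probabilityOf(0), poi.cdf(0)))
//   }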
override type Distr = Poisson
}
scalanlp/breeze | math/src/test/scala/breeze/stats/distributions/PoissonTest.scala | Scala | apache-2.0 | 1,621 bytes