code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package com.evenfinancial.sbt.secrets.util
import java.util.Base64
import java.security.MessageDigest
import javax.crypto.Cipher
import javax.crypto.spec.{IvParameterSpec, SecretKeySpec}
// Basically copied from the Play framework's Crypto library.
// @see https://github.com/playframework/playframework/blob/2.4.x/framework/src/play/src/main/scala/play/api/libs/Crypto.scala
object AesUtil {

  /**
   * Encrypts `data` with AES/CTR using a key derived from `dataKey`.
   * The cipher generates a random IV, which is prepended to the ciphertext;
   * the combined bytes are returned Base64-encoded.
   */
  def encrypt(data: String, dataKey: String): String = {
    val cipher = buildCipher()
    cipher.init(Cipher.ENCRYPT_MODE, buildSecretKey(dataKey))
    val ciphertext = cipher.doFinal(data.getBytes("UTF-8"))
    Base64.getEncoder.encodeToString(cipher.getIV() ++ ciphertext)
  }

  /**
   * Decrypts a Base64 payload produced by [[encrypt]]: the first block-size
   * bytes are the IV, everything after is the ciphertext.
   */
  def decrypt(data: String, dataKey: String): String = {
    val raw = Base64.getDecoder.decode(data)
    val cipher = buildCipher()
    val (iv, ciphertext) = raw.splitAt(cipher.getBlockSize)
    cipher.init(Cipher.DECRYPT_MODE, buildSecretKey(dataKey), new IvParameterSpec(iv))
    new String(cipher.doFinal(ciphertext), "utf-8")
  }

  // AES in counter (CTR) mode; CTR is a stream mode, so no padding is needed.
  private def buildCipher() = Cipher.getInstance("AES/CTR/NoPadding")

  /**
   * Derives an AES key: SHA-256 of `dataKey`, truncated to the maximum key
   * length allowed by the installed JCE policy.
   */
  private def buildSecretKey(dataKey: String) = {
    val algorithm = "AES"
    val md = MessageDigest.getInstance("SHA-256")
    md.update(dataKey.getBytes("utf-8"))
    // getMaxAllowedKeyLength reports bits; divide by 8 bits per byte.
    val keyLengthBytes = Cipher.getMaxAllowedKeyLength(algorithm) / 8
    new SecretKeySpec(md.digest().take(keyLengthBytes), algorithm)
  }
}
| EVENFinancial/sbt-secrets | src/main/scala/com/evenfinancial/sbt/secrets/util/AesUtil.scala | Scala | mit | 1,650 |
package scoverage
import org.scalatest.{FreeSpec, Matchers}
/**
 * Verifies that the scoverage location compiler reports the correct package
 * name, class name, top-level class, method name and class type for compiled
 * code snippets.
 */
class LocationTest extends FreeSpec with Matchers {
  "location function" - {
    "should correctly process top level types" - {
      "for classes" in {
        val compiler = ScoverageCompiler.locationCompiler
        compiler.compile("package com.test\\nclass Sammy")
        // The "Template" entry corresponds to the type's body.
        val loc = compiler.locations.result().find(_._1 == "Template").get._2
        loc.packageName shouldBe "com.test"
        loc.className shouldBe "Sammy"
        loc.topLevelClass shouldBe "Sammy"
        loc.method shouldBe "<none>"
        loc.classType shouldBe ClassType.Class
        loc.sourcePath should endWith(".scala")
      }
      "for objects" in {
        val compiler = ScoverageCompiler.locationCompiler
        compiler.compile("package com.test\\nobject Bammy { def foo = 'boo } ")
        val loc = compiler.locations.result().find(_._1 == "Template").get._2
        loc.packageName shouldBe "com.test"
        loc.className shouldBe "Bammy"
        loc.topLevelClass shouldBe "Bammy"
        loc.method shouldBe "<none>"
        loc.classType shouldBe ClassType.Object
        loc.sourcePath should endWith(".scala")
      }
      "for traits" in {
        val compiler = ScoverageCompiler.locationCompiler
        compiler.compile("package com.test\\ntrait Gammy { def goo = 'hoo } ")
        val loc = compiler.locations.result().find(_._1 == "Template").get._2
        loc.packageName shouldBe "com.test"
        loc.className shouldBe "Gammy"
        loc.topLevelClass shouldBe "Gammy"
        loc.method shouldBe "<none>"
        loc.classType shouldBe ClassType.Trait
        loc.sourcePath should endWith(".scala")
      }
    }
    "should correctly process methods" in {
      val compiler = ScoverageCompiler.locationCompiler
      compiler.compile("package com.methodtest \\n class Hammy { def foo = 'boo } ")
      val loc = compiler.locations.result().find(_._2.method == "foo").get._2
      loc.packageName shouldBe "com.methodtest"
      loc.className shouldBe "Hammy"
      loc.classType shouldBe ClassType.Class
      loc.sourcePath should endWith(".scala")
    }
    "should correctly process nested methods" in {
      val compiler = ScoverageCompiler.locationCompiler
      compiler.compile("package com.methodtest \\n class Hammy { def foo = { def goo = { getClass; 3 }; goo } } ")
      // A method defined inside another method is still attributed to the
      // enclosing class.
      val loc = compiler.locations.result().find(_._2.method == "goo").get._2
      loc.packageName shouldBe "com.methodtest"
      loc.className shouldBe "Hammy"
      loc.topLevelClass shouldBe "Hammy"
      loc.classType shouldBe ClassType.Class
      loc.sourcePath should endWith(".scala")
    }
    "should process anon functions as inside the enclosing method" in {
      val compiler = ScoverageCompiler.locationCompiler
      compiler.compile("package com.methodtest \\n class Jammy { def moo = { Option(\\"bat\\").map(_.length) } } ")
      // The lambda passed to `map` should report `moo` as its method.
      val loc = compiler.locations.result().find(_._1 == "Function").get._2
      loc.packageName shouldBe "com.methodtest"
      loc.className shouldBe "Jammy"
      loc.method shouldBe "moo"
      loc.classType shouldBe ClassType.Class
      loc.sourcePath should endWith(".scala")
    }
    // Nested types report their own class name but the enclosing top-level class.
    "should use outer package" - {
      "for nested classes" in {
        val compiler = ScoverageCompiler.locationCompiler
        compiler.compile("package com.methodtest \\n class Jammy { class Pammy } ")
        val loc = compiler.locations.result().find(_._2.className == "Pammy").get._2
        loc.packageName shouldBe "com.methodtest"
        loc.className shouldBe "Pammy"
        loc.topLevelClass shouldBe "Jammy"
        loc.method shouldBe "<none>"
        loc.classType shouldBe ClassType.Class
        loc.sourcePath should endWith(".scala")
      }
      "for nested objects" in {
        val compiler = ScoverageCompiler.locationCompiler
        compiler.compile("package com.methodtest \\n class Jammy { object Zammy } ")
        val loc = compiler.locations.result().find(_._2.className == "Zammy").get._2
        loc.packageName shouldBe "com.methodtest"
        loc.className shouldBe "Zammy"
        loc.topLevelClass shouldBe "Jammy"
        loc.method shouldBe "<none>"
        loc.classType shouldBe ClassType.Object
        loc.sourcePath should endWith(".scala")
      }
      "for nested traits" in {
        val compiler = ScoverageCompiler.locationCompiler
        compiler.compile("package com.methodtest \\n class Jammy { trait Mammy } ")
        val loc = compiler.locations.result().find(_._2.className == "Mammy").get._2
        loc.packageName shouldBe "com.methodtest"
        loc.className shouldBe "Mammy"
        loc.topLevelClass shouldBe "Jammy"
        loc.method shouldBe "<none>"
        loc.classType shouldBe ClassType.Trait
        loc.sourcePath should endWith(".scala")
      }
    }
    // Chained `package a` / `package b` clauses must be merged into "a.b".
    "should support nested packages" - {
      "for classes" in {
        val compiler = ScoverageCompiler.locationCompiler
        compiler.compile("package com.a \\n " +
          "package b \\n" +
          "class Kammy ")
        val loc = compiler.locations.result().find(_._1 == "Template").get._2
        loc.packageName shouldBe "com.a.b"
        loc.className shouldBe "Kammy"
        loc.topLevelClass shouldBe "Kammy"
        loc.method shouldBe "<none>"
        loc.classType shouldBe ClassType.Class
        loc.sourcePath should endWith(".scala")
      }
      "for objects" in {
        val compiler = ScoverageCompiler.locationCompiler
        compiler.compile("package com.a \\n " +
          "package b \\n" +
          "object Kammy ")
        val loc = compiler.locations.result().find(_._1 == "Template").get._2
        loc.packageName shouldBe "com.a.b"
        loc.className shouldBe "Kammy"
        loc.method shouldBe "<none>"
        loc.classType shouldBe ClassType.Object
        loc.sourcePath should endWith(".scala")
      }
      "for traits" in {
        val compiler = ScoverageCompiler.locationCompiler
        compiler.compile("package com.a \\n " +
          "package b \\n" +
          "trait Kammy ")
        val loc = compiler.locations.result().find(_._1 == "Template").get._2
        loc.packageName shouldBe "com.a.b"
        loc.className shouldBe "Kammy"
        loc.topLevelClass shouldBe "Kammy"
        loc.method shouldBe "<none>"
        loc.classType shouldBe ClassType.Trait
        loc.sourcePath should endWith(".scala")
      }
    }
    // Statements executed in a constructor body belong to no method.
    "should use <none> method name" - {
      "for class constructor body" in {
        val compiler = ScoverageCompiler.locationCompiler
        compiler.compile("package com.b \\n class Tammy { val name = 'sam } ")
        val loc = compiler.locations.result().find(_._1 == "ValDef").get._2
        loc.packageName shouldBe "com.b"
        loc.className shouldBe "Tammy"
        loc.method shouldBe "<none>"
        loc.classType shouldBe ClassType.Class
        loc.sourcePath should endWith(".scala")
      }
      "for object constructor body" in {
        val compiler = ScoverageCompiler.locationCompiler
        compiler.compile("package com.b \\n object Yammy { val name = 'sam } ")
        val loc = compiler.locations.result().find(_._1 == "ValDef").get._2
        loc.packageName shouldBe "com.b"
        loc.className shouldBe "Yammy"
        loc.topLevelClass shouldBe "Yammy"
        loc.method shouldBe "<none>"
        loc.classType shouldBe ClassType.Object
        loc.sourcePath should endWith(".scala")
      }
      "for trait constructor body" in {
        val compiler = ScoverageCompiler.locationCompiler
        compiler.compile("package com.b \\n trait Wammy { val name = 'sam } ")
        val loc = compiler.locations.result().find(_._1 == "ValDef").get._2
        loc.packageName shouldBe "com.b"
        loc.className shouldBe "Wammy"
        loc.topLevelClass shouldBe "Wammy"
        loc.method shouldBe "<none>"
        loc.classType shouldBe ClassType.Trait
        loc.sourcePath should endWith(".scala")
      }
    }
    "anon class should report enclosing class" in {
      val compiler = ScoverageCompiler.locationCompiler
      compiler
        .compile(
          "package com.a; object A { def foo(b : B) : Unit = b.invoke }; trait B { def invoke : Unit }; class C { A.foo(new B { def invoke = () }) }")
      println()
      println(compiler.locations.result().mkString("\\n"))
      // The last Template is the anonymous `new B { ... }` instantiated in C.
      val loc = compiler.locations.result().filter(_._1 == "Template").last._2
      loc.packageName shouldBe "com.a"
      loc.className shouldBe "C"
      loc.topLevelClass shouldBe "C"
      loc.method shouldBe "<none>"
      loc.classType shouldBe ClassType.Class
      loc.sourcePath should endWith(".scala")
    }
    "anon class implemented method should report enclosing method" in {
      val compiler = ScoverageCompiler.locationCompiler
      compiler.compile(
        "package com.a; object A { def foo(b : B) : Unit = b.invoke }; trait B { def invoke : Unit }; class C { A.foo(new B { def invoke = () }) }")
      println()
      println(compiler.locations.result().mkString("\\n"))
      // The last DefDef is the `invoke` implemented inside the anonymous class.
      val loc = compiler.locations.result().filter(_._1 == "DefDef").last._2
      loc.packageName shouldBe "com.a"
      loc.className shouldBe "C"
      loc.topLevelClass shouldBe "C"
      loc.method shouldBe "invoke"
      loc.classType shouldBe ClassType.Class
      loc.sourcePath should endWith(".scala")
    }
  }
}
| ssidorenko/scalac-scoverage-plugin | scalac-scoverage-plugin/src/test/scala/scoverage/LocationTest.scala | Scala | apache-2.0 | 9,344 |
package com.komanov.serialization.converters
import com.komanov.serialization.converters.api.MyConverter
import com.komanov.serialization.domain.{Site, SiteEvent}
import com.twitter.chill.ScalaKryoInstantiator
/** https://github.com/twitter/chill */
/** Binary serialization backed by Twitter Chill (Kryo for Scala). */
object ChillConverter extends MyConverter {

  // Thread-safe Kryo pool preconfigured with Scala-aware serializers.
  private val pool = ScalaKryoInstantiator.defaultPool

  /** Serializes a [[Site]] without embedding class metadata. */
  override def toByteArray(site: Site): Array[Byte] = pool.toBytesWithoutClass(site)

  /** Deserializes a [[Site]] previously produced by [[toByteArray]]. */
  override def fromByteArray(bytes: Array[Byte]): Site = pool.fromBytes(bytes, classOf[Site])

  /** Serializes a [[SiteEvent]] without embedding class metadata. */
  override def toByteArray(event: SiteEvent): Array[Byte] = pool.toBytesWithoutClass(event)

  /** Deserializes an event whose concrete runtime class is `clazz`. */
  override def siteEventFromByteArray(clazz: Class[_], bytes: Array[Byte]): SiteEvent =
    pool.fromBytes(bytes, clazz).asInstanceOf[SiteEvent]
}
| dkomanov/stuff | src/com/komanov/serialization/converters/ChillConverter.scala | Scala | mit | 811 |
package scalaDemo
import scala.io.Source
/**
* Created by liush on 17-7-14.
*/
/**
 * Demonstrates scala.util.Try (introduced in Scala 2.10): an operation that
 * may throw is wrapped in Try, yielding Success on a normal result and
 * Failure carrying the exception otherwise.
 *
 * Created by liush on 17-7-14.
 */
object TrySuccessFailueTest {
  import scala.util.{Failure, Success, Try}

  /** Integer division wrapped in Try; x / 0 yields Failure(ArithmeticException). */
  def divideBy(x: Int, y: Int): Try[Int] = Try(x / y)

  /** Reads a whole text file into a list of lines, capturing I/O errors as Failure. */
  def readTextFile(filename: String): Try[List[String]] =
    Try(Source.fromFile(filename).getLines.toList)

  /** Entry point: prints the outcomes of successful and failing Try computations. */
  def main(args: Array[String]): Unit = {
    println(divideBy(1, 1).getOrElse(0)) // 1
    println(divideBy(1, 0).getOrElse(0)) // 0
    divideBy(1, 1).foreach(println) // 1
    divideBy(1, 0).foreach(println) // no print
    divideBy(1, 0) match {
      case Success(i) => println(s"Success, value is: $i")
      case Failure(s) => println(s"Failed, message is: $s")
    }
    // Prints the file's lines on success; on failure prints the error, e.g.
    // java.io.FileNotFoundException when the file does not exist.
    readTextFile("/etc/passwd") match {
      case Success(lines) => lines.foreach(println)
      case Failure(f) => println("Failure:" + f)
    }
  }
}
| tophua/spark1.52 | examples/src/main/scala/scalaDemo/TrySuccessFailueTest.scala | Scala | apache-2.0 | 1,502 |
package scala.meta.tests.semanticdb
import org.scalatest.FunSuite
import scala.meta.internal.semanticdb.Scala._
/**
 * Tests for SemanticDB symbol encoding: multi-symbol syntax and
 * roundtripping, plus the global/local classification predicates.
 */
class SymbolSuite extends FunSuite {

  /** Asserts that encoding `symbols` as a multi symbol renders as `expected`. */
  def checkMultiSyntax(symbols: List[String], expected: String): Unit = {
    test(" syntax: " + symbols.toString()) {
      val obtained = Symbols.Multi(symbols)
      assert(obtained == expected)
    }
  }

  /** Asserts that a multi symbol survives a decode/encode roundtrip. */
  def checkMultiRoundtrip(symbols: List[String]): Unit = {
    test(" multi: " + symbols.toString) {
      val symbol = Symbols.Multi(symbols)
      // Bug fix: the previous version computed `val expected = symbol.asMulti`
      // and then asserted `symbol.asMulti == expected` — comparing the value
      // with itself, so the test could never fail. The actual roundtrip
      // property is that re-encoding the decoded parts reproduces the
      // original multi symbol.
      assert(Symbols.Multi(symbol.asMulti) == symbol)
    }
  }

  /** Asserts that `symbol` is classified as global. */
  def checkGlobal(symbol: String): Unit = {
    test(" global: " + symbol) { assert(symbol.isGlobal) }
  }

  /** Asserts that `symbol` is NOT classified as global. */
  def checkNotGlobal(symbol: String): Unit = {
    test("!global: " + symbol) { assert(!symbol.isGlobal) }
  }

  /** Asserts that `symbol` is classified as local. */
  def checkLocal(symbol: String): Unit = {
    test(" local: " + symbol) { assert(symbol.isLocal) }
  }

  /** Asserts that `symbol` is NOT classified as local. */
  def checkNotLocal(symbol: String): Unit = {
    test(" !local: " + symbol) { assert(!symbol.isLocal) }
  }

  /** Registers a test asserting that predicate `f` holds for `sym`. */
  def check(sym: String)(f: String => Boolean): Unit = {
    test(sym) {
      assert(f(sym))
    }
  }

  checkMultiSyntax(Nil, "")
  checkMultiSyntax("a." :: Nil, "a.")
  checkMultiSyntax("a." :: "a." :: Nil, "a.")
  checkMultiSyntax("a." :: "b." :: Nil, ";a.;b.")
  checkMultiSyntax(";a.;b." :: ";c.;d." :: Nil, ";a.;b.;c.;d.")
  checkMultiRoundtrip(Nil)
  checkMultiRoundtrip("com/Bar#" :: Nil)
  checkMultiRoundtrip("com/Bar#" :: "com.Bar." :: Nil)
  checkMultiRoundtrip("com/`; ;`#" :: "com.`; ;`." :: Nil)
  checkMultiRoundtrip("a" :: "b" :: "" :: Nil)
  checkMultiRoundtrip(";_root_/;_empty_/" :: "_star_." :: Nil)
  checkGlobal("com/Bar#")
  checkGlobal("com/Bar.")
  checkGlobal("com/Bar.(a)")
  checkGlobal("com/Bar.[a]")
  checkGlobal(Symbols.RootPackage)
  checkGlobal(Symbols.EmptyPackage)
  checkNotGlobal(";com/Bar#;com/Bar.")
  checkNotGlobal("local1")
  checkNotGlobal(Symbols.None)
  checkLocal("local1")
  checkNotLocal(";local1;local2")
  checkNotLocal("com/Bar#")
  checkNotLocal(";com/Bar#;com/Bar.")
  checkNotLocal(Symbols.None)
  checkNotLocal(Symbols.RootPackage)
  checkNotLocal(Symbols.EmptyPackage)
  check("com/Predef.")(_.isTerm)
  check("com/Class#")(_.isType)
  check("com/")(_.isPackage)
  check(";com/;org/")(!_.isPackage)
  check("com/Class#(a)")(_.isParameter)
  check("com/Class#[A]")(_.isTypeParameter)
}
| olafurpg/scalameta | tests/jvm/src/test/scala/scala/meta/tests/semanticdb/SymbolSuite.scala | Scala | bsd-3-clause | 2,379 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.rpc.netty
import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap, LinkedBlockingQueue, ThreadPoolExecutor, TimeUnit}
import javax.annotation.concurrent.GuardedBy
import scala.collection.JavaConverters._
import scala.concurrent.Promise
import scala.util.control.NonFatal
import org.apache.spark.SparkException
import org.apache.spark.internal.Logging
import org.apache.spark.network.client.RpcResponseCallback
import org.apache.spark.rpc._
import org.apache.spark.util.ThreadUtils
/**
* A message dispatcher, responsible for routing RPC messages to the appropriate endpoint(s).
*/
// NettyRpcEnv中包含Dispatcher,主要针对服务端,帮助路由到正确的RpcEndpoint,并且调用其业务逻辑。
// Lives inside NettyRpcEnv on the server side; routes each incoming message
// to the RpcEndpoint registered under the receiver's name and drives that
// endpoint's processing via per-endpoint inboxes and a shared thread pool.
private[netty] class Dispatcher(nettyEnv: NettyRpcEnv) extends Logging {

  // Couples an endpoint with its reference and its private message queue.
  private class EndpointData(
      val name: String,
      val endpoint: RpcEndpoint,
      val ref: NettyRpcEndpointRef) {
    val inbox = new Inbox(ref, endpoint)
  }

  // Registered endpoints, looked up by name when a message is posted.
  private val endpoints: ConcurrentMap[String, EndpointData] =
    new ConcurrentHashMap[String, EndpointData]
  private val endpointRefs: ConcurrentMap[RpcEndpoint, RpcEndpointRef] =
    new ConcurrentHashMap[RpcEndpoint, RpcEndpointRef]

  // Track the receivers whose inboxes may contain messages.
  private val receivers = new LinkedBlockingQueue[EndpointData]

  /**
   * True if the dispatcher has been stopped. Once stopped, all messages posted will be bounced
   * immediately.
   */
  @GuardedBy("this")
  private var stopped = false

  /**
   * Registers `endpoint` under `name` and returns a ref that can be used to
   * send messages to it. Enqueues the endpoint once so its OnStart message
   * gets processed. Throws if the name is taken or the dispatcher is stopped.
   */
  def registerRpcEndpoint(name: String, endpoint: RpcEndpoint): NettyRpcEndpointRef = {
    val addr = RpcEndpointAddress(nettyEnv.address, name)
    val endpointRef = new NettyRpcEndpointRef(nettyEnv.conf, addr, nettyEnv)
    synchronized {
      if (stopped) {
        throw new IllegalStateException("RpcEnv has been stopped")
      }
      if (endpoints.putIfAbsent(name, new EndpointData(name, endpoint, endpointRef)) != null) {
        throw new IllegalArgumentException(s"There is already an RpcEndpoint called $name")
      }
      val data = endpoints.get(name)
      endpointRefs.put(data.endpoint, data.ref)
      receivers.offer(data) // for the OnStart message
    }
    endpointRef
  }

  def getRpcEndpointRef(endpoint: RpcEndpoint): RpcEndpointRef = endpointRefs.get(endpoint)

  def removeRpcEndpointRef(endpoint: RpcEndpoint): Unit = endpointRefs.remove(endpoint)

  // Should be idempotent
  private def unregisterRpcEndpoint(name: String): Unit = {
    val data = endpoints.remove(name)
    if (data != null) {
      data.inbox.stop()
      receivers.offer(data) // for the OnStop message
    }
    // Don't clean `endpointRefs` here because it's possible that some messages are being processed
    // now and they can use `getRpcEndpointRef`. So `endpointRefs` will be cleaned in Inbox via
    // `removeRpcEndpointRef`.
  }

  /** Unregisters the endpoint behind `rpcEndpointRef`, unless a full stop is in progress. */
  def stop(rpcEndpointRef: RpcEndpointRef): Unit = {
    synchronized {
      if (stopped) {
        // This endpoint will be stopped by Dispatcher.stop() method.
        return
      }
      unregisterRpcEndpoint(rpcEndpointRef.name)
    }
  }

  /**
   * Send a message to all registered [[RpcEndpoint]]s in this process.
   *
   * This can be used to make network events known to all end points (e.g. "a new node connected").
   */
  def postToAll(message: InboxMessage): Unit = {
    val iter = endpoints.keySet().iterator()
    while (iter.hasNext) {
      val name = iter.next
      postMessage(name, message, (e) => logWarning(s"Message $message dropped. ${e.getMessage}"))
    }
  }

  /** Posts a message sent by a remote endpoint. */
  def postRemoteMessage(message: RequestMessage, callback: RpcResponseCallback): Unit = {
    val rpcCallContext =
      new RemoteNettyRpcCallContext(nettyEnv, callback, message.senderAddress)
    val rpcMessage = RpcMessage(message.senderAddress, message.content, rpcCallContext)
    postMessage(message.receiver.name, rpcMessage, (e) => callback.onFailure(e))
  }

  /** Posts a message sent by a local endpoint. */
  def postLocalMessage(message: RequestMessage, p: Promise[Any]): Unit = {
    val rpcCallContext =
      new LocalNettyRpcCallContext(message.senderAddress, p)
    val rpcMessage = RpcMessage(message.senderAddress, message.content, rpcCallContext)
    postMessage(message.receiver.name, rpcMessage, (e) => p.tryFailure(e))
  }

  /** Posts a one-way message. */
  def postOneWayMessage(message: RequestMessage): Unit = {
    postMessage(message.receiver.name, OneWayMessage(message.senderAddress, message.content),
      (e) => throw e)
  }

  /**
   * Posts a message to a specific endpoint.
   *
   * @param endpointName name of the endpoint.
   * @param message the message to post
   * @param callbackIfStopped callback function if the endpoint is stopped.
   */
  private def postMessage(
      endpointName: String,
      message: InboxMessage,
      callbackIfStopped: (Exception) => Unit): Unit = {
    val error = synchronized {
      val data = endpoints.get(endpointName)
      if (stopped) {
        Some(new RpcEnvStoppedException())
      } else if (data == null) {
        Some(new SparkException(s"Could not find $endpointName."))
      } else {
        // Queue the message in the endpoint's inbox and mark the endpoint as
        // having pending work so a MessageLoop picks it up.
        data.inbox.post(message)
        receivers.offer(data)
        None
      }
    }
    // We don't need to call `onStop` in the `synchronized` block
    error.foreach(callbackIfStopped)
  }

  /** Stops the dispatcher: unregisters every endpoint and shuts down the loops. */
  def stop(): Unit = {
    synchronized {
      if (stopped) {
        return
      }
      stopped = true
    }
    // Stop all endpoints. This will queue all endpoints for processing by the message loops.
    endpoints.keySet().asScala.foreach(unregisterRpcEndpoint)
    // Enqueue a message that tells the message loops to stop.
    receivers.offer(PoisonPill)
    threadpool.shutdown()
  }

  def awaitTermination(): Unit = {
    threadpool.awaitTermination(Long.MaxValue, TimeUnit.MILLISECONDS)
  }

  /**
   * Return if the endpoint exists
   */
  def verify(name: String): Boolean = {
    endpoints.containsKey(name)
  }

  /** Thread pool used for dispatching messages. */
  private val threadpool: ThreadPoolExecutor = {
    val numThreads = nettyEnv.conf.getInt("spark.rpc.netty.dispatcher.numThreads",
      math.max(2, Runtime.getRuntime.availableProcessors()))
    val pool = ThreadUtils.newDaemonFixedThreadPool(numThreads, "dispatcher-event-loop")
    for (i <- 0 until numThreads) {
      pool.execute(new MessageLoop)
    }
    pool
  }

  /** Message loop used for dispatching messages. */
  // Repeatedly takes an EndpointData off the blocking queue (blocking while it
  // is empty) and processes that endpoint's inbox. Several of these loops run
  // concurrently on the dispatcher's thread pool, consuming posted messages.
  private class MessageLoop extends Runnable {
    override def run(): Unit = {
      try {
        while (true) {
          try {
            val data = receivers.take()
            if (data == PoisonPill) {
              // Put PoisonPill back so that other MessageLoops can see it.
              receivers.offer(PoisonPill)
              return
            }
            data.inbox.process(Dispatcher.this)
          } catch {
            case NonFatal(e) => logError(e.getMessage, e)
          }
        }
      } catch {
        case ie: InterruptedException => // exit
      }
    }
  }

  /** A poison endpoint that indicates MessageLoop should exit its message loop. */
  private val PoisonPill = new EndpointData(null, null, null)
}
| spark0001/spark2.1.1 | core/src/main/scala/org/apache/spark/rpc/netty/Dispatcher.scala | Scala | apache-2.0 | 8,360 |
/*
* Copyright 2010 LinkedIn
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.message
import java.util.Arrays
import junit.framework.Assert._
import kafka.utils.TestUtils._
import org.scalatest.junit.JUnitSuite
import org.junit.Test
/**
 * Shared test cases for MessageSet implementations. Concrete suites supply
 * the implementation under test via [[createMessageSet]].
 */
trait BaseMessageSetTestCases extends JUnitSuite {

  // Fixture: two small messages reused by every test.
  val messages = Array(new Message("abcd".getBytes()), new Message("efgh".getBytes()))

  /** Factory implemented by subclasses to build the concrete MessageSet under test. */
  def createMessageSet(messages: Seq[Message]): MessageSet

  @Test
  def testWrittenEqualsRead {
    // Iterating the set must yield the exact messages used to build it.
    val messageSet = createMessageSet(messages)
    checkEquals(messages.iterator, messageSet.iterator)
  }

  @Test
  def testIteratorIsConsistent() {
    val m = createMessageSet(messages)
    // two iterators over the same set should give the same results
    checkEquals(m.iterator, m.iterator)
  }

  @Test
  def testSizeInBytes() {
    assertEquals("Empty message set should have 0 bytes.",
      0L,
      createMessageSet(Array[Message]()).sizeInBytes)
    assertEquals("Predicted size should equal actual size.",
      MessageSet.messageSetSize(messages).toLong,
      createMessageSet(messages).sizeInBytes)
  }

  @Test
  def testWriteTo() {
    // test empty message set
    testWriteToWithMessageSet(createMessageSet(Array[Message]()))
    testWriteToWithMessageSet(createMessageSet(messages))
  }

  /** Writes `set` to a temp channel and verifies the bytes read back are identical. */
  def testWriteToWithMessageSet(set: MessageSet) {
    val channel = tempChannel()
    val written = set.writeTo(channel, 0, 1024)
    assertEquals("Expect to write the number of bytes in the set.", set.sizeInBytes, written)
    val newSet = new FileMessageSet(channel, false)
    checkEquals(set.iterator, newSet.iterator)
  }
}
| quipo/kafka | core/src/test/scala/unit/kafka/message/BaseMessageSetTestCases.scala | Scala | apache-2.0 | 2,198 |
package issue347
import org.joda.time.DateTime
import org.scalatest._
import scalikejdbc._
import scalikejdbc.scalatest.AutoRollback
import skinny.dbmigration.DBSeeds
import skinny.orm._
// Registers an in-memory H2 connection pool (PostgreSQL compatibility mode)
// under the name 'issue347; mixed into suites that need the test database.
trait Connection {
  Class.forName("org.h2.Driver")
  ConnectionPool.add('issue347, "jdbc:h2:mem:issue347;MODE=PostgreSQL", "sa", "sa")
}
// Creates the `user` and `article` tables via DBSeeds; the seeds only run if
// the probe query at the bottom fails (i.e. the schema does not exist yet).
trait CreateTables extends DBSeeds { self: Connection =>

  // Run the seed DDL against the 'issue347 pool registered by Connection.
  override val dbSeedsAutoSession = NamedAutoSession('issue347)

  addSeedSQL(
    sql"""
    create table user (
      user_id bigserial not null,
      name varchar(100) not null,
      created_at timestamp not null default current_timestamp)
    """
  )
  addSeedSQL(
    sql"""
    create table article (
      id bigserial not null,
      title varchar(100) not null,
      user_id bigint references user(user_id))
    """
  )
  runIfFailed(sql"select count(1) from article")
}
/**
 * Regression test for issue #347: `joins` (lazy association loading) and
 * `includes` (eager loading) must return the same data.
 */
class Issue347Spec extends fixture.FunSpec with Matchers with Connection with CreateTables with AutoRollback {

  // Domain model: a user owns many articles; an article optionally has an author.
  case class User(userId: Long, name: String, createdAt: DateTime, articles: Seq[Article] = Nil)
  case class Article(id: Long, title: String, userId: Option[Long], user: Option[User] = None)

  object User extends SkinnyCRUDMapper[User] {
    override val connectionPoolName = 'issue347
    override val primaryKeyFieldName = "userId"
    override def defaultAlias = createAlias("u")
    // hasMany association plus an `includes` strategy that distributes the
    // eagerly-loaded articles back onto their owning users.
    lazy val articlesRef = hasMany[Article](
      many = Article -> Article.defaultAlias,
      on = (u, a) => sqls.eq(u.userId, a.userId),
      merge = (u, as) => u.copy(articles = as)
    ).includes[Article](
      merge = { (users, articles) =>
        users.map { user =>
          user.copy(articles = articles.filter(_.userId.exists(_ == user.userId)))
        }
      }
    )
    // Skip the `articles` field when mapping a row; the association fills it in.
    override def extract(rs: WrappedResultSet, rn: ResultName[User]) = autoConstruct(rs, rn, "articles")
  }

  object Article extends SkinnyCRUDMapper[Article] {
    override val connectionPoolName = 'issue347
    override def defaultAlias = createAlias("a")
    // Skip the `user` field when mapping a row; filled by the belongsTo association.
    override def extract(rs: WrappedResultSet, rn: ResultName[Article]) = autoConstruct(rs, rn, "user")
    lazy val userRef = belongsTo[User](
      right = User,
      merge = (a, u) => a.copy(user = u)
    )
  }

  // Run each test against the named DB so AutoRollback can roll it back.
  override def db(): DB = NamedDB('issue347).toDB()

  /** Seeds six users and a batch of articles (including one without an author). */
  override def fixture(implicit session: DBSession): Unit = {
    val aliceId = User.createWithAttributes('name -> "Alice")
    val bobId = User.createWithAttributes('name -> "Bob") // Scala
    val chrisId = User.createWithAttributes('name -> "Chris") // Scala
    val denId = User.createWithAttributes('name -> "Den")
    val ericId = User.createWithAttributes('name -> "Eric") // Scala
    val fredId = User.createWithAttributes('name -> "Fred")
    val titleAndUser = Seq(
      ("Hello World", Some(aliceId)),
      ("Getting Started with Scala", Some(bobId)), // Scala
      ("Functional Programming in Scala", None), // Scala
      ("Beginning Ruby", Some(aliceId)),
      ("Beginning Scala", Some(chrisId)), // Scala
      ("Beginning Ruby", Some(denId)),
      ("Hello Scala", Some(ericId)), // Scala
      ("Bob's Scala Lesson 1", Some(bobId)), // Scala
      ("Functional Programming in Java", Some(fredId)),
      ("Beginning Ruby", Some(fredId)),
      ("Scalaz Usage", Some(chrisId)), // Scala
      ("The Better Java?", Some(bobId)),
      ("How to user sbt", Some(aliceId))
    )
    titleAndUser.foreach {
      case (title, userId) =>
        Article.createWithAttributes('title -> title, 'userId -> userId)
    }
  }

  describe("joins/includes") {
    it("should return expected results when joins / includes") { implicit session =>
      // Both loading strategies must produce identical user+articles graphs.
      val users1 = User.joins(User.articlesRef).findAll()
      val users2 = User.includes(User.articlesRef).findAll()
      println(users1)
      println(users2)
      users1 should equal(users2)
    }
  }
}
| seratch/skinny-framework | orm/src/test/scala/issue346/Issue347Spec.scala | Scala | mit | 3,901 |
package evaluation
import breeze.linalg.DenseVector
import org.scalatest.FunSuite
import org.apache.spark.SparkContext
import utils.Stats
import workflow.PipelineContext
/**
 * Checks per-class average precision computed by MeanAveragePrecisionEvaluator
 * against reference values obtained from a MATLAB implementation.
 */
class MeanAveragePrecisionSuite extends FunSuite with PipelineContext {
  test("random map test") {
    sc = new SparkContext("local", "test")
    // Build some random test data with 4 classes 0,1,2,3
    val actual = List(Array(0, 3), Array(2), Array(1, 2), Array(0))
    val actualRdd = sc.parallelize(actual)
    // One score vector per example, one score per class.
    val predicted = List(
      DenseVector(0.1, -0.05, 0.12, 0.5),
      DenseVector(-0.23, -0.45, 0.23, 0.1),
      DenseVector(-0.34, -0.32, -0.66, 1.52),
      DenseVector(-0.1, -0.2, 0.5, 0.8))
    val predictedRdd = sc.parallelize(predicted)
    val map = MeanAveragePrecisionEvaluator.apply(actualRdd, predictedRdd, 4)
    // Expected values from running this in MATLAB
    val expected = DenseVector(1.0, 0.3333, 0.5, 0.3333)
    // Tolerance comparison because the expected values are truncated decimals.
    assert(Stats.aboutEq(map, expected, 1e-4))
  }
}
| tomerk/keystone | src/test/scala/evaluation/MeanAveragePrecisionSuite.scala | Scala | apache-2.0 | 978 |
package mesosphere.marathon
package api.v2
import java.util
import javax.inject.Inject
import javax.servlet.http.HttpServletRequest
import javax.ws.rs._
import javax.ws.rs.core.{ Context, MediaType, Response }
import mesosphere.marathon.api.EndpointsHelper.ListTasks
import mesosphere.marathon.api.{ EndpointsHelper, MarathonMediaType, TaskKiller, _ }
import mesosphere.marathon.core.appinfo.EnrichedTask
import mesosphere.marathon.core.condition.Condition
import mesosphere.marathon.core.group.GroupManager
import mesosphere.marathon.core.health.{ Health, HealthCheckManager }
import mesosphere.marathon.core.instance.Instance
import mesosphere.marathon.core.instance.Instance.Id
import mesosphere.marathon.core.task.Task
import mesosphere.marathon.core.task.tracker.InstanceTracker
import mesosphere.marathon.plugin.auth.{ Authenticator, Authorizer, UpdateRunSpec, ViewRunSpec }
import mesosphere.marathon.raml.AnyToRaml
import mesosphere.marathon.raml.Task._
import mesosphere.marathon.raml.TaskConversion._
import mesosphere.marathon.state.PathId
import mesosphere.marathon.stream.Implicits._
import org.slf4j.LoggerFactory
import play.api.libs.json.Json
import scala.async.Async._
import scala.concurrent.{ ExecutionContext, Future }
@Path("v2/tasks")
class TasksResource @Inject() (
instanceTracker: InstanceTracker,
taskKiller: TaskKiller,
val config: MarathonConf,
groupManager: GroupManager,
healthCheckManager: HealthCheckManager,
val authenticator: Authenticator,
val authorizer: Authorizer) extends AuthResource {
val log = LoggerFactory.getLogger(getClass.getName)
implicit val ec = ExecutionContext.Implicits.global
  @GET
  @Produces(Array(MarathonMediaType.PREFERRED_APPLICATION_JSON))
  @SuppressWarnings(Array("all")) /* async/await */
  // Returns all tasks (optionally filtered by status) as JSON, restricted to
  // apps the caller is authorized to view.
  def indexJson(
    @QueryParam("status") status: String,
    @QueryParam("status[]") statuses: util.List[String],
    @Context req: HttpServletRequest): Response = authenticated(req) { implicit identity =>
    // Fold the single `status` parameter into the `status[]` list so both
    // query styles are handled uniformly.
    Option(status).map(statuses.add)
    // Conditions to filter by; an empty set means "all tasks".
    val conditionSet: Set[Condition] = statuses.flatMap(toTaskState)(collection.breakOut)
    val futureEnrichedTasks = async {
      val instancesBySpec = await(instanceTracker.instancesBySpec)
      val appIds: Set[PathId] = instancesBySpec.allSpecIdsWithInstances
      val appIdsToApps = groupManager.apps(appIds)
      // Service ports per app, used to enrich each task below.
      val appToPorts: Map[PathId, Seq[Int]] = appIdsToApps.map {
        case (appId, app) => appId -> app.map(_.servicePorts).getOrElse(Nil)
      }
      // Gather health statuses of all apps into one instance-id keyed map.
      val health = await(
        Future.sequence(appIds.map { appId =>
          healthCheckManager.statuses(appId)
        })).foldLeft(Map[Id, Seq[Health]]())(_ ++ _)
      // Keep only tasks whose app is viewable by the caller and whose
      // condition matches the requested filter (if any).
      val enrichedTasks: Iterable[Iterable[EnrichedTask]] = for {
        (appId, instances) <- instancesBySpec.instancesMap
        instance <- instances.instances
        app <- appIdsToApps(appId)
        if (isAuthorized(ViewRunSpec, app) && (conditionSet.isEmpty || conditionSet(instance.state.condition)))
        tasks = instance.tasksMap.values
      } yield {
        tasks.map { task =>
          EnrichedTask(instance, task, health.getOrElse(instance.instanceId, Nil),
            appToPorts.getOrElse(appId, Nil))
        }
      }
      enrichedTasks.flatten
    }
    // Blocks on the future via `result`, matching the synchronous JAX-RS
    // style used throughout this resource.
    val enrichedTasks: Iterable[EnrichedTask] = result(futureEnrichedTasks)
    ok(jsonObjString(
      "tasks" -> enrichedTasks.toIndexedSeq.toRaml
    ))
  }
  @GET
  @Produces(Array(MediaType.TEXT_PLAIN))
  @SuppressWarnings(Array("all")) /* async/await */
  // Plain-text listing of app service endpoints, restricted to apps the
  // caller is authorized to view.
  def indexTxt(@Context req: HttpServletRequest): Response = authenticated(req) { implicit identity =>
    result(async {
      val instancesBySpec = await(instanceTracker.instancesBySpec)
      val rootGroup = groupManager.rootGroup()
      val appsToEndpointString = EndpointsHelper.appsToEndpointString(
        ListTasks(instancesBySpec, rootGroup.transitiveApps.filterAs(app => isAuthorized(ViewRunSpec, app))(collection.breakOut))
      )
      ok(appsToEndpointString)
    })
  }
@POST
@Produces(Array(MarathonMediaType.PREFERRED_APPLICATION_JSON))
@Consumes(Array(MediaType.APPLICATION_JSON))
@Path("delete")
@SuppressWarnings(Array("all")) /* async/await */
def killTasks(
@QueryParam("scale")@DefaultValue("false") scale: Boolean,
@QueryParam("force")@DefaultValue("false") force: Boolean,
@QueryParam("wipe")@DefaultValue("false") wipe: Boolean,
body: Array[Byte],
@Context req: HttpServletRequest): Response = authenticated(req) { implicit identity =>
if (scale && wipe) throw new BadRequestException("You cannot use scale and wipe at the same time.")
val taskIds = (Json.parse(body) \\ "ids").as[Set[String]]
val tasksIdToAppId: Map[Instance.Id, PathId] = taskIds.map { id =>
try {
val taskId = Task.Id(id)
taskId.instanceId -> taskId.instanceId.runSpecId
} catch { case e: MatchError => throw new BadRequestException(s"Invalid task id '$id'. [${e.getMessage}]") }
}(collection.breakOut)
def scaleAppWithKill(toKill: Map[PathId, Seq[Instance]]): Future[Response] = async {
val killAndScale = await(taskKiller.killAndScale(toKill, force))
deploymentResult(killAndScale)
}
def doKillTasks(toKill: Map[PathId, Seq[Instance]]): Future[Response] = async {
val affectedApps = tasksIdToAppId.values.flatMap(appId => groupManager.app(appId))(collection.breakOut)
// FIXME (gkleiman): taskKiller.kill a few lines below also checks authorization, but we need to check ALL before
// starting to kill tasks
affectedApps.foreach(checkAuthorization(UpdateRunSpec, _))
val killed = await(Future.sequence(toKill
.filter { case (appId, _) => affectedApps.exists(app => app.id == appId) }
.map {
case (appId, instances) => taskKiller.kill(appId, _ => instances, wipe)
})).flatten
ok(jsonObjString("tasks" -> killed.flatMap { instance =>
instance.tasksMap.valuesIterator.map { task =>
EnrichedTask(instance, task, Nil).toRaml
}
}))
}
val futureResponse = async {
val maybeInstances: Iterable[Option[Instance]] = await(Future.sequence(tasksIdToAppId.view
.map { case (taskId, _) => instanceTracker.instancesBySpec.map(_.instance(taskId)) }))
val tasksByAppId: Map[PathId, Seq[Instance]] = maybeInstances.flatten
.groupBy(instance => instance.instanceId.runSpecId)
.map { case (appId, instances) => appId -> instances.to[Seq] }(collection.breakOut)
val response =
if (scale) scaleAppWithKill(tasksByAppId)
else doKillTasks(tasksByAppId)
await(response)
}
result(futureResponse)
}
private def toTaskState(state: String): Option[Condition] = state.toLowerCase match {
case "running" => Some(Condition.Running)
case "staging" => Some(Condition.Staging)
case _ => None
}
}
| guenter/marathon | src/main/scala/mesosphere/marathon/api/v2/TasksResource.scala | Scala | apache-2.0 | 6,888 |
// Compiler regression test (scala/bug t7912): the MatchError raised for a
// scrutinee whose own toString throws must fall back to a class-based message
// ("an instance of class A$") instead of propagating the secondary exception.
case object A { override def toString = ??? }
object Test {
  // (A: Any) cannot match `case 0`, so the explicit default throws MatchError(A);
  // building its message forces A.toString, which throws NotImplementedError.
  def foo: Int = (A: Any) match {
    case 0 => 0
    case x => throw new MatchError(x)
  }
  def main(args: Array[String]): Unit = {
    try {
      foo
      sys.error("no exception")
    } catch {
      // The fallback message must name the (module) class, not the toString result.
      case me: MatchError => assert(me.getMessage == "an instance of class A$", me.getMessage)
      case ex: Throwable => sys.error("not a match error: " + ex.getClass)
    }
  }
}
| lrytz/scala | test/files/run/t7912.scala | Scala | apache-2.0 | 443 |
package com.softwaremill.codebrag.domain
/** Readiness state of a single repository, with an optional error message
  * explaining why it is not ready.
  */
case class RepositoryStatus(repositoryName: String, ready: Boolean, error: Option[String])

/** Factory helpers. Extends Function3 so the companion itself can be passed
  * wherever the constructor's function type is expected.
  */
object RepositoryStatus extends ((String, Boolean, Option[String]) => RepositoryStatus) {

  /** A repository that is fully synchronized and usable. */
  def ready(repoName: String): RepositoryStatus =
    RepositoryStatus(repoName, ready = true, error = None)

  /** A repository that is not yet usable, optionally carrying the failure reason. */
  def notReady(repoName: String, error: Option[String] = None): RepositoryStatus =
    RepositoryStatus(repoName, ready = false, error = error)
}
| frodejohansen/codebrag | codebrag-domain/src/main/scala/com/softwaremill/codebrag/domain/RepositoryStatus.scala | Scala | agpl-3.0 | 413 |
package com.gravity.goose.utils
import org.junit.Test
import org.junit.Assert._
import com.gravity.goose.text.StopWords
/**
* Created by Jim Plush
* User: jim
* Date: 8/16/11
*/
/** JUnit test for FileHelper's classpath-resource loading. */
class FileHelperTest {

  /** Loading the bundled English stop-word list yields its known first entry. */
  @Test
  def loadFileContents(): Unit = {
    println("loading test")
    val contents = FileHelper.loadResourceFile("stopwords-en.txt", StopWords.getClass)
    assertTrue(contents.startsWith("a's"))
  }
}
/*
* Copyright (C) 2014-2015 by Nokia.
* See the LICENCE.txt file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package wookie.yql.places
import wookie.collector.cli.{RealConfig, KafkaPusherApp}
import wookie.yql.places.PlaceCodecs._
// Command-line entry point that collects Yahoo! places payloads (decoded as
// List[Place] via PlaceCodecs) and pushes them to Kafka, wired with the
// real (non-test) configuration.
object PlaceCollector extends KafkaPusherApp[List[Place]](RealConfig(_))
| elyast/wookie | examples/src/main/scala/wookie/yql/places/PlaceCollector.scala | Scala | apache-2.0 | 914 |
package com.github.rlishtaba.commutable.layers
import scala.concurrent.Future
/**
 * One layer of an OSI-style protocol stack. `L` is the payload type exchanged
 * with the layer below, `U` the payload type exchanged with the layer above.
 * Layers are wired together with [[linkWith]]; data travels upwards via
 * [[swim]] and downwards via [[sink]].
 */
trait OSILayer[L, U] {

  // Neighbouring layers; absent until linkWith wires them up. Note: plain
  // vars, so linking is not thread-safe (same as it has always been).
  private var upperLayer: Option[OSILayer[U, _]] = None
  private var lowerLayer: Option[OSILayer[_, L]] = None

  /** Hands a decoded chunk to the layer above; throws if none is linked. */
  protected def swim(chunk: U): Unit =
    upperLayer
      .getOrElse(throw new UnsupportedOperationException("There is no upper layer linked with."))
      .receive(chunk)

  /** Pushes a chunk to the layer below; failed future if none is linked. */
  protected def sink(chunk: L): Future[L] =
    lowerLayer.fold[Future[L]](
      Future.failed(new UnsupportedOperationException("There is no lower layer linked with."))
    )(_.pushDown(chunk))

  /** Stacks `upper` on top of this layer, wiring both directions, and returns
    * `upper` so calls can be chained.
    */
  def linkWith[A](upper: OSILayer[U, A]): OSILayer[U, A] = {
    this.upperLayer = Some(upper)
    upper.lowerLayer = Some(this)
    upper
  }

  /** Consumes a chunk arriving from the layer below. */
  protected def receive(chunk: L): Unit

  /** Transmits a chunk towards the layer below. */
  protected def pushDown(chunk: U): Future[U]
}
| rlishtaba/scala-commutable | src/main/scala/com/github/rlishtaba/commutable/layers/OSILayer.scala | Scala | mit | 853 |
/*
* Copyright 2019 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.play.microservice.config
import org.scalatest.{LoneElement, Matchers, WordSpecLike}
import play.api.test.{FakeRequest, WithApplication}
import uk.gov.hmrc.http.HeaderCarrier
import uk.gov.hmrc.play.audit.http.HeaderFieldsExtractor
/** Tests HeaderFieldsExtractor's handling of optional audit headers (only
  * "Surrogate" is extracted, lower-cased) and HttpAuditEvent's construction
  * of audit DataEvents from Play requests.
  */
class HttpAuditEventSpec extends WordSpecLike with Matchers with LoneElement {
  "The optional audit fields code" should {
    "Return the correct size map when fed with a given amount of items" in {
      val optionalFields =
        HeaderFieldsExtractor.optionalAuditFields(Map("Foo" -> "Bar", "Ehh" -> "Meh", "Surrogate" -> "Cool"))
      optionalFields.loneElement shouldBe ("surrogate" -> "Cool")
    }
    // NOTE(review): a Scala Map literal collapses duplicate keys, so this case
    // is effectively identical to the previous one.
    "Return the correct size map when fed with two identicle items" in {
      val optionalFields = HeaderFieldsExtractor.optionalAuditFields(
        Map("Foo" -> "Bar", "Ehh" -> "Meh", "Surrogate" -> "Cool", "Surrogate" -> "Cool"))
      optionalFields.loneElement shouldBe ("surrogate" -> "Cool")
    }
    // Multi-valued headers are joined with commas.
    "Return the correct size map when fed with seq values" in {
      val optionalFields = HeaderFieldsExtractor.optionalAuditFieldsSeq(
        Map(
          "Foo" -> Seq("Bar"),
          "Ehh" -> Seq("Meh"),
          "Surrogate" -> Seq("Cool"),
          "Surrogate" -> Seq("Cool", "funk", "grr")))
      optionalFields.loneElement shouldBe ("surrogate" -> "Cool,funk,grr")
    }
    "Return the correct size map when fed with no items" in {
      val optionalFields = HeaderFieldsExtractor.optionalAuditFields(Map("Foo" -> "Bar", "Ehh" -> "Meh"))
      optionalFields shouldBe empty
    }
  }
  "The code to generate an audit event" should {
    implicit val hc = new HeaderCarrier()
    // Minimal concrete HttpAuditEvent for exercising dataEvent().
    object HttpAuditEventForTest extends HttpAuditEvent {
      override def appName: String = "my-test-app"
    }
    "create a valid audit event with optional headers" in new WithApplication {
      val r =
        FakeRequest().withHeaders(("Foo" -> "Bar"), ("Ehh" -> "Meh"), ("Surrogate" -> "Cool"), ("Surrogate" -> "Cool"))
      val event = HttpAuditEventForTest.dataEvent("foo", "bar", r)
      event.detail.get("surrogate") shouldBe Some("Cool,Cool") //FRIC - play 2.5 now comman delimits multiple headers with the same name into a single header
    }
    "create a valid audit event with no optional headers" in new WithApplication {
      val r = FakeRequest().withHeaders(("Foo" -> "Bar"), ("Ehh" -> "Meh"))
      val event = HttpAuditEventForTest.dataEvent("foo", "bar", r)
      event.detail.get("surrogate") shouldBe None
    }
    //    "Include the authorisation, token and ip address in the audit messages" in new WithApplication {
    //      val request = FakeRequest("GET", "/foo")
    //
    //      implicit val hcWithoutSessionData = new HeaderCarrier()
    //
    //      val event: DataEvent = AuditFilter.buildAuditRequestEvent(EventTypes.ServiceSentResponse, request, "")
    //      event.detail should contain ("Authorization" -> "-")
    //      event.detail should contain ("token" -> "-")
    //      event.detail.keySet should contain ("ipAddress")
    //    }
  }
}
| hmrc/microservice-bootstrap | src/test/scala/uk/gov/hmrc/play/microservice/config/HttpAuditEventSpec.scala | Scala | apache-2.0 | 3,664 |
package ru.imho.dddmt.std
import ru.imho.dddmt.core.Base._
import ru.imho.dddmt.core.BaseConfig._
import java.net.URI
import java.io.File
import StandardNodeStateTypes.MTime
/** Built-in node technologies shipped with DDDMT. */
object StandardTechnologies {
  // Technology addressing files below a configured root directory on the
  // local file system; the only supported node state type is MTime
  // (file modification time).
  object LocalFileSystem extends NodeTechnologyType {
    val id = "localFileSystem"
    def newInstance(config: Configuration): NodeTechnology = new NodeTechnology {
      val root = config.getString("root")
      // Identity URI of this technology instance, scheme "local" with the
      // root as scheme-specific part.
      val idURI = new URI("local", root, null)
      val id = idURI.toString()
      val rootF = new File(root)
      // Modification time of `f`, or the `nonexistent` marker when absent.
      def mtime(f: File) = if (f.exists())
        MTime.fromLongTime(f.lastModified())
      else
        MTime.nonexistent
      // Resolves a node URI to a file relative to the configured root.
      def file(uri: URI) = new File(rootF, uri.getSchemeSpecificPart())
      // Per-URI state resolver; only MTime is supported, others are rejected.
      def newNodeStateResolver(nst: NodeStateType): URINodeStateResolver = nst match {
        case `MTime` =>
          _.map(uri => mtime(file(uri)))
        case _ =>
          throw new IllegalArgumentException(s"LocalFileSystem does not support `${nst.id}`")
      }
      // Aggregate-state resolver; yields None when the file does not exist.
      def newNodeStateAggregateResolver(nst: NodeStateType): URINodeStateAResolver = nst match {
        case `MTime` =>
          uri => {
            val f = file(uri)
            if(f.exists())
              Some(MTime.fromLongTimeA(f.lastModified()))
            else None
          }
        case _ =>
          throw new IllegalArgumentException(s"LocalFileSystem does not support `${nst.id}`")
      }
    }
  }
  // All standard technologies, for registration by callers.
  def asList: List[NodeTechnologyType] = LocalFileSystem :: Nil
}
package is.hail.expr.ir
import is.hail.{ExecStrategy, HailSuite}
import is.hail.TestUtils._
import is.hail.types.virtual.TInt32
import org.testng.annotations.Test
/** Checks that Hail's `length` function agrees with java.lang.String#length,
  * including for strings built from surrogate pairs.
  */
class StringLengthSuite extends HailSuite {
  implicit val execStrats = ExecStrategy.javaOnly

  @Test def sameAsJavaStringLength() {
    // Plain ASCII, the empty string, and an emoji encoded as a surrogate pair.
    val samples = List("abc", "", "\\uD83D\\uDCA9")
    samples.foreach { s =>
      assertEvalsTo(invoke("length", TInt32, Str(s)), s.length)
    }
  }
}
| hail-is/hail | hail/src/test/scala/is/hail/expr/ir/StringLengthSuite.scala | Scala | mit | 451 |
package com.twitter.finatra.kafkastreams.internal.utils.sampling
object IndexedSampleKey {

  /** Smallest possible row key for `sampleKey` (index 0) — lower bound of a range scan. */
  def rangeStart[SampleKey](sampleKey: SampleKey): IndexedSampleKey[SampleKey] =
    IndexedSampleKey(sampleKey, 0)

  /** Largest possible row key for `sampleKey` (index Int.MaxValue) — upper bound of a range scan. */
  def rangeEnd[SampleKey](sampleKey: SampleKey): IndexedSampleKey[SampleKey] =
    IndexedSampleKey(sampleKey, Int.MaxValue)
}

/**
 * The key in a sample KeyValue store. Each sample is stored as a row in the
 * table, and the index is what makes each row unique. The index is a number
 * of 0..sampleSize
 *
 * @param sampleKey the user specified key of the sample(e.g. engagement type, or audience)
 * @param index a number of 0..sampleSize
 * @tparam SampleKey the user specified sample key type.
 */
case class IndexedSampleKey[SampleKey](sampleKey: SampleKey, index: Int)
| twitter/finatra | kafka-streams/kafka-streams/src/main/scala/com/twitter/finatra/kafkastreams/internal/utils/sampling/IndexedSampleKey.scala | Scala | apache-2.0 | 797 |
package au.gov.dva.sopapi.tests.parsers
import au.gov.dva.sopapi.sopref.data.Conversions
import au.gov.dva.sopapi.sopref.parsing.implementations.cleansers.{GenericClenser, PostAug2015Clenser}
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import scala.util.Properties
;
@RunWith(classOf[JUnitRunner])
/** Tests for the SoP text cleansers. */
class CleanserTests extends FunSuite{
  // Smoke test: cleansing the converted PDF text must produce non-empty
  // output; the result is printed for manual inspection.
  test("Cleanse LS raw text") {
    val rawText = ParserTestUtils.resourceToString("lsConvertedToText.txt");
    val result = GenericClenser.clense(rawText)
    assert(result.length() > 0)
    System.out.println("START:")
    System.out.print(result)
    System.out.println("END")
  }
  // Demonstrates rejoining an exponent that PDF extraction split onto its own
  // line ("m/s" + newline + "2" -> "m/s²").
  // NOTE(review): this test only prints the result; it asserts nothing.
  test("Reinstate exponents")
  {
    val input = "4G = 4 x 9.81m/s\\n2"
    val replaced = """m/s[\\r\\n]+2""".r.replaceAllIn(input,"m/s\\u00B2")
    println(replaced)
  }
}
| govlawtech/dva-sop-api | app/src/test/scala/CleanserTests.scala | Scala | apache-2.0 | 856 |
package scorex.transaction.state.database.blockchain
import java.io.{DataInput, DataOutput, File}
import org.mapdb.{DB, DBMaker, DataIO, Serializer}
import scorex.account.Account
import scorex.block.Block
import scorex.transaction.LagonakiTransaction
import scorex.transaction.state.LagonakiState
import scorex.utils.ScorexLogging
/** Store current balances only, and balances changes within effective balance depth.
* Store transactions for selected accounts only.
* If no datafolder provided, blockchain lives in RAM (intended for tests only)
*/
/** Store current balances only, and balances changes within effective balance depth.
  * Store transactions for selected accounts only.
  * If no datafolder provided, blockchain lives in RAM (intended for tests only)
  */
class StoredState(dataFolderOpt: Option[String]) extends LagonakiState with ScorexLogging {
  // MapDB serializer storing an Account as its address string.
  private object AccSerializer extends Serializer[Account] {
    override def serialize(dataOutput: DataOutput, a: Account): Unit =
      Serializer.STRING.serialize(dataOutput, a.address)
    override def deserialize(dataInput: DataInput, i: Int): Account = {
      val address = Serializer.STRING.deserialize(dataInput, i)
      new Account(address)
    }
  }
  // MapDB serializer for a transaction array: count, then each transaction as
  // a length-prefixed byte blob.
  private object TxArraySerializer extends Serializer[Array[LagonakiTransaction]] {
    override def serialize(dataOutput: DataOutput, txs: Array[LagonakiTransaction]): Unit = {
      DataIO.packInt(dataOutput, txs.length)
      txs.foreach { tx =>
        val bytes = tx.bytes()
        DataIO.packInt(dataOutput, bytes.length)
        dataOutput.write(bytes)
      }
    }
    override def deserialize(dataInput: DataInput, i: Int): Array[LagonakiTransaction] = {
      val txsCount = DataIO.unpackInt(dataInput)
      (1 to txsCount).toArray.map { _ =>
        val txSize = DataIO.unpackInt(dataInput)
        val b = new Array[Byte](txSize)
        dataInput.readFully(b)
        LagonakiTransaction.parse(b)
      }
    }
  }
  // File-backed DB when a data folder is given, in-memory otherwise (tests).
  private val database: DB = dataFolderOpt match {
    case Some(dataFolder) =>
      val db = DBMaker.fileDB(new File(dataFolder + s"/state"))
        .closeOnJvmShutdown()
        .cacheSize(2048)
        .checksumEnable()
        .fileMmapEnable()
        .make()
      db.rollback() //clear uncommited data from possibly invalid last run
      db
    case None => DBMaker.memoryDB().make()
  }
  // Key of the atomic integer holding the current state height.
  private val StateHeight = "height"
  // account -> current balance
  private val balances = database.hashMap[Account, Long]("balances")
  // watched account -> its transactions (newest first, see processBlock)
  private val accountTransactions = database.hashMap(
    "watchedTxs",
    AccSerializer,
    TxArraySerializer,
    null)
  def setStateHeight(height: Int): Unit = database.atomicInteger(StateHeight).set(height)
  def stateHeight(): Int = database.atomicInteger(StateHeight).get()
  def processBlock(block: Block): Unit = processBlock(block, reversal = false)
  // Applies (or, with reversal=true, rolls back) a block: accumulates all
  // balance deltas (fees distribution + per-transaction changes), records
  // transactions for watched accounts, adjusts balances and the state height,
  // then commits. Note: the watched-transaction list is appended to even on
  // reversal.
  override def processBlock(block: Block, reversal: Boolean): Unit = {
    val balanceChanges = block.transactionModule.transactions(block)
      .foldLeft(block.consensusModule.feesDistribution(block)) { case (changes, atx) => atx match {
        case tx: LagonakiTransaction =>
          tx.balanceChanges().foldLeft(changes) { case (iChanges, (acc, delta)) =>
            //check whether account is watched, add tx to its txs list if so
            val prevTxs = accountTransactions.getOrDefault(acc, Array())
            accountTransactions.put(acc, Array.concat(Array(tx), prevTxs))
            //update balances sheet
            val currentChange = iChanges.getOrElse(acc, 0L)
            val newChange = currentChange + delta
            iChanges.updated(acc, newChange)
          }
        case _ =>
          log.error("Wrong transaction type in pattern-matching")
          changes
      }}
    balanceChanges.foreach { case (acc, delta) =>
      val balance = Option(balances.get(acc)).getOrElse(0L)
      val newBalance = if (!reversal) balance + delta else balance - delta
      balances.put(acc, newBalance)
    }
    // Height increases on apply, decreases on reversal; must remain positive.
    val newHeight = (if (!reversal) stateHeight() + 1 else stateHeight() - 1).ensuring(_ > 0)
    setStateHeight(newHeight)
    database.commit()
  }
  def appendBlock(block: Block): Unit = processBlock(block, reversal = false)
  def discardBlock(block: Block): Unit = processBlock(block, reversal = true)
  //todo: confirmations
  // NOTE(review): the `confirmations` parameter is currently ignored (see todo
  // above); this always answers the latest balance.
  override def balance(address: String, confirmations: Int): Long = {
    val acc = new Account(address)
    val balance = Option(balances.get(acc)).getOrElse(0L)
    balance
  }
  override def accountTransactions(account: Account): Array[LagonakiTransaction] =
    Option(accountTransactions.get(account)).getOrElse(Array())
  override def stopWatchingAccountTransactions(account: Account): Unit = accountTransactions.remove(account)
  override def watchAccountTransactions(account: Account): Unit = accountTransactions.put(account, Array())
  //initialization
  setStateHeight(0)
  //for debugging purposes only
  override def toString = {
    import scala.collection.JavaConversions._
    balances.mkString("\n")
  }
}
| benjyz/Scorex-Lagonaki | scorex-transaction/src/main/scala/scorex/transaction/state/database/blockchain/StoredState.scala | Scala | cc0-1.0 | 4,818 |
package com.nulabinc.backlog.migration.common.service
import java.lang.Thread.sleep
import javax.inject.Inject
import com.nulabinc.backlog.migration.common.client.BacklogAPIClient
import com.nulabinc.backlog.migration.common.convert.Convert
import com.nulabinc.backlog.migration.common.convert.writes.VersionWrites
import com.nulabinc.backlog.migration.common.domain.{BacklogProjectKey, BacklogVersion}
import com.nulabinc.backlog.migration.common.utils.Logging
import com.nulabinc.backlog4j.api.option.{AddVersionParams, UpdateVersionParams}
import scala.jdk.CollectionConverters._
/**
* @author
* uchida
*/
/** Backlog-API-backed implementation of VersionService: lists, creates,
  * renames and deletes project versions.
  */
class VersionServiceImpl @Inject() (implicit
    val versionWrites: VersionWrites,
    projectKey: BacklogProjectKey,
    backlog: BacklogAPIClient
) extends VersionService
    with Logging {

  import scala.util.control.NonFatal

  /** Fetches every version of the configured project. */
  override def allVersions(): Seq[BacklogVersion] =
    backlog.getVersions(projectKey.value).asScala.toSeq.map(Convert.toBacklog(_))

  /**
   * Creates a new version in the project.
   *
   * @return the created version, or None when the API call fails (the
   *         failure is logged). Only non-fatal exceptions are caught, so
   *         fatal JVM errors (OutOfMemoryError etc.) are no longer swallowed.
   */
  override def add(backlogVersion: BacklogVersion): Option[BacklogVersion] = {
    val params = new AddVersionParams(projectKey.value, backlogVersion.name)
    params.description(backlogVersion.description)
    // Optional dates are only set when present.
    backlogVersion.optStartDate.foreach(params.startDate(_))
    backlogVersion.optReleaseDueDate.foreach(params.releaseDueDate(_))
    try {
      // Fixed delay before the call — presumably client-side throttling of
      // the Backlog API; TODO confirm before removing.
      sleep(500)
      Some(Convert.toBacklog(backlog.addVersion(params)))
    } catch {
      case NonFatal(e) =>
        logger.error(e.getMessage, e)
        None
    }
  }

  /** Renames the version with the given id; None (logged) on failure. */
  override def update(versionId: Long, name: String): Option[BacklogVersion] = {
    val params = new UpdateVersionParams(projectKey.value, versionId, name)
    try {
      Some(Convert.toBacklog(backlog.updateVersion(params)))
    } catch {
      case NonFatal(e) =>
        logger.error(e.getMessage, e)
        None
    }
  }

  /** Deletes the version with the given id from the project. */
  override def remove(versionId: Long) = {
    backlog.removeVersion(projectKey.value, versionId)
  }
}
| nulab/backlog-migration-common | core/src/main/scala/com/nulabinc/backlog/migration/common/service/VersionServiceImpl.scala | Scala | mit | 1,999 |
import scala.reflect.runtime.universe._
import scala.tools.reflect.ToolBox
import scala.tools.reflect.Eval
// Reflection regression test: reify splices the abstract type T via the
// runtime TypeTag member, so D's deliberately wrong tag (Int, despite
// T = String) determines what the evaluated tree produces.
object Test extends dotty.runtime.LegacyApp {
  trait C {
    type T
    implicit val tt: TypeTag[T]
    // Reified lazily so T is resolved through `tt` at eval time.
    lazy val code = reify {
      List[T](2.asInstanceOf[T])
    }
  }
  class D extends C {
    type T = String // this "mistake" is made for a reason!
    override val tt: TypeTag[T] = implicitly[TypeTag[Int]].asInstanceOf[TypeTag[T]]
  }
  println((new D).code.eval)
}
| yusuke2255/dotty | tests/disabled/macro/run/reify_newimpl_21.scala | Scala | bsd-3-clause | 483 |
package shared.responses.groups.members
/** Response payload returned after adding the member `userId` to the group
  * `name` owned by organization `organizationId`.
  */
case class AddMemberGroupResponse(organizationId: String, name: String, userId: String)
| beikern/foulkon-ui | shared/src/main/scala/shared/responses/groups/members/AddMemberGroupResponse.scala | Scala | apache-2.0 | 134 |
/*
* Copyright 2020 Precog Data
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.impl.storage.mvstore
import slamdata.Predef._
import quasar.contrib.scalaz.MonadError_
import quasar.impl.storage.{CodecPrefixStore, PrefixStore, LinearCodec, StoreError}
import cats.effect.{Blocker, ContextShift, Sync}
import cats.implicits._
import org.h2.mvstore._
import shapeless._
import scodec.Codec
/** Constructs a codec-backed prefix store on top of an H2 MVStore map. */
object MVPrefixStore {
  // Opens (or creates) the named MVStore map with raw byte-array keys and
  // values, wraps it in a byte-prefixable store, and layers the K/V scodec
  // codecs on top.
  def apply[F[_]: MonadError_[?[_], StoreError]: Sync: ContextShift, K <: HList: LinearCodec, V: Codec](
      db: MVStore,
      name: String,
      blocker: Blocker)
      : F[PrefixStore.SCodec[F, K, V]] = {
    val builder = (new MVMap.Builder[Array[Byte], Array[Byte]]()).keyType(ByteArrayDataType).valueType(ByteArrayDataType)
    val store = db.openMap(name, builder)
    MVPrefixableStore[F, Byte, Array[Byte]](store, blocker).map(CodecPrefixStore[F, K, V](_))
  }
}
| quasar-analytics/quasar | impl/src/main/scala/quasar/impl/storage/mvstore/MVPrefixStore.scala | Scala | apache-2.0 | 1,423 |
// See the LICENCE.txt file distributed with this work for additional
// information regarding copyright ownership.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package scray.loader
import scray.querying.description.QueryspaceConfiguration
import scray.querying.description.TableConfiguration
import scray.querying.description.internal.MaterializedView
import scray.querying.queries.DomainQuery
import scray.querying.description.ColumnConfiguration
import com.typesafe.scalalogging.LazyLogging
import scray.loader.configparser.ScrayQueryspaceConfiguration
import scray.loader.configparser.ScrayConfiguration
import scray.loader.configuration.ScrayStores
import scala.collection.mutable.HashMap
import scray.querying.storeabstraction.StoreGenerators
import scray.querying.sync.DbSession
import scray.querying.description.ColumnConfiguration
import scray.querying.description.TableIdentifier
import scray.querying.storeabstraction.StoreExtractor
import scray.querying.description.VersioningConfiguration
import scray.querying.description.Row
import scray.querying.description.Column
import scray.querying.source.store.QueryableStoreSource
import com.twitter.util.FuturePool
import scray.querying.description.IndexConfiguration
import scray.querying.Registry
import scray.common.key.OrderedStringKeyGenerator
import scray.common.key.OrderedStringKeyGenerator
import scray.common.key.StringKey
import scray.querying.source.Splitter
/**
* a generic query space that can be used to load tables from
* various different databases.
*/
class ScrayLoaderQuerySpace(name: String, config: ScrayConfiguration, qsConfig: ScrayQueryspaceConfiguration,
    storeConfig: ScrayStores, futurePool: FuturePool)
  extends QueryspaceConfiguration(name) with LazyLogging {
  // Caches: store generators per DBMS id and extractors per table. The
  // generator cache entry is evicted whenever the DBMS session changes.
  val generators = new HashMap[String, StoreGenerators]
  val extractors = new HashMap[TableIdentifier, StoreExtractor[_]]
  storeConfig.addSessionChangeListener { (name, session) => generators -= name }
  val version = qsConfig.version
  // Materialized views declared in the queryspace configuration. Only the
  // ordered string key generator is recognized; any other class name falls
  // back to it (with a warning).
  val materializedViews = qsConfig.materializedViews.map { view =>
    if(view.keyClass.equals("scray.common.key.OrderedStringKeyGenerator")) {
      new MaterializedView(view.table, OrderedStringKeyGenerator)
    } else {
      logger.warn("Unknown key generator class. use default: scray.common.key.OrderedStringKeyGenerator")
      new MaterializedView(view.table, OrderedStringKeyGenerator)
    }
  }
  /**
   * if this queryspace can order accoring to query all by itself, i.e.
   * without an extra in-memory step introduced by scray-querying the
   * results will be ordered if the queryspace can choose the main table
   */
  def queryCanBeOrdered(query: DomainQuery): Option[ColumnConfiguration] = {
    // Resolve the ordering column (if any) against the registry.
    val orderingColumn = query.ordering match {
      case Some(columnOrdering) => Some(columnOrdering.column)
      case _ => None
    }
    orderingColumn.map {Registry.getQuerySpaceColumn(query.getQueryspace, query.querySpaceVersion, _) }.flatten
  }
  /**
   * if this queryspace can group accoring to query all by itself, i.e.
   * without an extra in-memory step introduced by scray-querying
   */
  def queryCanBeGrouped(query: DomainQuery): Option[ColumnConfiguration] = {
    // Resolve the grouping column (if any) against the registry.
    val groupingColumn = query.grouping match {
      case Some(grouping) => Some(grouping.column)
      case _ => None
    }
    groupingColumn.map {Registry.getQuerySpaceColumn(query.getQueryspace, query.querySpaceVersion, _) }.flatten
  }
  /**
   * return a generator for the given named dbms
   */
  private def getGenerator(dbmsId: String, session: DbSession[_, _, _]) = {
    // Cached per DBMS id; created on first use from the store configuration.
    generators.get(dbmsId).getOrElse {
      val generator = storeConfig.getStoreGenerator(dbmsId, session, name, futurePool)
      generators += ((dbmsId, generator))
      generator
    }
  }
  /**
   * return configuration for a simple rowstore
   */
  private def getRowstoreConfiguration(id: TableIdentifier): Option[TableConfiguration[_ <: DomainQuery, _ <: DomainQuery, _]] = {
    // Extractor
    // Builds the table configuration for one store, registering the resulting
    // extractor in the `extractors` cache as a side effect.
    def extractTable[Q <: DomainQuery, S <: QueryableStoreSource[Q]](storeconfigs: (S,
      (Function1[_, Row], Option[String], Option[VersioningConfiguration[_, _]])), generator: StoreGenerators):
      TableConfiguration[_ <: DomainQuery, _ <: DomainQuery, _] = {
      // TODO: read latest version from SyncTable, if it is declared there, generate a VersioningConfig; otherwise leave it by None
      val extractor = generator.getExtractor[Q, S](storeconfigs._1, Some(id.tableId), None, Some(id.dbSystem), futurePool)
      val tid = extractor.getTableIdentifier(storeconfigs._1, storeconfigs._2._2, Some(id.dbSystem))
      extractors.+=((tid, extractor))
      extractor.getTableConfiguration(storeconfigs._2._1)
    }
    // retrieve session...
    val session = storeConfig.getSessionForStore(id.dbSystem)
    // TODO: add session change listener to change store in case of session change
    // storeConfig.addSessionChangeListener(listener)
    session.flatMap { sess =>
      val generator = getGenerator(id.dbSystem, sess)
      val sStore = generator.createRowStore(id)
      sStore.map { storeconfigs =>
        extractTable(storeconfigs, generator)
      }
    }
  }
  /**
   * returns configuration of tables which are included in this query space
   * Internal use!
   */
  def getTables(version: Int): Set[TableConfiguration[_ <: DomainQuery, _ <: DomainQuery, _]] = {
    // NOTE(review): this local shadows the class-level `generators` cache and
    // appears unused here — verify intent.
    val generators = new HashMap[String, StoreGenerators]
    // TODO: read versioned tables from SyncTable and add to rowstores
    val rowstores = qsConfig.rowStores
    // Keep only the row stores whose configuration could be resolved.
    rowstores.map { tableConfigTxt =>
      getRowstoreConfiguration(tableConfigTxt)
    }.collect {
      case Some(tableConfiguration) => tableConfiguration
    }.toSet
    // TODO: add more tables (for the ones in the queryspace config, e.g. indexes)
  }
  /**
   * returns columns which can be included in this query space
   * Internal use!
   */
  override def getColumns(version: Int): List[ColumnConfiguration] = {
    def getColumnConfig[S <: TableConfiguration[_ <: DomainQuery, _ <: DomainQuery, _]](table: S): List[ColumnConfiguration] = {
      // Logs and builds (but does not throw) the error used when a column's
      // table has no registered extractor yet.
      def throwError: Exception = {
        logger.error("Store must be registered before columns can be extracted!")
        new UnsupportedOperationException("Store must be registered before columns can be extracted!")
      }
      // Builds one column's configuration, including its (manual) index; fails
      // with DBMSUndefinedException when no session exists for the column's DBMS.
      def extractTableConfig[Q <: DomainQuery, F <: QueryableStoreSource[Q]](column: Column, extractor: StoreExtractor[F]): ColumnConfiguration = {
        // TODO: add indexing configuration (replace maps)
        val index = extractor.createManualIndexConfiguration(column, name, version, table.queryableStore.get.asInstanceOf[F], Map(), Map())
        storeConfig.getSessionForStore(column.table.dbSystem).map { session =>
          // TODO: add splitter configuration
          extractor.getColumnConfiguration(session, column.table.dbId, column.table.tableId, column, index, Map[Column, Splitter[_]]())
        }.getOrElse(throw new DBMSUndefinedException(column.table.dbSystem, name))
      }
      table.allColumns.map { column =>
        // fetch extractor
        extractors.get(column.table).getOrElse(throw throwError) match {
          case extractor: StoreExtractor[s] => extractTableConfig[DomainQuery, QueryableStoreSource[DomainQuery]](column, extractor.asInstanceOf[StoreExtractor[QueryableStoreSource[DomainQuery]]])
          case _ => throw throwError
        }
      }.toList
    }
    val tables = getTables(version)
    tables.flatMap { table => getColumnConfig(table) }.toList
  }
  /**
   * re-initialize this queryspace, possibly re-reading the configuration from somewhere
   */
  // NOTE(review): still unimplemented — calling this throws NotImplementedError.
  def reInitialize(oldversion: Int): QueryspaceConfiguration = ???
  def getMaterializedViews: Seq[MaterializedView] = materializedViews
  override def toString: String = {
    s"""$name { tables: [${getTables(0)}] }"""
  }
}
| scray/scray | scray-loader/src/main/scala/scray/loader/ScrayLoaderQuerySpace.scala | Scala | apache-2.0 | 8,396 |
package com.tribbloids.spookystuff.utils.lifespan
import com.tribbloids.spookystuff.utils.serialization.NOTSerializable
// Marker for Cleanable resources bound to the local JVM: mixing in
// NOTSerializable ensures instances are never serialized and shipped
// (e.g. inside a Spark closure).
trait LocalCleanable extends Cleanable with NOTSerializable
| tribbloid/spookystuff | mldsl/src/main/scala/com/tribbloids/spookystuff/utils/lifespan/LocalCleanable.scala | Scala | apache-2.0 | 181 |
/*
* This file is part of Kiama.
*
* Copyright (C) 2014-2015 Anthony M Sloane, Macquarie University.
*
* Kiama is free software: you can redistribute it and/or modify it under
* the terms of the GNU Lesser General Public License as published by the
* Free Software Foundation, either version 3 of the License, or (at your
* option) any later version.
*
* Kiama is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
* more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Kiama. (See files COPYING and COPYING.LESSER.) If not, see
* <http://www.gnu.org/licenses/>.
*/
/*
* This file is derived from a JastAdd implementation of PicoJava, created
* in the Department of Computer Science at Lund University. See the
* following web site for details:
*
* http://jastadd.cs.lth.se/examples/PicoJava/index.shtml
*/
package org.kiama
package example.picojava.tests
import org.kiama.util.Tests
/**
* Test of many picojava features together, Due to Niklas Fors.
*/
class CombinedTests extends Tests {

    import org.kiama.example.picojava.ErrorCheck
    import org.kiama.example.picojava.PicoJavaTree._

    // For the actual program text, see CombinedTests.pj
    // Hand-built AST of that program: class A (field a, nested class AA) and
    // class B extends A (fields/refs, nested class BB extends AA) exercising
    // inheritance, qualified access and a while statement together.
    val ast =
        Program (
            Block (
                List (
                    ClassDecl (
                        "A",
                        None,
                        Block (
                            List (
                                VarDecl (Use ("boolean"), "a"),
                                AssignStmt (Use ("a"), BooleanLiteral ("true")),
                                ClassDecl (
                                    "AA",
                                    None,
                                    Block (
                                        List (VarDecl (Use ("boolean"), "aa"))))))),
                    ClassDecl (
                        "B",
                        Some (Use ("A")),
                        Block (
                            List (
                                VarDecl (Use ("boolean"), "b"),
                                AssignStmt (Use ("b"), Use ("a")),
                                VarDecl (Use ("A"), "refA"),
                                VarDecl (Use ("B"), "refB"),
                                AssignStmt (Use ("refA"), Use ("refB")),
                                AssignStmt (
                                    Dot (Use ("refB"), Use ("b")),
                                    Dot (Use ("refA"), Use ("a"))),
                                ClassDecl (
                                    "BB",
                                    Some (Use ("AA")),
                                    Block (
                                        List (
                                            VarDecl (Use ("boolean"), "bb"),
                                            AssignStmt (Use ("bb"), Use ("aa")),
                                            WhileStmt (
                                                Use ("b"),
                                                AssignStmt (Use ("b"), Use ("a"))))))))))))

    // Run the semantic analyser over the tree and demand a clean result.
    val tree = new PicoJavaTree (ast)
    val analyser = new ErrorCheck (tree)
    import analyser._

    test ("combined test program has no errors") {
        assertResult (0) (errors.size)
    }

}
| solomono/kiama | library/src/org/kiama/example/picojava/tests/CombinedTests.scala | Scala | gpl-3.0 | 3,510 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.utils.stats
import java.util.Date
import com.typesafe.scalalogging.LazyLogging
import com.vividsolutions.jts.geom.{Coordinate, Geometry}
import org.geotools.geometry.jts.JTSFactoryFinder
import org.locationtech.geomesa.utils.clearspring.HyperLogLog
import org.locationtech.geomesa.utils.stats.MinMax.CardinalityBits
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import scala.collection.immutable.ListMap
/**
* The MinMax stat merely returns the min/max of an attribute's values.
* Works with dates, integers, longs, doubles, and floats.
*
* @param sft simple feature type
* @param property property name for the attribute being min/maxed
* @tparam T the type of the attribute the stat is targeting (needs to be comparable)
*/
class MinMax[T] private [stats] (val sft: SimpleFeatureType,
                                 val property: String,
                                 private [stats] var minValue: T,
                                 private [stats] var maxValue: T,
                                 private [stats] val hpp: HyperLogLog)
                                (implicit val defaults: MinMax.MinMaxDefaults[T])
    extends Stat with LazyLogging with Serializable {

  // use a secondary constructor instead of companion apply to allow mixin types (i.e. ImmutableStat)
  // note the inverted sentinels: a fresh stat starts with minValue = defaults.max and
  // maxValue = defaults.min, which is exactly what makes `isEmpty` true until the
  // first value is observed
  def this(sft: SimpleFeatureType, attribute: String)(implicit defaults: MinMax.MinMaxDefaults[T]) =
    this(sft, attribute, defaults.max, defaults.min, HyperLogLog(CardinalityBits))(defaults)

  override type S = MinMax[T]

  // deprecated alias - note it exposes the attribute *index*, while `property` is the name
  @deprecated("property")
  lazy val attribute: Int = i

  // index of `property` in the feature type (the lazy `attribute` above can safely reference it)
  private val i = sft.indexOf(property)

  // when empty, these intentionally cross over and return the opposite sentinel,
  // i.e. bounds of an empty stat come back as (defaults.min, defaults.max)
  def min: T = if (isEmpty) { maxValue } else { minValue }
  def max: T = if (isEmpty) { minValue } else { maxValue }

  def bounds: (T, T) = (min, max)

  // approximate distinct-value count from the HyperLogLog sketch
  def cardinality: Long = hpp.cardinality()

  def tuple: (T, T, Long) = (min, max, cardinality)

  /** Folds a feature's attribute value (if non-null) into the min, max and sketch. */
  override def observe(sf: SimpleFeature): Unit = {
    val value = sf.getAttribute(i).asInstanceOf[T]
    if (value != null) {
      try {
        minValue = defaults.min(value, minValue)
        maxValue = defaults.max(value, maxValue)
        hpp.offer(value)
      } catch {
        // e.g. a value whose runtime type doesn't match the binding - skip it rather
        // than failing the whole scan
        case e: Exception => logger.warn(s"Error observing value '$value': ${e.toString}")
      }
    }
  }

  // note: can't unobserve min/max without storing a lot more data
  override def unobserve(sf: SimpleFeature): Unit = {}

  /**
   * Non-destructive merge: neither input is modified. hpp.merge() with no
   * arguments presumably yields a copy of the sketch — confirm against
   * HyperLogLog's implementation.
   */
  override def +(other: MinMax[T]): MinMax[T] = {
    if (other.isEmpty) {
      new MinMax[T](sft, property, minValue, maxValue, hpp.merge())
    } else if (this.isEmpty) {
      new MinMax[T](sft, property, other.minValue, other.maxValue, other.hpp.merge())
    } else {
      val plus = new MinMax[T](sft, property, minValue, maxValue, hpp.merge())
      plus += other
      plus
    }
  }

  /** In-place merge of `other` into this stat (no-op if `other` is empty). */
  override def +=(other: MinMax[T]): Unit = {
    if (other.isEmpty) {
      // no-op
    } else if (isEmpty) {
      minValue = other.minValue
      maxValue = other.maxValue
      hpp += other.hpp
    } else {
      minValue = defaults.min(minValue, other.minValue)
      maxValue = defaults.max(maxValue, other.maxValue)
      hpp += other.hpp
    }
  }

  override def toJsonObject: Any =
    if (isEmpty) {
      ListMap("min" -> null, "max" -> null, "cardinality" -> 0)
    } else {
      ListMap("min" -> minValue, "max" -> maxValue, "cardinality" -> cardinality)
    }

  // minValue only ever moves down from its defaults.max sentinel, so equality with
  // the sentinel means nothing has been observed yet
  override def isEmpty: Boolean = minValue == defaults.max

  /** Resets to the freshly-constructed (empty) state, including the sketch registers. */
  override def clear(): Unit = {
    minValue = defaults.max
    maxValue = defaults.min
    java.util.Arrays.fill(hpp.registerSet.rawBits, 0)
  }

  // equivalence is by property + bounds + estimated cardinality (not raw sketch contents)
  override def isEquivalent(other: Stat): Boolean = other match {
    case that: MinMax[T] =>
      property == that.property && minValue == that.minValue &&
          maxValue == that.maxValue && cardinality == that.cardinality
    case _ => false
  }
}
object MinMax {

  // number of register index bits used for every HyperLogLog sketch - kept constant
  // so that sketches from different stats can be merged together
  val CardinalityBits: Int = 10

  /**
   * Per-binding type class supplying:
   *   - `min`/`max` sentinel values (the extremes of the type), used to mark an empty stat, and
   *   - binary `min`/`max` combiners used when folding in observed values.
   */
  trait MinMaxDefaults[T] {
    def min: T
    def max: T
    def min(left: T, right: T): T
    def max(left: T, right: T): T
  }

  object MinMaxDefaults {
    /** Runtime lookup of the instance for an attribute binding; throws for unsupported types. */
    def apply[T](binding: Class[_]): MinMaxDefaults[T] = {
      if (binding == classOf[String]) {
        MinMaxString.asInstanceOf[MinMaxDefaults[T]]
      } else if (binding == classOf[Integer]) {
        MinMaxInt.asInstanceOf[MinMaxDefaults[T]]
      } else if (binding == classOf[java.lang.Long]) {
        MinMaxLong.asInstanceOf[MinMaxDefaults[T]]
      } else if (binding == classOf[java.lang.Float]) {
        MinMaxFloat.asInstanceOf[MinMaxDefaults[T]]
      } else if (binding == classOf[java.lang.Double]) {
        MinMaxDouble.asInstanceOf[MinMaxDefaults[T]]
      } else if (classOf[Date].isAssignableFrom(binding)) {
        MinMaxDate.asInstanceOf[MinMaxDefaults[T]]
      } else if (classOf[Geometry].isAssignableFrom(binding)) {
        MinMaxGeometry.asInstanceOf[MinMaxDefaults[T]]
      } else {
        throw new IllegalArgumentException(s"No implicit default available for type: $binding")
      }
    }
  }

  /** Combiners shared by all types with a natural Comparable ordering. */
  abstract class ComparableMinMax[T <: Comparable[T]] extends MinMaxDefaults[T] with Serializable {
    override def min(left: T, right: T): T = if (left.compareTo(right) > 0) right else left
    override def max(left: T, right: T): T = if (left.compareTo(right) < 0) right else left
  }

  implicit object MinMaxString extends ComparableMinMax[String] with Serializable {
    override val min: String = ""
    // sentinel that sorts after any realistic string value
    override val max: String = "\\uFFFF\\uFFFF\\uFFFF"
  }

  implicit object MinMaxInt extends ComparableMinMax[Integer] with Serializable {
    override val min: Integer = Integer.MIN_VALUE
    override val max: Integer = Integer.MAX_VALUE
  }

  implicit object MinMaxLong extends ComparableMinMax[java.lang.Long] with Serializable {
    override val min: java.lang.Long = java.lang.Long.MIN_VALUE
    override val max: java.lang.Long = java.lang.Long.MAX_VALUE
  }

  implicit object MinMaxFloat extends ComparableMinMax[java.lang.Float] with Serializable {
    // 0 - MAX_VALUE rather than MIN_VALUE: for floating point, MIN_VALUE is the
    // smallest *positive* value, not the most negative one
    override val min: java.lang.Float = 0f - java.lang.Float.MAX_VALUE
    override val max: java.lang.Float = java.lang.Float.MAX_VALUE
  }

  implicit object MinMaxDouble extends ComparableMinMax[java.lang.Double] with Serializable {
    // see MinMaxFloat for why this is not Double.MIN_VALUE
    override val min: java.lang.Double = 0d - java.lang.Double.MAX_VALUE
    override val max: java.lang.Double = java.lang.Double.MAX_VALUE
  }

  implicit object MinMaxDate extends ComparableMinMax[Date] with Serializable {
    override val min: Date = new Date(java.lang.Long.MIN_VALUE)
    override val max: Date = new Date(java.lang.Long.MAX_VALUE)
  }

  /**
   * Geometry min/max tracks the bounding box of each geometry, not the geometries themselves.
   * If neither input's envelope corner dominates, a synthetic corner point is created.
   */
  implicit object MinMaxGeometry extends MinMaxDefaults[Geometry] with Serializable {

    private val gf = JTSFactoryFinder.getGeometryFactory

    // sentinels: the corners of the whole lon/lat domain
    override val min: Geometry = gf.createPoint(new Coordinate(-180.0, -90.0))
    override val max: Geometry = gf.createPoint(new Coordinate(180.0, 90.0))

    /** "Min" = lower-left envelope corner; returns an input when it attains both minima. */
    override def min(left: Geometry, right: Geometry): Geometry = {
      val (lx, ly) = { val e = left.getEnvelopeInternal; (e.getMinX, e.getMinY) }
      val (rx, ry) = { val e = right.getEnvelopeInternal; (e.getMinX, e.getMinY) }
      val x = math.min(lx, rx)
      val y = math.min(ly, ry)
      if (x == lx && y == ly) {
        left
      } else if (x == rx && y == ry) {
        right
      } else {
        gf.createPoint(new Coordinate(x, y))
      }
    }

    /** "Max" = upper-right envelope corner; returns an input when it attains both maxima. */
    override def max(left: Geometry, right: Geometry): Geometry = {
      val (lx, ly) = { val e = left.getEnvelopeInternal; (e.getMaxX, e.getMaxY) }
      val (rx, ry) = { val e = right.getEnvelopeInternal; (e.getMaxX, e.getMaxY) }
      val x = math.max(lx, rx)
      val y = math.max(ly, ry)
      if (x == lx && y == ly) {
        left
      } else if (x == rx && y == ry) {
        right
      } else {
        gf.createPoint(new Coordinate(x, y))
      }
    }
  }
}
| ddseapy/geomesa | geomesa-utils/src/main/scala/org/locationtech/geomesa/utils/stats/MinMax.scala | Scala | apache-2.0 | 8,402 |
package org.jetbrains.plugins.scala
package lang
package psi
package types
import org.jetbrains.plugins.scala.lang.psi.types.nonvalue.NonValueType
import scala.collection.immutable.HashSet
/**
* Use this type if you want to resolve generics.
* In conformance using ScUndefinedSubstitutor you can accumulate information
* about possible generic type.
*/
case class ScUndefinedType(tpt: ScTypeParameterType) extends NonValueType {
  // NOTE(review): mutable field on a case class; `level` takes no part in the
  // generated equals/hashCode (only `tpt` does), so two instances differing only
  // in level compare equal. Kept as-is since the matching below relies on it.
  var level = 0

  def visitType(visitor: ScalaTypeVisitor) {
    visitor.visitUndefinedType(this)
  }

  // auxiliary constructor used to create an undefined type at a given nesting level
  def this(tpt: ScTypeParameterType, level: Int) {
    this(tpt)
    this.level = level
  }

  // collapsing to a value type simply yields the underlying type parameter type
  def inferValueType: ValueType = tpt

  /**
   * Accumulates bounds for this type parameter in the substitutor:
   *  - against an undefined type at a different level, one becomes a bound of the other;
   *  - same-level undefined types are trivially equivalent;
   *  - any other type is recorded as both a lower and an upper bound.
   * With `falseUndef` set, undefined types never match (used to force strict comparison).
   */
  override def equivInner(r: ScType, subst: ScUndefinedSubstitutor, falseUndef: Boolean): (Boolean, ScUndefinedSubstitutor) = {
    var undefinedSubst = subst
    r match {
      case _ if falseUndef => (false, undefinedSubst)
      case u2: ScUndefinedType if u2.level > level =>
        (true, undefinedSubst.addUpper((u2.tpt.name, u2.tpt.getId), this))
      case u2: ScUndefinedType if u2.level < level =>
        (true, undefinedSubst.addUpper((tpt.name, tpt.getId), u2))
      case u2: ScUndefinedType if u2.level == level =>
        (true, undefinedSubst)
      case rt =>
        undefinedSubst = undefinedSubst.addLower((tpt.name, tpt.getId), rt)
        undefinedSubst = undefinedSubst.addUpper((tpt.name, tpt.getId), rt)
        (true, undefinedSubst)
    }
  }
}
/**
* This type works like undefined type, but you cannot use this type
* to resolve generics. It's important if two local type
* inferences work together.
*/
case class ScAbstractType(tpt: ScTypeParameterType, lower: ScType, upper: ScType) extends NonValueType {
  // cached hashCode; -1 means "not yet computed". NOTE(review): no synchronization,
  // so concurrent first calls may both compute it - benign, as the value is deterministic.
  private var hash: Int = -1

  // renders e.g. "?T/* >: Lower <: Upper*/", omitting trivial Nothing/Any bounds
  override def toString: String = {
    val buffer = new StringBuilder
    buffer.append("?")
    buffer.append(super.toString)
    buffer.append("/*")
    if (!lower.equiv(Nothing)) {
      val lowerText: String = " >: " + lower.toString
      buffer.append(lowerText)
    }
    if (!upper.equiv(Any)) {
      val upperText: String = " <: " + upper.toString
      buffer.append(upperText)
    }
    buffer.append("*/")
    buffer.toString()
  }

  override def hashCode: Int = {
    if (hash == -1) {
      hash = (upper.hashCode() * 31 + lower.hashCode()) * 31 + tpt.args.hashCode()
    }
    hash
  }

  // mirrors hashCode: identity is (lower, upper, tpt.args) - the rest of tpt is ignored
  override def equals(obj: scala.Any): Boolean = {
    obj match {
      case ScAbstractType(oTpt, oLower, oUpper) =>
        lower.equals(oLower) && upper.equals(oUpper) && tpt.args.equals(oTpt.args)
      case _ => false
    }
  }

  /**
   * Equivalent when `r` lies within the [lower, upper] bounds, as checked by
   * Conformance (argument order per conformsInner — confirm that the first
   * argument is the expected supertype). Never matches when `falseUndef` is set.
   */
  override def equivInner(r: ScType, uSubst: ScUndefinedSubstitutor,
                          falseUndef: Boolean): (Boolean, ScUndefinedSubstitutor) = {
    r match {
      case _ if falseUndef => (false, uSubst)
      case rt =>
        var t: (Boolean, ScUndefinedSubstitutor) = Conformance.conformsInner(upper, r, Set.empty, uSubst)
        if (!t._1) return (false, uSubst)
        t = Conformance.conformsInner(r, lower, Set.empty, t._2)
        if (!t._1) return (false, uSubst)
        (true, t._2)
    }
  }

  def inferValueType = tpt

  // collapse to a concrete bound: prefer the lower bound unless a bound is trivial
  def simplifyType: ScType = {
    if (upper.equiv(Any)) lower else if (lower.equiv(Nothing)) upper else lower
  }

  override def removeAbstracts = simplifyType

  // guarded by a visited set to stop infinite recursion on cyclic types; an update
  // that turns tpt into a non-ScTypeParameterType surfaces as RecursiveUpdateException
  override def recursiveUpdate(update: ScType => (Boolean, ScType), visited: HashSet[ScType]): ScType = {
    if (visited.contains(this)) {
      return update(this) match {
        case (true, res) => res
        case _ => this
      }
    }
    val newVisited = visited + this
    update(this) match {
      case (true, res) => res
      case _ =>
        try {
          ScAbstractType(tpt.recursiveUpdate(update, newVisited).asInstanceOf[ScTypeParameterType], lower.recursiveUpdate(update, newVisited),
            upper.recursiveUpdate(update, newVisited))
        }
        catch {
          case cce: ClassCastException => throw new RecursiveUpdateException
        }
    }
  }

  // note the variance flip (-variance) for the lower bound: it sits in contravariant position
  override def recursiveVarianceUpdateModifiable[T](data: T, update: (ScType, Int, T) => (Boolean, ScType, T),
                                                   variance: Int = 1): ScType = {
    update(this, variance, data) match {
      case (true, res, _) => res
      case (_, _, newData) =>
        try {
          ScAbstractType(tpt.recursiveVarianceUpdateModifiable(newData, update, variance).asInstanceOf[ScTypeParameterType],
            lower.recursiveVarianceUpdateModifiable(newData, update, -variance),
            upper.recursiveVarianceUpdateModifiable(newData, update, variance))
        }
        catch {
          case cce: ClassCastException => throw new RecursiveUpdateException
        }
    }
  }

  def visitType(visitor: ScalaTypeVisitor) {
    visitor.visitAbstractType(this)
  }
} | triggerNZ/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/types/ScUndefinedType.scala | Scala | apache-2.0 | 4,780 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.status.api.v1
import java.util.{Arrays, Date, List => JList}
import javax.ws.rs._
import javax.ws.rs.core.MediaType
import org.apache.spark.JobExecutionStatus
import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.jobs.UIData.JobUIData
@Produces(Array(MediaType.APPLICATION_JSON))
private[v1] class AllJobsResource(ui: SparkUI) {

  /**
   * REST handler for listing jobs: delegates to this UI's app status store,
   * filtered by any `?status=` query parameters (empty list = all jobs).
   */
  @GET
  def jobsList(@QueryParam("status") statuses: JList[JobExecutionStatus]): Seq[JobData] = {
    ui.store.jobsList(statuses)
  }

}
| cin/spark | core/src/main/scala/org/apache/spark/status/api/v1/AllJobsResource.scala | Scala | apache-2.0 | 1,305 |
object OuterObjectApply {
object Objecta {
def apply(s: String) = s + "biaka"
def foo(s: String) = Objecta(s)
}
object Main {
def main(args: Array[String]) {
print( /*caret*/ Objecta.foo("ti "))
}
}
}
/*
object OuterObjectApply {
object NameAfterRename {
def apply(s: String) = s + "biaka"
def foo(s: String) = NameAfterRename(s)
}
object Main {
def main(args: Array[String]) {
print( /*caret*/ NameAfterRename.foo("ti "))
}
}
}
*/ | ilinum/intellij-scala | testdata/rename/class/ObjectApply.scala | Scala | apache-2.0 | 495 |
package net.maffoo.jsonquote.literal
/**
 * Value class wrapping a string known to be valid JSON. The package-private
 * constructor means instances can only be produced inside this package
 * (e.g. via [[Json.apply]]), which is what justifies trusting the contents.
 */
class Json private[jsonquote] (val s: String) extends AnyVal {
  override def toString = s
}
object Json {

  /**
   * Parse a json string at runtime, marking it as valid with the Json value class.
   *
   * NOTE(review): the match is non-exhaustive - a Parse result that is not a
   * single Chunk raises MatchError here; presumably Parse rejects invalid
   * input before that can happen. Confirm against Parse's contract.
   */
  def apply(s: String): Json = Parse(Seq(s)) match {
    case Seq(Chunk(s)) => new Json(s)
  }

  /**
   * Quote strings for inclusion as JSON strings: wraps in double quotes and
   * escapes the characters the JSON grammar requires. Forward slash is also
   * escaped, which is optional per the spec but always valid.
   */
  def quoteString (s : String) : String = "\\"" + s.flatMap {
    case '"' => """\\""""
    case '\\\\' => """\\\\"""
    case '/' => """\\/"""
    case '\\b' => """\\b"""
    case '\\f' => """\\f"""
    case '\\n' => """\\n"""
    case '\\r' => """\\r"""
    case '\\t' => """\\t"""
    // any remaining control character becomes a \\uXXXX escape
    case c if c.isControl => f"\\\\u$c%04x"
    case c => c.toString
  } + "\\""
}
| maffoo/jsonquote | core/src/main/scala/net/maffoo/jsonquote/literal/Json.scala | Scala | mit | 749 |
package edu.rice.habanero.benchmarks.facloc
import java.util.function.Consumer
import edu.rice.habanero.actors.{ScalaActor, ScalaActorState}
import edu.rice.habanero.benchmarks.facloc.FacilityLocationConfig.{Box, Point, Position}
import edu.rice.habanero.benchmarks.{Benchmark, BenchmarkRunner}
/**
*
* @author <a href="http://shams.web.rice.edu/">Shams Imam</a> (shams@rice.edu)
*/
object FacilityLocationScalaActorBenchmark {
  def main(args: Array[String]) {
    // Delegates to the shared harness, which drives iterations and timing.
    BenchmarkRunner.runBenchmark(args, new FacilityLocationScalaActorBenchmark)
  }
  private final class FacilityLocationScalaActorBenchmark extends Benchmark {

    def initialize(args: Array[String]) {
      FacilityLocationConfig.parseArgs(args)
    }

    def printArgInfo() {
      FacilityLocationConfig.printArgs()
    }

    /** Builds the root quadrant covering the whole grid and a producer that
      * streams random customer points into it, then waits for all actors to finish. */
    def runIteration() {
      // a quadrant opens a facility once its accumulated connection cost exceeds alpha * F
      val threshold = FacilityLocationConfig.ALPHA * FacilityLocationConfig.F
      val boundingBox = new Box(0, 0, FacilityLocationConfig.GRID_SIZE, FacilityLocationConfig.GRID_SIZE)
      // root: no parent, depth 0, no inherited facilities or customers
      val rootQuadrant = new QuadrantActor(
        null, Position.ROOT, boundingBox, threshold, 0,
        new java.util.ArrayList[Point](), 1, -1, new java.util.ArrayList[Point]())
      rootQuadrant.start()
      val producer = new ProducerActor(rootQuadrant)
      producer.start()
      ScalaActorState.awaitTermination()
    }

    def cleanupIteration(lastIteration: Boolean, execTimeMillis: Double) {
    }
  }
  // Message protocol between the producer and the quadrant tree.
  private abstract class Msg()
  // A facility opened at `point`; fromChild marks upward propagation from a child at `depth`.
  private case class FacilityMsg(positionRelativeToParent: Int, depth: Int, point: Point, fromChild: Boolean) extends Msg
  // Root quadrant asks the producer for another customer.
  private case class NextCustomerMsg() extends Msg
  // A new customer point entering the tree (carries the producer so the root can reply).
  private case class CustomerMsg(producer: ScalaActor[AnyRef], point: Point) extends Msg
  // Shutdown request, propagated down the tree.
  private case class RequestExitMsg() extends Msg
  // A child's final facility/customer totals, reported just before it exits.
  private case class ConfirmExitMsg(facilities: Int, supportCustomers: Int) extends Msg
private class ProducerActor(consumer: ScalaActor[AnyRef]) extends ScalaActor[AnyRef] {
private val selfActor = this
private var itemsProduced = 0
override def onPostStart(): Unit = {
produceCustomer()
}
private def produceCustomer(): Unit = {
consumer.send(CustomerMsg(selfActor, Point.random(FacilityLocationConfig.GRID_SIZE)))
itemsProduced += 1
}
override def process(message: AnyRef) {
message match {
case msg: NextCustomerMsg =>
if (itemsProduced < FacilityLocationConfig.NUM_POINTS) {
produceCustomer()
} else {
consumer.send(RequestExitMsg())
exit()
}
}
}
}
private class QuadrantActor(parent: QuadrantActor,
positionRelativeToParent: Int,
val boundary: Box,
threshold: Double,
depth: Int,
initLocalFacilities: java.util.List[Point],
initKnownFacilities: Int,
initMaxDepthOfKnownOpenFacility: Int,
initCustomers: java.util.List[Point]) extends ScalaActor[AnyRef] {
private val selfActor = this
// the facility associated with this quadrant if it were to open
private val facility: Point = boundary.midPoint()
// all the local facilities from corner ancestors
val localFacilities = new java.util.ArrayList[Point]()
localFacilities.addAll(initLocalFacilities)
localFacilities.add(facility)
private var knownFacilities = initKnownFacilities
private var maxDepthOfKnownOpenFacility = initMaxDepthOfKnownOpenFacility
private var terminatedChildCount = 0
// the support customers for this Quadrant
private val supportCustomers = new java.util.ArrayList[Point]()
private var childrenFacilities = 0
private var facilityCustomers = 0
// null when closed, non-null when open
private var children: List[QuadrantActor] = null
private var childrenBoundaries: List[Box] = null
// the cost so far
private var totalCost = 0.0
initCustomers.forEach(new Consumer[Point] {
override def accept(loopPoint: Point): Unit = {
if (boundary.contains(loopPoint)) {
addCustomer(loopPoint)
}
}
override def andThen(after: Consumer[_ >: Point]): Consumer[Point] = {
this
}
})
override def process(msg: AnyRef) {
msg match {
case customer: CustomerMsg =>
val point: Point = customer.point
if (children == null) {
// no open facility
addCustomer(point)
if (totalCost > threshold) {
partition()
}
} else {
// a facility is already open, propagate customer to correct child
var index = 0
while (index <= 4) {
val loopChildBoundary = childrenBoundaries(index)
if (loopChildBoundary.contains(point)) {
children(index).send(customer)
index = 5
} else {
index += 1
}
}
}
if (parent eq null) {
// request next customer
customer.producer.send(NextCustomerMsg())
}
case facility: FacilityMsg =>
val point = facility.point
val fromChild = facility.fromChild
knownFacilities += 1
localFacilities.add(point)
if (fromChild) {
notifyParentOfFacility(point, facility.depth)
if (facility.depth > maxDepthOfKnownOpenFacility) {
maxDepthOfKnownOpenFacility = facility.depth
}
// notify sibling
val childPos = facility.positionRelativeToParent
val siblingPos: Int = if (childPos == Position.TOP_LEFT) {
Position.BOT_RIGHT
} else if (childPos == Position.TOP_RIGHT) {
Position.BOT_LEFT
} else if (childPos == Position.BOT_RIGHT) {
Position.TOP_LEFT
} else {
Position.TOP_RIGHT
}
children(siblingPos).send(FacilityMsg(Position.UNKNOWN, depth, point, false))
} else {
// notify all children
if (children ne null) {
children.foreach {
loopChild =>
loopChild.send(FacilityMsg(Position.UNKNOWN, depth, point, false))
}
}
}
case exitMsg: RequestExitMsg =>
if (children ne null) {
children.foreach {
loopChild =>
loopChild.send(exitMsg)
}
} else {
// No children, notify parent and safely exit
safelyExit()
}
case exitMsg: ConfirmExitMsg =>
// child has sent a confirmation that it has exited
terminatedChildCount += 1
childrenFacilities += exitMsg.facilities
facilityCustomers += exitMsg.supportCustomers
if (terminatedChildCount == 4) {
// all children terminated
safelyExit()
}
}
}
private def addCustomer(point: Point): Unit = {
supportCustomers.add(point)
val minCost = findCost(point)
totalCost += minCost
}
private def findCost(point: Point): Double = {
var result = Double.MaxValue
// there will be at least one facility
localFacilities.forEach(new Consumer[Point] {
override def accept(loopPoint: Point): Unit = {
val distance = loopPoint.getDistance(point)
if (distance < result) {
result = distance
}
}
override def andThen(after: Consumer[_ >: Point]): Consumer[Point] = {
this
}
})
result
}
private def notifyParentOfFacility(p: Point, depth: Int): Unit = {
//println("Quadrant-" + id + ": notifyParentOfFacility: parent = " + parent)
if (parent ne null) {
//println("Quadrant-" + id + ": notifyParentOfFacility: sending msg to parent: " + parent.id)
parent.send(FacilityMsg(positionRelativeToParent, depth, p, true))
}
}
private def partition(): Unit = {
// notify parent that opened a new facility
notifyParentOfFacility(facility, depth)
maxDepthOfKnownOpenFacility = math.max(maxDepthOfKnownOpenFacility, depth)
// create children and propagate their share of customers to them
val firstBoundary: Box = new Box(boundary.x1, facility.y, facility.x, boundary.y2)
val secondBoundary: Box = new Box(facility.x, facility.y, boundary.x2, boundary.y2)
val thirdBoundary: Box = new Box(boundary.x1, boundary.y1, facility.x, facility.y)
val fourthBoundary: Box = new Box(facility.x, boundary.y1, boundary.x2, facility.y)
val firstChild = new QuadrantActor(
selfActor, Position.TOP_LEFT, firstBoundary, threshold, depth + 1,
localFacilities, knownFacilities, maxDepthOfKnownOpenFacility, supportCustomers)
firstChild.start()
val secondChild = new QuadrantActor(
selfActor, Position.TOP_RIGHT, secondBoundary, threshold, depth + 1,
localFacilities, knownFacilities, maxDepthOfKnownOpenFacility, supportCustomers)
secondChild.start()
val thirdChild = new QuadrantActor(
selfActor, Position.BOT_LEFT, thirdBoundary, threshold, depth + 1,
localFacilities, knownFacilities, maxDepthOfKnownOpenFacility, supportCustomers)
thirdChild.start()
val fourthChild = new QuadrantActor(
selfActor, Position.BOT_RIGHT, fourthBoundary, threshold, depth + 1,
localFacilities, knownFacilities, maxDepthOfKnownOpenFacility, supportCustomers)
fourthChild.start()
children = List[QuadrantActor](firstChild, secondChild, thirdChild, fourthChild)
childrenBoundaries = List[Box](firstBoundary, secondBoundary, thirdBoundary, fourthBoundary)
// support customers have been distributed to the children
supportCustomers.clear()
}
private def safelyExit(): Unit = {
if (parent ne null) {
val numFacilities = if (children ne null) childrenFacilities + 1 else childrenFacilities
val numCustomers = facilityCustomers + supportCustomers.size
parent.send(ConfirmExitMsg(numFacilities, numCustomers))
} else {
val numFacilities = childrenFacilities + 1
println(" Num Facilities: " + numFacilities + ", Num customers: " + facilityCustomers)
}
exit()
}
}
}
| shamsmahmood/savina | src/main/scala/edu/rice/habanero/benchmarks/facloc/FacilityLocationScalaActorBenchmark.scala | Scala | gpl-2.0 | 10,592 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.executor
import java.io._
import java.nio.charset.Charset
import java.nio.file.{Files, Paths}
import java.util.Locale
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.util.Try
import org.apache.spark.{SparkEnv, SparkException}
import org.apache.spark.internal.{config, Logging}
import org.apache.spark.util.Utils
/**
 * Memory totals for an executor's process tree, split by process kind
 * (JVM / python / other). Vmem totals are the vsize values read from
 * /proc/[pid]/stat; RSS totals are stat's page counts already scaled to
 * bytes by ProcfsMetricsGetter.
 */
private[spark] case class ProcfsMetrics(
    jvmVmemTotal: Long,
    jvmRSSTotal: Long,
    pythonVmemTotal: Long,
    pythonRSSTotal: Long,
    otherVmemTotal: Long,
    otherRSSTotal: Long)
// Some of the ideas here are taken from the ProcfsBasedProcessTree class in hadoop
// project.
/**
 * Collects per-process memory metrics (virtual memory and RSS, read from
 * /proc/[pid]/stat) for this executor's whole process tree, bucketed into
 * JVM, python and other processes. All probing is best-effort: the first
 * failure permanently flips `isAvailable` and subsequent calls report zeros
 * rather than failing the executor.
 *
 * @param procfsDir root of the proc filesystem (overridable for tests)
 */
private[spark] class ProcfsMetricsGetter(procfsDir: String = "/proc/") extends Logging {
  private val procfsStatFile = "stat"
  private val testing = Utils.isTesting
  private val pageSize = computePageSize()
  // flips to false permanently on the first probing failure
  private var isAvailable: Boolean = isProcfsAvailable
  private val pid = computePid()

  /** Whether procfs exists and process-tree metrics are enabled in the Spark conf. */
  private lazy val isProcfsAvailable: Boolean = {
    if (testing) {
      true
    }
    else {
      val procDirExists = Try(Files.exists(Paths.get(procfsDir))).recover {
        case ioe: IOException =>
          logWarning("Exception checking for procfs dir", ioe)
          false
      }
      val shouldLogStageExecutorMetrics =
        SparkEnv.get.conf.get(config.EVENT_LOG_STAGE_EXECUTOR_METRICS)
      val shouldLogStageExecutorProcessTreeMetrics =
        SparkEnv.get.conf.get(config.EVENT_LOG_PROCESS_TREE_METRICS)
      procDirExists.get && shouldLogStageExecutorProcessTreeMetrics && shouldLogStageExecutorMetrics
    }
  }

  /** PID of this JVM, obtained via the shell's $PPID; -1 when unavailable. */
  private def computePid(): Int = {
    if (!isAvailable || testing) {
      return -1
    }
    try {
      // This can be simplified in java9:
      // https://docs.oracle.com/javase/9/docs/api/java/lang/ProcessHandle.html
      val cmd = Array("bash", "-c", "echo $PPID")
      val out = Utils.executeAndGetOutput(cmd)
      Integer.parseInt(out.split("\\n")(0))
    }
    catch {
      // BUG FIX: was `case e: SparkException`, which let NumberFormatException
      // from Integer.parseInt (unexpected shell output) escape instead of
      // disabling these metrics; broadened to Exception to match
      // computePageSize below.
      case e: Exception =>
        logWarning("Exception when trying to compute process tree." +
          " As a result reporting of ProcessTree metrics is stopped", e)
        isAvailable = false
        -1
    }
  }

  /** System memory page size in bytes (stat reports RSS in pages). */
  private def computePageSize(): Long = {
    if (testing) {
      return 4096
    }
    try {
      val cmd = Array("getconf", "PAGESIZE")
      val out = Utils.executeAndGetOutput(cmd)
      Integer.parseInt(out.split("\\n")(0))
    } catch {
      case e: Exception =>
        logWarning("Exception when trying to compute pagesize, as a" +
          " result reporting of ProcessTree metrics is stopped")
        isAvailable = false
        0
    }
  }

  /** BFS over `pgrep` output: this process's pid plus all descendant pids. */
  private def computeProcessTree(): Set[Int] = {
    if (!isAvailable || testing) {
      return Set()
    }
    var ptree: Set[Int] = Set()
    ptree += pid
    val queue = mutable.Queue.empty[Int]
    queue += pid
    while ( !queue.isEmpty ) {
      val p = queue.dequeue()
      val c = getChildPids(p)
      if (!c.isEmpty) {
        queue ++= c
        ptree ++= c.toSet
      }
    }
    ptree
  }

  /** Direct children of `pid` via `pgrep -P`; empty (and metrics disabled) on failure. */
  private def getChildPids(pid: Int): ArrayBuffer[Int] = {
    try {
      val builder = new ProcessBuilder("pgrep", "-P", pid.toString)
      val process = builder.start()
      val childPidsInInt = mutable.ArrayBuffer.empty[Int]
      def appendChildPid(s: String): Unit = {
        if (s != "") {
          logTrace("Found a child pid:" + s)
          childPidsInInt += Integer.parseInt(s)
        }
      }
      // drain stdout/stderr on separate threads to avoid pipe-buffer deadlock
      val stdoutThread = Utils.processStreamByLine("read stdout for pgrep",
        process.getInputStream, appendChildPid)
      val errorStringBuilder = new StringBuilder()
      val stdErrThread = Utils.processStreamByLine(
        "stderr for pgrep",
        process.getErrorStream,
        line => errorStringBuilder.append(line))
      val exitCode = process.waitFor()
      stdoutThread.join()
      stdErrThread.join()
      val errorString = errorStringBuilder.toString()
      // pgrep will have exit code of 1 if there are more than one child process
      // and it will have a exit code of 2 if there is no child process
      if (exitCode != 0 && exitCode > 2) {
        val cmd = builder.command().toArray.mkString(" ")
        logWarning(s"Process $cmd exited with code $exitCode and stderr: $errorString")
        throw new SparkException(s"Process $cmd exited with code $exitCode")
      }
      childPidsInInt
    } catch {
      case e: Exception =>
        logWarning("Exception when trying to compute process tree." +
          " As a result reporting of ProcessTree metrics is stopped.", e)
        isAvailable = false
        mutable.ArrayBuffer.empty[Int]
    }
  }

  /**
   * Reads /proc/[pid]/stat for one process and folds its vsize (field 23)
   * and rss (field 24, pages scaled by the page size) into the bucket
   * matching the process's command name (java / python / other).
   * Returns zeroed metrics if the stat file cannot be read.
   */
  def addProcfsMetricsFromOneProcess(
      allMetrics: ProcfsMetrics,
      pid: Int): ProcfsMetrics = {

    // The computation of RSS and Vmem are based on proc(5):
    // http://man7.org/linux/man-pages/man5/proc.5.html
    try {
      def openReader(): BufferedReader = {
        val f = new File(new File(procfsDir, pid.toString), procfsStatFile)
        new BufferedReader(new InputStreamReader(new FileInputStream(f), Charset.forName("UTF-8")))
      }
      Utils.tryWithResource(openReader) { in =>
        val procInfo = in.readLine
        val procInfoSplit = procInfo.split(" ")
        val vmem = procInfoSplit(22).toLong
        val rssMem = procInfoSplit(23).toLong * pageSize
        if (procInfoSplit(1).toLowerCase(Locale.US).contains("java")) {
          allMetrics.copy(
            jvmVmemTotal = allMetrics.jvmVmemTotal + vmem,
            jvmRSSTotal = allMetrics.jvmRSSTotal + (rssMem)
          )
        }
        else if (procInfoSplit(1).toLowerCase(Locale.US).contains("python")) {
          allMetrics.copy(
            pythonVmemTotal = allMetrics.pythonVmemTotal + vmem,
            pythonRSSTotal = allMetrics.pythonRSSTotal + (rssMem)
          )
        }
        else {
          allMetrics.copy(
            otherVmemTotal = allMetrics.otherVmemTotal + vmem,
            otherRSSTotal = allMetrics.otherRSSTotal + (rssMem)
          )
        }
      }
    } catch {
      case f: IOException =>
        logWarning("There was a problem with reading" +
          " the stat file of the process. ", f)
        ProcfsMetrics(0, 0, 0, 0, 0, 0)
    }
  }

  /** Totals across the whole process tree; all zeros when metrics are unavailable. */
  private[spark] def computeAllMetrics(): ProcfsMetrics = {
    if (!isAvailable) {
      return ProcfsMetrics(0, 0, 0, 0, 0, 0)
    }
    val pids = computeProcessTree
    var allMetrics = ProcfsMetrics(0, 0, 0, 0, 0, 0)
    for (p <- pids) {
      allMetrics = addProcfsMetricsFromOneProcess(allMetrics, p)
      // if we had an error getting any of the metrics, we don't want to report partial metrics, as
      // that would be misleading.
      if (!isAvailable) {
        return ProcfsMetrics(0, 0, 0, 0, 0, 0)
      }
    }
    allMetrics
  }
}
private[spark] object ProcfsMetricsGetter {
  // Shared singleton used to sample process-tree metrics with the default /proc/ root.
  final val pTreeInfo = new ProcfsMetricsGetter
}
| WindCanDie/spark | core/src/main/scala/org/apache/spark/executor/ProcfsMetricsGetter.scala | Scala | apache-2.0 | 7,778 |
package org.jetbrains.plugins.scala.annotator
import com.intellij.lang.annotation.AnnotationHolder
import com.intellij.psi.PsiClass
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiElement
import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScTypeElement
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScObject, ScTemplateDefinition, ScTrait}
import org.jetbrains.plugins.scala.lang.psi.types.{ScSubstitutor, ScType}
/**
* Pavel Fatin
*/
trait AnnotatorPart[T <: ScalaPsiElement] {

  /** PSI element class this part handles; used to dispatch annotation work. */
  def kind: Class[T]

  def annotate(element: T, holder: AnnotationHolder, typeAware: Boolean)

  //TODO move to PsiClass extensions
  // Human-readable kind for error messages. Case order matters: the
  // Scala-specific trait/object checks must precede the generic PsiClass cases.
  protected def kindOf(entity: PsiClass) = entity match {
    case _: ScTrait => "Trait"
    case _: ScObject => "Object"
    case c: PsiClass if c.isEnum => "Enum"
    case c: PsiClass if c.isInterface => "Interface"
    case _ => "Class"
  }

  // Whether `entity` may appear as a mixin: Scala traits and non-annotation Java interfaces.
  protected def isMixable(entity: PsiClass) = entity match {
    case _: ScTrait => true
    case c: PsiClass if c.isInterface => !c.isAnnotationType
    case _ => false
  }

  // Like isMixable, but additionally true for classes explicitly marked abstract.
  protected def isAbstract(entity: PsiClass) = entity match {
    case _: ScTrait => true
    case c: PsiClass if c.isInterface => !c.isAnnotationType
    case c: PsiClass if c.hasAbstractModifier => true
    case _ => false
  }

}
object AnnotatorPart {
  /**
   * Collects the supertype references of a template definition, pairing each
   * super-type element with a value extracted from its resolved type.
   * Type elements whose type fails to resolve are skipped entirely.
   */
  private def collectSuperRefs[T](td: ScTemplateDefinition, extractor: ScType => Option[T]) = {
    val superTypeElements = td.extendsBlock.templateParents.toSeq.flatMap(_.typeElements)
    for {
      typeElem <- superTypeElements
      tp <- typeElem.`type`().toOption
    } yield {
      (typeElem, extractor(tp))
    }
  }
  /** Supertype references of `td`, each paired with its resolved class (if any). */
  def superRefs(td: ScTemplateDefinition): Seq[(ScTypeElement, Option[PsiClass])] = {
    collectSuperRefs(td, _.extractClass)
  }
  /** Like [[superRefs]], but also carries the type-parameter substitutor of each class. */
  def superRefsWithSubst(td: ScTemplateDefinition): Seq[(ScTypeElement, Option[(PsiClass, ScSubstitutor)])] = {
    collectSuperRefs(td, _.extractClassType)
  }
} | triplequote/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/annotator/AnnotatorPart.scala | Scala | apache-2.0 | 1,997 |
package com.github.j5ik2o.reactive.redis.feature
import akka.routing.DefaultResizer
import cats.data.NonEmptyList
import com.github.j5ik2o.reactive.redis.{ PeerConfig, RedisConnection, RedisConnectionPool }
import monix.eval.Task
/**
 * Runs the shared KEYS-feature test suite against a Jedis-backed connection pool.
 */
class KeysFeatureOfJedisSpec extends AbstractKeysFeatureSpec {
  /**
   * Builds the pool under test: round-robin over all peers, two connections per
   * peer, with the router resized between 1 and 5 routees.
   */
  override protected def createConnectionPool(peerConfigs: NonEmptyList[PeerConfig]): RedisConnectionPool[Task] =
    RedisConnectionPool.ofMultipleRoundRobin(
      2,
      peerConfigs,
      newConnection = RedisConnection.ofJedis,
      reSizer = Some(DefaultResizer(1, 5))
    )
}
| j5ik2o/reactive-redis | core/src/test/scala/com/github/j5ik2o/reactive/redis/feature/KeysFeatureOfJedisSpec.scala | Scala | mit | 718 |
import scala.tools.partest.IcodeComparison
/**
 * Regression test: the line-number recorded for the JUMP emitted at the end of
 * a match case must be the case body's line, not a later one. Expected values
 * are spelled out in the inline comments of the compiled snippet below.
 */
object Test extends IcodeComparison {
  // NOTE: the string below is the program under test — its exact text,
  // including line positions, is significant and must not be edited.
  override def code =
    """object Case3 { // 01
      | def unapply(z: Any): Option[Int] = Some(-1) // 02
      | def main(args: Array[String]) { // 03
      | ("": Any) match { // 04
      | case x : String => // 05 Read: <linenumber> JUMP <target basic block id>
      | println("case 0") // 06 expecting "6 JUMP 7", was "8 JUMP 7"
      | case _ => // 07
      | println("default") // 08 expecting "8 JUMP 7"
      | } // 09
      | println("done") // 10
      | }
      |}""".stripMargin
  // Emit the generated icode so the harness can diff the line/jump pairs.
  override def show() = showIcode()
}
| yusuke2255/dotty | tests/pending/run/t6288b-jump-position.scala | Scala | bsd-3-clause | 872 |
package org.hammerlab.guacamole.variants
import org.bdgenomics.formats.avro.GenotypeAllele.{ ALT, REF }
import org.bdgenomics.formats.avro.{ Genotype => BDGGenotype }
import org.hammerlab.genomics.reference.{ ContigName, Locus, NumLoci }
import org.hammerlab.guacamole.readsets.SampleName
import scala.collection.JavaConversions.seqAsJavaList
/**
 *
 * A variant that exists in the sample; includes supporting read statistics
 *
 * @param sampleName sample the variant was called on
 * @param contigName chromosome or genome contig of the variant
 * @param start start position of the variant (0-based)
 * @param allele allele (ref + seq bases) for this variant
 * @param evidence supporting statistics for the variant
 * @param rsID optional variant identifier (rs number), if known
 * @param length length of the variant (defaults to a single locus)
 */
case class CalledAllele(sampleName: SampleName,
                        contigName: ContigName,
                        start: Locus,
                        allele: Allele,
                        evidence: AlleleEvidence,
                        rsID: Option[Int] = None,
                        override val length: NumLoci = 1) extends ReferenceVariant {
  // Half-open interval of a single reference locus: [start, start + 1).
  val end: Locus = start + 1L
  /** Converts this call into a Big Data Genomics (ADAM/Avro) Genotype record. */
  def toBDGGenotype: BDGGenotype =
    BDGGenotype
      .newBuilder
      // The genotype alleles are fixed to one REF and one ALT allele.
      .setAlleles(seqAsJavaList(Seq(REF, ALT)))
      .setSampleId(sampleName)
      .setGenotypeQuality(evidence.phredScaledLikelihood)
      .setReadDepth(evidence.readDepth)
      // Fraction of reads supporting the alternate allele.
      .setExpectedAlleleDosage(
        evidence.alleleReadDepth.toFloat / evidence.readDepth
      )
      .setReferenceReadDepth(evidence.readDepth - evidence.alleleReadDepth)
      .setAlternateReadDepth(evidence.alleleReadDepth)
      .setVariant(bdgVariant)
      .build
}
| hammerlab/guacamole | src/main/scala/org/hammerlab/guacamole/variants/CalledAllele.scala | Scala | apache-2.0 | 1,679 |
package grammarcomp
package generators
import grammar._
import grammar.CFGrammar._
import grammar.utils._
import scala.collection.mutable.ListBuffer
object RandomAccessGeneratorUtil {
  /**
   * For every nonterminal that has one, picks a "base case" rule: a production
   * whose right-hand side contains only terminals and nonterminals whose own
   * base case is already known. Computed as a fixed point over the rule set,
   * re-scanning until no new nonterminal can be admitted.
   *
   * TODO: this can be more efficiently implemented using a
   * backward shortest path
   */
  def basecaseOfNonterminals[T](g: Grammar[T]): Map[Nonterminal, Rule[T]] = {
    var basecase = Map[Nonterminal, Rule[T]]()
    var continue = true
    while (continue) {
      continue = false
      //in every step find a non-terminal that has a productions containing only
      //non-terminals for which the base case is known
      g.nontermToRules.foreach {
        case (nt, rules) if (!basecase.contains(nt)) =>
          val baseRule = rules.find(_.rightSide.forall {
            case t: Terminal[T] => true
            case nt: Nonterminal =>
              basecase.contains(nt)
          })
          if (baseRule.isDefined) {
            basecase += (nt -> baseRule.get)
            // A new base case may unblock other nonterminals, so scan again.
            continue = true
          }
        case _ =>
          ; //skip this non-terminal
      }
    }
    basecase
  }
  /**
   * Computes, for every nonterminal, a word derivable from it, intended to be
   * minimal (candidates are compared with `minBy(_.size)`). Cycles are broken
   * by ignoring rules that mention a nonterminal currently on the DFS stack
   * that has no result yet. Requires the grammar to not
   * have unproductive symbols.
   */
  def minWords[T](g: Grammar[T]) = {
    var minwordMap = Map[Symbol[T], Word[T]]()
    var visited = Set[Symbol[T]]() // this will only have non-terminals
    def dfsRec(nt: Nonterminal): Word[T] = {
      //ignore everything that is visited but does not have a minwordMap
      val rhsMinwords = g.nontermToRules(nt).collect {
        case Rule(_, rhs) if rhs.forall(s => !visited(s) ||
          minwordMap.contains(s)) =>
          rhs.flatMap {
            case t: Terminal[T] =>
              List(t)
            case nt: Nonterminal if visited(nt) =>
              minwordMap(nt)
            case nt: Nonterminal =>
              visited += nt
              dfsRec(nt)
          }
      }
      if (rhsMinwords.isEmpty)
        throw new IllegalStateException("Grammar has unproductive symbols: " + nt)
      else {
        val minNTword = rhsMinwords.minBy(_.size)
        minwordMap += (nt -> minNTword)
        minNTword
      }
    }
    var unexploredNts = g.nonTerminals
    while(!unexploredNts.isEmpty) {
      val nt = unexploredNts.head
      visited += nt
      dfsRec(nt)
      unexploredNts = unexploredNts.filterNot(visited.contains _)
    }
    minwordMap
  }
  // Reusable BigInt constants for the counting routines below.
  val one = BigInt(1)
  val zero = BigInt(0)
  /**
   * Number of distinct words derivable from each nonterminal, computed as a
   * fixed point: a nonterminal is counted once every nonterminal on all of its
   * right-hand sides is counted; its count is the sum over its rules of the
   * product of counts of the right-hand-side symbols (terminals count 1).
   * Nonterminals whose count never bottoms out (recursive) are absent from
   * the result.
   */
  def wordCountOfNonterminals[T](g: Grammar[T]): Map[Nonterminal, BigInt] = {
    var wordCount = Map[Nonterminal, BigInt]()
    var continue = true
    while (continue) {
      continue = false
      //in every step find a non-terminal that has only productions containing
      //non-terminals for which a word count is known.
      //The wordcount is computed as the sum of the word count of all of its productions
      g.nontermToRules.foreach {
        case (nt, rules) if (!wordCount.contains(nt)) =>
          if (rules.forall(_.rightSide.forall {
            case t: Terminal[T] => true
            case rnt: Nonterminal =>
              wordCount.contains(rnt)
          })) {
            wordCount += (nt -> rules.map(_.rightSide.map {
              case t: Terminal[T] => one
              case nt: Nonterminal => wordCount(nt)
            }.product).sum)
            continue = true
          }
        case _ =>
          ; //skip this non-terminal
      }
    }
    wordCount
  }
  /**
   * Number of words that could generated by the non-terminal of the given 'size'.
   * Note that this number is always bounded if epsilons are also considered as terminals.
   * Note: in this class 'Int' data types are for word sizes and 'BigInt' for number of words.
   *
   * Implementation: a bottom-up dynamic program filling `wordCount(nt)(m - 1)`
   * for every nonterminal `nt` and word size `m` in [1, size], assuming every
   * right-hand side has at most two nonterminals (a near-CNF form; unit
   * productions are rejected in the constructor body below).
   */
  class WordCounter[T](g: Grammar[T], size: Int) {
    // Nonterminals are indexed in post order so counts of "deeper" symbols
    // tend to be available when a symbol is processed.
    val nontermToIndex = g.nontermsInPostOrder.zipWithIndex.toMap
    val indexToNonterms = nontermToIndex.map { case (nt, i) => (i, nt) }.toMap
    val N = g.nonTerminals.size
    // wordCount(i)(m - 1) = number of words of size m derivable from nonterminal i.
    private var wordCount = Array.fill[BigInt](N, size)(zero) //this will fill everything with zero
    /** Count of words of size `m` for `nt`; zero for non-positive sizes. */
    def symCount(nt: Nonterminal, m: Int) = {
      if (m <= 0) zero
      else
        wordCount(nontermToIndex(nt))(m - 1)
    }
    //only for sizes > cacheBeginSize we start caching
    //val cacheBeginSize = 1
    // Memo: (rule, remaining-nonterminal-size) -> feasible splits with their word counts.
    var splitsCache = Map[(Rule[T], Int), List[(Int, BigInt)]]()
    /**
     * All ways to apportion a word of size `m` across the nonterminals of
     * `rl`'s right-hand side, as (size-of-first-nonterminal, #words) pairs.
     */
    def possibleSplitsForRule(rl: Rule[T], m: Int): List[(Int, BigInt)] = {
      //println("Rule: "+rl)
      //assume that the right-side has at most two non-terminals
      //TODO: extend this later to multiple ones.
      val nonterms = nontermsInRightSide(rl)
      nonterms match {
        case List() =>
          //no non-terms implies only one word is possible and it should be equal to the size of 'rhs'
          val rhsSize = rl.rightSide.size
          if (m == rhsSize)
            List((m, one))
          else if (rhsSize == 0 && m == 1) //handle the epsilon special case
            List((m, one))
          else List()
        case List(nt) =>
          //here, there are 'rl.rightSide.size - 1' terminals and only one non-terminal
          val ntsize = m - (rl.rightSide.size - 1)
          val sc = symCount(nt, ntsize)
          // /println("Nonterm, ntindex, m, ntsize: "+(nt, nontermToIndex(nt), m, ntsize)+" sc: "+sc)
          if (sc > 0)
            List((ntsize, sc))
          else List()
        case List(nt1, nt2) =>
          //here, there are two nonterminals 'rl.rightSide.size - 2' terminals
          val ntsize = m - (rl.rightSide.size - 2)
          def computeSplits = {
            var splits = List[(Int, BigInt)]()
            for (j <- 1 to ntsize - 1) {
              val sc1 = symCount(nt1, j)
              if (sc1 > 0) {
                val sc2 = symCount(nt2, ntsize - j)
                if (sc2 > 0)
                  splits = (j, (sc1 * sc2)) +: splits
              }
            }
            //sort the splits based on 'bounds'
            splits.sortBy(_._2)
          }
          //if (ntsize >= cacheBeginSize) {
          if (splitsCache.contains((rl, ntsize)))
            splitsCache((rl, ntsize))
          else {
            val splits = computeSplits
            splitsCache += ((rl, ntsize) -> splits)
            splits
          }
        /*} else
            computeSplits*/
      }
    }
    /** Total number of words of size `m` derivable via rule `rl`. */
    def ruleCountForSize(rl: Rule[T], m: Int): BigInt = {
      val cnt = possibleSplitsForRule(rl, m).map(_._2).sum
      // val list = List(Nonterminal("expression1"), Nonterminal("expressionErr2"),
      // Nonterminal("N-3067"),Nonterminal("N-3068"),Nonterminal("N-1227"),Nonterminal("N-1228"))
      // if(m == 50 && list.contains(rl.leftSide)){
      // //if(rl.rightSide == List(expr1,t,expr1) || rl.rightSide == List(expr2,t,Nonterminal("primary2")))
      // println("Rule: "+rl+" count: "+cnt)
      // }
      cnt
    }
    //check if there is no unit production
    val unitRules = g.rules.collect { case rl @ Rule(_, List(_: Nonterminal)) => rl }
    if (!unitRules.isEmpty)
      throw new IllegalStateException("There are unit productions: " + unitRules.mkString("\n"))
    //initialize the word counts here
    //the complexity of the procedure is O(size^2 * |G|)
    for (m <- 1 to size) {
      //compute A[m] for every non-terminal 'A'
      //using post-order to traverse non-terminals
      for (i <- 0 until N) {
        val nt = indexToNonterms(i)
        val count = g.nontermToRules(nt).map { rl => ruleCountForSize(rl, m) }.sum
        //add this to the wordCount array
        wordCount(i)(m - 1) = count
      }
    }
    //for debugging
    //print the array for debugging
    /*println("Word Count Array:")
    var i = -1
    val str = wordCount.map { clmn =>
      i += 1
      indexToNonterms(i) + " " + clmn.mkString(" ")
    }.mkString("\n")
    println(str)*/
    //System.exit(0)
    /**
     * procedures for looking up bounds
     */
    /** Number of words of size `m` (must be <= `size`) for nonterminal `nt`. */
    def boundForNonterminal(nt: Nonterminal, m: Int): BigInt = {
      require(m <= size)
      val index = nontermToIndex(nt)
      wordCount(index)(m - 1)
    }
    /**
     * The rules are sorted by bounds.
     * Using a cache for efficiency. The cache in some sense is an
     * expanded version of the grammar. However, the cache could be cleaned up
     * or can store only the recently used ones.
     */
    var ntrulesCache = Map[(Nonterminal, Int), List[(Rule[T], BigInt)]]()
    /** Rules of `nt` that can produce at least one word of size `m`, sorted by count. */
    def rulesForNonterminal(nt: Nonterminal, m: Int) = {
      if (ntrulesCache.contains((nt, m)))
        ntrulesCache((nt, m))
      else {
        val rulesWithWords = g.nontermToRules(nt).map { rl =>
          (rl, ruleCountForSize(rl, m))
        }.filter(_._2 > 0)
        val rules = rulesWithWords.sortBy(_._2)
        ntrulesCache += ((nt, m) -> rules)
        rules
      }
    }
  }
def firstMismatchSize[T](g1: Grammar[T], g2: Grammar[T], maxSize: Int): Option[Int] = {
val wc1 = new WordCounter(g1, maxSize)
val wc2 = new WordCounter(g2, maxSize)
var foundSize: Option[Int] = None
for (size <- 1 to maxSize) if (!foundSize.isDefined) {
val dsize1 = wc1.boundForNonterminal(g1.start, size)
val dsize2 = wc2.boundForNonterminal(g2.start, size)
if (dsize1 != dsize2) {
foundSize = Some(size)
}
}
foundSize
}
} | epfl-lara/GrammarComparison | src/main/scala/grammarcomp/generators/GrammarCounting.scala | Scala | mit | 9,330 |
package com.prezi.haskell.gradle.systests
import java.io.{BufferedReader, InputStream, InputStreamReader}
/**
 * A thread whose body copies `stream` to standard output line by line,
 * closing the reader when the stream is exhausted.
 */
class StreamToStdout(stream: InputStream) extends Thread {
  override def run(): Unit = {
    val reader = new BufferedReader(new InputStreamReader(stream))
    try {
      Iterator
        .continually(reader.readLine())
        .takeWhile(_ != null)
        .foreach(println)
    } finally {
      reader.close()
    }
  }
}
object StreamToStdout {
  /**
   * Pumps `stream` to stdout on the *calling* thread: `run()` is invoked
   * directly rather than `start()`, so this blocks until the stream is
   * exhausted. NOTE(review): if asynchronous pumping was intended, this should
   * call `start()` instead — verify with callers before changing.
   */
  def apply(stream: InputStream): Unit = {
    new StreamToStdout(stream).run()
  }
} | prezi/gradle-haskell-plugin | src/systest/scala/com/prezi/haskell/gradle/systests/StreamToStdout.scala | Scala | apache-2.0 | 573 |
/*
* Copyright 2015 Dennis Vriend
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.dnvriend.slicktest
import com.github.dnvriend.CoffeeRepository._
import com.github.dnvriend.TestSpec
import com.github.dnvriend.UserRepository._
/** Exercises Slick insert actions: single-row, batch, and `returning`/`into` key retrieval. */
class InsertTest extends TestSpec {
  import profile.api._
  import coffeeRepository._
  import userRepository._
  /**
   * Inserts are done based on a projection of columns from a single table. When you use the table directly,
   * the insert is performed against its * projection. Omitting some of a table’s columns when inserting causes
   * the database to use the default values specified in the table definition, or a type-specific default in case
   * no explicit default was given.
   */
  "Inserting Coffee" should "insert a single row" in {
    // The fixture starts with 5 coffees.
    db.run(CoffeeTable.length.result).futureValue shouldBe 5
    // += gives you a count of the number of affected rows (which will usually be 1)
    db.run(CoffeeTable += CoffeeTableRow("Colombian_Extra_Decaf", 101, 7.99, 0, 0)).futureValue shouldBe 1
    db.run(CoffeeTable.length.result).futureValue shouldBe 6
  }
  it should "insert multiple rows" in {
    val insertAction = CoffeeTable ++= Seq(
      CoffeeTableRow("Italic_Roast", 49, 8.99, 0, 0),
      CoffeeTableRow("Spanish_Espresso", 150, 9.99, 0, 0)
    )
    db.run(CoffeeTable.length.result).futureValue shouldBe 5
    // ++= gives an accumulated count in an Option (which can be None if the database system
    // does not provide counts for all rows)
    db.run(insertAction).futureValue // could or could not be defined...
    db.run(CoffeeTable.length.result).futureValue shouldBe 7
  }
  it should "get the auto-generated primary key for a user" in {
    val userIdAction = (UserTable returning UserTable.map(_.id)) += UserTableRow(None, "Stefan", "Zeiger")
    // the returning method where you specify the columns to be returned
    // (as a single value or tuple from += and a Seq of such values from ++=)
    val id = db.run(userIdAction).futureValue
    id shouldBe an[java.lang.Integer]
    db.run(UserTable.filter(_.id === id).result.headOption).futureValue shouldBe 'defined
  }
  it should "map the auto-generated primary key for a user into the case class" in {
    // You can follow the returning method with the into method to map the inserted values and
    // the generated keys (specified in returning) to a desired value. Here is an example of
    // using this feature to return an object with an updated id.
    val userWithIdAction =
      (UserTable returning UserTable.map(_.id)
        into ((user, id) => user.copy(id = Option(id)))) += UserTableRow(None, "Stefan", "Zeiger") // don't you like functional style :)
    db.run(userWithIdAction).futureValue should matchPattern {
      case UserTableRow(Some(id), "Stefan", "Zeiger") if id > 4 =>
    }
  }
}
| dnvriend/slick3-test | src/test/scala/com/github/dnvriend/slicktest/InsertTest.scala | Scala | apache-2.0 | 3,375 |
package com.github.j5ik2o.forseti.adaptor.handler.flow.code
import java.net.URI
import java.time.ZonedDateTime
import com.github.j5ik2o.forseti.adaptor.generator.{IdGenerator, TokenGenerator}
import com.github.j5ik2o.forseti.domain._
import com.github.j5ik2o.forseti.domain.auhtorizationCode.{
AuthorizationCode,
AuthorizationCodeId,
AuthorizationCodeWriter
}
import com.github.j5ik2o.forseti.domain.client.ClientId
import com.github.j5ik2o.forseti.domain.exception.{OAuthException, ServerException}
import com.github.j5ik2o.forseti.domain.pkce.CodeChallengeWithMethodType
import com.github.j5ik2o.forseti.domain.user.UserId
import com.typesafe.scalalogging.LazyLogging
import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
import scalaz._
import scalaz.std.scalaFuture._
/**
 * Mixin providing the authorization-code half of the OAuth authorization flow:
 * generating an opaque code, wrapping it into an [[AuthorizationCode]] entity,
 * and persisting it. All failures are mapped to [[OAuthException]]s inside an
 * `EitherT[Future, OAuthException, _]`.
 */
trait AuthorizationCodeRequestSupport extends LazyLogging {
  // Collaborators supplied by the concrete implementation.
  protected val authorizationCodeWriter: AuthorizationCodeWriter
  protected val idGenerator: IdGenerator
  // Entropy (in bits) of the generated authorization code.
  protected val codeBitLength: Int = 512
  protected val codeGenerator: TokenGenerator = TokenGenerator.ofDefault(codeBitLength)
  // Lifetime of an issued authorization code.
  protected val expiresInOfCode: FiniteDuration = 10 minutes
  /** Persists `authorizationCode`; storage errors become [[ServerException]]s. */
  protected def storeAuthorizationCode(authorizationCode: AuthorizationCode)(
      implicit ec: ExecutionContext
  ): EitherT[Future, OAuthException, Unit] = {
    authorizationCodeWriter.store(authorizationCode).leftMap[OAuthException] { ex: Exception =>
      logger.error("Occurred error", ex)
      new ServerException(Maybe.just(ex.getMessage))
    }
  }
  /**
   * Builds a new [[AuthorizationCode]]: generates the opaque token and the
   * entity id, then assembles the entity with the given request parameters.
   * Generation failures become [[ServerException]]s.
   */
  protected def generateAuthorizationCode(
      clientId: ClientId,
      userId: Maybe[UserId],
      redirectUri: Maybe[URI],
      scope: Scope,
      nonce: Maybe[String],
      codeChallengeWithMethodType: Maybe[CodeChallengeWithMethodType]
  )(implicit ec: ExecutionContext): EitherT[Future, OAuthException, AuthorizationCode] = {
    for {
      token <- codeGenerator.generateToken.leftMap[OAuthException] { ex =>
        logger.error("Occurred error", ex)
        new ServerException(Maybe.just("Occurred internal error"))
      }
      now = ZonedDateTime.now()
      id <- idGenerator.generateId[AuthorizationCodeId].leftMap[OAuthException] { ex =>
        logger.error("Occurred error", ex)
        new ServerException(Maybe.just("Occurred internal error"))
      }
      entity = AuthorizationCode(
        id,
        clientId = clientId,
        userId = userId,
        value = token,
        redirectUri = redirectUri,
        scope = scope,
        expiresIn = expiresInOfCode,
        nonce = nonce,
        codeChallengeWithMethodType = codeChallengeWithMethodType,
        now,
        now
      )
    } yield entity
  }
  /** Convenience: [[generateAuthorizationCode]] followed by [[storeAuthorizationCode]]. */
  protected def generateAndStoreAuthorizationCode(
      clientId: ClientId,
      userId: Maybe[UserId],
      redirectUri: Maybe[URI],
      scope: Scope,
      nonce: Maybe[String],
      codeChallengeWithMethodType: Maybe[CodeChallengeWithMethodType]
  )(implicit ec: ExecutionContext): EitherT[Future, OAuthException, AuthorizationCode] = {
    for {
      authorizationCode <- generateAuthorizationCode(
        clientId,
        userId,
        redirectUri,
        scope,
        nonce,
        codeChallengeWithMethodType
      )
      _ <- storeAuthorizationCode(authorizationCode)
    } yield authorizationCode
  }
}
| j5ik2o/forseti | server/server-use-case-port/src/main/scala/com/github/j5ik2o/forseti/adaptor/handler/flow/code/AuthorizationCodeRequestSupport.scala | Scala | mit | 3,311 |
package truerss.util
import truerss.dto.SourceViewDto
import scala.xml.Utility
object OpmlBuilder {
  // util variables
  // Folder title/description embedded in the exported OPML document.
  private val exportText = "Newsfeeds exported from TrueRSS"
  /**
   * Renders the given sources as an OPML 1.0 document with one RSS `outline`
   * element per source; names and URLs are XML-escaped.
   *
   * Fix: outlines are now joined with a real newline. The previous separator
   * was the over-escaped literal "\\n", which inserted the two characters
   * backslash+n between outline elements instead of a line break.
   */
  def build(sources: Iterable[SourceViewDto]): String = {
    val outlines = sources.map { source =>
      s"""<outline type="rss" text="${e(source.name)}" title="${e(source.name)}" xmlUrl="${e(source.url)}"></outline>"""
    }.mkString("\n")
    s"""|<?xml version="1.0"?>
        |<opml version="1.0">
        |<head>TrueRSS Feed List Export</head>
        |<body>
        |<outline title="$exportText" text="$exportText" description="$exportText" type="folder">
        |$outlines
        |</outline>
        |</body>
        |</opml>
      """.stripMargin
  }
  /** XML-escapes attribute/text content. */
  private def e(x: String): String = Utility.escape(x)
}
| truerss/truerss | src/main/scala/truerss/util/OpmlBuilder.scala | Scala | mit | 827 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.utils
import java.io._
import java.nio._
import java.nio.channels._
import java.nio.charset.{Charset, StandardCharsets}
import java.nio.file.{Files, StandardOpenOption}
import java.security.cert.X509Certificate
import java.time.Duration
import java.util.{Arrays, Collections, Properties}
import java.util.concurrent.{Callable, ExecutionException, Executors, TimeUnit}
import javax.net.ssl.X509TrustManager
import kafka.api._
import kafka.cluster.{Broker, EndPoint}
import kafka.log._
import kafka.security.auth.{Acl, Authorizer => LegacyAuthorizer, Resource}
import kafka.server._
import kafka.server.checkpoints.OffsetCheckpointFile
import com.yammer.metrics.core.Meter
import kafka.controller.LeaderIsrAndControllerEpoch
import kafka.metrics.KafkaYammerMetrics
import kafka.utils.Implicits._
import kafka.zk._
import org.apache.kafka.clients.CommonClientConfigs
import org.apache.kafka.clients.admin.AlterConfigOp.OpType
import org.apache.kafka.clients.admin._
import org.apache.kafka.clients.consumer._
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.acl.{AccessControlEntry, AccessControlEntryFilter, AclBinding, AclBindingFilter}
import org.apache.kafka.common.config.ConfigResource
import org.apache.kafka.common.errors.UnknownTopicOrPartitionException
import org.apache.kafka.common.header.Header
import org.apache.kafka.common.internals.Topic
import org.apache.kafka.common.network.{ListenerName, Mode}
import org.apache.kafka.common.record._
import org.apache.kafka.common.resource.ResourcePattern
import org.apache.kafka.common.security.auth.SecurityProtocol
import org.apache.kafka.common.serialization.{ByteArrayDeserializer, ByteArraySerializer, Deserializer, IntegerSerializer, Serializer}
import org.apache.kafka.common.utils.Time
import org.apache.kafka.common.utils.Utils._
import org.apache.kafka.common.{KafkaFuture, TopicPartition}
import org.apache.kafka.server.authorizer.{Authorizer => JAuthorizer}
import org.apache.kafka.test.{TestSslUtils, TestUtils => JTestUtils}
import org.apache.zookeeper.KeeperException.SessionExpiredException
import org.apache.zookeeper.ZooDefs._
import org.apache.zookeeper.data.ACL
import org.junit.Assert._
import org.scalatest.Assertions.fail
import scala.jdk.CollectionConverters._
import scala.collection.mutable.{ArrayBuffer, ListBuffer}
import scala.collection.{Map, Seq, mutable}
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Await, ExecutionContext, Future}
/**
 * Utility functions to help with testing
 */
object TestUtils extends Logging {
  // Shared random source for all helpers, provided by the Java test utils.
  val random = JTestUtils.RANDOM
  /* 0 gives a random port; you can then retrieve the assigned port from the Socket object. */
  val RandomPort = 0
  /* Incorrect broker port which can used by kafka clients in tests. This port should not be used
   by any other service and hence we use a reserved port. */
  val IncorrectBrokerPort = 225
  /** Port to use for unit tests that mock/don't require a real ZK server. */
  val MockZkPort = 1
  /** ZooKeeper connection string to use for unit tests that mock/don't require a real ZK server. */
  val MockZkConnect = "127.0.0.1:" + MockZkPort
  // CN in SSL certificates - this is used for endpoint validation when enabled
  val SslCertificateCn = "localhost"
  // NOTE(review): presumably a record-header key plus marker values used to tag
  // transactional test records as committed/aborted; usage is outside this chunk.
  private val transactionStatusKey = "transactionStatus"
  private val committedValue : Array[Byte] = "committed".getBytes(StandardCharsets.UTF_8)
  private val abortedValue : Array[Byte] = "aborted".getBytes(StandardCharsets.UTF_8)
  /**
   * Create a temporary directory
   */
  def tempDir(): File = JTestUtils.tempDirectory()
  /** Random topic name for tests. */
  def tempTopic(): String = "testTopic" + random.nextInt(1000000)
  /**
   * Create a temporary relative directory
   */
  def tempRelativeDir(parent: String): File = {
    val parentFile = new File(parent)
    parentFile.mkdirs()
    JTestUtils.tempDirectory(parentFile.toPath, null)
  }
  /**
   * Create a random log directory in the format <string>-<int> used for Kafka partition logs.
   * It is the responsibility of the caller to set up a shutdown hook for deletion of the directory.
   */
  def randomPartitionLogDir(parentDir: File): File = {
    val attempts = 1000
    // Retry on name collisions: mkdir returns false if the directory exists.
    val f = Iterator.continually(new File(parentDir, "kafka-" + random.nextInt(1000000)))
                                  .take(attempts).find(_.mkdir())
                                  .getOrElse(sys.error(s"Failed to create directory after $attempts attempts"))
    f.deleteOnExit()
    f
  }
  /**
   * Create a temporary file
   */
  def tempFile(): File = JTestUtils.tempFile()
  /**
   * Create a temporary file and return an open file channel for this file
   */
  def tempChannel(): FileChannel =
    FileChannel.open(tempFile().toPath, StandardOpenOption.READ, StandardOpenOption.WRITE)
  /**
   * Create a kafka server instance with appropriate test settings
   * USING THIS IS A SIGN YOU ARE NOT WRITING A REAL UNIT TEST
   *
   * @param config The configuration of the server
   */
  def createServer(config: KafkaConfig, time: Time = Time.SYSTEM): KafkaServer = {
    createServer(config, time, None)
  }
  /** Overload that keeps the system clock but sets a thread-name prefix. */
  def createServer(config: KafkaConfig, threadNamePrefix: Option[String]): KafkaServer = {
    createServer(config, Time.SYSTEM, threadNamePrefix)
  }
  /** Creates and starts a KafkaServer; callers are responsible for shutting it down. */
  def createServer(config: KafkaConfig, time: Time, threadNamePrefix: Option[String]): KafkaServer = {
    val server = new KafkaServer(config, time, threadNamePrefix = threadNamePrefix)
    server.startup()
    server
  }
  /** Bound port of the server's listener for the given security protocol. */
  def boundPort(server: KafkaServer, securityProtocol: SecurityProtocol = SecurityProtocol.PLAINTEXT): Int =
    server.boundPort(ListenerName.forSecurityProtocol(securityProtocol))
  /** Broker metadata for tests, listening on `host:port` with the given protocol. */
  def createBroker(id: Int, host: String, port: Int, securityProtocol: SecurityProtocol = SecurityProtocol.PLAINTEXT): Broker =
    new Broker(id, host, port, ListenerName.forSecurityProtocol(securityProtocol), securityProtocol)
  /** Like [[createBroker]], but paired with a broker epoch (defaults to 0). */
  def createBrokerAndEpoch(id: Int, host: String, port: Int, securityProtocol: SecurityProtocol = SecurityProtocol.PLAINTEXT,
                           epoch: Long = 0): (Broker, Long) = {
    (new Broker(id, host, port, ListenerName.forSecurityProtocol(securityProtocol), securityProtocol), epoch)
  }
  /**
   * Create a test config for the provided parameters.
   *
   * Note that if `interBrokerSecurityProtocol` is defined, the listener for the `SecurityProtocol` will be enabled.
   *
   * @param numConfigs number of broker configs to generate (node ids 0 until numConfigs)
   * @param rackInfo optional rack assignment keyed by node id
   */
  def createBrokerConfigs(numConfigs: Int,
    zkConnect: String,
    enableControlledShutdown: Boolean = true,
    enableDeleteTopic: Boolean = true,
    interBrokerSecurityProtocol: Option[SecurityProtocol] = None,
    trustStoreFile: Option[File] = None,
    saslProperties: Option[Properties] = None,
    enablePlaintext: Boolean = true,
    enableSsl: Boolean = false,
    enableSaslPlaintext: Boolean = false,
    enableSaslSsl: Boolean = false,
    rackInfo: Map[Int, String] = Map(),
    logDirCount: Int = 1,
    enableToken: Boolean = false,
    numPartitions: Int = 1,
    defaultReplicationFactor: Short = 1): Seq[Properties] = {
    // Delegates to createBrokerConfig once per node id, with a random port each.
    (0 until numConfigs).map { node =>
      createBrokerConfig(node, zkConnect, enableControlledShutdown, enableDeleteTopic, RandomPort,
        interBrokerSecurityProtocol, trustStoreFile, saslProperties, enablePlaintext = enablePlaintext, enableSsl = enableSsl,
        enableSaslPlaintext = enableSaslPlaintext, enableSaslSsl = enableSaslSsl, rack = rackInfo.get(node), logDirCount = logDirCount, enableToken = enableToken,
        numPartitions = numPartitions, defaultReplicationFactor = defaultReplicationFactor)
    }
  }
def getBrokerListStrFromServers(servers: Seq[KafkaServer], protocol: SecurityProtocol = SecurityProtocol.PLAINTEXT): String = {
servers.map { s =>
val listener = s.config.advertisedListeners.find(_.securityProtocol == protocol).getOrElse(
sys.error(s"Could not find listener with security protocol $protocol"))
formatAddress(listener.host, boundPort(s, protocol))
}.mkString(",")
}
  /** Comma-separated `host:port` bootstrap list for the given listener name. */
  def bootstrapServers(servers: Seq[KafkaServer], listenerName: ListenerName): String = {
    servers.map { s =>
      val listener = s.config.advertisedListeners.find(_.listenerName == listenerName).getOrElse(
        sys.error(s"Could not find listener with name ${listenerName.value}"))
      formatAddress(listener.host, s.boundPort(listenerName))
    }.mkString(",")
  }
  /**
   * Shutdown `servers` and delete their log directories.
   * Servers are shut down concurrently; the whole operation is bounded to
   * five minutes so a hung broker fails the test rather than wedging it.
   */
  def shutdownServers(servers: Seq[KafkaServer]): Unit = {
    // Test-only code: the global execution context is acceptable here.
    import ExecutionContext.Implicits._
    val future = Future.traverse(servers) { s =>
      Future {
        s.shutdown()
        CoreUtils.delete(s.config.logDirs)
      }
    }
    Await.result(future, FiniteDuration(5, TimeUnit.MINUTES))
  }
  /**
   * Create a test config for the provided parameters.
   *
   * Note that if `interBrokerSecurityProtocol` is defined, the listener for the `SecurityProtocol` will be enabled.
   */
  def createBrokerConfig(nodeId: Int,
                         zkConnect: String,
                         enableControlledShutdown: Boolean = true,
                         enableDeleteTopic: Boolean = true,
                         port: Int = RandomPort,
                         interBrokerSecurityProtocol: Option[SecurityProtocol] = None,
                         trustStoreFile: Option[File] = None,
                         saslProperties: Option[Properties] = None,
                         enablePlaintext: Boolean = true,
                         enableSaslPlaintext: Boolean = false,
                         saslPlaintextPort: Int = RandomPort,
                         enableSsl: Boolean = false,
                         sslPort: Int = RandomPort,
                         enableSaslSsl: Boolean = false,
                         saslSslPort: Int = RandomPort,
                         rack: Option[String] = None,
                         logDirCount: Int = 1,
                         enableToken: Boolean = false,
                         numPartitions: Int = 1,
                         defaultReplicationFactor: Short = 1): Properties = {
    // A protocol is also enabled implicitly when chosen as the inter-broker protocol.
    def shouldEnable(protocol: SecurityProtocol) = interBrokerSecurityProtocol.fold(false)(_ == protocol)
    val protocolAndPorts = ArrayBuffer[(SecurityProtocol, Int)]()
    if (enablePlaintext || shouldEnable(SecurityProtocol.PLAINTEXT))
      protocolAndPorts += SecurityProtocol.PLAINTEXT -> port
    if (enableSsl || shouldEnable(SecurityProtocol.SSL))
      protocolAndPorts += SecurityProtocol.SSL -> sslPort
    if (enableSaslPlaintext || shouldEnable(SecurityProtocol.SASL_PLAINTEXT))
      protocolAndPorts += SecurityProtocol.SASL_PLAINTEXT -> saslPlaintextPort
    if (enableSaslSsl || shouldEnable(SecurityProtocol.SASL_SSL))
      protocolAndPorts += SecurityProtocol.SASL_SSL -> saslSslPort
    val listeners = protocolAndPorts.map { case (protocol, port) =>
      s"${protocol.name}://localhost:$port"
    }.mkString(",")
    val props = new Properties
    if (nodeId >= 0) props.put(KafkaConfig.BrokerIdProp, nodeId.toString)
    props.put(KafkaConfig.ListenersProp, listeners)
    if (logDirCount > 1) {
      val logDirs = (1 to logDirCount).toList.map(i =>
        // We would like to allow user to specify both relative path and absolute path as log directory for backward-compatibility reason
        // We can verify this by using a mixture of relative path and absolute path as log directories in the test
        if (i % 2 == 0) TestUtils.tempDir().getAbsolutePath else TestUtils.tempRelativeDir("data")
      ).mkString(",")
      props.put(KafkaConfig.LogDirsProp, logDirs)
    } else {
      props.put(KafkaConfig.LogDirProp, TestUtils.tempDir().getAbsolutePath)
    }
    props.put(KafkaConfig.ZkConnectProp, zkConnect)
    props.put(KafkaConfig.ZkConnectionTimeoutMsProp, "10000")
    props.put(KafkaConfig.ReplicaSocketTimeoutMsProp, "1500")
    props.put(KafkaConfig.ControllerSocketTimeoutMsProp, "1500")
    props.put(KafkaConfig.ControlledShutdownEnableProp, enableControlledShutdown.toString)
    props.put(KafkaConfig.DeleteTopicEnableProp, enableDeleteTopic.toString)
    props.put(KafkaConfig.LogDeleteDelayMsProp, "1000")
    props.put(KafkaConfig.ControlledShutdownRetryBackoffMsProp, "100")
    props.put(KafkaConfig.LogCleanerDedupeBufferSizeProp, "2097152")
    props.put(KafkaConfig.LogMessageTimestampDifferenceMaxMsProp, Long.MaxValue.toString)
    props.put(KafkaConfig.OffsetsTopicReplicationFactorProp, "1")
    if (!props.containsKey(KafkaConfig.OffsetsTopicPartitionsProp))
      props.put(KafkaConfig.OffsetsTopicPartitionsProp, "5")
    if (!props.containsKey(KafkaConfig.GroupInitialRebalanceDelayMsProp))
      props.put(KafkaConfig.GroupInitialRebalanceDelayMsProp, "0")
    rack.foreach(props.put(KafkaConfig.RackProp, _))
    // Add SSL/SASL client-and-server settings only when a matching listener exists.
    if (protocolAndPorts.exists { case (protocol, _) => usesSslTransportLayer(protocol) })
      props ++= sslConfigs(Mode.SERVER, false, trustStoreFile, s"server$nodeId")
    if (protocolAndPorts.exists { case (protocol, _) => usesSaslAuthentication(protocol) })
      props ++= JaasTestUtils.saslConfigs(saslProperties)
    interBrokerSecurityProtocol.foreach { protocol =>
      props.put(KafkaConfig.InterBrokerSecurityProtocolProp, protocol.name)
    }
    if (enableToken)
      props.put(KafkaConfig.DelegationTokenMasterKeyProp, "masterkey")
    props.put(KafkaConfig.NumPartitionsProp, numPartitions.toString)
    props.put(KafkaConfig.DefaultReplicationFactorProp, defaultReplicationFactor.toString)
    props
  }
  /**
   * Create a topic in ZooKeeper.
   * Wait until the leader is elected and the metadata is propagated to all brokers.
   * Return the leader for each partition.
   */
  def createTopic(zkClient: KafkaZkClient,
                  topic: String,
                  numPartitions: Int = 1,
                  replicationFactor: Int = 1,
                  servers: Seq[KafkaServer],
                  topicConfig: Properties = new Properties): scala.collection.immutable.Map[Int, Int] = {
    val adminZkClient = new AdminZkClient(zkClient)
    // create topic, retrying if the ZK session expired mid-call
    waitUntilTrue( () => {
      var hasSessionExpirationException = false
      try {
        adminZkClient.createTopic(topic, numPartitions, replicationFactor, topicConfig)
      } catch {
        case _: SessionExpiredException => hasSessionExpirationException = true
        case e: Throwable => throw e // let other exceptions propagate
      }
      !hasSessionExpirationException},
      s"Can't create topic $topic")
    // wait until the update metadata request for new topic reaches all servers
    (0 until numPartitions).map { i =>
      TestUtils.waitUntilMetadataIsPropagated(servers, topic, i)
      i -> TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, topic, i)
    }.toMap
  }
  /**
   * Create a topic in ZooKeeper using a customized replica assignment.
   * Wait until the leader is elected and the metadata is propagated to all brokers.
   * Return the leader for each partition.
   */
  // Convenience overload: delegates to the topicConfig variant with an empty config.
  def createTopic(zkClient: KafkaZkClient,
                  topic: String,
                  partitionReplicaAssignment: collection.Map[Int, Seq[Int]],
                  servers: Seq[KafkaServer]): scala.collection.immutable.Map[Int, Int] = {
    createTopic(zkClient, topic, partitionReplicaAssignment, servers, new Properties())
  }
  /**
   * Create a topic in ZooKeeper using a customized replica assignment.
   * Wait until the leader is elected and the metadata is propagated to all brokers.
   * Return the leader for each partition.
   *
   * @param partitionReplicaAssignment map from partition id to the broker ids assigned as replicas
   * @param topicConfig per-topic configuration overrides
   * @return a map from partition id to the elected leader's broker id
   */
  def createTopic(zkClient: KafkaZkClient,
                  topic: String,
                  partitionReplicaAssignment: collection.Map[Int, Seq[Int]],
                  servers: Seq[KafkaServer],
                  topicConfig: Properties): scala.collection.immutable.Map[Int, Int] = {
    val adminZkClient = new AdminZkClient(zkClient)
    // create topic; retried (via waitUntilTrue) only when the ZK session expires
    waitUntilTrue( () => {
      var hasSessionExpirationException = false
      try {
        adminZkClient.createTopicWithAssignment(topic, topicConfig, partitionReplicaAssignment)
      } catch {
        case _: SessionExpiredException => hasSessionExpirationException = true
        case e: Throwable => throw e // let other exceptions propagate
      }
      !hasSessionExpirationException},
      s"Can't create topic $topic")
    // wait until the update metadata request for new topic reaches all servers
    partitionReplicaAssignment.keySet.map { i =>
      TestUtils.waitUntilMetadataIsPropagated(servers, topic, i)
      i -> TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, topic, i)
    }.toMap
  }
  /**
   * Create the consumer offsets/group metadata topic and wait until the leader is elected and metadata is propagated
   * to all brokers.
   */
  def createOffsetsTopic(zkClient: KafkaZkClient, servers: Seq[KafkaServer]): Unit = {
    // Partition count and replication factor are taken from the first server's config;
    // topic configs come from that server's group coordinator.
    val server = servers.head
    createTopic(zkClient, Topic.GROUP_METADATA_TOPIC_NAME,
      server.config.getInt(KafkaConfig.OffsetsTopicPartitionsProp),
      server.config.getShort(KafkaConfig.OffsetsTopicReplicationFactorProp).toInt,
      servers,
      server.groupCoordinator.offsetsTopicConfigs)
  }
  /**
   * Wrap a single record log buffer.
   *
   * @param value the record value
   * @param key the record key (null for no key)
   * @param codec compression type for the batch
   * @param timestamp the record timestamp
   * @param magicValue the record batch magic value
   */
  def singletonRecords(value: Array[Byte],
                       key: Array[Byte] = null,
                       codec: CompressionType = CompressionType.NONE,
                       timestamp: Long = RecordBatch.NO_TIMESTAMP,
                       magicValue: Byte = RecordBatch.CURRENT_MAGIC_VALUE): MemoryRecords = {
    records(Seq(new SimpleRecord(timestamp, key, value)), magicValue = magicValue, codec = codec)
  }
  /** Build a MemoryRecords batch containing one record per given value. */
  def recordsWithValues(magicValue: Byte,
                        codec: CompressionType,
                        values: Array[Byte]*): MemoryRecords = {
    records(values.map(value => new SimpleRecord(value)), magicValue, codec)
  }
  /**
   * Build a MemoryRecords batch from the given records.
   * The batch uses CREATE_TIME timestamps with the current wall-clock time as the batch timestamp.
   */
  def records(records: Iterable[SimpleRecord],
              magicValue: Byte = RecordBatch.CURRENT_MAGIC_VALUE,
              codec: CompressionType = CompressionType.NONE,
              producerId: Long = RecordBatch.NO_PRODUCER_ID,
              producerEpoch: Short = RecordBatch.NO_PRODUCER_EPOCH,
              sequence: Int = RecordBatch.NO_SEQUENCE,
              baseOffset: Long = 0L,
              partitionLeaderEpoch: Int = RecordBatch.NO_PARTITION_LEADER_EPOCH): MemoryRecords = {
    // Size the buffer exactly for the uncompressed batch
    val buf = ByteBuffer.allocate(DefaultRecordBatch.sizeInBytes(records.asJava))
    val builder = MemoryRecords.builder(buf, magicValue, codec, TimestampType.CREATE_TIME, baseOffset,
      System.currentTimeMillis, producerId, producerEpoch, sequence, false, partitionLeaderEpoch)
    records.foreach(builder.append)
    builder.build()
  }
  /**
   * Generate an array of random bytes
   *
   * @param numBytes The size of the array
   */
  // Delegates to the Java test utilities.
  def randomBytes(numBytes: Int): Array[Byte] = JTestUtils.randomBytes(numBytes)
  /**
   * Generate a random string of letters and digits of the given length
   *
   * @param len The length of the string
   * @return The random string
   */
  // Delegates to the Java test utilities.
  def randomString(len: Int): String = JTestUtils.randomString(len)
  /**
   * Check that the buffer content from buffer.position() to buffer.limit() is equal
   */
  def checkEquals(b1: ByteBuffer, b2: ByteBuffer): Unit = {
    assertEquals("Buffers should have equal length", b1.limit() - b1.position(), b2.limit() - b2.position())
    // Absolute gets: neither buffer's position is advanced by the comparison
    for(i <- 0 until b1.limit() - b1.position())
      assertEquals("byte " + i + " byte not equal.", b1.get(b1.position() + i), b2.get(b1.position() + i))
  }
/**
* Throw an exception if the two iterators are of differing lengths or contain
* different messages on their Nth element
*/
def checkEquals[T](expected: Iterator[T], actual: Iterator[T]): Unit = {
var length = 0
while(expected.hasNext && actual.hasNext) {
length += 1
assertEquals(expected.next, actual.next)
}
// check if the expected iterator is longer
if (expected.hasNext) {
var length1 = length
while (expected.hasNext) {
expected.next
length1 += 1
}
assertFalse("Iterators have uneven length-- first has more: "+length1 + " > " + length, true)
}
// check if the actual iterator was longer
if (actual.hasNext) {
var length2 = length
while (actual.hasNext) {
actual.next
length2 += 1
}
assertFalse("Iterators have uneven length-- second has more: "+length2 + " > " + length, true)
}
}
/**
* Throw an exception if an iterable has different length than expected
*
*/
def checkLength[T](s1: Iterator[T], expectedLength:Int): Unit = {
var n = 0
while (s1.hasNext) {
n+=1
s1.next
}
assertEquals(expectedLength, n)
}
  /**
   * Throw an exception if the two iterators are of differing lengths or contain
   * different messages on their Nth element
   */
  // Both iterators are consumed by the comparison.
  def checkEquals[T](s1: java.util.Iterator[T], s2: java.util.Iterator[T]): Unit = {
    while(s1.hasNext && s2.hasNext)
      assertEquals(s1.next, s2.next)
    assertFalse("Iterators have uneven length--first has more", s1.hasNext)
    assertFalse("Iterators have uneven length--second has more", s2.hasNext)
  }
def stackedIterator[T](s: Iterator[T]*): Iterator[T] = {
new Iterator[T] {
var cur: Iterator[T] = null
val topIterator = s.iterator
def hasNext: Boolean = {
while (true) {
if (cur == null) {
if (topIterator.hasNext)
cur = topIterator.next
else
return false
}
if (cur.hasNext)
return true
cur = null
}
// should never reach her
throw new RuntimeException("should not reach here")
}
def next() : T = cur.next
}
}
  /**
   * Create a hexadecimal string for the given bytes
   */
  def hexString(bytes: Array[Byte]): String = hexString(ByteBuffer.wrap(bytes))
/**
* Create a hexadecimal string for the given bytes
*/
def hexString(buffer: ByteBuffer): String = {
val builder = new StringBuilder("0x")
for(i <- 0 until buffer.limit())
builder.append(String.format("%x", Integer.valueOf(buffer.get(buffer.position() + i))))
builder.toString
}
  /**
   * Build client or server security properties for the given protocol:
   * SSL configs are added when the transport layer uses TLS, SASL configs when the
   * protocol authenticates via SASL, and the security.protocol property is always set.
   */
  def securityConfigs(mode: Mode,
                      securityProtocol: SecurityProtocol,
                      trustStoreFile: Option[File],
                      certAlias: String,
                      certCn: String,
                      saslProperties: Option[Properties],
                      tlsProtocol: String = TestSslUtils.DEFAULT_TLS_PROTOCOL_FOR_TESTS): Properties = {
    val props = new Properties
    if (usesSslTransportLayer(securityProtocol))
      props ++= sslConfigs(mode, securityProtocol == SecurityProtocol.SSL, trustStoreFile, certAlias, certCn, tlsProtocol)
    if (usesSaslAuthentication(securityProtocol))
      props ++= JaasTestUtils.saslConfigs(saslProperties)
    props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, securityProtocol.name)
    props
  }
  /** Security properties for a test producer (client mode, "producer" cert alias). */
  def producerSecurityConfigs(securityProtocol: SecurityProtocol,
                              trustStoreFile: Option[File],
                              saslProperties: Option[Properties]): Properties =
    securityConfigs(Mode.CLIENT, securityProtocol, trustStoreFile, "producer", SslCertificateCn, saslProperties)
  /**
   * Create a (new) producer with a few pre-configured properties.
   * Defaults favor test determinism: acks=-1 (all), effectively unlimited retries,
   * no lingering, and byte-array serializers.
   */
  def createProducer[K, V](brokerList: String,
                           acks: Int = -1,
                           maxBlockMs: Long = 60 * 1000L,
                           bufferSize: Long = 1024L * 1024L,
                           retries: Int = Int.MaxValue,
                           deliveryTimeoutMs: Int = 30 * 1000,
                           lingerMs: Int = 0,
                           batchSize: Int = 16384,
                           compressionType: String = "none",
                           requestTimeoutMs: Int = 20 * 1000,
                           securityProtocol: SecurityProtocol = SecurityProtocol.PLAINTEXT,
                           trustStoreFile: Option[File] = None,
                           saslProperties: Option[Properties] = None,
                           keySerializer: Serializer[K] = new ByteArraySerializer,
                           valueSerializer: Serializer[V] = new ByteArraySerializer,
                           enableIdempotence: Boolean = false): KafkaProducer[K, V] = {
    val producerProps = new Properties
    producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList)
    producerProps.put(ProducerConfig.ACKS_CONFIG, acks.toString)
    producerProps.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, maxBlockMs.toString)
    producerProps.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferSize.toString)
    producerProps.put(ProducerConfig.RETRIES_CONFIG, retries.toString)
    producerProps.put(ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG, deliveryTimeoutMs.toString)
    producerProps.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, requestTimeoutMs.toString)
    producerProps.put(ProducerConfig.LINGER_MS_CONFIG, lingerMs.toString)
    producerProps.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize.toString)
    producerProps.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, compressionType)
    producerProps.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, enableIdempotence.toString)
    // Security-related properties (SSL/SASL) are appended last
    producerProps ++= producerSecurityConfigs(securityProtocol, trustStoreFile, saslProperties)
    new KafkaProducer[K, V](producerProps, keySerializer, valueSerializer)
  }
def usesSslTransportLayer(securityProtocol: SecurityProtocol): Boolean = securityProtocol match {
case SecurityProtocol.SSL | SecurityProtocol.SASL_SSL => true
case _ => false
}
def usesSaslAuthentication(securityProtocol: SecurityProtocol): Boolean = securityProtocol match {
case SecurityProtocol.SASL_PLAINTEXT | SecurityProtocol.SASL_SSL => true
case _ => false
}
  /** Security properties for a test consumer (client mode, "consumer" cert alias). */
  def consumerSecurityConfigs(securityProtocol: SecurityProtocol, trustStoreFile: Option[File], saslProperties: Option[Properties]): Properties =
    securityConfigs(Mode.CLIENT, securityProtocol, trustStoreFile, "consumer", SslCertificateCn, saslProperties)
  /** Security properties for a test admin client (client mode, "admin-client" cert alias). */
  def adminClientSecurityConfigs(securityProtocol: SecurityProtocol, trustStoreFile: Option[File], saslProperties: Option[Properties]): Properties =
    securityConfigs(Mode.CLIENT, securityProtocol, trustStoreFile, "admin-client", SslCertificateCn, saslProperties)
  /**
   * Create a consumer with a few pre-configured properties.
   * Defaults: earliest auto offset reset, auto commit enabled, read_uncommitted isolation,
   * and byte-array deserializers.
   */
  def createConsumer[K, V](brokerList: String,
                           groupId: String = "group",
                           autoOffsetReset: String = "earliest",
                           enableAutoCommit: Boolean = true,
                           readCommitted: Boolean = false,
                           maxPollRecords: Int = 500,
                           securityProtocol: SecurityProtocol = SecurityProtocol.PLAINTEXT,
                           trustStoreFile: Option[File] = None,
                           saslProperties: Option[Properties] = None,
                           keyDeserializer: Deserializer[K] = new ByteArrayDeserializer,
                           valueDeserializer: Deserializer[V] = new ByteArrayDeserializer): KafkaConsumer[K, V] = {
    val consumerProps = new Properties
    consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList)
    consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset)
    consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId)
    consumerProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit.toString)
    consumerProps.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords.toString)
    consumerProps.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, if (readCommitted) "read_committed" else "read_uncommitted")
    // Security-related properties (SSL/SASL) are appended last
    consumerProps ++= consumerSecurityConfigs(securityProtocol, trustStoreFile, saslProperties)
    new KafkaConsumer[K, V](consumerProps, keyDeserializer, valueDeserializer)
  }
  /** Register brokers with the given ids (and no rack) in ZooKeeper. */
  def createBrokersInZk(zkClient: KafkaZkClient, ids: Seq[Int]): Seq[Broker] =
    createBrokersInZk(ids.map(kafka.admin.BrokerMetadata(_, None)), zkClient)
  /**
   * Register the given brokers in ZooKeeper, each with a single PLAINTEXT endpoint
   * on localhost:6667, and return the registered Broker objects.
   */
  def createBrokersInZk(brokerMetadatas: Seq[kafka.admin.BrokerMetadata], zkClient: KafkaZkClient): Seq[Broker] = {
    zkClient.makeSurePersistentPathExists(BrokerIdsZNode.path)
    val brokers = brokerMetadatas.map { b =>
      val protocol = SecurityProtocol.PLAINTEXT
      val listenerName = ListenerName.forSecurityProtocol(protocol)
      Broker(b.id, Seq(EndPoint("localhost", 6667, listenerName, protocol)), b.rack)
    }
    // jmxPort = -1 marks JMX as disabled for these synthetic registrations
    brokers.foreach(b => zkClient.registerBroker(BrokerInfo(Broker(b.id, b.endPoints, rack = b.rack),
      ApiVersion.latestVersion, jmxPort = -1)))
    brokers
  }
  /**
   * Delete the registration znodes of the given broker ids from ZooKeeper and
   * return Broker stubs (localhost:6667, PLAINTEXT) matching the deleted ids.
   */
  def deleteBrokersInZk(zkClient: KafkaZkClient, ids: Seq[Int]): Seq[Broker] = {
    val brokers = ids.map(createBroker(_, "localhost", 6667, SecurityProtocol.PLAINTEXT))
    ids.foreach(b => zkClient.deletePath(BrokerIdsZNode.path + "/" + b))
    brokers
  }
def getMsgStrings(n: Int): Seq[String] = {
val buffer = new ListBuffer[String]
for (i <- 0 until n)
buffer += ("msg" + i)
buffer
}
  /**
   * Force the given broker to be leader for each partition by writing partition state
   * directly to ZooKeeper. If a partition has no existing state, a fresh LeaderAndIsr
   * containing only the new leader in the ISR is written.
   */
  def makeLeaderForPartition(zkClient: KafkaZkClient,
                             topic: String,
                             leaderPerPartitionMap: scala.collection.immutable.Map[Int, Int],
                             controllerEpoch: Int): Unit = {
    val newLeaderIsrAndControllerEpochs = leaderPerPartitionMap.map { case (partition, leader) =>
      val topicPartition = new TopicPartition(topic, partition)
      val newLeaderAndIsr = zkClient.getTopicPartitionState(topicPartition)
        .map(_.leaderAndIsr.newLeader(leader))
        .getOrElse(LeaderAndIsr(leader, List(leader)))
      topicPartition -> LeaderIsrAndControllerEpoch(newLeaderAndIsr, controllerEpoch)
    }
    // MatchAnyVersion: overwrite regardless of the znode's current version
    zkClient.setTopicPartitionStatesRaw(newLeaderIsrAndControllerEpochs, ZkVersion.MatchAnyVersion)
  }
  /**
   * If neither oldLeaderOpt nor newLeaderOpt is defined, wait until the leader of a partition is elected.
   * If oldLeaderOpt is defined, it waits until the new leader is different from the old leader.
   * If newLeaderOpt is defined, it waits until the new leader becomes the expected new leader.
   *
   * @return The new leader (note that negative values are used to indicate conditions like NoLeader and
   *         LeaderDuringDelete).
   * @throws AssertionError if the expected condition is not true within the timeout.
   */
  def waitUntilLeaderIsElectedOrChanged(zkClient: KafkaZkClient, topic: String, partition: Int, timeoutMs: Long = 30000L,
                                        oldLeaderOpt: Option[Int] = None, newLeaderOpt: Option[Int] = None): Int = {
    require(!(oldLeaderOpt.isDefined && newLeaderOpt.isDefined), "Can't define both the old and the new leader")
    val startTime = System.currentTimeMillis()
    val topicPartition = new TopicPartition(topic, partition)
    trace(s"Waiting for leader to be elected or changed for partition $topicPartition, old leader is $oldLeaderOpt, " +
      s"new leader is $newLeaderOpt")
    var leader: Option[Int] = None
    var electedOrChangedLeader: Option[Int] = None
    // Poll ZooKeeper until the requested condition holds or the timeout elapses
    while (electedOrChangedLeader.isEmpty && System.currentTimeMillis() < startTime + timeoutMs) {
      // check if leader is elected
      leader = zkClient.getLeaderForPartition(topicPartition)
      leader match {
        case Some(l) => (newLeaderOpt, oldLeaderOpt) match {
          case (Some(newLeader), _) if newLeader == l =>
            trace(s"Expected new leader $l is elected for partition $topicPartition")
            electedOrChangedLeader = leader
          case (_, Some(oldLeader)) if oldLeader != l =>
            trace(s"Leader for partition $topicPartition is changed from $oldLeader to $l")
            electedOrChangedLeader = leader
          case (None, None) =>
            trace(s"Leader $l is elected for partition $topicPartition")
            electedOrChangedLeader = leader
          case _ =>
            trace(s"Current leader for partition $topicPartition is $l")
        }
        case None =>
          trace(s"Leader for partition $topicPartition is not elected yet")
      }
      Thread.sleep(math.min(timeoutMs, 100L))
    }
    // Timed out without satisfying the condition: fail with a condition-specific message
    electedOrChangedLeader.getOrElse {
      val errorMessage = (newLeaderOpt, oldLeaderOpt) match {
        case (Some(newLeader), _) =>
          s"Timing out after $timeoutMs ms since expected new leader $newLeader was not elected for partition $topicPartition, leader is $leader"
        case (_, Some(oldLeader)) =>
          s"Timing out after $timeoutMs ms since a new leader that is different from $oldLeader was not elected for partition $topicPartition, " +
            s"leader is $leader"
        case _ =>
          s"Timing out after $timeoutMs ms since a leader was not elected for partition $topicPartition"
      }
      fail(errorMessage)
    }
  }
  /**
   * Execute the given block. If it throws an assert error, retry. Repeat
   * until no error is thrown or the time limit elapses
   */
  def retry(maxWaitMs: Long)(block: => Unit): Unit = {
    var wait = 1L
    val startTime = System.currentTimeMillis()
    while(true) {
      try {
        block
        return
      } catch {
        // Only AssertionError triggers a retry; other exceptions propagate immediately
        case e: AssertionError =>
          val elapsed = System.currentTimeMillis - startTime
          if (elapsed > maxWaitMs) {
            throw e
          } else {
            info("Attempt failed, sleeping for " + wait + ", and then retrying.")
            Thread.sleep(wait)
            // Exponential backoff: wait doubles each attempt, growing by at most 1000ms per step
            wait += math.min(wait, 1000)
          }
      }
    }
  }
  /** Keep polling the consumer (100ms per poll) until `action` returns true or `waitTimeMs` elapses. */
  def pollUntilTrue(consumer: Consumer[_, _],
                    action: () => Boolean,
                    msg: => String,
                    waitTimeMs: Long = JTestUtils.DEFAULT_MAX_WAIT_MS): Unit = {
    waitUntilTrue(() => {
      consumer.poll(Duration.ofMillis(100))
      action()
    }, msg = msg, pause = 0L, waitTimeMs = waitTimeMs)
  }
  /** Keep polling the consumer until `action` accepts the polled records or `waitTimeMs` elapses. */
  def pollRecordsUntilTrue[K, V](consumer: Consumer[K, V],
                                 action: ConsumerRecords[K, V] => Boolean,
                                 msg: => String,
                                 waitTimeMs: Long = JTestUtils.DEFAULT_MAX_WAIT_MS): Unit = {
    waitUntilTrue(() => {
      val records = consumer.poll(Duration.ofMillis(100))
      action(records)
    }, msg = msg, pause = 0L, waitTimeMs = waitTimeMs)
  }
  /** Subscribe the consumer to the topic and poll until at least one record is received. */
  def subscribeAndWaitForRecords(topic: String,
                                 consumer: KafkaConsumer[Array[Byte], Array[Byte]],
                                 waitTimeMs: Long = JTestUtils.DEFAULT_MAX_WAIT_MS): Unit = {
    consumer.subscribe(Collections.singletonList(topic))
    pollRecordsUntilTrue(
      consumer,
      (records: ConsumerRecords[Array[Byte], Array[Byte]]) => !records.isEmpty,
      "Expected records",
      waitTimeMs)
  }
  /**
   * Wait for the presence of an optional value.
   *
   * @param func The function defining the optional value
   * @param msg Error message in the case that the value never appears
   * @param waitTimeMs Maximum time to wait
   * @return The unwrapped value returned by the function
   */
  def awaitValue[T](func: () => Option[T], msg: => String, waitTimeMs: Long = JTestUtils.DEFAULT_MAX_WAIT_MS): T = {
    var value: Option[T] = None
    waitUntilTrue(() => {
      value = func()
      value.isDefined
    }, msg, waitTimeMs)
    // Safe: waitUntilTrue only returns normally once value.isDefined
    value.get
  }
  /**
   * Wait until the given condition is true or throw an exception if the given wait time elapses.
   *
   * @param condition condition to check
   * @param msg error message
   * @param waitTimeMs maximum time to wait and retest the condition before failing the test
   * @param pause delay between condition checks
   */
  def waitUntilTrue(condition: () => Boolean, msg: => String,
                    waitTimeMs: Long = JTestUtils.DEFAULT_MAX_WAIT_MS, pause: Long = 100L): Unit = {
    val startTime = System.currentTimeMillis()
    while (true) {
      if (condition())
        return
      if (System.currentTimeMillis() > startTime + waitTimeMs)
        fail(msg)
      Thread.sleep(waitTimeMs.min(pause))
    }
    // should never hit here: the loop only exits via return or fail()
    throw new RuntimeException("unexpected error")
  }
  /**
   * Invoke `compute` until `predicate` is true or `waitTime` elapses.
   *
   * Return the last `compute` result and a boolean indicating whether `predicate` succeeded for that value.
   *
   * This method is useful in cases where `waitUntilTrue` makes it awkward to provide good error messages.
   */
  def computeUntilTrue[T](compute: => T, waitTime: Long = JTestUtils.DEFAULT_MAX_WAIT_MS, pause: Long = 100L)(
                          predicate: T => Boolean): (T, Boolean) = {
    val startTime = System.currentTimeMillis()
    while (true) {
      val result = compute
      if (predicate(result))
        return result -> true
      if (System.currentTimeMillis() > startTime + waitTime)
        return result -> false
      Thread.sleep(waitTime.min(pause))
    }
    // should never hit here: the loop always exits via one of the returns above
    throw new RuntimeException("unexpected error")
  }
  /** True if the given broker currently hosts the leader log for the partition. */
  def isLeaderLocalOnBroker(topic: String, partitionId: Int, server: KafkaServer): Boolean = {
    server.replicaManager.nonOfflinePartition(new TopicPartition(topic, partitionId)).exists(_.leaderLogIfLocal.isDefined)
  }
  /**
   * Return the leader epoch of the partition replica hosted on the given broker.
   * Fails the test if the broker does not host a (non-offline) replica of the partition.
   */
  def findLeaderEpoch(brokerId: Int,
                      topicPartition: TopicPartition,
                      servers: Iterable[KafkaServer]): Int = {
    val leaderServer = servers.find(_.config.brokerId == brokerId)
    val leaderPartition = leaderServer.flatMap(_.replicaManager.nonOfflinePartition(topicPartition))
      .getOrElse(fail(s"Failed to find expected replica on broker $brokerId"))
    leaderPartition.getLeaderEpoch
  }
  /**
   * Return the broker id of a server that hosts the partition but is not its leader.
   * Fails the test if no such follower exists among the given servers.
   */
  def findFollowerId(topicPartition: TopicPartition,
                     servers: Iterable[KafkaServer]): Int = {
    val followerOpt = servers.find { server =>
      server.replicaManager.nonOfflinePartition(topicPartition) match {
        case Some(partition) => !partition.leaderReplicaIdOpt.contains(server.config.brokerId)
        case None => false
      }
    }
    followerOpt
      .map(_.config.brokerId)
      .getOrElse(fail(s"Unable to locate follower for $topicPartition"))
  }
  /**
   * Wait until all brokers know about each other.
   *
   * @param servers The Kafka broker servers.
   * @param timeout The amount of time waiting on this condition before assert to fail
   */
  def waitUntilBrokerMetadataIsPropagated(servers: Seq[KafkaServer],
                                          timeout: Long = JTestUtils.DEFAULT_MAX_WAIT_MS): Unit = {
    val expectedBrokerIds = servers.map(_.config.brokerId).toSet
    // Every server's metadata cache must report exactly the expected set of alive brokers
    waitUntilTrue(() => servers.forall(server =>
      expectedBrokerIds == server.dataPlaneRequestProcessor.metadataCache.getAliveBrokers.map(_.id).toSet
    ), "Timed out waiting for broker metadata to propagate to all servers", timeout)
  }
  /**
   * Wait until a valid leader is propagated to the metadata cache in each broker.
   * It assumes that the leader propagated to each broker is the same.
   *
   * @param servers The list of servers that the metadata should reach to
   * @param topic The topic name
   * @param partition The partition Id
   * @param timeout The amount of time waiting on this condition before assert to fail
   * @return The leader of the partition.
   */
  def waitUntilMetadataIsPropagated(servers: Seq[KafkaServer], topic: String, partition: Int,
                                    timeout: Long = JTestUtils.DEFAULT_MAX_WAIT_MS): Int = {
    // Captures the last valid leader observed in any server's metadata cache
    var leader: Int = -1
    waitUntilTrue(
      () => servers.forall { server =>
        server.dataPlaneRequestProcessor.metadataCache.getPartitionInfo(topic, partition) match {
          case Some(partitionState) if Request.isValidBrokerId(partitionState.leader) =>
            leader = partitionState.leader
            true
          case _ => false
        }
      },
      "Partition [%s,%d] metadata not propagated after %d ms".format(topic, partition, timeout),
      waitTimeMs = timeout)
    leader
  }
  /** Wait until a controller is registered in ZooKeeper and return its broker id. */
  def waitUntilControllerElected(zkClient: KafkaZkClient, timeout: Long = JTestUtils.DEFAULT_MAX_WAIT_MS): Int = {
    val (controllerId, _) = TestUtils.computeUntilTrue(zkClient.getControllerId, waitTime = timeout)(_.isDefined)
    controllerId.getOrElse(fail(s"Controller not elected after $timeout ms"))
  }
  /**
   * Wait until some broker other than `oldLeader` holds the leader log for the partition
   * and return that broker's id.
   */
  def awaitLeaderChange(servers: Seq[KafkaServer],
                        tp: TopicPartition,
                        oldLeader: Int,
                        timeout: Long = JTestUtils.DEFAULT_MAX_WAIT_MS): Int = {
    def newLeaderExists: Option[Int] = {
      servers.find { server =>
        server.config.brokerId != oldLeader &&
          server.replicaManager.nonOfflinePartition(tp).exists(_.leaderLogIfLocal.isDefined)
      }.map(_.config.brokerId)
    }
    waitUntilTrue(() => newLeaderExists.isDefined,
      s"Did not observe leader change for partition $tp after $timeout ms", waitTimeMs = timeout)
    // Safe: waitUntilTrue only returns once newLeaderExists is defined
    newLeaderExists.get
  }
  /** Wait until some broker holds the leader log for the partition and return its broker id. */
  def waitUntilLeaderIsKnown(servers: Seq[KafkaServer],
                             tp: TopicPartition,
                             timeout: Long = JTestUtils.DEFAULT_MAX_WAIT_MS): Int = {
    def leaderIfExists: Option[Int] = {
      servers.find { server =>
        server.replicaManager.nonOfflinePartition(tp).exists(_.leaderLogIfLocal.isDefined)
      }.map(_.config.brokerId)
    }
    waitUntilTrue(() => leaderIfExists.isDefined,
      s"Partition $tp leaders not made yet after $timeout ms", waitTimeMs = timeout)
    // Safe: waitUntilTrue only returns once leaderIfExists is defined
    leaderIfExists.get
  }
def writeNonsenseToFile(fileName: File, position: Long, size: Int): Unit = {
val file = new RandomAccessFile(fileName, "rw")
file.seek(position)
for (_ <- 0 until size)
file.writeByte(random.nextInt(255))
file.close()
}
  /** Append `size` random bytes to the end of the given file. */
  def appendNonsenseToFile(file: File, size: Int): Unit = {
    val outputStream = Files.newOutputStream(file.toPath(), StandardOpenOption.APPEND)
    try {
      for (_ <- 0 until size)
        outputStream.write(random.nextInt(255))
    } finally outputStream.close()
  }
  /**
   * Assert that the partition's ISR in ZooKeeper contains no replica outside the assigned set.
   */
  def checkForPhantomInSyncReplicas(zkClient: KafkaZkClient, topic: String, partitionToBeReassigned: Int, assignedReplicas: Seq[Int]): Unit = {
    val inSyncReplicas = zkClient.getInSyncReplicasForPartition(new TopicPartition(topic, partitionToBeReassigned))
    // in sync replicas should not have any replica that is not in the new assigned replicas
    // NOTE(review): inSyncReplicas.get assumes the partition state exists in ZK; Option.get throws otherwise
    val phantomInSyncReplicas = inSyncReplicas.get.toSet -- assignedReplicas.toSet
    assertTrue("All in sync replicas %s must be in the assigned replica list %s".format(inSyncReplicas, assignedReplicas),
      phantomInSyncReplicas.isEmpty)
  }
  /**
   * Wait until the reassigned partition is fully replicated: the ISR size matches the
   * assignment, a leader is elected, and the leader reports no under-replicated partitions.
   */
  def ensureNoUnderReplicatedPartitions(zkClient: KafkaZkClient, topic: String, partitionToBeReassigned: Int, assignedReplicas: Seq[Int],
                                        servers: Seq[KafkaServer]): Unit = {
    val topicPartition = new TopicPartition(topic, partitionToBeReassigned)
    waitUntilTrue(() => {
      val inSyncReplicas = zkClient.getInSyncReplicasForPartition(topicPartition)
      inSyncReplicas.get.size == assignedReplicas.size
    },
      "Reassigned partition [%s,%d] is under replicated".format(topic, partitionToBeReassigned))
    var leader: Option[Int] = None
    waitUntilTrue(() => {
      leader = zkClient.getLeaderForPartition(topicPartition)
      leader.isDefined
    },
      "Reassigned partition [%s,%d] is unavailable".format(topic, partitionToBeReassigned))
    waitUntilTrue(() => {
      // leader.get is safe: the previous waitUntilTrue only returns once leader.isDefined
      val leaderBroker = servers.filter(s => s.config.brokerId == leader.get).head
      leaderBroker.replicaManager.underReplicatedPartitionCount == 0
    },
      "Reassigned partition [%s,%d] is under-replicated as reported by the leader %d".format(topic, partitionToBeReassigned, leader.get))
  }
  // Note: Call this method in the test itself, rather than the @After method.
  // Because of the assert, if assertNoNonDaemonThreads fails, nothing after would be executed.
  /** Assert that no live non-daemon thread has a name starting with the given prefix. */
  def assertNoNonDaemonThreads(threadNamePrefix: String): Unit = {
    val threadCount = Thread.getAllStackTraces.keySet.asScala.count { t =>
      !t.isDaemon && t.isAlive && t.getName.startsWith(threadNamePrefix)
    }
    assertEquals(0, threadCount)
  }
  /** Render the name and stack trace of every live thread as a single string (for debugging). */
  def allThreadStackTraces(): String = {
    Thread.getAllStackTraces.asScala.map { case (thread, stackTrace) =>
      thread.getName + "\\n\\t" + stackTrace.toList.map(_.toString).mkString("\\n\\t")
    }.mkString("\\n")
  }
  /**
   * Create new LogManager instance with default configuration for testing
   */
  def createLogManager(logDirs: Seq[File] = Seq.empty[File],
                       defaultConfig: LogConfig = LogConfig(),
                       cleanerConfig: CleanerConfig = CleanerConfig(enableCleaner = false),
                       time: MockTime = new MockTime()): LogManager = {
    new LogManager(logDirs = logDirs.map(_.getAbsoluteFile),
                   initialOfflineDirs = Array.empty[File],
                   topicConfigs = Map(),
                   initialDefaultConfig = defaultConfig,
                   cleanerConfig = cleanerConfig,
                   recoveryThreadsPerDataDir = 4,
                   flushCheckMs = 1000L,
                   flushRecoveryOffsetCheckpointMs = 10000L,
                   flushStartOffsetCheckpointMs = 10000L,
                   retentionCheckMs = 1000L,
                   maxPidExpirationMs = 60 * 60 * 1000,
                   scheduler = time.scheduler,
                   time = time,
                   brokerState = BrokerState(),
                   brokerTopicStats = new BrokerTopicStats,
                   logDirFailureChannel = new LogDirFailureChannel(logDirs.size))
  }
  /**
   * Send the given records synchronously through a short-lived producer and wait for all acks.
   * The producer is always closed, even if a send fails.
   */
  def produceMessages(servers: Seq[KafkaServer],
                      records: Seq[ProducerRecord[Array[Byte], Array[Byte]]],
                      acks: Int = -1): Unit = {
    val producer = createProducer(TestUtils.getBrokerListStrFromServers(servers), acks = acks)
    try {
      val futures = records.map(producer.send)
      futures.foreach(_.get)
    } finally {
      producer.close()
    }
    val topics = records.map(_.topic).distinct
    debug(s"Sent ${records.size} messages for topics ${topics.mkString(",")}")
  }
  /**
   * Produce `numMessages` messages "test-0".."test-<n-1>" to the topic (keys are the
   * serialized message index) and return the produced values.
   */
  def generateAndProduceMessages(servers: Seq[KafkaServer],
                                 topic: String,
                                 numMessages: Int,
                                 acks: Int = -1): Seq[String] = {
    val values = (0 until numMessages).map(x => s"test-$x")
    val intSerializer = new IntegerSerializer()
    val records = values.zipWithIndex.map { case (v, i) =>
      new ProducerRecord(topic, intSerializer.serialize(topic, i), v.getBytes)
    }
    produceMessages(servers, records, acks)
    values
  }
  /**
   * Synchronously produce a single message to the topic (key = topic name bytes)
   * with a short-lived producer that is always closed.
   */
  def produceMessage(servers: Seq[KafkaServer], topic: String, message: String,
                     deliveryTimeoutMs: Int = 30 * 1000, requestTimeoutMs: Int = 20 * 1000): Unit = {
    val producer = createProducer(TestUtils.getBrokerListStrFromServers(servers),
      deliveryTimeoutMs = deliveryTimeoutMs, requestTimeoutMs = requestTimeoutMs)
    try {
      producer.send(new ProducerRecord(topic, topic.getBytes, message.getBytes)).get
    } finally {
      producer.close()
    }
  }
  /**
   * Verify that a topic deletion has fully completed: ZK paths removed, partitions gone
   * from every broker's replica manager, logs deleted, cleaner checkpoints cleared,
   * and both soft-deleted and hard-deleted partition directories removed from disk.
   */
  def verifyTopicDeletion(zkClient: KafkaZkClient, topic: String, numPartitions: Int, servers: Seq[KafkaServer]): Unit = {
    val topicPartitions = (0 until numPartitions).map(new TopicPartition(topic, _))
    // wait until admin path for delete topic is deleted, signaling completion of topic deletion
    waitUntilTrue(() => !zkClient.isTopicMarkedForDeletion(topic),
      "Admin path /admin/delete_topics/%s path not deleted even after a replica is restarted".format(topic))
    waitUntilTrue(() => !zkClient.topicExists(topic),
      "Topic path /brokers/topics/%s not deleted after /admin/delete_topics/%s path is deleted".format(topic, topic))
    // ensure that the topic-partition has been deleted from all brokers' replica managers
    waitUntilTrue(() =>
      servers.forall(server => topicPartitions.forall(tp => server.replicaManager.nonOfflinePartition(tp).isEmpty)),
      "Replica manager's should have deleted all of this topic's partitions")
    // ensure that logs from all replicas are deleted if delete topic is marked successful in ZooKeeper
    assertTrue("Replica logs not deleted after delete topic is complete",
      servers.forall(server => topicPartitions.forall(tp => server.getLogManager.getLog(tp).isEmpty)))
    // ensure that topic is removed from all cleaner offsets
    waitUntilTrue(() => servers.forall(server => topicPartitions.forall { tp =>
      val checkpoints = server.getLogManager.liveLogDirs.map { logDir =>
        new OffsetCheckpointFile(new File(logDir, "cleaner-offset-checkpoint")).read()
      }
      checkpoints.forall(checkpointsPerLogDir => !checkpointsPerLogDir.contains(tp))
    }), "Cleaner offset for deleted partition should have been removed")
    waitUntilTrue(() => servers.forall(server =>
      server.config.logDirs.forall { logDir =>
        topicPartitions.forall { tp =>
          !new File(logDir, tp.topic + "-" + tp.partition).exists()
        }
      }
    ), "Failed to soft-delete the data to a delete directory")
    // NOTE(review): File.list() returns null when the directory does not exist, which would
    // NPE inside Arrays.asList — this assumes every configured log dir exists; confirm.
    waitUntilTrue(() => servers.forall(server =>
      server.config.logDirs.forall { logDir =>
        topicPartitions.forall { tp =>
          !Arrays.asList(new File(logDir).list()).asScala.exists { partitionDirectoryName =>
            partitionDirectoryName.startsWith(tp.topic + "-" + tp.partition) &&
              partitionDirectoryName.endsWith(Log.DeleteDirSuffix)
          }
        }
      }
    ), "Failed to hard-delete the delete directory")
  }
/**
* Translate the given buffer into a string
*
* @param buffer The buffer to translate
* @param encoding The encoding to use in translating bytes to characters
*/
def readString(buffer: ByteBuffer, encoding: String = Charset.defaultCharset.toString): String = {
val bytes = new Array[Byte](buffer.remaining)
buffer.get(bytes)
new String(bytes, encoding)
}
def copyOf(props: Properties): Properties = {
val copy = new Properties()
copy ++= props
copy
}
  /**
   * Build SSL properties for the given mode, creating a new trust store backed by the
   * supplied file. Throws if SSL is requested without a trust store file.
   */
  def sslConfigs(mode: Mode, clientCert: Boolean, trustStoreFile: Option[File], certAlias: String,
                 certCn: String = SslCertificateCn,
                 tlsProtocol: String = TestSslUtils.DEFAULT_TLS_PROTOCOL_FOR_TESTS): Properties = {
    val trustStore = trustStoreFile.getOrElse {
      throw new Exception("SSL enabled but no trustStoreFile provided")
    }
    val sslConfigs = new TestSslUtils.SslConfigsBuilder(mode)
      .useClientCert(clientCert)
      .createNewTrustStore(trustStore)
      .certAlias(certAlias)
      .cn(certCn)
      .tlsProtocol(tlsProtocol)
      .build()
    val sslProps = new Properties()
    sslConfigs.forEach { (k, v) => sslProps.put(k, v) }
    sslProps
  }
// a X509TrustManager to trust self-signed certs for unit tests.
def trustAllCerts: X509TrustManager = {
val trustManager = new X509TrustManager() {
override def getAcceptedIssuers: Array[X509Certificate] = {
null
}
override def checkClientTrusted(certs: Array[X509Certificate], authType: String): Unit = {
}
override def checkServerTrusted(certs: Array[X509Certificate], authType: String): Unit = {
}
}
trustManager
}
/**
* Waits until the authorizer reports exactly the `expected` ACL entries for
* `resource` (restricted by `accessControlEntryFilter`), failing with a
* message that shows both the expected and the actual entries.
*/
def waitAndVerifyAcls(expected: Set[AccessControlEntry],
authorizer: JAuthorizer,
resource: ResourcePattern,
accessControlEntryFilter: AccessControlEntryFilter = AccessControlEntryFilter.ANY): Unit = {
val newLine = scala.util.Properties.lineSeparator
val filter = new AclBindingFilter(resource.toFilter, accessControlEntryFilter)
waitUntilTrue(() => authorizer.acls(filter).asScala.map(_.entry).toSet == expected,
s"expected acls:${expected.mkString(newLine + "\\t", newLine + "\\t", newLine)}" +
s"but got:${authorizer.acls(filter).asScala.map(_.entry).mkString(newLine + "\\t", newLine + "\\t", newLine)}")
}
/**
* Legacy-authorizer variant of the method above: waits until
* `authorizer.getAcls(resource)` equals the expected set.
*/
@deprecated("Use org.apache.kafka.server.authorizer.Authorizer", "Since 2.5")
def waitAndVerifyAcls(expected: Set[Acl], authorizer: LegacyAuthorizer, resource: Resource): Unit = {
val newLine = scala.util.Properties.lineSeparator
waitUntilTrue(() => authorizer.getAcls(resource) == expected,
s"expected acls:${expected.mkString(newLine + "\\t", newLine + "\\t", newLine)}" +
s"but got:${authorizer.getAcls(resource).mkString(newLine + "\\t", newLine + "\\t", newLine)}")
}
/**
 * Verifies that this ACL is the secure one: world-readable only when the
 * path is not sensitive, and full permissions restricted to the sasl scheme.
 */
def isAclSecure(acl: ACL, sensitive: Boolean): Boolean = {
  debug(s"ACL $acl")
  val scheme = acl.getId.getScheme
  acl.getPerms match {
    case Perms.READ => scheme == "world" && !sensitive
    case Perms.ALL  => scheme == "sasl"
    case _          => false
  }
}
/**
 * Verifies that the ACL corresponds to the unsecure one that
 * grants ALL permissions to everyone (the "world" scheme).
 */
def isAclUnsecure(acl: ACL): Boolean = {
  debug(s"ACL $acl")
  acl.getPerms == Perms.ALL && acl.getId.getScheme == "world"
}
// Returns every existing ZooKeeper path under the secure and sensitive root
// paths, walking each subtree recursively (depth-first).
private def secureZkPaths(zkClient: KafkaZkClient): Seq[String] = {
// Recursively collects `path` and all of its descendants, if `path` exists.
def subPaths(path: String): Seq[String] = {
if (zkClient.pathExists(path))
path +: zkClient.getChildren(path).map(c => path + "/" + c).flatMap(subPaths)
else
Seq.empty
}
val topLevelPaths = ZkData.SecureRootPaths ++ ZkData.SensitiveRootPaths
topLevelPaths.flatMap(subPaths)
}
/**
 * Verifies that all secure paths in ZK are created with the expected ACL:
 * `usersWithAccess` principals with ALL permissions, plus world-read for
 * non-sensitive paths.
 */
def verifySecureZkAcls(zkClient: KafkaZkClient, usersWithAccess: Int): Unit = {
  secureZkPaths(zkClient).foreach(path => {
    if (zkClient.pathExists(path)) {
      val sensitive = ZkData.sensitivePath(path)
      // usersWithAccess have ALL access to path. For paths that are
      // not sensitive, world has READ access.
      val aclCount = if (sensitive) usersWithAccess else usersWithAccess + 1
      val acls = zkClient.getAcl(path)
      assertEquals(s"Invalid ACLs for $path $acls", aclCount, acls.size)
      // Bug fix: the result of isAclSecure was previously discarded by
      // foreach, so insecure ACLs went undetected. Assert it explicitly.
      acls.foreach(acl => assertTrue(s"Invalid ACL $acl for $path", isAclSecure(acl, sensitive)))
    }
  })
}
/**
 * Verifies that secure paths in ZK have no access control. This is
 * the case when zookeeper.set.acl=false and no ACLs have been configured.
 */
def verifyUnsecureZkAcls(zkClient: KafkaZkClient): Unit = {
  secureZkPaths(zkClient).foreach(path => {
    if (zkClient.pathExists(path)) {
      val acls = zkClient.getAcl(path)
      assertEquals(s"Invalid ACLs for $path $acls", 1, acls.size)
      // Bug fix: the result of isAclUnsecure was previously discarded by
      // foreach, so a secured ACL would not fail the check. Assert it.
      acls.foreach(acl => assertTrue(s"Invalid ACL $acl for $path", isAclUnsecure(acl)))
    }
  })
}
/**
* To use this you pass in a sequence of functions that are your arrange/act/assert test on the SUT.
* They all run at the same time in the assertConcurrent method; the chances of triggering a multithreading code error,
* and thereby failing some assertion are greatly increased.
*
* @param message prefix used in all failure messages
* @param functions the concurrent actions; one thread is created per function
* @param timeoutMs maximum time all functions together are allowed to run
*/
def assertConcurrent(message: String, functions: Seq[() => Any], timeoutMs: Int): Unit = {
def failWithTimeout(): Unit = {
fail(s"$message. Timed out, the concurrent functions took more than $timeoutMs milliseconds")
}
// One thread per function so they genuinely run concurrently.
val numThreads = functions.size
val threadPool = Executors.newFixedThreadPool(numThreads)
val exceptions = ArrayBuffer[Throwable]()
try {
val runnables = functions.map { function =>
new Callable[Unit] {
override def call(): Unit = function()
}
}.asJava
// invokeAll blocks until all tasks complete or the timeout expires;
// tasks that did not finish in time come back cancelled.
val futures = threadPool.invokeAll(runnables, timeoutMs, TimeUnit.MILLISECONDS).asScala
futures.foreach { future =>
if (future.isCancelled)
failWithTimeout()
else
try future.get()
catch { case e: Exception =>
// Collect instead of rethrowing so every function's failure is reported.
exceptions += e
}
}
} catch {
case _: InterruptedException => failWithTimeout()
case e: Throwable => exceptions += e
} finally {
threadPool.shutdownNow()
}
assertTrue(s"$message failed with exception(s) $exceptions", exceptions.isEmpty)
}
/**
* Creates a throw-away consumer subscribed to `topic`, consumes exactly
* `numMessages` records (failing on timeout), and closes the consumer.
*/
def consumeTopicRecords[K, V](servers: Seq[KafkaServer],
topic: String,
numMessages: Int,
groupId: String = "group",
securityProtocol: SecurityProtocol = SecurityProtocol.PLAINTEXT,
trustStoreFile: Option[File] = None,
waitTime: Long = JTestUtils.DEFAULT_MAX_WAIT_MS): Seq[ConsumerRecord[Array[Byte], Array[Byte]]] = {
val consumer = createConsumer(TestUtils.getBrokerListStrFromServers(servers, securityProtocol),
groupId = groupId,
securityProtocol = securityProtocol,
trustStoreFile = trustStoreFile)
try {
consumer.subscribe(Collections.singleton(topic))
consumeRecords(consumer, numMessages, waitTime)
} finally consumer.close()
}
/**
* Polls `consumer` until at least `numRecords` records have been received,
* failing if the wait time elapses first. May return more than `numRecords`
* if a final poll delivers extras.
*/
def pollUntilAtLeastNumRecords[K, V](consumer: Consumer[K, V],
numRecords: Int,
waitTimeMs: Long = JTestUtils.DEFAULT_MAX_WAIT_MS): Seq[ConsumerRecord[K, V]] = {
val records = new ArrayBuffer[ConsumerRecord[K, V]]()
// Accumulates each poll's batch; returns true once enough records arrived.
def pollAction(polledRecords: ConsumerRecords[K, V]): Boolean = {
records ++= polledRecords.asScala
records.size >= numRecords
}
pollRecordsUntilTrue(consumer, pollAction,
waitTimeMs = waitTimeMs,
msg = s"Consumed ${records.size} records before timeout instead of the expected $numRecords records")
records
}
/**
 * Consumes exactly `numRecords` records, failing both when fewer arrive
 * before the timeout and when more than expected are delivered.
 */
def consumeRecords[K, V](consumer: Consumer[K, V],
                         numRecords: Int,
                         waitTimeMs: Long = JTestUtils.DEFAULT_MAX_WAIT_MS): Seq[ConsumerRecord[K, V]] = {
  val consumed = pollUntilAtLeastNumRecords(consumer, numRecords, waitTimeMs)
  assertEquals("Consumed more records than expected", numRecords, consumed.size)
  consumed
}
/**
* Will consume all the records for the given consumer for the specified duration. If you want to drain all the
* remaining messages in the partitions the consumer is subscribed to, the duration should be set high enough so
* that the consumer has enough time to poll everything. This would be based on the number of expected messages left
* in the topic, and should not be too large (ie. more than a second) in our tests.
*
* NOTE: waitUntilTrue's own default timeout must exceed `duration`, otherwise
* this call fails with the message below before the duration elapses.
*
* @return All the records consumed by the consumer within the specified duration.
*/
def consumeRecordsFor[K, V](consumer: KafkaConsumer[K, V], duration: Long = JTestUtils.DEFAULT_MAX_WAIT_MS): Seq[ConsumerRecord[K, V]] = {
val startTime = System.currentTimeMillis()
val records = new ArrayBuffer[ConsumerRecord[K, V]]()
// The "condition" deliberately keeps polling until `duration` has elapsed.
waitUntilTrue(() => {
records ++= consumer.poll(Duration.ofMillis(50)).asScala
System.currentTimeMillis() - startTime > duration
}, s"The timeout $duration was greater than the maximum wait time.")
records
}
/**
* Creates a transactional byte-array producer connected to `servers`.
* Idempotence is always enabled (required for transactions) and acks=all.
* The caller is responsible for closing the returned producer.
*/
def createTransactionalProducer(transactionalId: String,
servers: Seq[KafkaServer],
batchSize: Int = 16384,
transactionTimeoutMs: Long = 60000,
maxBlockMs: Long = 60000,
deliveryTimeoutMs: Int = 120000,
requestTimeoutMs: Int = 30000): KafkaProducer[Array[Byte], Array[Byte]] = {
val props = new Properties()
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, TestUtils.getBrokerListStrFromServers(servers))
props.put(ProducerConfig.ACKS_CONFIG, "all")
props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize.toString)
props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, transactionalId)
props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true")
props.put(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, transactionTimeoutMs.toString)
props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, maxBlockMs.toString)
props.put(ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG, deliveryTimeoutMs.toString)
props.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, requestTimeoutMs.toString)
new KafkaProducer[Array[Byte], Array[Byte]](props, new ByteArraySerializer, new ByteArraySerializer)
}
// Seeds the given topic with records with keys and values in the range [0..numRecords).
// Key and value of record i are both the string form of i, UTF-8 encoded.
// Uses an idempotent producer so retries cannot introduce duplicates.
def seedTopicWithNumberedRecords(topic: String, numRecords: Int, servers: Seq[KafkaServer]): Unit = {
val props = new Properties()
props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true")
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, TestUtils.getBrokerListStrFromServers(servers))
val producer = new KafkaProducer[Array[Byte], Array[Byte]](props, new ByteArraySerializer, new ByteArraySerializer)
try {
for (i <- 0 until numRecords) {
producer.send(new ProducerRecord[Array[Byte], Array[Byte]](topic, asBytes(i.toString), asBytes(i.toString)))
}
// flush() blocks until all in-flight sends have completed.
producer.flush()
} finally {
producer.close()
}
}
/** Decodes a UTF-8 byte array into a string. */
private def asString(bytes: Array[Byte]): String = new String(bytes, StandardCharsets.UTF_8)

/** Encodes a string as a UTF-8 byte array. */
private def asBytes(string: String): Array[Byte] = string.getBytes(StandardCharsets.UTF_8)
// Verifies that the record was intended to be committed by checking the headers for an expected transaction status
// If true, this will return the value as a string. It is expected that the record in question should have been created
// by the `producerRecordWithExpectedTransactionStatus` method.
def assertCommittedAndGetValue(record: ConsumerRecord[Array[Byte], Array[Byte]]) : String = {
// Only the first header with the transaction-status key is inspected.
record.headers.headers(transactionStatusKey).asScala.headOption match {
case Some(header) =>
assertEquals(s"Got ${asString(header.value)} but expected the value to indicate " +
s"committed status.", asString(committedValue), asString(header.value))
case None =>
fail("expected the record header to include an expected transaction status, but received nothing.")
}
recordValueAsString(record)
}
// Returns the record's value decoded as a UTF-8 string.
def recordValueAsString(record: ConsumerRecord[Array[Byte], Array[Byte]]) : String = {
asString(record.value)
}
/**
 * Builds a producer record carrying a header that records whether the
 * surrounding transaction is expected to commit or abort, so consumers can
 * later verify the outcome via assertCommittedAndGetValue.
 */
def producerRecordWithExpectedTransactionStatus(topic: String, partition: Integer, key: Array[Byte], value: Array[Byte], willBeCommitted: Boolean): ProducerRecord[Array[Byte], Array[Byte]] = {
  val statusHeader = new Header {
    override def key(): String = transactionStatusKey
    override def value(): Array[Byte] =
      if (willBeCommitted) committedValue else abortedValue
  }
  new ProducerRecord[Array[Byte], Array[Byte]](topic, partition, key, value, Collections.singleton(statusHeader))
}
// Convenience overload taking string key/value; encodes them as UTF-8 bytes.
def producerRecordWithExpectedTransactionStatus(topic: String, partition: Integer, key: String, value: String, willBeCommitted: Boolean): ProducerRecord[Array[Byte], Array[Byte]] = {
producerRecordWithExpectedTransactionStatus(topic, partition, asBytes(key), asBytes(value), willBeCommitted)
}
// Collect the current position for every partition in the consumer's current assignment.
def consumerPositions(consumer: KafkaConsumer[Array[Byte], Array[Byte]]) : Map[TopicPartition, OffsetAndMetadata] = {
  val positions = Map.newBuilder[TopicPartition, OffsetAndMetadata]
  consumer.assignment.forEach { topicPartition =>
    positions += topicPartition -> new OffsetAndMetadata(consumer.position(topicPartition))
  }
  positions.result()
}
// Seeks every assigned partition back to its last committed offset, or to
// the beginning of the partition when no offset has been committed yet.
def resetToCommittedPositions(consumer: KafkaConsumer[Array[Byte], Array[Byte]]): Unit = {
// committed() may return null entries for partitions with no committed offset.
val committed = consumer.committed(consumer.assignment).asScala.filter(_._2 != null).map { case (k, v) => k -> v.offset }
consumer.assignment.forEach { topicPartition =>
if (committed.contains(topicPartition))
consumer.seek(topicPartition, committed(topicPartition))
else
consumer.seekToBeginning(Collections.singletonList(topicPartition))
}
}
/**
* Applies `props` as incremental broker-config alterations, either to each
* broker individually (perBrokerConfig = true) or as the cluster-wide
* default (empty broker resource name). Returns the in-flight result;
* callers must await it.
*/
def incrementalAlterConfigs(servers: Seq[KafkaServer], adminClient: Admin, props: Properties,
perBrokerConfig: Boolean, opType: OpType = OpType.SET): AlterConfigsResult = {
val configEntries = props.asScala.map { case (k, v) => new AlterConfigOp(new ConfigEntry(k, v), opType) }.toList.asJavaCollection
val configs = if (perBrokerConfig) {
servers.map { server =>
val resource = new ConfigResource(ConfigResource.Type.BROKER, server.config.brokerId.toString)
(resource, configEntries)
}.toMap.asJava
} else {
// Empty broker name addresses the cluster-wide default config resource.
Map(new ConfigResource(ConfigResource.Type.BROKER, "") -> configEntries).asJava
}
adminClient.incrementalAlterConfigs(configs)
}
// Waits until `expectedLeader` is the leader of `topicPartition`, failing on timeout.
def assertLeader(client: Admin, topicPartition: TopicPartition, expectedLeader: Int): Unit = {
waitForLeaderToBecome(client, topicPartition, Some(expectedLeader))
}
// Waits until `topicPartition` has no leader at all, failing on timeout.
def assertNoLeader(client: Admin, topicPartition: TopicPartition): Unit = {
waitForLeaderToBecome(client, topicPartition, None)
}
/**
* Waits until the partition's leader (as reported by describeTopics) matches
* `leader` — `Some(id)` for a specific broker, `None` for leaderless.
*/
def waitForLeaderToBecome(client: Admin, topicPartition: TopicPartition, leader: Option[Int]): Unit = {
val topic = topicPartition.topic
val partition = topicPartition.partition
TestUtils.waitUntilTrue(() => {
try {
val topicResult = client.describeTopics(Arrays.asList(topic)).all.get.get(topic)
val partitionResult = topicResult.partitions.get(partition)
Option(partitionResult.leader).map(_.id) == leader
} catch {
// The topic may not exist yet; treat that as "condition not met" and retry.
case e: ExecutionException if e.getCause.isInstanceOf[UnknownTopicOrPartitionException] => false
}
}, "Timed out waiting for leader metadata")
}
/**
 * Waits until none of `brokerIds` appear in the ISR of any of the given
 * partitions' topics, failing on timeout.
 */
def waitForBrokersOutOfIsr(client: Admin, partition: Set[TopicPartition], brokerIds: Set[Int]): Unit = {
  TestUtils.waitUntilTrue(
    () => {
      val description = client.describeTopics(partition.map(_.topic).asJava).all.get.asScala
      val isr = description
        .values
        .flatMap(_.partitions.asScala.flatMap(_.isr.asScala))
        .map(_.id)
        .toSet
      // Condition holds once no watched broker remains in any ISR.
      brokerIds.intersect(isr).isEmpty
    },
    // Fixed diagnostic message: previously read "to no longer in the ISR".
    s"Expected brokers $brokerIds to no longer be in the ISR for $partition"
  )
}
/**
* Waits until every broker in `brokerIds` appears in the ISR reported for
* the partition's topic, failing on timeout.
*/
def waitForBrokersInIsr(client: Admin, partition: TopicPartition, brokerIds: Set[Int]): Unit = {
TestUtils.waitUntilTrue(
() => {
val description = client.describeTopics(Set(partition.topic).asJava).all.get.asScala
val isr = description
.values
.flatMap(_.partitions.asScala.flatMap(_.isr.asScala))
.map(_.id)
.toSet
brokerIds.subsetOf(isr)
},
s"Expected brokers $brokerIds to be in the ISR for $partition"
)
}
/**
* Waits until the replica assignment for the partition's topic equals
* `brokerIds`. NOTE(review): the comparison is order-sensitive (Seq ==),
* so callers must pass replicas in assignment order — confirm intended.
*/
def waitForReplicasAssigned(client: Admin, partition: TopicPartition, brokerIds: Seq[Int]): Unit = {
TestUtils.waitUntilTrue(
() => {
val description = client.describeTopics(Set(partition.topic).asJava).all.get.asScala
val replicas = description
.values
.flatMap(_.partitions.asScala.flatMap(_.replicas.asScala))
.map(_.id)
.toSeq
brokerIds == replicas
},
s"Expected brokers $brokerIds to be the replicas for $partition"
)
}
/**
 * Capture the console output (scala.Console.out) produced while running `f`,
 * returning it as a string.
 */
def grabConsoleOutput(f: => Unit) : String = {
  val sink = new ByteArrayOutputStream
  try {
    scala.Console.withOut(sink)(f)
  } finally {
    scala.Console.out.flush()
  }
  sink.toString
}
/**
 * Capture the console error output (scala.Console.err) produced while
 * running `f`, returning it as a string.
 */
def grabConsoleError(f: => Unit) : String = {
  val sink = new ByteArrayOutputStream
  try {
    scala.Console.withErr(sink)(f)
  } finally {
    scala.Console.err.flush()
  }
  sink.toString
}
/**
 * Capture both the console output and console error produced while running
 * `f`, returned as an (out, err) pair of strings.
 */
def grabConsoleOutputAndError(f: => Unit) : (String, String) = {
  val outSink = new ByteArrayOutputStream
  val errSink = new ByteArrayOutputStream
  try {
    scala.Console.withOut(outSink) {
      scala.Console.withErr(errSink)(f)
    }
  } finally {
    scala.Console.out.flush()
    scala.Console.err.flush()
  }
  (outSink.toString, errSink.toString)
}
/**
* Asserts that awaiting `future` fails with a cause of type `clazz`, and —
* when given — that the cause's message contains `expectedErrorMessage`.
* Fails if the future completes successfully.
*/
def assertFutureExceptionTypeEquals(future: KafkaFuture[_], clazz: Class[_ <: Throwable],
expectedErrorMessage: Option[String] = None): Unit = {
try {
future.get()
fail("Expected CompletableFuture.get to return an exception")
} catch {
// KafkaFuture.get wraps the real failure in an ExecutionException.
case e: ExecutionException =>
val cause = e.getCause
assertTrue("Expected an exception of type " + clazz.getName + "; got type " +
cause.getClass.getName, clazz.isInstance(cause))
expectedErrorMessage.foreach(message => assertTrue(s"Received error message : ${cause.getMessage}" +
s" does not contain expected error message : $message", cause.getMessage.contains(message)))
}
}
// Sums the values of every Kafka metric on `server` named `metricName`
// (across all tags) and returns the total truncated to a Long.
def totalMetricValue(server: KafkaServer, metricName: String): Long = {
val allMetrics = server.metrics.metrics
val total = allMetrics.values().asScala.filter(_.metricName().name() == metricName)
.foldLeft(0.0)((total, metric) => total + metric.metricValue.asInstanceOf[Double])
total.toLong
}
// Returns the count of the first Yammer Meter whose MBean name ends with
// `metricName`, failing the test if no such metric is registered.
def meterCount(metricName: String): Long = {
KafkaYammerMetrics.defaultRegistry.allMetrics.asScala
.filter { case (k, _) => k.getMBeanName.endsWith(metricName) }
.values
.headOption
.getOrElse(fail(s"Unable to find metric $metricName"))
.asInstanceOf[Meter]
.count
}
// Removes every metric from the default Yammer registry; used to isolate tests.
def clearYammerMetrics(): Unit = {
for (metricName <- KafkaYammerMetrics.defaultRegistry.allMetrics.keySet.asScala)
KafkaYammerMetrics.defaultRegistry.removeMetric(metricName)
}
// Encodes a set of partitions as the legacy JSON format:
// {"partitions": [{"topic": ..., "partition": ...}, ...]}
def stringifyTopicPartitions(partitions: Set[TopicPartition]): String = {
Json.legacyEncodeAsString(
Map(
"partitions" -> partitions.map(tp => Map("topic" -> tp.topic, "partition" -> tp.partition))
)
)
}
/**
 * Runs `func` with `resource`, guaranteeing the resource is closed afterwards
 * whether `func` returns normally or throws.
 */
def resource[R <: AutoCloseable, A](resource: R)(func: R => A): A =
  try func(resource)
  finally resource.close()
/**
* Set broker replication quotas and enable throttling for a set of partitions. This
* will override any previous replication quotas, but will leave the throttling status
* of other partitions unaffected.
*/
def setReplicationThrottleForPartitions(admin: Admin,
brokerIds: Seq[Int],
partitions: Set[TopicPartition],
throttleBytes: Int): Unit = {
// First cap the replication rate on every broker, then mark which
// partition replicas the throttle applies to.
throttleAllBrokersReplication(admin, brokerIds, throttleBytes)
assignThrottledPartitionReplicas(admin, partitions.map(_ -> brokerIds).toMap)
}
/**
* Remove a set of throttled partitions and reset the overall replication quota.
*/
def removeReplicationThrottleForPartitions(admin: Admin,
brokerIds: Seq[Int],
partitions: Set[TopicPartition]): Unit = {
// Reverse order of setReplicationThrottleForPartitions: clear the per-partition
// markers first, then lift the broker-level rate cap.
removePartitionReplicaThrottles(admin, partitions)
resetBrokersThrottle(admin, brokerIds)
}
/**
* Throttles all replication across the cluster.
* @param adminClient is the adminClient to use for making connection with the cluster
* @param brokerIds all broker ids in the cluster
* @param throttleBytes is the target throttle
*/
def throttleAllBrokersReplication(adminClient: Admin, brokerIds: Seq[Int], throttleBytes: Int): Unit = {
// Both the leader- and follower-side rates are capped to the same value.
val throttleConfigs = Seq(
new AlterConfigOp(new ConfigEntry(DynamicConfig.Broker.LeaderReplicationThrottledRateProp, throttleBytes.toString), AlterConfigOp.OpType.SET),
new AlterConfigOp(new ConfigEntry(DynamicConfig.Broker.FollowerReplicationThrottledRateProp, throttleBytes.toString), AlterConfigOp.OpType.SET)
).asJavaCollection
// Blocks until the alteration is acknowledged for every broker.
adminClient.incrementalAlterConfigs(
brokerIds.map { brokerId =>
new ConfigResource(ConfigResource.Type.BROKER, brokerId.toString) -> throttleConfigs
}.toMap.asJava
).all().get()
}
// Effectively removes the replication throttle by raising it to Int.MaxValue.
def resetBrokersThrottle(adminClient: Admin, brokerIds: Seq[Int]): Unit =
throttleAllBrokersReplication(adminClient, brokerIds, Int.MaxValue)
// Marks the given replicas (per partition) as subject to the leader/follower
// replication throttles by setting the topic-level throttled-replicas configs.
def assignThrottledPartitionReplicas(adminClient: Admin, allReplicasByPartition: Map[TopicPartition, Seq[Int]]): Unit = {
// The configs are per-topic, so group the partition->replicas map by topic.
val throttles = allReplicasByPartition.groupBy(_._1.topic()).map {
case (topic, replicasByPartition) =>
new ConfigResource(ConfigResource.Type.TOPIC, topic) -> Seq(
new AlterConfigOp(new ConfigEntry(LogConfig.LeaderReplicationThrottledReplicasProp, formatReplicaThrottles(replicasByPartition)), AlterConfigOp.OpType.SET),
new AlterConfigOp(new ConfigEntry(LogConfig.FollowerReplicationThrottledReplicasProp, formatReplicaThrottles(replicasByPartition)), AlterConfigOp.OpType.SET)
).asJavaCollection
}
adminClient.incrementalAlterConfigs(throttles.asJava).all().get()
}
// Deletes the topic-level throttled-replicas configs for the given partitions'
// topics, so their replicas are no longer subject to replication throttling.
def removePartitionReplicaThrottles(adminClient: Admin, partitions: Set[TopicPartition]): Unit = {
val throttles = partitions.map {
tp =>
new ConfigResource(ConfigResource.Type.TOPIC, tp.topic()) -> Seq(
new AlterConfigOp(new ConfigEntry(LogConfig.LeaderReplicationThrottledReplicasProp, ""), AlterConfigOp.OpType.DELETE),
new AlterConfigOp(new ConfigEntry(LogConfig.FollowerReplicationThrottledReplicasProp, ""), AlterConfigOp.OpType.DELETE)
).asJavaCollection
}.toMap
adminClient.incrementalAlterConfigs(throttles.asJava).all().get()
}
/**
 * Renders a partition->replicas map in the "partition:replicaId" comma-joined
 * format expected by the throttled-replicas topic configs.
 */
def formatReplicaThrottles(moves: Map[TopicPartition, Seq[Int]]): String = {
  val entries = for {
    (tp, replicas) <- moves
    replicaId <- replicas
  } yield s"${tp.partition}:$replicaId"
  entries.mkString(",")
}
// Waits until the broker reports no in-progress partition reassignments,
// polling every `pause` milliseconds.
def waitForAllReassignmentsToComplete(adminClient: Admin, pause: Long = 100L): Unit = {
waitUntilTrue(() => adminClient.listPartitionReassignments().reassignments().get().isEmpty,
s"There still are ongoing reassignments", pause = pause)
}
// Creates the given ACLs through the server's authorizer and waits until the
// authorizer reports the union of the pre-existing and new entries.
def addAndVerifyAcls(server: KafkaServer, acls: Set[AccessControlEntry], resource: ResourcePattern): Unit = {
val authorizer = server.dataPlaneRequestProcessor.authorizer.get
val aclBindings = acls.map { acl => new AclBinding(resource, acl) }
// Await each creation and surface any per-binding failure as an exception.
authorizer.createAcls(null, aclBindings.toList.asJava).asScala
.map(_.toCompletableFuture.get)
.foreach { result =>
result.exception.ifPresent { e => throw e }
}
val aclFilter = new AclBindingFilter(resource.toFilter, AccessControlEntryFilter.ANY)
waitAndVerifyAcls(
authorizer.acls(aclFilter).asScala.map(_.entry).toSet ++ acls,
authorizer, resource)
}
}
| sslavic/kafka | core/src/test/scala/unit/kafka/utils/TestUtils.scala | Scala | apache-2.0 | 75,975 |
package com.sksamuel.avro4s
import java.io.InputStream
import org.apache.avro.{AvroRuntimeException, Schema}
import org.apache.avro.file.DataFileStream
import org.apache.avro.generic.GenericData
import org.apache.avro.io.DatumReader
import scala.util.{Failure, Try}
/**
* An implementation of [[AvroInputStream]] that reads values of type T
* written as Avro.
*
* Avro data files contain the schema as part of the message payload. Therefore, no schema
* is necessarily required to read the data back and the decoder will use the schema
* present in the payload. However, for efficiency, if the schema is provided, then a
* decoder can be pre-built and used on each contained object.
*
* A [[Decoder]] must be provided (usually implicitly) that will marshall
* avro records into instances of type T.
*
* @param in the input stream to read from
* @param writerSchema the schema that was used to write the data. Optional, but if provided will
* allow the decoder to be re-used for every contained object.
* @param decoder a mapping from the base avro type to an instance of T
*/
class AvroDataInputStream[T](in: InputStream,
writerSchema: Option[Schema])
(using decoder: Decoder[T]) extends AvroInputStream[T] {
// if no writer schema is specified, then we create a reader that uses what's present in the files
private val datumReader: DatumReader[Any] = writerSchema match {
case Some(schema) => GenericData.get.createDatumReader(schema).asInstanceOf[DatumReader[Any]]
case _ => GenericData.get.createDatumReader(null).asInstanceOf[DatumReader[Any]]
}
private val dataFileReader = new DataFileStream[Any](in, datumReader)
// Pre-built decode function when the writer schema is known up front.
private val decodeT = writerSchema.map(schema => decoder.decode(schema))
// Falls back to building a decoder from the file's embedded schema per call.
private def decode(record: Any, schema: Schema) = decodeT.getOrElse(decoder.decode(schema)).apply(record)
override def iterator: Iterator[T] = new Iterator[T] {
override def hasNext: Boolean = dataFileReader.hasNext
override def next(): T = {
val record = dataFileReader.next
decode(record, dataFileReader.getSchema)
}
}
// Like `iterator`, but wraps each decode in Try; on an AvroRuntimeException
// the reader skips to the next data block so iteration can continue.
override def tryIterator: Iterator[Try[T]] = new Iterator[Try[T]] {
override def hasNext: Boolean = dataFileReader.hasNext
override def next(): Try[T] =
Try(decode(dataFileReader.next, dataFileReader.getSchema)).recoverWith {
case t: AvroRuntimeException =>
dataFileReader.nextBlock() // in case of exception, skip to next block
Failure(t)
}
}
// Closes the underlying input stream only; dataFileReader is left to GC.
override def close(): Unit = in.close()
}
| sksamuel/avro4s | avro4s-core/src/main/scala/com/sksamuel/avro4s/AvroDataInputStream.scala | Scala | apache-2.0 | 2,636 |
package org.jetbrains.plugins.scala
package lang.psi.controlFlow.impl
import com.intellij.psi.PsiNamedElement
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.ScClassParameter
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScMember
import org.jetbrains.plugins.scala.lang.psi.controlFlow.ScControlFlowPolicy
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.synthetic.SyntheticNamedElement
/**
* Nikolay.Tropin
* 2014-04-14
*/
object ExtractMethodControlFlowPolicy extends ScControlFlowPolicy {
  /**
   * Decides whether a named element participates in control-flow analysis.
   *
   * Synthetic elements and class parameters are rejected; members are
   * accepted only when local; everything else is accepted.
   */
  override def isElementAccepted(named: PsiNamedElement): Boolean = named match {
    // Restyled from an early `return` + isInstanceOf check into a single match.
    case _: SyntheticNamedElement => false
    case _ =>
      ScalaPsiUtil.nameContext(named) match {
        case _: ScClassParameter => false // `cp` binding was unused; use wildcard
        case member: ScMember => member.isLocal
        case _ => true
      }
  }
}
| triggerNZ/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/controlFlow/impl/ExtractMethodControlFlowPolicy.scala | Scala | apache-2.0 | 899 |
package com.fuscus.seien.domain.entity
import com.fuscus.seien.infra.core._
import com.fuscus.seien.infra.vo.URI
import org.joda.time.DateTime
/**
* Created by watawuwu on 15/07/02.
*/
// @todo is better to use a type alias
// Strongly-typed identifier for Issue; wraps a freshly generated UUID by default.
case class IssueID(value: UUID = UUID.gen) extends UniversallyUniqueIdentifier
// Domain entity for a tracked issue.
// lockVersion: presumably used for optimistic locking — TODO confirm against the repository layer.
// isDeleted: soft-delete flag; deleted rows are kept with this set to true.
case class Issue(
id: IssueID = IssueID(),
title: String,
uri: URI,
healthCheckUri: Option[URI] = None,
description: Option[String] = None,
createdAt: DateTime = DateTime.now(),
updatedAt: DateTime = DateTime.now(),
lockVersion: Int = 0,
isDeleted: Boolean = false) extends Entity[IssueID] {
}
// @todo workaround
// https://issues.scala-lang.org/browse/SI-3664
// Explicitly extending the full-arity function type keeps the auto-generated
// companion usable as a function value despite the overloaded apply below.
object Issue extends ((IssueID, String, URI, Option[URI], Option[String], DateTime, DateTime, Int, Boolean) => Issue) {
// Convenience constructor: wraps health-check URI and description in Some,
// leaving id/timestamps/lockVersion/isDeleted at their defaults.
def apply(
title: String,
uri: URI,
healthCheckURI: URI,
description: String): Issue = {
Issue(title = title, uri = uri, healthCheckUri = Some(healthCheckURI), description = Some(description))
}
}
| watawuwu/seien-backend | modules/domain/app/com/fuscus/seien/domain/entity/Issue.scala | Scala | mit | 1,050 |
package net.walend.disentangle.examples
import net.walend.disentangle.graph.semiring.Brandes.BrandesSteps
import net.walend.disentangle.graph.semiring.LabelUndigraphSemiringAlgorithms
import net.walend.disentangle.graph.{AdjacencyLabelUndigraph, NodePair}
/**
* Use Brandes' algorithm to find least paths and betweenness for an
* undirected graph (built below as an AdjacencyLabelUndigraph).
*
* @author dwalend
* @since v0.2.1
*/
object BrandesImplicitsExample {
/**
* Edges are just a Seq of Tuple2[NodePair[Node], Edge]
*/
lazy val edges: Seq[(NodePair[String], String)] = Seq(
(NodePair("A","B"),"ab"),
(NodePair("B","C"),"bc"),
(NodePair("C","D"),"cd"),
(NodePair("D","E"),"de"),
(NodePair("E","F"),"ef"),
(NodePair("E","B"),"eb"),
(NodePair("E","H"),"eh"),
(NodePair("H","C"),"hc")
)
/**
* The labels from Brandes use node indexes from a directed graph, so it's best to control those via the optional nodeOrder parameter
*/
lazy val nodeOrder = Array("A","B","C","D","E","F","H")
val graph = AdjacencyLabelUndigraph(edges,nodeOrder)
// Runs Brandes' algorithm once; the pair holds (paths, betweenness).
lazy val brandesResults = graph.allLeastPathsAndBetweenness()
// Per (from, to) node pair: the first steps of every least path, if reachable.
lazy val nextStepsAndCosts: IndexedSeq[(String, String, Option[BrandesSteps[String, Int]])] = brandesResults._1
// Betweenness centrality value for each node.
lazy val betweennessValues: Map[String, Double] = brandesResults._2
}
| dwalend/Disentangle | examples/src/main/scala/net/walend/disentangle/examples/BradnesImplicitsExample.scala | Scala | mit | 1,314 |
package spark.executor
import java.io.{File, FileOutputStream}
import java.net.{URI, URL, URLClassLoader}
import java.util.concurrent._
import org.apache.hadoop.fs.FileUtil
import scala.collection.mutable.{ArrayBuffer, Map, HashMap}
import spark.broadcast._
import spark.scheduler._
import spark._
import java.nio.ByteBuffer
/**
* The Mesos executor for Spark. Runs tasks handed to it by an
* [[ExecutorBackend]], fetching task dependencies and reporting status
* updates back through the backend.
*/
private[spark] class Executor(executorId: String, slaveHostname: String, properties: Seq[(String, String)]) extends Logging {
// Application dependencies (added through SparkContext) that we've fetched so far on this node.
// Each map holds the master's timestamp for the version of that file or JAR we got.
private val currentFiles: HashMap[String, Long] = new HashMap[String, Long]()
private val currentJars: HashMap[String, Long] = new HashMap[String, Long]()
// Shared zero-length buffer used for RUNNING status updates that carry no data.
private val EMPTY_BYTE_BUFFER = ByteBuffer.wrap(new Array[Byte](0))
initLogging()
// Make sure the local hostname we report matches the cluster scheduler's name for this host
Utils.setCustomHostname(slaveHostname)
// Set spark.* system properties from executor arg
for ((key, value) <- properties) {
System.setProperty(key, value)
}
// Create our ClassLoader and set it on this thread
private val urlClassLoader = createClassLoader()
Thread.currentThread.setContextClassLoader(urlClassLoader)
// Make any thread terminations due to uncaught exceptions kill the entire
// executor process to avoid surprising stalls.
Thread.setDefaultUncaughtExceptionHandler(
new Thread.UncaughtExceptionHandler {
override def uncaughtException(thread: Thread, exception: Throwable) {
try {
logError("Uncaught exception in thread " + thread, exception)
// We may have been called from a shutdown hook. If so, we must not call System.exit().
// (If we do, we will deadlock.)
if (!Utils.inShutdown()) {
if (exception.isInstanceOf[OutOfMemoryError]) {
System.exit(ExecutorExitCode.OOM)
} else {
System.exit(ExecutorExitCode.UNCAUGHT_EXCEPTION)
}
}
} catch {
// If even the handler fails, halt immediately without running shutdown hooks.
case oom: OutOfMemoryError => Runtime.getRuntime.halt(ExecutorExitCode.OOM)
case t: Throwable => Runtime.getRuntime.halt(ExecutorExitCode.UNCAUGHT_EXCEPTION_TWICE)
}
}
}
)
// Initialize Spark environment (using system properties read above)
val env = SparkEnv.createFromSystemProperties(executorId, slaveHostname, 0, false, false)
SparkEnv.set(env)
// Start worker thread pool. SynchronousQueue means each submitted task gets
// its own thread (up to 128); idle threads die after 600 seconds.
val threadPool = new ThreadPoolExecutor(
1, 128, 600, TimeUnit.SECONDS, new SynchronousQueue[Runnable])
// Schedules a serialized task for execution on the worker pool.
def launchTask(context: ExecutorBackend, taskId: Long, serializedTask: ByteBuffer) {
threadPool.execute(new TaskRunner(context, taskId, serializedTask))
}
// Deserializes and runs one task, reporting RUNNING/FINISHED/FAILED states
// back to the backend.
class TaskRunner(context: ExecutorBackend, taskId: Long, serializedTask: ByteBuffer)
extends Runnable {
override def run() {
val startTime = System.currentTimeMillis()
SparkEnv.set(env)
Thread.currentThread.setContextClassLoader(urlClassLoader)
val ser = SparkEnv.get.closureSerializer.newInstance()
logInfo("Running task ID " + taskId)
context.statusUpdate(taskId, TaskState.RUNNING, EMPTY_BYTE_BUFFER)
try {
SparkEnv.set(env)
Accumulators.clear()
// Fetch any files/jars this task needs before deserializing its body.
val (taskFiles, taskJars, taskBytes) = Task.deserializeWithDependencies(serializedTask)
updateDependencies(taskFiles, taskJars)
val task = ser.deserialize[Task[Any]](taskBytes, Thread.currentThread.getContextClassLoader)
logInfo("Its generation is " + task.generation)
// Keep the map-output tracker's epoch in sync with the driver's.
env.mapOutputTracker.updateGeneration(task.generation)
val taskStart = System.currentTimeMillis()
val value = task.run(taskId.toInt)
val taskFinish = System.currentTimeMillis()
task.metrics.foreach{ m =>
m.executorDeserializeTime = (taskStart - startTime).toInt
m.executorRunTime = (taskFinish - taskStart).toInt
}
//TODO I'd also like to track the time it takes to serialize the task results, but that is huge headache, b/c
// we need to serialize the task metrics first. If TaskMetrics had a custom serialized format, we could
// just change the relevants bytes in the byte buffer
val accumUpdates = Accumulators.values
val result = new TaskResult(value, accumUpdates, task.metrics.getOrElse(null))
val serializedResult = ser.serialize(result)
logInfo("Serialized size of result for " + taskId + " is " + serializedResult.limit)
context.statusUpdate(taskId, TaskState.FINISHED, serializedResult)
logInfo("Finished task ID " + taskId)
} catch {
// Shuffle-fetch failures carry a structured reason for the scheduler.
case ffe: FetchFailedException => {
val reason = ffe.toTaskEndReason
context.statusUpdate(taskId, TaskState.FAILED, ser.serialize(reason))
}
case t: Throwable => {
val reason = ExceptionFailure(t)
context.statusUpdate(taskId, TaskState.FAILED, ser.serialize(reason))
// TODO: Should we exit the whole executor here? On the one hand, the failed task may
// have left some weird state around depending on when the exception was thrown, but on
// the other hand, maybe we could detect that when future tasks fail and exit then.
logError("Exception in task ID " + taskId, t)
//System.exit(1)
}
}
}
}
/**
* Create a ClassLoader for use in tasks, adding any JARs specified by the user or any classes
* created by the interpreter to the search path
*/
private def createClassLoader(): ExecutorURLClassLoader = {
var loader = this.getClass.getClassLoader
// For each of the jars in the jarSet, add them to the class loader.
// We assume each of the files has already been fetched.
val urls = currentJars.keySet.map { uri =>
new File(uri.split("/").last).toURI.toURL
}.toArray
loader = new URLClassLoader(urls, loader)
// If the REPL is in use, add another ClassLoader that will read
// new classes defined by the REPL as the user types code
val classUri = System.getProperty("spark.repl.class.uri")
if (classUri != null) {
logInfo("Using REPL class URI: " + classUri)
loader = {
try {
// Loaded reflectively because the REPL module may not be on the classpath.
val klass = Class.forName("spark.repl.ExecutorClassLoader")
.asInstanceOf[Class[_ <: ClassLoader]]
val constructor = klass.getConstructor(classOf[String], classOf[ClassLoader])
constructor.newInstance(classUri, loader)
} catch {
case _: ClassNotFoundException => loader
}
}
}
return new ExecutorURLClassLoader(Array(), loader)
}
/**
* Download any missing dependencies if we receive a new set of files and JARs from the
* SparkContext. Also adds any new JARs we fetched to the class loader.
* Synchronized because multiple TaskRunner threads may call this concurrently.
*/
private def updateDependencies(newFiles: HashMap[String, Long], newJars: HashMap[String, Long]) {
synchronized {
// Fetch missing dependencies; a newer timestamp means the file changed upstream.
for ((name, timestamp) <- newFiles if currentFiles.getOrElse(name, -1L) < timestamp) {
logInfo("Fetching " + name + " with timestamp " + timestamp)
Utils.fetchFile(name, new File(SparkFiles.getRootDirectory))
currentFiles(name) = timestamp
}
for ((name, timestamp) <- newJars if currentJars.getOrElse(name, -1L) < timestamp) {
logInfo("Fetching " + name + " with timestamp " + timestamp)
Utils.fetchFile(name, new File(SparkFiles.getRootDirectory))
currentJars(name) = timestamp
// Add it to our class loader
val localName = name.split("/").last
val url = new File(SparkFiles.getRootDirectory, localName).toURI.toURL
if (!urlClassLoader.getURLs.contains(url)) {
logInfo("Adding " + url + " to class loader")
urlClassLoader.addURL(url)
}
}
}
}
}
| koeninger/spark | core/src/main/scala/spark/executor/Executor.scala | Scala | bsd-3-clause | 8,008 |
package io.skysail.app.instagram
import io.skysail.server.restlet.resources.EntityServerResource
import io.skysail.domain.GenericIdentifiable
class InstagramMeResource extends EntityServerResource[GenericIdentifiable] {
  /**
   * Fetches the profile of the current principal through the owning
   * application's Instagram API and wraps it in a [[GenericIdentifiable]].
   */
  def getEntity(): GenericIdentifiable = {
    val instagramApp = getApplication().asInstanceOf[InstagramApplication]
    new GenericIdentifiable(instagramApp.getInstagramApi().getMe(getPrincipal()))
  }
}
| evandor/skysail | skysail.app.instagram/src/io/skysail/app/instagram/resources/InstagramMeResource.scala | Scala | apache-2.0 | 429 |
package vonsim.webapp.tutorials
import vonsim.webapp.UIConfig
/**
 * Interactive tutorial teaching how variables work in VonSim assembly:
 * the `org` directive, `db`/`dw` declarations, memory layout, number
 * systems (hex/binary/decimal, BSS/CA2), vectors and character strings.
 * All user-facing text is Spanish HTML rendered by the tutorial UI.
 */
class VariablesTutorial extends Tutorial {
  // Title displayed for this tutorial in the UI.
  val title="Variables en assembly"
  // Assembly skeleton preloaded into the editor when the tutorial starts.
  val initialCode="""
org 2000h
; código aquí
hlt
end
"""
  // Stable identifier used to reference/select this tutorial.
  val id="variables"
  // Ordered steps; each TutorialStep carries a title, an HTML body,
  // a UI configuration and optional code to load in the editor.
  val steps=List(
    TutorialStep("Variables en VonSim"
    ,"""<p><strong>Objetivos:</strong> Comprender el concepto de variable y su uso en Assembly.</p>
<p><strong>Conocimientos previos:</strong> Uso del simulador VonSim. Estructura básica de un programa
 en assembly. Conocimientos básicos de organización y arquitectura de computadoras.</p>
    """,UIConfig.disableAll,None
    )
    ,TutorialStep("Introducción"
    ,"""<p>Sabiendo ya como escribir y ejecutar programas con VonSim, y la estructura básica de un
programa en assembly, vamos a comenzar a ver funcionan las variables en Assembly.</p>
<p>Recordemos que como programadores tenemos dos lugares para guardar
información: la memoria principal o RAM y los registros.</p>
</p>En este tutorial veremos como definir variables en la memoria e inicializarlas.<p>
    """,UIConfig.disableAll,None
    )
    ,TutorialStep("Ubicación de las variables"
    ,"""
<p> Al igual que las instrucciones, las variables también deben ser ubicadas con una sentencia org, como vimos en el tutorial anterior.</p>
<p>Por eso agregamos otra sentencia org al comienzo del programa, en este caso en la dirección 5h de memoria.</p>
<p> Esto quiere decir que las variables que ahora declaremos debajo de la línea <code>org 5h</code>
se ubicarán a partir de la dirección de memoria 5h, es decir 5 en hexadecimal (siempre escribiremos las direcciones en hexadecimal) </p>
""",UIConfig.enableAll,Some("org 5h\\n;las variables van aqui\\norg 2000h\\nhlt\\nend")
    )
    ,TutorialStep("Declaración de variables"
    ,"""
<p> Las variables se declaran en una línea aparte, con
la sintaxis</p>
<p><code>nombre tipo valor_inicial</code></p>.
<p> Hay dos tipos de variables, las db, que ocupan un byte,
y las dw que ocupan dos bytes.</p>
<p>Entonces, para definir una variable llamada <code>peso</code>
que ocupe un solo byte (db) y tenga como valor inicial 25h (25 hexadecimal), debemos agregar la línea
<code>peso db 25h</code> debajo de la línea <code>org 5h</code> </p>
<p class="exercise"> Agrega la línea <code>peso db 25h</code> para definir la
variable peso con valor 25h, debajo de la sentencia <code>org 5h</code>.
Ejecuta el programa para cargar las variables en la memoria.</p>
<p class="exercise"> Busca la celda de memoria con dirección 5h. Debería tener el valor 25h. </p>
<p> Más tarde veremos como definir variables con valores decimales o binarios, pero por ahora
lo haremos con valores hexadecimales</p>
""",UIConfig.enableAll,Some("org 5h\\n;las variables van aqui\\norg 2000h\\nhlt\\nend")
    )
    ,TutorialStep("Valos hexadecimales"
    ,"""
<p> Como vimos, los valores hexadecimales requieren una <em>h</em> al final para
especificar este sistema.</p>
<p> Recuerda que tanto 5h, 25h, 1Ah, Ah o BCDEh son valores hexadecimales, ya que las letras
A, B, C, D, E y F codifican los valores decimales 10, 11, 12, 13, 14 y 15 en el sistema
hexadecimal.</p>
<p>En el caso de valores como Ah o BCDEh, que <em>comienzan</em> con una letra, el simulador
requiere que se anteponga un 0 al valor. Entonces en lugar de escribir Ah o BCDEh, escribiremos
0Ah o 0BCDEh.</p>
<p> De este modo, el simulador puede distinguir el valor Ah de una variable llamada Ah.</p>
<p class="exercise"> El programa del editor no compila. Agrega el 0 al valor A3h de la variable
peso para que compile y ejecútalo. ¿Qué valor aparece en la dirección 5h?</p>
""",UIConfig.enableAll,Some("org 5h\\npeso db A3h\\norg 2000h\\nhlt\\nend")
    )
    ,TutorialStep("Orden de almacenamiento de las variables (parte 1)"
    ,"""
<p> La variable <code>peso</code> que declaramos se ubicó en la celda con dirección 5h.</p>
<p> ¿Qué sucede si declaramos otra variable, también de un byte, a continuación?</p>
<p class="exercise"> Agrega la línea <code>temperatura db 14h</code>
para definir la variable temperatura con valor 14h debajo de la variable peso.
Ejecuta el programa.</p>
<p class="exercise"> Busca la celda de memoria con dirección 5. Debería seguir teniendo el valor 25h.
Mirá la celda siguiente, con dirección 6. ¿Qué valor tiene? </p>
""",UIConfig.enableAll,Some("org 5h\\npeso db 25h\\n\\norg 2000h\\nhlt\\nend")
    )
    ,TutorialStep("Orden de almacenamiento de las variables (parte 2)"
    ,"""
<p> La variable <code>temperatura</code> que declaramos se ubicó
en la celda con dirección 6. Esto es porque las variables se ubican una tras de otra
a partir de la dirección indicada en la sentencia org </p>
<p class="exercise"> Intercambia las declaraciones de las variables <code>peso</code> y
<code>temperatura</code>. Ejecuta el programa y verifica que ahora los valores se invierten
en la memoria, es decir, primero se ubica la variable temperatura
y luego la variable peso.</p>
<p class="exercise"> Agrega otras dos variables de un byte llamadas <code>edad</code> y
<code>altura</code>, con valores iniciales 3Ah y 4Ch, debajo de la variable <code>peso</code>.
Ejecuta el programa.</p>
<p class="exercise"> Observa el valor de las celdas de memoria con dirección 7 y 8. ¿Qué valores tienen?</p>
""",UIConfig.enableAll,Some("org 5h\\npeso db 25h\\ntemperatura db 14h\\norg 2000h\\nhlt\\nend")
    )
    ,TutorialStep("Variables de dos bytes (parte 1)"
    ,"""
<p> Las variables que declaramos ocupaban todas un byte, ya que
usaban el tipo db.</p>
<p> Podemos definir variables que ocupen 2_ bytes con el tipo dw.
Reservando 2_ bytes para la variable, podemos guardar números más grandes.</p>
<p class="exercise"> Agrega la línea <code>peso dw 5A12h</code> para definir
la variable peso con valor 5A12h.
Ejecuta el programa y observa el valor de las celdas 5h y 6h.</p>
<p> Habrás visto que las variables de dos bytes ocupan dos celdas de memoria, ya que cada celda de la
memoria guarda un byte. En este caso vemos que la parte menos significativa del valor (12h) se ubicó
en la celda con la dirección más chica (5h). Por otro lado, la parte más significativa (5Ah) se ubicó
en la celda con la dirección más alta (6h). Este esquema para guardar las variables se llama, por
razones históricas, <strong>little-endian</strong>. </p>
""",UIConfig.enableAll,Some("org 5h\\n;las variables van aqui\\norg 2000h\\nhlt\\nend")
    )
    ,TutorialStep("Variables de dos bytes (parte 2)"
    ,"""
<p> Podemos definir varias variables de tipo dw también, y también se ubicarán secuencialmente.</p>
<p class="exercise"> Define las variables <code>vida</code>, <code>mana</code> y <code>energía</code>, en ese orden, de tipo dw,
con valores iniciales 32h, 15Dh y 1A4Bh, respectivamente.</p>
<p class="exercise"> Ejecuta el programa y observa el valor de las celdas 5h a Ah.
¿Qué sucede cuando ponemos un valor chico, como 32h, en una variable de 2_ bytes?
¿Cómo se rellena la parte más significativa? </p>
""",UIConfig.enableAll,Some("org 5h\\n;aca van las variables\\norg 2000h\\nhlt\\nend")
    )
    ,TutorialStep("Variables de dos bytes (parte 3)"
    ,"""
<p class="exercise"> El programa del editor declara las variables del paso anterior. Ejecútalo
nuevamente y observa en qué dirección comienza cada variable.</p>
<p> En este caso, la variable <code>vida</code> empieza en la dirección 5h;
la variable <code>mana</code> en la dirección 7h y la variable <code>energia</code>
en la dirección 9h.</p>
<p> Por ende la variable <code>vida</code> ocupa las celdas 5h y 6h;
la variable <code>mana</code> ocupa las celdas 7h y 8h y la variable <code>energia</code>
las celdas 9h y 10h.</p>
<p class="exercise"> Si definieramos una nueva variable debajo de <code>energia</code>,
¿en qué dirección de memoria comenzaría?</p>
""",UIConfig.enableAll,Some("org 5h\\nvida dw 32h\\nmana dw 15Dh\\nenergia dw 1A4Bh\\norg 2000h\\nhlt\\nend")
    )
    ,TutorialStep("Ubicación de las variables con db y dw "
    ,"""
<p> Hemos definido varias variables de uno y dos bytes por separado. ¿Qué sucede si las combinamos?</p>
<p class="exercise"> Ejecuta el programa del editor, en donde se definen distintos tipos de variables.</p>
<p class="exercise"> ¿Cuál es la dirección de comienzo de cada variable? ¿Qué celdas de memoria ocupa cada variable? </p>
<div class="answer">
<ul>
<li>La variable precipitaciones comienza en la dirección 5h y ocupa las celdas 5h y 6h.</li>
<li>La variable nubes comienza en la dirección 7h y ocupa la celda 7h.</li>
<li>La variable temperatura comienza en la dirección 8h y ocupa las celdas 8h y 9h.</li>
<li>La variable viento comienza en la dirección 0Ah y ocupa la celda 0Ah.</li>
</ul>
</div>
""",UIConfig.enableAll,Some("org 5h\\nprecipitaciones dw 134h\\nnubes db 45h\\ntemperatura dw 2Ah\\nviento db 8Ah\\norg 2000h\\nhlt\\nend")
    )
    ,TutorialStep("El rol del org"
    ,"""
<p> Hasta ahora las variables que definimos se ubicaban a partir de la dirección 5h,
debido a que estaban debajo de un <code> org 5h</code>.</p>
<p class="exercise"> Cambia el 5h en la línea <code> org 5h</code> por 12h. Ejecuta el programa.
Verifica que las variables ahora se ubican a partir de la dirección 12h.</p>
""",UIConfig.enableAll,Some("org 5h\\nprecipitaciones dw 134h\\nnubes db 45h\\ntemperatura dw 2Ah\\nviento db 8Ah\\norg 2000h\\nhlt\\nend")
    )
    ,TutorialStep("Utilizando varios org"
    ,"""
<p>¿Qué podemos hacer si queremos que algunas variables se ubican a partir del 5h, y
otras a partir del 12h? Utilizamos dos sentencias org.</p>
<p class="exercise"> Lee y ejecuta el programa del editor.
¿Qué direcciones tienen las variables <code>precipitaciones</code> y <code>nubes</code>?
¿Y las variables <code>temperatura</code> y <code>viento</code>?</p>
<p>Entonces en este caso definimos tres sectores de memoria para nuestro programa:
el primero para las variables <code>precipitaciones</code> y <code>nubes</code>, a partir de la dirección 5h,
el segundo paralas variables <code>temperatura</code> y <code>viento</code>, a partir de la dirección 12h,
y el tercero para el código, a partir de la dirección 2000h.</p>
""",UIConfig.enableAll,Some("org 5h\\nprecipitaciones dw 134h\\nnubes db 45h\\norg 12h\\ntemperatura dw 2Ah\\nviento db 8Ah\\norg 2000h\\nhlt\\nend")
    )
    ,TutorialStep("Valores decimales para la dirección del org"
    ,"""
<p>En algunos casos puede ser más fácil especificar la dirección de memoria en decimal.
Supongamos que queremos ubicar variables a partir de la dirección de memoria 12. En tal caso,
en lugar de tener que convertirla a hexadecimal, podemos escribir el 12 sin la <em>h</em> en
la instrucción org.</p>
<p class="exercise"> Lee y ejecuta el programa del editor. Las variables se ubican a partir
de la dirección 12h.</p>
<p class="exercise"> Quita el <em>h</em> de la sentencia <code>org 12h</code> y ejecuta
el programa. ¿Dónde se ubican las variables ahora?</p>
<p class="answer">Las variables se ubican a partir de la dirección 12, o sea 0Bh.</p>
""",UIConfig.enableAll,Some("org 12h\\ntemperatura dw 2Ah\\nviento db 8Ah\\norg 2000h\\nhlt\\nend")
    )
    ,TutorialStep("Valores decimales para inicializar las variables"
    ,"""
<p> Si bien la memoria muestra los valores de las celdas en formato hexadecimal, debido a
que es lo más común, en verdad lo que se guarda en cada celda son 8_ bits, un byte,
que codifican un número utilizando el sistema binario. </p>
<p> Hasta ahora hemos inicializado las variables con un valor codificado en hexadecimal, pero al
cargarse en la memoria en verdad se guarda en formato binario.</p>
<p>Entonces, en realidad el formato hexadecimal es solo una conveniencia para
escribir los valores de forma más cómoda.</p>
<p> También podemos escribirlos con un valor codificado en decimal, como hicimos con la
dirección del org.
Para ello, recordemos que simplemente debemos no poner una <em>h</em> al final del valor.</p>
<p class="exercise"> Agrega la línea <code>peso db 25</code> para definir la variable peso con valor 25 (decimal).
Ejecuta el programa y busca el valor de la celda de memoria donde se cargó</p>
<p class="exercise"> Ese valor, ¿es 25? ¿por qué no? ¿con qué codificación se está mostrando?</p>
<p class="answer">Se muestra el valor 19h, porque se muestra en hexadecimal <p>
""",UIConfig.enableAll,Some("org 5h\\n;las variables van aqui\\norg 2000h\\nhlt\\nend")
    )
    ,TutorialStep("Valores máximos"
    ,"""
<p>Las variables de tipo db tienen un rango de 0 a 255 para valores sin signo,
ya que disponen de 8_ bits.</p>
<p class="exercise"> Intenta poner un valor mayor a 255 en la variable edad. ¿Qué sucede? </p>
<p>Las variables de tipo dw tienen un rango de 0 a 65536 para valores sin signo,
ya que disponen de 16_ bits.</p>
<p class="exercise"> Intenta poner un valor mayor a 65536 en la variable distancia. ¿Qué sucede? </p>
<p> En ambos casos, como son valores positivos, se codifican en el sistema Binario Sin Signo
(BSS) al guardarse en la memoria.</p>
""",UIConfig.enableAll,Some("org 5h\\nedad db 50\\ndistancia dw 1529\\norg 2000h\\nhlt\\nend")
    )
    ,TutorialStep("Valores negativos"
    ,"""
<p>También se pueden usar valores negativos para inicializar una variable. </p>
<p class="exercise"> Prueba poniendo el valor -10 a la variable temperatura y ejecutando
el programa. ¿Qué se almacena en la memoria en la dirección 5h? ¿Por qué?</p>
<p class="answer"> Se almacena el valor F6h, o sea 11110110b, que es la codificación
en Complemento a 2_ (CA2) del número -10. Hay que tener en cuenta que tanto el número
119 como el número -10 se codifican como 11110110b. Por ende es el programador quien
debe saber de antemano como interpretar esa cadena de bits, si en CA2 o en BSS. </p>
""",UIConfig.enableAll,Some("org 5h\\ntemperatura db 10\\norg 2000h\\nhlt\\nend")
    )
    ,TutorialStep("Valores mínimos"
    ,"""
<p>Como se utiliza el sistema CA2 para los números negativos, el valor
mínimo para las variables de tipo db es de -128.</p>
<p class="exercise"> Intenta poner un valor menor a -128 en la variable edad. ¿Qué sucede? </p>
<p>Por otro lado, las variables de tipo dw tienen como valor mínimo el -32768.</p>
<p class="exercise"> Intenta poner un valor menor a -32768 en la variable distancia. ¿Qué sucede? </p>
""",UIConfig.enableAll,Some("org 5h\\nedad db -15\\ndistancia dw -1234\\norg 2000h\\nhlt\\nend")
    )
    ,TutorialStep("Valores binarios para inicializar las variables"
    ,"""
<p> Podemos también ingresar un byte en formato binario agregando una <em>b</em> al final del mismo.</p>
<p class="exercise"> Agrega debajo de <code>peso</code> la línea <code>peso db 00101001b</code> para definir la variable peso con valor 29h.
Ejecuta el programa y verifica que la celda de memoria con dirección 6h tiene el valor 29h.</p>
<p> Recuerda que el valor 00101001b representa el valor 41 en BSS, o 29h en hexadecimal </p>
""",UIConfig.enableAll,Some("org 5h\\n;las variables van aqui\\norg 2000h\\nhlt\\nend")
    )
    ,TutorialStep("Variables sin valor"
    ,"""
<p>También podemos declarar variables sin valor. Para ello ponemos <span class="value">?</span> en lugar del valor.</p>
<p class="exercise"> Define la variable <code>peso</code> de tipo db con valor <span class="value">?</span>.
Antes de ejecutar el código, anota el valor de la celda de memoria 5h. Luego ejecuta el código. ¿Qué
valor tiene ahora esta celda? </p>
<p class="answer"> La variable se define con el código <code>peso db ?</code>. El valor final
de la celda debería ser igual al valor anterior de la misma.</p>
""",UIConfig.enableAll,Some("org 5h\\n;las variables van aqui\\norg 2000h\\nhlt\\nend")
    )
    ,TutorialStep("Vectores de db"
    ,"""
<p>También puedes declarar una variable una variable con varios valores, es decir, un vector.
En ese caso la sintaxis es <code>nombre tipo valor1, valor2, valor3, ...</code> </p>
<p> Los valores se guardan uno seguido del otro en la memoria.</p>
<p class="exercise">Leer y ejecutar el código del editor. ¿En qué celdas de memoria se guardan
los valores? ¿Cuántas celdas ocupan en total? </p>
<p class="answer"> Ocupan 6_ celdas en total, una celda por número. Sus direcciones son 5h, 6h, 7h, 8h, 9h y 0Ah.</p>
""",UIConfig.enableAll,Some("org 5h\\ntabla db 1,3,5,7,9,11\\norg 2000h\\nhlt\\nend")
    )
    ,TutorialStep("Vectores de dw"
    ,"""
<p>Recién definimos un vector donde cada elemento era de tipo db.</p>
<p> Si ahora los elementos son de tipo dw entonces cada elemento ocupará
dos bytes de memoria.</p>
<p class="exercise">Ejecutar el código del editor. ¿En qué celdas de memoria se guardan
los valores? ¿Cuántas celdas ocupan en total? </p>
<p class="answer"> Ocupan 12_ celdas, dos por cada elemento; de la 5h a la 10h.</p>
""",UIConfig.enableAll,Some("org 5h\\ntabla dw 1,3,5,7,9,11\\norg 2000h\\nhlt\\nend")
    )
    ,TutorialStep("Cadenas de caracteres"
    ,"""
<p>También se pueden declarar strings o cadenas de caracteres. Recuerda que los caracteres en
verdad se almacenan como códigos; los mismos se obtienen del
<a href="https://es.wikipedia.org_/wiki/ASCII#Caracteres_imprimibles_ASCII">estándar ASCII</a>.</p>
<p> Por ejemplo, la letra <em>A</em> se codifica con el número 41h, y la letra <em>a</em> con el
número 61h.</p>
<p> Entonces, como assembly es un lenguaje de bajo nivel, en realidad lo que declararemos es un vector
de números, donde cada número es el código ASCII de un carácter.</p>
<p> Por suerte, no debemos buscar e ingresar los códigos uno por uno, ya que el compilador de
assembly nos permite ingresar un texto entre comillas y el convierte el mismo en códigos.</p>
<p class="exercise"> Lee y ejecuta el código del editor. ¿Qué se guarda a partir de la dirección 5h?
Verifica que los códigos corresponden a los de los caracteres h, o, l, y a. </p>
<div class="answer"> <p>Se almacenan:</p>
<ul>
<li>En la celda 5h, el valor 68h, código del carácter h</li>
<li>En la celda 6h, el valor 6Fh, código del carácter o</li>
<li>En la celda 7h, el valor 6Ch, código del carácter l</li>
<li>En la celda 8h, el valor 61h, código del carácter a</li>
</ul>
</div>
""",UIConfig.enableAll,Some("org 5h\\ncadena db \\"hola\\"\\norg 2000h\\nhlt\\nend")
    )
    ,TutorialStep("Codificación de caracteres no alfabéticos"
    ,"""
<p class="exercise"> En el editor se encuentra el mismo código que antes.
Cambia el contenido de la cadena de caracteres de "hola" a "123#! wubba lubba dub dub" y
ejecuta el programa. ¿Cómo se codifica el "123"?¿Y los caracteres "#!"?¿Y los espacios?</p>
<div class="answer">
<p>La cadena "123" contiene tres caracteres, el "1", con código ASCII 31h, el "2" con código 32h
y el "3" con código 33h. Ten cuidado, ya que el código de un número (32h) difiere del valor
del mismo (2).</p>
<p> Los caracteres "#" y "!" también tienen un código asociado, en este caso
el 23h y 21h, así como "?,.-+*" y otros símbolos.</p>
<p>Por último, los espacios también tienen un código, el 20h. Si hay varios espacios
habrá la misma cantidad de códigos 20h en la memoria.
</p>
</div>
""",UIConfig.enableAll,Some("org 5h\\ncadena db \\"hola\\"\\norg 2000h\\nhlt\\nend")
    )
    ,TutorialStep("Las cadenas de caracteres son sólo de tipo db"
    ,"""
<p>Las cadenas de caracteres sólo pueden ser de tipo db.</p>
<p class="exercise"> Modifica el código del editor, reemplazando el tipo db por dw.
¿Qué sucede? </p>
<p class="answer"> El compilador detecta el error y no compila el programa.
</p>
""",UIConfig.enableAll,Some("org 5h\\ncadena db \\"hola\\"\\norg 2000h\\nhlt\\nend")
    )
    ,TutorialStep("Variables vs Etiquetas "
    ,"""
<p>Cuando tenemos una variable de tipo dw, reservamos dos celdas de memoria
para guardar un valor. Por ejemplo, en el programa del editor, las celdas 5h y 6h
contienen el valor de la variable distancia.</p>
<p>De la misma forma, cuando tenemos un vector, se utilizan varias celdas de memoria para almacenarlo.</p>
<p>Generalmente hablamos de la <strong>dirección</strong> de estas variables, pero no queda claro
a qué nos referimos, si a la dirección de la primer celda, de la última, de todas, etc.</p>
<p>Por eso, se define la <strong>dirección</strong> de una variable como la dirección de su
primera celda, sin importar cuantas ocupe (1_ para db, 2_ para dw,
o varias para un vector).</p>
<p class="exercise"> Lee y ejecuta el código del editor. ¿Cuáles son las direcciones de las variables declaradas?</p>
<div class="answer">
<ul>
<li>La variable <code>distancia</code> tiene dirección 5h.</li>
<li>La variable <code>amigos</code> tiene dirección 7h.</li>
<li>La variable <code>mensaje</code> tiene dirección 8h.</li>
<li>La variable <code>tablita</code> tiene dirección 16h.</li>
<li>La variable <code>androide</code> tiene dirección 22h.</li>
</ul>
</div>
""",UIConfig.enableAll,Some("""org 5h
distancia dw 14A3h
amigos db 4Ah
mensaje db "Buenas noches."
tablita dw 10,5,4Fh,1D4Ch,3h,4BCDh
androide db "R2D2"
org 2000h
hlt
end""")
    )
    ,TutorialStep("Variables vs Etiquetas"
    ,"""
<p>Pensándolo de ese modo, en assembly <em>declarar una variable</em> es simplemente etiquetar una dirección de memoria.
Entonces más que una variable, tenemos una <strong>etiqueta</strong> para una celda de memoria. </p>
<p> Entonces, en realidad assembly no nos da un mecanismo para definir <strong>variables</strong> como
las entendemos en otros lenguajes de programación.
En cambio, tenemos un mecanismo para <strong>inicializar celdas de memoria</strong> con algún valor y
<strong>etiquetar celdas de memoria</strong> con un nombre.
</p>
""",UIConfig.enableAll,Some("org 5h\\ndw 1A3Bh\\norg 2000h\\nhlt\\nend")
    )
    //,TutorialStep("Variables sin etiquetas TODO"
    //,"""
    //
    //
    //<p class="exercise"> Lee y ejecuta el código del editor</p>
    //
    //<p class="answer"> El compilador detecta el error y no compila el programa.
    //</p>
    //
    //""",UIConfig.enableAll,Some("org 5h\\ndw 1A3Bh\\norg 2000h\\nhlt\\nend")
    //)
    //,TutorialStep("Dup TODO"
    //,"""
    //<p>En ocasiones, queremos definir un vector pero con todos los elementos iguales.</p>
    //
    //<p class="exercise"> </p>
    //""",UIConfig.enableAll,Some("org 5h\\n;las variables van aqui\\norg 2000h\\nhlt\\nend")
    //)
    ,TutorialStep("Autoevaluación (1/6)"
    ,"""
<ol>
<li><p class="exercise">La sentencia org sirve para ubicar las instrucciones en la memoria.
¿Cómo se ubican las variables en la misma?</p>
<p class="answer">También se utiliza la sentencia org para ubicar las variables.
Generalmente para las variables se utiliza otro org, distinto al del codigo.</p></li>
<li><p class="exercise">¿Qué tipos básicos de variables hay en Assembly?</p>
<p class="answer">Hay dos tipos: las db, que ocupan un byte, y las dw, que ocupan dos bytes.</p></li>
<li><p class="exercise">¿Hay otros tipos de datos, como booleanos, caracteres, strings,
arreglos y otros en Assembly?</p>
<p class="answer">No, no existen formalmente esos tipos de datos en el lenguaje.
No obstante, se pueden <em>representar</em> valores de esos tipos utilizando
el código binario. </p></li>
<li><p class="exercise">Las variables de Assembly ¿funcionan como en otros lenguajes tradicionales,
tales como Pascal, C, Java o Python?</p>
<p class="answer">No, funcionan en realidad como una forma de inicializar la memoria,
y ponerle un nombre (<em>etiqueta</em>) a ciertas direcciones de memoria. </p></li>
</ol>
""",UIConfig.enableAll,None
    )
    ,TutorialStep("Autoevaluación (2/6)"
    ,"""
<ol>
<li><p class="exercise">¿Qué se le agrega a un valor para que sea considerado
como un valor en hexadecimal? De un ejemplo con el valor 3B5</p>
<p class="answer">Se agrega una h al final del número. Por ejemplo,
el valor 3B5 se escribiría como 3B5h. </p></li>
<li><p class="exercise">Si queremos escribir el valor hexadecimal A3F2 en Assembly,
¿qué <strong>dos</strong> cosas debemos agregarle para que el lenguaje lo reconozca como tal? </p>
<p class="answer">Se debe agregar una h al final del número, como antes,
pero como el valor comienza con una letra (la <strong>A</strong>), debe agregarse
también un 0 al comienzo. Entonces el valor se debería ingresar como 0A3F2h.
De esta manera se evita confundir al compilador, que sino pensaría que estamos
intentando utilizar la variable con nombre <strong>A3F2h</strong>.</p></li>
<li><p class="exercise">¿Cómo se escribe el valor binario 0100010 en Assembly?</p>
<p class="answer">Se escribe 0100010b, agregando una <strong>b</strong> al final
para distinguirlo del número decimal 100010.</p></li>
<li><p class="exercise">¿Cómo se escribe el valor decimal 28 en Assembly? ¿y el valor decimal
101 ?.</p>
<p class="answer">Se escriben directamente como 28 y 101.</p>
</li>
</ol>
""",UIConfig.enableAll,None
    )
    ,TutorialStep("Autoevaluación (3/6)"
    ,"""
<ol>
<li><div class="exercise"><p>Decidir si es válida la forma de
escribir los siguientes valores en Assembly:</p>
<ol style="text-align:left;">
<li><p>12h</p></li>
<li><p><span class="value"> 5B2_</span> </p></li>
<li><p>0010b</p></li>
<li><p><span class="value">13b</span></p></li>
<li><p><span class="value"> B2h</span> </p></li>
<li><p>1101</p></li>
</ol>
</div>
<div class="answer" style="text-align:left;">
<ol>
<li><p>12h : Válido, es un valor en hexadecimal.</p></li>
<li><p>5B2_ : Inválido, falta una <strong>h</strong> al final para ser un valor hexadecimal válido.</p></li>
<li><p>0010b : Válido, es un valor binario.</p></li>
<li><p>13b : Inválido, el <strong>b</strong> sólo puede usarse si hay digitos 0 o 1.</p></li>
<li><p>B2h: Inválido, falta el 0 adelante para distinguirlo de una variable.</p></li>
<li><p>1101 : Válido, es un valor en decimal</p></li>
</ol>
</div>
</li>
</ol>
""",UIConfig.enableAll,None
    )
    ,TutorialStep("Autoevaluación (4/6)"
    ,"""
<ol>
<li><p class="exercise">¿Se puede ingresar números negativos en Assembly?.</p>
<p class="answer">Si, solo basta poner el signo - adelante.</p>
</li>
<li><p class="exercise">¿Cómo se codifican los números positivos en la memoria?</p>
<p class="answer">Se codifican con el sistema Binario sin signo (BSS).</p>
</li>
<li><p class="exercise">¿Cómo se codifican los números negativos en la memoria?</p>
<p class="answer">Se codifican con el sistema Complemento a 2 (CA2). En dicho
sistema los números negativos tienen el bit más significativo (el de más a la izquierda)
en 1.</p>
</li>
<li><p class="exercise">Si un byte en la memoria tiene el bit más significativo (el
de más a la izquierda) en 1, ¿qué significa?</p>
<p class="answer"> Puede significar dos cosas. Si interpretamos el número en
CA2, entonces significa que es negativo. Sino, significa que es un número mayor o igual a 128. </p>
</li>
</ol>
""",UIConfig.enableAll,None
    )
    ,TutorialStep("Autoevaluación (5/6)"
    ,"""
<ol>
<li><p class="exercise">¿Cuáles son los valores mínimos y máximos para una variable de
tipo db? </p>
<p class="answer"> En BSS, el mínimo es 0 y el máximo es 255 (2^8).
En CA2, el mínimo es -128 (-2^7) y el máximo es 127 (2^7-1).</p>
</li>
<li><p class="exercise">¿Cuáles son los valores mínimos y máximos para una variable de
tipo dw? </p>
<p class="answer"> En BSS, el mínimo es 0 y el máximo es 65535 (2^16).
En CA2, el mínimo es -32768 (-2^15) y el máximo es 32767 (2^15-1).</p>
</li>
<li><p class="exercise">¿Se puede definir una variable sin valor inicial? </p>
<p class="answer"> Si, poniendo <code>?</code> como valor de la variable.</p>
</li>
<li><p class="exercise">¿Qué función cumpliría una variable sin valor inicial? </p>
<p class="answer"> Dicho mecanismo nos permite reservar espacio para la variable,
y luego especificar su valor en el programa.</p>
</li>
</ol>
""",UIConfig.enableAll,None
    )
    ,TutorialStep("Autoevaluación (6/6)"
    ,"""
<ol>
<li><p class="exercise"> ¿Cómo se define un vector de valores en Assembly? </p>
<div class="answer"> <p>Se especifica el nombre y el tipo de una variable normalmente,
pero luego se agregan varios valores separados por una coma. Por ejemplo:</p>
<pre><code>temperaturas dw 15, 29, -5, 99, 1500</code></pre>
</div>
</li>
<li><p class="exercise"> La línea <code>mensaje db "Hola"</code> define un string
con etiqueta <em>mensaje</em>. ¿Podríamos utilizar dw en lugar de db? </p>
<p class="answer"> No, ya que cada caracter se codifica con el código ASCII,
y por ende ocupa un byte.</p>
<li><div class="exercise"> <p>¿Cómo se ubican en la memoria los valores de un vector?.
Indicar las direcciones de los valores de la variable temperaturas </p>
<pre><code>org 5
temperaturas db 15, 29, -5</code></pre>
</div>
<p class="answer">
El valor 15 queda almacenado en la celda de memoria con dirección 5.
El 29 en la celda con dirección 6, y el -5 en la celda 7.
</p>
</li>
</ol>
""",UIConfig.enableAll,None
    )
    ,TutorialStep("Resumen"
    ,"""
<p>En Assembly se pueden etiquetar celdas de memoria e inicializar su valor,
que llamamos <strong>declarar variables</strong> aunque su significado sea algo
diferente del de otros lenguajes de programación.</p>
<p>Para ello primero debemos establecer la dirección donde se comienzan a ubicar las variables
con la sentencia org.</p>
<p>Luego podemos definir variables. La sintaxis para definir variables es
<code>nombre tipo valor1, valor2, ...</code></p>
<p> Hay dos tipos de variables: db, si ocupan un byte,
o dw si ocupan 2_ bytes. </p>
<p>Las variables tienen un nombre o etiqueta, que luego nos servirá como referencia para accederlas o modificarlas
</p>
<p>Las variables se ubican secuencialmente en la memoria, es decir, donde termina una empieza la siguiente.
</p>
<p>Los valores pueden ingresarse en hexadecimal (4Bh), en binario (00001011b) o en decimal (11).
Los números positivos se codifican en el sistema BSS. Además, se pueden ingresar números negativos, que
se codifican con el sistema CA2.
</p>
<p>Podemos declarar vectores poniendo varios valores a una variable, separados por coma.
También podemos declarar vectores de caracteres o strings, escribiendo un texto entre comillas.
</p>
""",UIConfig.enableAll,Some("org 5h\\n;las variables van aqui\\norg 2000h\\nhlt\\nend")
    )
    //,TutorialStep("A continuación"
    //,"""
    // <p>Ahora que sabes más sobre cómo se codifican los datos en assembly y cómo definir variables,
    // puedes avanzar más con el <a href="?tutorial=code">tutorial sobre
    // registros e instrucciones simples</a>.</p>
    //
    //""",UIConfig.disableAll,Some(""))
    )
} | facundoq/vonsim | src/main/scala/vonsim/webapp/tutorials/VariablesTutorial.scala | Scala | agpl-3.0 | 30,360 |
/*
* Copyright 2014–2020 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.connector.datasource
import quasar.RenderTreeT
import quasar.api.datasource.DatasourceType
import quasar.api.datasource.DatasourceError.InitializationError
import quasar.api.resource.ResourcePathType
import quasar.connector.{ByteStore, QueryResult}
import quasar.qscript.{MonadPlannerErr, QScriptEducated}
import scala.concurrent.ExecutionContext
import scala.util.Either
import argonaut.Json
import cats.effect.{ConcurrentEffect, ContextShift, Timer, Resource}
import fs2.Stream
import matryoshka.{BirecursiveT, EqualT, ShowT}
/** Module-level entry point for a "heavyweight" datasource: a backend able to
  * evaluate full QScript query plans itself (the `T[QScriptEducated[T, ?]]`
  * query parameter below), as opposed to serving only raw resource reads.
  * NOTE(review): heavy/light distinction inferred from the plan type — confirm
  * against implementing modules.
  */
trait HeavyweightDatasourceModule {
  /** Identifier (name/version) of the datasources this module constructs. */
  def kind: DatasourceType

  /** Returns `config` with sensitive entries removed or masked — presumably
    * so it can be logged or echoed back safely; confirm with callers. */
  def sanitizeConfig(config: Json): Json

  /** Attempts to construct a datasource from `config`.
    *
    * The result is a `Resource`, so the datasource's lifecycle (connection
    * pools, etc.) is bound to the resource scope; a bad `config` yields
    * `Left(InitializationError)` rather than a raised error.
    *
    * @param config     connector-specific JSON configuration
    * @param byteStore  small key/value store the datasource may use for state
    */
  def heavyweightDatasource[
      T[_[_]]: BirecursiveT: EqualT: ShowT: RenderTreeT,
      F[_]: ConcurrentEffect: ContextShift: MonadPlannerErr: Timer](
      config: Json,
      byteStore: ByteStore[F])(
      implicit ec: ExecutionContext)
      : Resource[F, Either[InitializationError[Json], Datasource[F, Stream[F, ?], T[QScriptEducated[T, ?]], QueryResult[F], ResourcePathType.Physical]]]
}
| slamdata/quasar | connector/src/main/scala/quasar/connector/datasource/HeavyweightDatasourceModule.scala | Scala | apache-2.0 | 1,657 |
/*
* Copyright 2016 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600.v3
import play.api.libs.json.Format
import uk.gov.hmrc.ct.box.formats._
import uk.gov.hmrc.ct.ct600j.v3._
/** play-json `Format` (serializer/deserializer) instances for the CT600 v3
  * box types.
  *
  * Every CT600 box is a thin wrapper over a single (possibly optional)
  * primitive, so each instance simply delegates to the matching generic
  * format class (`StringFormat`, `OptionalIntegerFormat`, ...) passing the
  * box's `apply` as the constructor.
  */
package object formats {
  implicit val b1Format: Format[B1] = new StringFormat[B1](B1.apply)
  implicit val B40Format: Format[B40] = new OptionalBooleanFormat[B40](B40.apply)
  implicit val B45Format: Format[B45Input] = new OptionalBooleanFormat[B45Input](B45Input.apply)
  implicit val B55Format: Format[B55] = new OptionalBooleanFormat[B55](B55.apply)
  implicit val B80AFormat: Format[B80A] = new OptionalBooleanFormat[B80A](B80A.apply)
  implicit val B85AFormat: Format[B85A] = new OptionalBooleanFormat[B85A](B85A.apply)
  implicit val B90AFormat: Format[B90A] = new OptionalStringFormat[B90A](B90A.apply)
  implicit val B140Format: Format[B140] = new OptionalBooleanFormat[B140](B140.apply)
  implicit val B145Format: Format[B145] = new OptionalIntegerFormat[B145](B145.apply)
  implicit val B150Format: Format[B150] = new OptionalBooleanFormat[B150](B150.apply)
  implicit val B155Format: Format[B155] = new IntegerFormat[B155](B155.apply)
  implicit val B160Format: Format[B160] = new OptionalIntegerFormat[B160](B160.apply)
  implicit val B165Format: Format[B165] = new IntegerFormat[B165](B165.apply)
  implicit val B170Format: Format[B170] = new IntegerFormat[B170](B170.apply)
  implicit val B190Format: Format[B190] = new IntegerFormat[B190](B190.apply)
  implicit val B235Format: Format[B235] = new IntegerFormat[B235](B235.apply)
  implicit val B275Format: Format[B275] = new IntegerFormat[B275](B275.apply)
  implicit val B280Format: Format[B280] = new BooleanFormat[B280](B280.apply)
  implicit val B295Format: Format[B295] = new IntegerFormat[B295](B295.apply)
  implicit val B300Format: Format[B300] = new IntegerFormat[B300](B300.apply)
  implicit val B305Format: Format[B305] = new IntegerFormat[B305](B305.apply)
  implicit val B315Format: Format[B315] = new IntegerFormat[B315](B315.apply)
  implicit val B335Format: Format[B335] = new IntegerFormat[B335](B335.apply)
  implicit val B485Format: Format[B485] = new BooleanFormat[B485](B485.apply)
  implicit val BFQ1Format: Format[BFQ1] = new OptionalBooleanFormat[BFQ1](BFQ1.apply)
  implicit val B620Format: Format[B620] = new OptionalIntegerFormat[B620](B620.apply)
  implicit val B515Format: Format[B515] = new BigDecimalFormat[B515](B515.apply)
  implicit val B595Format: Format[B595] = new BigDecimalFormat[B595](B595.apply)
  implicit val B705Format: Format[B705] = new OptionalIntegerFormat[B705](B705.apply)
  implicit val B710Format: Format[B710] = new OptionalIntegerFormat[B710](B710.apply)
  implicit val B735Format: Format[B735] = new OptionalIntegerFormat[B735](B735.apply)
  implicit val B750Format: Format[B750] = new OptionalIntegerFormat[B750](B750.apply)
  implicit val B755Format: Format[B755] = new OptionalIntegerFormat[B755](B755.apply)
  implicit val B760Format: Format[B760] = new OptionalIntegerFormat[B760](B760.apply)
  implicit val B765Format: Format[B765] = new OptionalIntegerFormat[B765](B765.apply)
  implicit val B775Format: Format[B775] = new OptionalIntegerFormat[B775](B775.apply)
  implicit val B780Format: Format[B780] = new OptionalIntegerFormat[B780](B780.apply)
  implicit val B860Format: Format[B860] = new OptionalIntegerFormat[B860](B860.apply)
  implicit val B920Format: Format[B920] = new StringFormat[B920](B920.apply)
  implicit val PAYEEQ1Format: Format[PAYEEQ1] = new OptionalBooleanFormat[PAYEEQ1](PAYEEQ1.apply)
  implicit val REPAYMENTSQ1Format: Format[REPAYMENTSQ1] = new OptionalBooleanFormat[REPAYMENTSQ1](REPAYMENTSQ1.apply)
  implicit val B925Format: Format[B925] = new StringFormat[B925](B925.apply)
  implicit val B930Format: Format[B930] = new StringFormat[B930](B930.apply)
  implicit val B935Format: Format[B935] = new StringFormat[B935](B935.apply)
  implicit val B940Format: Format[B940] = new OptionalStringFormat[B940](B940.apply)
  implicit val B955Format: Format[B955] = new OptionalStringFormat[B955](B955.apply)
  implicit val B960Format: Format[B960] = new OptionalStringFormat[B960](B960.apply)
  implicit val B965Format: Format[B965] = new OptionalStringFormat[B965](B965.apply)
  implicit val B975Format: Format[B975] = new OptionalStringFormat[B975](B975.apply)
  implicit val B985Format: Format[B985] = new OptionalStringFormat[B985](B985.apply)
  implicit val N092Format: Format[N092] = new OptionalBooleanFormat[N092](N092.apply)
}
| ahudspith-equalexperts/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600/v3/formats/package.scala | Scala | apache-2.0 | 5,080 |
/**
* Copyright (c) 2016 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import scala.util.Properties

// Spark-shell script: exit 0 when the running SparkContext reports the Spark
// version named in the SPARK_VERSION environment variable; otherwise print a
// message and exit 1. (`sc` is supplied by the spark-shell that runs this.)
val expectedSparkVersion = Properties.envOrElse("SPARK_VERSION", "")
val currentSparkVersion = sc.version

if (currentSparkVersion == expectedSparkVersion) {
  System.exit(0)
} else {
  println(s"Incorrect spark version, expected $expectedSparkVersion got $currentSparkVersion.")
  System.exit(1)
}
| aayushidwivedi01/spark-tk | sparktk-core/version.scala | Scala | apache-2.0 | 1,038 |
package fpinscala.errorhandling
import scala.{Option => _, Some => _, Either => _, _} // hide std library `Option`, `Some` and `Either`, since we are writing our own in this chapter
/** A from-scratch re-implementation of the standard library's `Option`,
  * with every combinator written as a direct pattern match on the two
  * data constructors `Some` and `None`.
  */
sealed trait Option[+A] {

  /** Applies `f` to the wrapped value, if any. */
  def map[B](f: A => B): Option[B] = this match {
    case Some(value) => Some(f(value))
    case None        => None
  }

  /** Extracts the value, falling back to the lazily-evaluated `default`. */
  def getOrElse[B>:A](default: => B): B = this match {
    case Some(value) => value
    case None        => default
  }

  /** Like `map`, but flattens the nested `Option` produced by `f`. */
  def flatMap[B](f: A => Option[B]): Option[B] = this match {
    case Some(value) => f(value)
    case None        => None
  }

  /** Returns this option when non-empty, otherwise the lazy alternative. */
  def orElse[B>:A](ob: => Option[B]): Option[B] = this match {
    case Some(_) => this
    case None    => ob
  }

  /** Keeps the value only when it satisfies the predicate `f`. */
  def filter(f: A => Boolean): Option[A] = this match {
    case Some(value) if f(value) => this
    case _                       => None
  }
}
case class Some[+A](get: A) extends Option[A]
case object None extends Option[Nothing]
object Option {

  /** Shows that a strict `val` whose right-hand side throws fails
    * immediately — the `try` below is never even entered. */
  def failingFn(i: Int): Int = {
    val y: Int = throw new Exception("fail!") // throws before the try block runs
    try {
      val x = 42 + 5
      x + y
    }
    catch { case e: Exception => 43 } // pattern-matching catch; yields 43 if reached
  }

  /** Same idea, but the throw happens inside the `try`, so the handler
    * runs and the function returns 43. */
  def failingFn2(i: Int): Int = {
    try {
      val x = 42 + 5
      x + ((throw new Exception("fail!")): Int) // a thrown expression can be ascribed any type
    }
    catch { case e: Exception => 43 }
  }

  /** Arithmetic mean, or `None` for an empty sequence. */
  def mean(xs: Seq[Double]): Option[Double] =
    if (xs.nonEmpty) Some(xs.sum / xs.length) else None

  /* Single-pass variance using the algebraic identity:
   *
   *   SUM((x - mean)^2) = SUM(x^2) - n * mean^2
   *
   * so only the count, sum and sum of squares need to be accumulated.
   */
  def variance1(xs: Seq[Double]): Option[Double] = {
    case class Stats(count: Int, total: Double, totalSquares: Double) {
      def mean = total / count
    }
    val stats = xs.foldLeft(Stats(0, 0.0, 0.0)) { (st, x) =>
      Stats(st.count + 1, st.total + x, st.totalSquares + x * x)
    }
    if (stats.count == 0) None
    else Some(stats.totalSquares / stats.count - stats.mean * stats.mean)
  }

  /** Two-pass variance: compute the mean, then the mean squared deviation.
    * A match whose branches merely rewrap an Option is usually better
    * expressed with flatMap — see `variance` below. */
  def variance2(xs: Seq[Double]): Option[Double] =
    mean(xs) match {
      case Some(m) => mean(xs.map(x => math.pow(x - m, 2)))
      case None    => None
    }

  /** Variance expressed with flatMap, as the exercise requests. */
  def variance(xs: Seq[Double]): Option[Double] =
    mean(xs).flatMap { m =>
      mean(xs.map(x => math.pow(x - m, 2)))
    }

  /** Combines two options with `f`; `None` when either is empty. */
  def map2[A,B,C](a: Option[A], b: Option[B])(f: (A, B) => C): Option[C] =
    for {
      x <- a
      y <- b
    } yield f(x, y)

  /** Turns a list of options inside out; `None` if any element is `None`. */
  def sequence[A](a: List[Option[A]]): Option[List[A]] = a match {
    case Nil    => Some(Nil)
    case h :: t => map2(h, sequence(t))(_ :: _)
  }

  /** Maps `f` over the list and collects the results; `None` if `f`
    * fails on any element. Note: not tail-recursive, like the original. */
  def traverse[A, B](a: List[A])(f: A => Option[B]): Option[List[B]] = a match {
    case Nil    => Some(Nil)
    case h :: t => map2(f(h), traverse(t)(f))(_ :: _)
  }

  /** `sequence` is just `traverse` with the identity function. */
  def sequenceViaTraverse[A](a: List[Option[A]]): Option[List[A]] =
    traverse[Option[A], A](a)(identity)
}
package org.dsa.iot.ignition.spark
import com.ignition.frame.SparkRuntime
import org.dsa.iot.scala.Having
/**
* Prints out the data frame data to the standard output.
*/
class DebugOutput(implicit rt: SparkRuntime) extends RxFrameTransformer {
  // Fluent configuration setters: each pushes a value into the matching
  // reactive port below and returns this instance for chaining.
  def names(show: Boolean): DebugOutput = this having (showNames <~ show)
  def types(show: Boolean): DebugOutput = this having (showTypes <~ show)
  def title(str: String): DebugOutput = this having (title <~ Some(str))
  def noTitle(): DebugOutput = this having (title <~ None)
  def width(n: Int): DebugOutput = this having (maxWidth <~ Some(n))
  def unlimitedWidth(): DebugOutput = this having (maxWidth <~ None)

  // Reactive inputs controlling the rendered output.
  val showNames = Port[Boolean]("showNames")   // print column names?
  val showTypes = Port[Boolean]("showTypes")   // print column types?
  val title = Port[Option[String]]("title")    // optional heading; None = no title
  val maxWidth = Port[Option[Int]]("maxWidth") // None = unlimited line width

  // Re-emit whenever any of the four inputs changes: combine the latest
  // values and delegate to the underlying frame-level DebugOutput step.
  protected def compute =
    (showNames.in combineLatest showTypes.in combineLatest title.in combineLatest maxWidth.in) flatMap {
      case (((names, types), ttl), width) => doTransform(com.ignition.frame.DebugOutput(names, types, ttl, width))
    }
}
/**
 * Factory for [[DebugOutput]] instances.
 */
object DebugOutput {

  /**
   * Creates a new DebugOutput instance with no title, unlimited width,
   * column names shown and column types hidden.
   */
  def apply()(implicit rt: SparkRuntime): DebugOutput =
    (new DebugOutput).noTitle().unlimitedWidth().names(true).types(false)
}
/**
* Copyright 2015 Thomson Reuters
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmwell.formats
import java.math.{BigDecimal, BigInteger}
import cmwell.domain._
import cmwell.syntaxutils._
import cmwell.util.string.dateStringify
import org.apache.jena.datatypes.xsd.XSDDatatype
import com.typesafe.scalalogging.LazyLogging
import org.apache.commons.codec.binary._
import org.joda.time.DateTime
import play.api.libs.json._
/**
* Created by gilad on 12/4/14.
*/
/**
 * Base formatter that renders CM-Well tree structures as play-json values.
 *
 * @param fieldNameModifier transformation applied to field names before output
 */
abstract class AbstractJsonFormatter(override val fieldNameModifier: String => String) extends TreeLikeFormatter {
  override def format: FormatType = JsonType
  override type Inner = JsValue
  override def makeFromTuples(tuples: Seq[(String, Inner)]): Inner = JsObject(tuples)
  override def makeFromValues(values: Seq[Inner]): Inner = JsArray(values)
  override def empty: Inner = JsNull

  /**
   * Converts a single scalar value to its JSON representation.
   *
   * Numeric types are routed through `BigDecimal` to preserve precision.
   * JSON numbers cannot represent non-finite doubles, so positive/negative
   * infinity and NaN are emitted as strings. Byte arrays are Base64-encoded
   * (unchunked), dates are formatted via `dateStringify`, and anything else
   * falls back to `toString`.
   */
  override def single[T](value: T): Inner = value match {
    case i: Int => JsNumber(new BigDecimal(i.toString))
    case i: Long => JsNumber(new BigDecimal(i.toString))
    case i: BigInteger => JsNumber(new BigDecimal(i.toString))
    case i: Float => JsNumber(new BigDecimal(i.toString))
    case i: Double if i == Double.PositiveInfinity =>
      JsString("Infinity") //JsNumber(BigDecimal.valueOf(Double.MaxValue))
    case i: Double if i == Double.NegativeInfinity =>
      JsString("-Infinity") //JsNumber(BigDecimal.valueOf(Double.MinValue))
    // was `i.toString == "NaN"`; `isNaN` is equivalent and avoids the
    // intermediate string allocation
    case i: Double if i.isNaN => JsString("NaN")
    case i: Double => JsNumber(new BigDecimal(i.toString))
    case i: BigDecimal => JsNumber(i)
    case b: Boolean => JsBoolean(b)
    case d: Array[Byte] => JsString(new Base64(0).encodeToString(d))
    case d: DateTime => JsString(dateStringify(d))
    case _ => JsString(value.toString)
  }
}
/**
 * Stackable trait that, when a JSONP callback name is supplied, wraps the
 * JSON produced by the underlying formatter in a `callback(...);` call and
 * reports the format type as JSONP.
 */
trait JsonP extends Formatter {
  def callback: Option[String]

  abstract override def render(formattable: Formattable): String =
    callback.fold(super.render(formattable)) { cb =>
      val json = super.render(formattable)
      s"$cb($json);"
    }

  abstract override def format =
    if (callback.isDefined) JsonpType else super.format
}
/** Compact (single-line) JSON formatter. Array values are de-duplicated
  * while preserving order, and output is wrapped as JSONP when `callback`
  * is set. NOTE(review): `cleanDuplicatesPreserveOrder` presumably comes
  * from `TreeLikeFormatter` — confirm. */
class JsonFormatter(fieldNameModifier: String => String, override val callback: Option[String] = None)
    extends AbstractJsonFormatter(fieldNameModifier)
    with JsonP {
  override def makeFromValues(values: Seq[Inner]): Inner = JsArray(cleanDuplicatesPreserveOrder(values))
  override def mkString(inner: Inner) = Json.stringify(inner)
}
/** Pretty-printed (indented, multi-line) variant of [[JsonFormatter]]:
  * same de-duplication and JSONP wrapping, but rendered with
  * `Json.prettyPrint`. */
class PrettyJsonFormatter(fieldNameModifier: String => String, override val callback: Option[String] = None)
    extends AbstractJsonFormatter(fieldNameModifier)
    with JsonP {
  override def makeFromValues(values: Seq[Inner]): Inner = JsArray(cleanDuplicatesPreserveOrder(values))
  override def mkString(inner: Inner) = Json.prettyPrint(inner)
}
/**
 * Base formatter for the "jsonl" flavor: each infoton becomes one flat JSON
 * object whose fields are arrays of `{value, quad?, lang?, type?}` objects.
 * System fields carry a ".sys" suffix; data fields get a prefix resolved via
 * `hashToUrlAndPrefix` (or ".nn" when unresolved).
 *
 * @param hashToUrlAndPrefix resolves a namespace hash to its (url, prefix)
 * @param quadToAlias        resolves a quad URI to its alias, if any
 */
abstract class AbstractJsonlFormatter(hashToUrlAndPrefix: String => Option[(String, String)],
                                      quadToAlias: String => Option[String])
    extends AbstractJsonFormatter(identity) {
  //todo add @id
  override def format: FormatType = JsonlType
  override def mkString(value: Inner): String = Json.stringify(value)
  override def empty: JsObject = JsObject(Seq())

  // Keeps only the pairs whose Option is defined, converting each defined
  // value with `single`; used to attach optional quad/lang/type attributes.
  protected def toTupleSeq[T](t: (String, Option[T])*) =
    t.toSeq.flatMap(
      x =>
        x._2 match {
          case Some(v) => Seq(x._1 -> single(v))
          case None => Seq()
      }
    )

  // Memoization caches for quadToAlias lookups:
  //   m - quads that resolved to an alias
  //   s - quads already known to have no alias
  // NOTE(review): plain mutable collections — a formatter instance is not
  // safe for concurrent use; confirm callers use one instance per request.
  val m = scala.collection.mutable.Map.empty[String, String]
  val s = scala.collection.mutable.Set.empty[String]

  // Cached wrapper around `quadToAlias` (backed by `m` and `s` above).
  private[this] def f(q: String): Option[String] = {
    if (m.contains(q)) m.get(q)
    else if (s(q)) None
    else {
      val opt = quadToAlias(q)
      opt match {
        case Some(x) => m.update(q, x)
        case None => s += q
      }
      opt
    }
  }

  /** Builds one `{value, quad?, lang?, type?}` object; the quad, if present,
    * is replaced by its alias (dropped when no alias exists). */
  protected def mkVal[V](value: V,
                         quad: Option[String] = None,
                         lang: Option[String] = None,
                         dataType: Option[String] = None): Inner = {
    JsObject(Seq("value" -> single(value)) ++ toTupleSeq("quad" -> quad.flatMap(f), "lang" -> lang, "type" -> dataType))
  }

  /** Same as `mkVal`, but wrapped in a single-element JSON array (jsonl
    * represents every field as an array of value objects). */
  protected def mkValWrappedInArray[V](value: V,
                                       quad: Option[String] = None,
                                       lang: Option[String] = None,
                                       dataType: Option[String] = None): Inner =
    JsArray(Seq(mkVal(value, quad, lang, dataType)))

  /** Maps each field value to a value object annotated with its XSD type. */
  override def singleFieldValue(fv: FieldValue): Inner = fv match {
    case FString(value: String, lang, quad) => mkVal(value, quad, lang, Some(XSDDatatype.XSDstring.getURI))
    case FInt(value: Int, quad) => mkVal(value, quad, dataType = Some(XSDDatatype.XSDint.getURI))
    case FLong(value: Long, quad) => mkVal(value, quad, dataType = Some(XSDDatatype.XSDlong.getURI))
    case FBigInt(value: java.math.BigInteger, quad) =>
      mkVal(value, quad, dataType = Some(XSDDatatype.XSDinteger.getURI))
    case FFloat(value: Float, quad) => mkVal(value, quad, dataType = Some(XSDDatatype.XSDfloat.getURI))
    case FDouble(value: Double, quad) => mkVal(value, quad, dataType = Some(XSDDatatype.XSDdouble.getURI))
    case FBigDecimal(value: java.math.BigDecimal, quad) =>
      mkVal(value, quad, dataType = Some(XSDDatatype.XSDdecimal.getURI))
    case FBoolean(value: Boolean, quad) => mkVal(value, quad, dataType = Some(XSDDatatype.XSDboolean.getURI))
    case FDate(value: String, quad) => mkVal(value, quad, dataType = Some(XSDDatatype.XSDdateTime.getURI))
    case FReference(value: String, quad) =>
      // internal "cmwell://" references are emitted as absolute paths
      mkVal({ if (value.startsWith("cmwell://")) value.drop("cmwell:/".length) else value },
            quad,
            dataType = Some(XSDDatatype.XSDanyURI.getURI))
    case FExternal(value: String, dataTypeURI: String, quad) => mkVal(value, quad, None, Some(dataTypeURI))
    case FNull(_) => !!! //this is just a marker for IMP, should not index it anywhere...
    case _: FExtra[_] =>
      !!! // FExtra is just a marker for outputting special properties, should not index it anywhere...
  }

  /** System fields of the infoton, each suffixed with ".sys". */
  override def system(i: Infoton): JsObject =
    JsObject(
      Seq("@id.sys" -> mkValWrappedInArray(i.path), "type.sys" -> mkValWrappedInArray(i.kind)) ++ system(
        i,
        (s: String) => s"$s.sys",
        mkValWrappedInArray(_),
        mkValWrappedInArray(_),
        mkValWrappedInArray(_)
      )
    )

  /** Data fields; names starting with '$' are internal and skipped. */
  override def fields(i: Infoton): JsObject = i.fields match {
    case None => JsObject(Seq())
    case Some(xs) =>
      JsObject(
        xs.filter(_._1.head != '$')
          .flatMap(
            field => Seq(convertToCorrectPrefixedForm(field._1) -> JsArray(field._2.map(singleFieldValue).toSeq))
          )
          .toSeq
      )
  }
  override def makeFromTuples(tuples: Seq[(String, Inner)]): Inner = JsObject(tuples)
  override def makeFromValues(values: Seq[Inner]) = JsArray(values)

  /** File-content system fields, suffixed with ".content.sys". */
  override def fileContent(fileContent: FileContent) =
    JsObject(
      super.fileContent(fileContent,
                        (s: String) => s"$s.content.sys",
                        v => mkValWrappedInArray(v),
                        v => mkValWrappedInArray(v),
                        v => mkValWrappedInArray(v))
    )

  /** Merges system + data fields, plus kind-specific extras (children for
    * compound infotons, content for files, link info for links). */
  override def infoton(i: Infoton): Inner = {
    val iSystem = system(i)
    // only materialize data fields when at least one non-internal field exists
    val iFields = i.fields
      .collect {
        case fm if fm.exists(_._1.head != '$') => fields(i)
      }
      .getOrElse(empty)
    (i: @unchecked) match {
      case CompoundInfoton(_, _, _, _, _, children, offset, length, total, _, _) =>
        iSystem ++ iFields ++
          JsObject(
            Seq(
              "children.sys" -> infotons(children),
              "offset.sys" -> mkValWrappedInArray(offset),
              "length.sys" -> mkValWrappedInArray(length),
              "total.sys" -> mkValWrappedInArray(total)
            )
          )
      case ObjectInfoton(_, _, _, _, _, _, _) => iSystem ++ iFields
      case FileInfoton(_, _, _, _, _, content, _, _) =>
        iSystem ++ iFields ++ (content match {
          case Some(c) => fileContent(c)
          case None => JsObject(Seq())
        })
      case LinkInfoton(_, _, _, _, _, linkTo, linkType, _, _) =>
        iSystem ++ iFields ++ JsObject(
          Seq("linkTo" -> mkValWrappedInArray(linkTo), "linkType" -> mkValWrappedInArray(linkType))
        )
      case d: DeletedInfoton => iSystem ++ iFields
    }
  }

  // Replaces the trailing namespace hash of "name.hash" with its resolved
  // prefix; keys with no resolvable hash fall back to the ".nn" suffix.
  private def convertToCorrectPrefixedForm(fieldKey: String) = {
    lazy val splitted = fieldKey.split('.')
    lazy val opt = hashToUrlAndPrefix(splitted.last)
    if (fieldKey.contains(".") && opt.isDefined)
      s"${splitted.init.mkString(".")}.${opt.get._2}"
    else
      s"$fieldKey.nn"
  }
}
/** Compact (single-line) jsonl formatter, optionally wrapped as JSONP via
  * `callback`. */
class JsonlFormatter(hashToUrlAndPrefix: String => Option[(String, String)],
                     quadToAlias: String => Option[String],
                     override val callback: Option[String] = None)
    extends AbstractJsonlFormatter(hashToUrlAndPrefix, quadToAlias)
    with JsonP {
  override def mkString(value: Inner): String = Json.stringify(value)
}
/** Pretty-printed variant of [[JsonlFormatter]]; same structure, rendered
  * with `Json.prettyPrint`. */
class PrettyJsonlFormatter(hashToUrlAndPrefix: String => Option[(String, String)],
                           quadToAlias: String => Option[String],
                           override val callback: Option[String] = None)
    extends AbstractJsonlFormatter(hashToUrlAndPrefix, quadToAlias)
    with JsonP {
  override def mkString(value: Inner): String = Json.prettyPrint(value)
}
| thomsonreuters/CM-Well | server/cmwell-formats/src/main/scala/cmwell/formats/JsonFormatter.scala | Scala | apache-2.0 | 10,315 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.source.libsvm
import java.io.IOException
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileStatus, Path}
import org.apache.hadoop.mapreduce.{Job, TaskAttemptContext}
import org.apache.spark.TaskContext
import org.apache.spark.internal.Logging
import org.apache.spark.ml.feature.LabeledPoint
import org.apache.spark.ml.linalg.{Vectors, VectorUDT}
import org.apache.spark.mllib.util.MLUtils
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.encoders.RowEncoder
import org.apache.spark.sql.catalyst.expressions.AttributeReference
import org.apache.spark.sql.catalyst.expressions.codegen.GenerateUnsafeProjection
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.sources._
import org.apache.spark.sql.types._
import org.apache.spark.util.SerializableConfiguration
/** Writes rows of (label: Double, features: Vector) in LibSVM text format:
  * one line per row, `label index:value index:value ...` with 1-based,
  * ascending feature indices (zero entries are omitted for sparse vectors).
  */
private[libsvm] class LibSVMOutputWriter(
    path: String,
    dataSchema: StructType,
    context: TaskAttemptContext)
  extends OutputWriter {

  private val writer = CodecStreams.createOutputStreamWriter(context, new Path(path))

  // This `asInstanceOf` is safe because it's guaranteed by `LibSVMFileFormat.verifySchema`
  private val udt = dataSchema(1).dataType.asInstanceOf[VectorUDT]

  override def write(row: InternalRow): Unit = {
    val label = row.getDouble(0)
    val vector = udt.deserialize(row.getStruct(1, udt.sqlType.length))
    writer.write(label.toString)
    vector.foreachActive { case (i, v) =>
      writer.write(s" ${i + 1}:$v") // LibSVM indices are 1-based
    }

    writer.write('\n')
  }

  override def close(): Unit = {
    writer.close()
  }
}
/** @see [[LibSVMDataSource]] for public documentation. */
// If this is moved or renamed, please update DataSource's backwardCompatibilityMap.
private[libsvm] class LibSVMFileFormat
  extends TextBasedFileFormat
  with DataSourceRegister
  with Logging {

  override def shortName(): String = "libsvm"

  override def toString: String = "LibSVM"

  // A legal libsvm schema is exactly (label: Double, features: Vector). On
  // the read path (forWriting = false) the features column must additionally
  // carry a positive numFeatures entry in its metadata.
  private def verifySchema(dataSchema: StructType, forWriting: Boolean): Unit = {
    if (
      dataSchema.size != 2 ||
        !dataSchema(0).dataType.sameType(DataTypes.DoubleType) ||
        !dataSchema(1).dataType.sameType(new VectorUDT()) ||
        !(forWriting || dataSchema(1).metadata.getLong(LibSVMOptions.NUM_FEATURES).toInt > 0)
    ) {
      throw new IOException(s"Illegal schema for libsvm data, schema=$dataSchema")
    }
  }

  override def inferSchema(
      sparkSession: SparkSession,
      options: Map[String, String],
      files: Seq[FileStatus]): Option[StructType] = {
    val libSVMOptions = new LibSVMOptions(options)
    // Prefer the user-supplied 'numFeatures'; otherwise scan the input once
    // to determine it (expensive, hence the warning).
    val numFeatures: Int = libSVMOptions.numFeatures.getOrElse {
      require(files.nonEmpty, "No input path specified for libsvm data")
      logWarning(
        "'numFeatures' option not specified, determining the number of features by going " +
          "though the input. If you know the number in advance, please specify it via " +
          "'numFeatures' option to avoid the extra scan.")

      val paths = files.map(_.getPath.toUri.toString)
      val parsed = MLUtils.parseLibSVMFile(sparkSession, paths)
      MLUtils.computeNumFeatures(parsed)
    }

    // Record numFeatures in the features column's metadata so the read path
    // can size the sparse vectors without re-scanning the data.
    val featuresMetadata = new MetadataBuilder()
      .putLong(LibSVMOptions.NUM_FEATURES, numFeatures)
      .build()

    Some(
      StructType(
        StructField("label", DoubleType, nullable = false) ::
        StructField("features", new VectorUDT(), nullable = false, featuresMetadata) :: Nil))
  }

  override def prepareWrite(
      sparkSession: SparkSession,
      job: Job,
      options: Map[String, String],
      dataSchema: StructType): OutputWriterFactory = {
    verifySchema(dataSchema, true)
    new OutputWriterFactory {
      override def newInstance(
          path: String,
          dataSchema: StructType,
          context: TaskAttemptContext): OutputWriter = {
        new LibSVMOutputWriter(path, dataSchema, context)
      }

      override def getFileExtension(context: TaskAttemptContext): String = {
        ".libsvm" + CodecStreams.getCompressionExtension(context)
      }
    }
  }

  override def buildReader(
      sparkSession: SparkSession,
      dataSchema: StructType,
      partitionSchema: StructType,
      requiredSchema: StructType,
      filters: Seq[Filter],
      options: Map[String, String],
      hadoopConf: Configuration): (PartitionedFile) => Iterator[InternalRow] = {
    verifySchema(dataSchema, false)
    val numFeatures = dataSchema("features").metadata.getLong(LibSVMOptions.NUM_FEATURES).toInt
    assert(numFeatures > 0)

    val libSVMOptions = new LibSVMOptions(options)
    val isSparse = libSVMOptions.isSparse

    val broadcastedHadoopConf =
      sparkSession.sparkContext.broadcast(new SerializableConfiguration(hadoopConf))

    (file: PartitionedFile) => {
      val linesReader = new HadoopFileLinesReader(file, broadcastedHadoopConf.value.value)
      // Close the reader when the task completes (TaskContext is null when
      // running on the driver, hence the Option wrapper).
      Option(TaskContext.get()).foreach(_.addTaskCompletionListener(_ => linesReader.close()))

      // Skip blank lines and '#' comment lines, then parse each record into
      // a labeled sparse vector of the advertised dimension.
      val points = linesReader
          .map(_.toString.trim)
          .filterNot(line => line.isEmpty || line.startsWith("#"))
          .map { line =>
            val (label, indices, values) = MLUtils.parseLibSVMRecord(line)
            LabeledPoint(label, Vectors.sparse(numFeatures, indices, values))
          }

      val converter = RowEncoder(dataSchema)
      val fullOutput = dataSchema.map { f =>
        AttributeReference(f.name, f.dataType, f.nullable, f.metadata)()
      }
      val requiredOutput = fullOutput.filter { a =>
        requiredSchema.fieldNames.contains(a.name)
      }

      // Projection down to only the columns the query actually requires.
      val requiredColumns = GenerateUnsafeProjection.generate(requiredOutput, fullOutput)

      points.map { pt =>
        val features = if (isSparse) pt.features.toSparse else pt.features.toDense
        requiredColumns(converter.toRow(Row(pt.label, features)))
      }
    }
  }
}
| bravo-zhang/spark | mllib/src/main/scala/org/apache/spark/ml/source/libsvm/LibSVMRelation.scala | Scala | apache-2.0 | 6,752 |
/**
* Copyright (C) 2016 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.fr.persistence.relational.search.part
import org.orbeon.oxf.fr.persistence.relational.Provider
import org.orbeon.oxf.fr.persistence.relational.Statement._
import org.orbeon.oxf.fr.persistence.relational.search.adt.{Column, Request}
object columnFilterPart {

  /**
   * Builds the SQL fragment (and matching JDBC setters) restricting a search
   * to documents whose indexed text values match each column's substring
   * filter. Each filtered column is joined through its own `tf<i>` table.
   * Returns `NilPart` when no column carries a filter, so nothing is added
   * to the statement.
   *
   * NOTE(review): the filter value is lowercased and wrapped in '%...%' —
   * presumably `Provider.textContains` compares case-insensitively; confirm
   * per provider.
   */
  def apply(request: Request) =
    if (! request.columns.exists(_.filterWith.nonEmpty))
      NilPart
    else
      StatementPart(
        sql =
          request.columns
            // Just consider the columns for which we have a filter
            .filter(_.filterWith.nonEmpty)
            // Add index, used to refer the appropriate tf table
            .zipWithIndex
            .map{ case (_, i) ⇒
                s"""AND tf$i.data_id = c.data_id
                   |AND tf$i.control = ?
                   |AND ${Provider.textContains(request.provider, s"tf$i.val")}
                   |""".stripMargin }
            .mkString(" "),
        setters =
          request.columns
            // Just consider the columns for which we have a filter
            .collect { case Column(path, Some(filter)) ⇒
              // two parameters per column: the control path, then the pattern
              List[Setter](
                _.setString(_, path),
                _.setString(_, s"%${filter.toLowerCase}%")
              )}
            .flatten
      )
}
| brunobuzzi/orbeon-forms | form-runner/jvm/src/main/scala/org/orbeon/oxf/fr/persistence/relational/search/part/columnFilterPart.scala | Scala | lgpl-2.1 | 1,972 |
package AccurateML.nonLinearRegression
import breeze.linalg.{DenseVector => BDV}
/**
* @author osboxes
*/
/**
 * Single-hidden-layer feed-forward neural network (sigmoid hidden units,
 * linear output) used as a nonlinear regression model.
 *
 * The flat weight vector packs (n+2) consecutive entries per hidden node:
 * n input weights, one bias, and one output-layer weight — so the total
 * parameter dimension is (n+2)*nodes.
 *
 * @param inputDim    number of input features (stored in `n`)
 * @param hiddenUnits number of hidden sigmoid units (stored in `nodes`)
 */
class NeuralNetworkModel(inputDim : Int, hiddenUnits:Int) extends Serializable with NonlinearModel{
  var nodes: Int = hiddenUnits
  var n: Int = inputDim
  var dim: Int = (n + 2) * nodes

  /** Model output f(w, x) = sum_i w_out(i) * sigmoid(w_i . x + b_i). */
  def eval(w: BDV[Double], x: BDV[Double]): Double = {
    assert(x.size == n)
    assert(w.size == dim)
    var f = 0.0
    for (i <- 0 until nodes) {
      val base = i * (n + 2) // start of node i's weight slice
      var arg = 0.0
      for (j <- 0 until n) {
        arg += x(j) * w(base + j)
      }
      arg += w(base + n) // hidden-unit bias
      f += w(base + n + 1) / (1.0 + Math.exp(-arg)) // output weight * sigmoid
    }
    f
  }

  /** Analytic gradient of the model output w.r.t. the weights at input `x`. */
  def grad(w: BDV[Double], x: BDV[Double]): BDV[Double] = {
    assert(x.size == n)
    assert(w.size == dim)
    val gper: BDV[Double] = BDV.zeros(dim) // (n+2)*nodes
    for (i <- 0 until nodes) {
      val base = i * (n + 2)
      var arg = 0.0
      for (j <- 0 until n) {
        arg += x(j) * w(base + j)
      }
      arg += w(base + n)
      val sig = 1.0 / (1.0 + Math.exp(-arg))
      gper(base + n + 1) = sig                           // d f / d w_out(i)
      gper(base + n) = w(base + n + 1) * sig * (1 - sig) // d f / d bias(i)
      for (j <- 0 until n) {
        gper(base + j) = x(j) * w(base + n + 1) * sig * (1 - sig) // d f / d w_ij
      }
    }
    gper
  }

  /** Total number of parameters, (n+2)*nodes. */
  def getDim(): Int = dim

  /** Current number of hidden units. */
  def getNodes(): Int = nodes

  /** Changes the hidden-unit count and recomputes the parameter dimension. */
  def setNodes(n : Int) = {
    nodes = n
    // BUG FIX: `dim` depends on the *input* dimension (this.n). The previous
    // code used the shadowing parameter `n`, which produced (nodes+2)*nodes
    // instead of (inputDim+2)*nodes.
    dim = (this.n + 2) * nodes
  }

  /** Forward-difference numerical gradient; useful to sanity-check `grad`. */
  def gradnumer(w: BDV[Double], x: BDV[Double]): BDV[Double] = {
    val h = 0.000001
    val g: BDV[Double] = BDV.zeros(dim)
    val f0 = eval(w, x)
    for (i <- 0 until dim) {
      val xtemp = w.copy
      xtemp(i) += h
      g(i) = (eval(xtemp, x) - f0) / h
    }
    g
  }
}
package org.http4s.client
package blaze
import java.nio.channels.AsynchronousChannelGroup
import javax.net.ssl.SSLContext
import org.http4s.headers.`User-Agent`
import scala.concurrent.ExecutionContext
import scala.concurrent.duration.Duration
/** Config object for the blaze clients
*
* @param idleTimeout duration that a connection can wait without traffic before timeout
* @param requestTimeout maximum duration for a request to complete before a timeout
* @param userAgent optional custom user agent header
* @param sslContext optional custom `SSLContext` to use to replace
* the default, `SSLContext.getDefault`.
* @param checkEndpointIdentification require endpoint identification
* for secure requests according to RFC 2818, Section 3.1. If the
* certificate presented does not match the hostname of the request,
* the request fails with a CertificateException. This setting does
* not affect checking the validity of the cert via the
* `sslContext`'s trust managers.
* @param maxResponseLineSize maximum length of the request line
* @param maxHeaderLength maximum length of headers
* @param maxChunkSize maximum size of chunked content chunks
* @param lenientParser a lenient parser will accept illegal chars but replaces them with � (0xFFFD)
* @param bufferSize internal buffer size of the blaze client
* @param executionContext custom executionContext to run async computations.
* @param group custom `AsynchronousChannelGroup` to use other than the system default
*/
final case class BlazeClientConfig(// HTTP properties
                                   idleTimeout: Duration,
                                   requestTimeout: Duration,
                                   userAgent: Option[`User-Agent`],
                                   // security options
                                   sslContext: Option[SSLContext],
                                   @deprecatedName('endpointAuthentication) checkEndpointIdentification: Boolean,
                                   // parser options
                                   maxResponseLineSize: Int,
                                   maxHeaderLength: Int,
                                   maxChunkSize: Int,
                                   lenientParser: Boolean,
                                   // pipeline management
                                   bufferSize: Int,
                                   executionContext: ExecutionContext,
                                   group: Option[AsynchronousChannelGroup]
                                  ) {
  /** Deprecated alias for [[checkEndpointIdentification]], kept for source compatibility. */
  @deprecated("Parameter has been renamed to `checkEndpointIdentification`", "0.16")
  def endpointAuthentication: Boolean = checkEndpointIdentification
}
object BlazeClientConfig {
  /** Default configuration of a blaze client. */
  val defaultConfig =
    BlazeClientConfig(
      idleTimeout = bits.DefaultTimeout,
      requestTimeout = Duration.Inf, // no request timeout by default
      userAgent = bits.DefaultUserAgent,
      sslContext = None, // per the class doc, None falls back to SSLContext.getDefault
      checkEndpointIdentification = true,
      maxResponseLineSize = 4*1024, // 4 KiB
      maxHeaderLength = 40*1024, // 40 KiB
      maxChunkSize = Integer.MAX_VALUE, // effectively unlimited
      lenientParser = false,
      bufferSize = bits.DefaultBufferSize,
      executionContext = ExecutionContext.global,
      group = None
    )

  /**
   * Creates an SSLContext that trusts all certificates and disables
   * endpoint identification. This is convenient in some development
   * environments for testing with untrusted certificates, but is
   * not recommended for production use.
   */
  val insecure: BlazeClientConfig =
    defaultConfig.copy(sslContext = Some(bits.TrustingSslContext), checkEndpointIdentification = false)
}
| ZizhengTai/http4s | blaze-client/src/main/scala/org/http4s/client/blaze/BlazeClientConfig.scala | Scala | apache-2.0 | 3,674 |
package com.github.chaabaj.openid.oauth
import spray.json.RootJsonFormat
/** Outcome of an OAuth2 authorization request: either a success carrying a code, or an error. */
sealed trait AuthorizationResponse

/**
 * Successful authorization response.
 *
 * @param code  the authorization code the client exchanges for an access token
 * @param state opaque client-supplied state echoed back, if one was provided
 */
case class AuthorizationSuccess(
  code: String,
  state: Option[String]
) extends AuthorizationResponse
object AuthorizationSuccess {
  // `jsonFormat2` and the implicit format for Option[String] are members of
  // DefaultJsonProtocol; the file only imports RootJsonFormat, so bring them
  // into scope locally.
  import spray.json.DefaultJsonProtocol._

  implicit val jsonFormat: RootJsonFormat[AuthorizationSuccess] = jsonFormat2(AuthorizationSuccess.apply)
}
/**
 * Failed authorization response, mirroring the standard OAuth2 error fields.
 *
 * @param error            machine-readable error code
 * @param errorDescription optional human-readable explanation
 * @param errorUri         optional URI with more information about the error
 * @param state            opaque client-supplied state echoed back, if any
 */
case class AuthorizationError(
  error: String,
  errorDescription: Option[String],
  errorUri: Option[String],
  state: Option[String]
) extends AuthorizationResponse
object AuthorizationError {
  // `jsonFormat4` and the implicit formats for Option[String] are members of
  // DefaultJsonProtocol; the file only imports RootJsonFormat, so bring them
  // into scope locally.
  import spray.json.DefaultJsonProtocol._

  implicit val jsonFormat: RootJsonFormat[AuthorizationError] = jsonFormat4(AuthorizationError.apply)
}
| chaabaj/openid-scala | src/main/scala/com/github/chaabaj/openid/oauth/AuthorizationResponse.scala | Scala | mit | 658 |
/*
* Copyright 2013 Maurício Linhares
*
* Maurício Linhares licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.github.mauricio.async.db.mysql.binary.encoder
import io.netty.buffer.ByteBuf
import java.util.Calendar
import org.joda.time.{LocalDateTime, DateTime}
import com.github.mauricio.async.db.mysql.column.ColumnTypes
/**
 * Binary-protocol encoder for `java.util.Calendar` values. A calendar is
 * reduced to its epoch-millisecond instant and handed off to
 * [[LocalDateTimeEncoder]] for the actual wire encoding.
 */
object CalendarEncoder extends BinaryEncoder {

  /** Writes `value` (expected to be a `Calendar`) into `buffer` as a timestamp. */
  def encode(value: Any, buffer: ByteBuf): Unit =
    LocalDateTimeEncoder.encode(
      new LocalDateTime(value.asInstanceOf[Calendar].getTimeInMillis),
      buffer
    )

  /** MySQL column type this encoder produces. */
  def encodesTo: Int = ColumnTypes.FIELD_TYPE_TIMESTAMP
}
| dripower/postgresql-async | mysql-async/src/main/scala/com/github/mauricio/async/db/mysql/binary/encoder/CalendarEncoder.scala | Scala | apache-2.0 | 1,177 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.bigtable.spark
import com.typesafe.scalalogging.LazyLogging
import org.apache.hadoop.conf.{Configurable, Configuration}
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Result
import org.apache.hadoop.io.Text
import org.apache.hadoop.mapreduce._
import org.locationtech.geomesa.hbase.jobs.GeoMesaHBaseInputFormat.GeoMesaHBaseRecordReader
import org.locationtech.geomesa.jobs.GeoMesaConfigurator
import org.opengis.feature.simple.SimpleFeature
/**
 * Hadoop input format that delegates split calculation and row scanning to
 * [[BigtableInputFormat]] and converts the resulting HBase `Result`s into
 * GeoMesa `SimpleFeature`s.
 */
class GeoMesaBigtableInputFormat extends InputFormat[Text, SimpleFeature] with Configurable with LazyLogging {

  // Underlying Bigtable input format that does the actual split/scan work.
  private val delegate = new BigtableInputFormat

  private var conf: Configuration = _

  override def getSplits(context: JobContext): java.util.List[InputSplit] = {
    val splits = delegate.getSplits(context)
    logger.debug(s"Got ${splits.size()} splits")
    splits
  }

  override def createRecordReader(
      split: InputSplit,
      context: TaskAttemptContext): RecordReader[Text, SimpleFeature] = {
    // toFeatures maps raw scan Results to SimpleFeatures; reducer optionally
    // post-processes the feature stream (both configured by GeoMesaConfigurator).
    val toFeatures = GeoMesaConfigurator.getResultsToFeatures[Result](context.getConfiguration)
    val reducer = GeoMesaConfigurator.getReducer(context.getConfiguration)
    new GeoMesaHBaseRecordReader(toFeatures, reducer, delegate.createRecordReader(split, context))
  }

  override def setConf(conf: Configuration): Unit = {
    this.conf = conf
    delegate.setConf(conf)
    // see TableMapReduceUtil.java
    HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf))
  }

  override def getConf: Configuration = conf
}
| elahrvivaz/geomesa | geomesa-bigtable/geomesa-bigtable-spark/src/main/scala/org/locationtech/geomesa/bigtable/spark/GeoMesaBigtableInputFormat.scala | Scala | apache-2.0 | 2,037 |
package mimir.models;
import scala.util.Random
import mimir.algebra._
import mimir.util._
/**
 * A dumb, default Meta-Model to stand in until we get something better.
 *
 * This meta model always ignores VG arguments and picks the first model
 * in the list, unless user feedback has selected a different one.
 */
@SerialVersionUID(1001L)
class DefaultMetaModel(name: ID, context: String, models: Seq[ID])
  extends Model(name)
  with DataIndependentFeedback
  with NoArgModel
  with FiniteDiscreteDomain
{
  /** The meta-model's choice is represented as the chosen model's name. */
  def varType(idx: Int, args: Seq[Type]): Type = TString()

  /** Best guess: user feedback if present, otherwise the first model in the list. */
  def bestGuess(idx: Int, args: Seq[PrimitiveValue], hints: Seq[PrimitiveValue]): PrimitiveValue =
    choices(idx).getOrElse( StringPrimitive(models.head.id) )

  /** Samples uniformly at random from the candidate models. */
  def sample(idx: Int, randomness: Random, args: Seq[PrimitiveValue], hints: Seq[PrimitiveValue]): PrimitiveValue =
    StringPrimitive(RandUtils.pickFromList(randomness, models).id)

  /** Human-readable explanation of how the current choice was made. */
  def reason(idx: Int, args: Seq[PrimitiveValue], hints: Seq[PrimitiveValue]): String =
  {
    choices(idx) match {
      case None => {
        val bestChoice = models.head
        val modelString = models.mkString(", ")
        s"I defaulted to guessing with '$bestChoice' (out of $modelString) for $context"
      }
      case Some(choiceStr) =>
        s"${getReasonWho(idx,args)} told me to use $choiceStr for $context"
    }
  }

  /**
   * Accepts user feedback only if it names one of the candidate models.
   *
   * Fix: the previous implementation was `models.contains(v.asString)`, which
   * compares each `ID` against a `String` and therefore never matches (the rest
   * of this class accesses the string via `.id`); compare the ids instead.
   */
  def validateChoice(idx: Int, v: PrimitiveValue) = models.exists(_.id == v.asString)

  /** All candidate models, each with a flat (zero) weight. */
  def getDomain(idx: Int, args: Seq[PrimitiveValue], hints: Seq[PrimitiveValue]): Seq[(PrimitiveValue,Double)] =
    models.map( x => (StringPrimitive(x.id), 0.0) )

  /** Uniform confidence across the candidate models. */
  def confidence (idx: Int, args: Seq[PrimitiveValue], hints:Seq[PrimitiveValue]) : Double = 1.0/models.size
}
package de.choffmeister.microserviceutils.auth
import java.time.Instant
import akka.Done
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.{Directive1, Route}
import de.choffmeister.microserviceutils.auth.grants.{AuthorizationCodeGrant, PasswordGrant, RefreshTokenGrant}
import de.choffmeister.microserviceutils.auth.models._
import de.choffmeister.microserviceutils.auth.utils.SecretGenerator
import play.api.libs.json.{JsArray, JsObject, JsString}
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.implicitConversions
/**
 * In-memory OAuth2 provider used as a test fixture: clients, resource owners,
 * access/refresh tokens and authorization codes are backed by (mutable) lists,
 * and the password, refresh-token and authorization-code grants are wired up.
 */
trait TestAuthProvider
    extends AuthProvider[TestResourceOwner, TestClient, AccessToken]
    with AuthProviderRefreshTokenFlow[TestResourceOwner, TestClient, AccessToken, RefreshToken]
    with AuthProviderAuthorizationCodeFlow[TestResourceOwner, TestClient, AccessToken, AuthorizationCode] {

  // Exposes the two standard OAuth2 endpoints under the /oauth prefix.
  def routes: Route = {
    pathPrefix("oauth")(
      concat((path("access_token") & post)(accessTokenRoute), (path("authorize") & get)(authorizeRoute))
    )
  }

  // Test stub: every request is treated as already authorized as "user1".
  override def authorizeResourceOwner: Directive1[TestResourceOwner] =
    provide(TestResourceOwner("user1", Set("admin", "read", "write"), team = Some("team1")))

  // Extends the base payload with the owner's team (when present) and the granted scopes.
  override def generateAccessTokenPayload(
      client: TestClient,
      resourceOwner: TestResourceOwner,
      scopes: Set[String]
  ): JsObject = {
    super.generateAccessTokenPayload(client, resourceOwner, scopes) ++ JsObject(
      Seq("team" -> resourceOwner.team.map(JsString.apply), "scopes" -> Some(JsArray(scopes.map(JsString.apply).toSeq)))
        .collect { case (k, Some(v)) =>
          k -> v
        }
    )
  }

  val refreshTokenGrant =
    new RefreshTokenGrant[TestResourceOwner, TestClient, AccessToken, RefreshToken](this)

  val authorizationCodeGrant =
    new AuthorizationCodeGrant[TestResourceOwner, TestClient, AccessToken, AuthorizationCode](this)

  // Password grant that validates credentials against the in-memory `resourceOwners` list.
  private val passwordGrant =
    new PasswordGrant[TestResourceOwner, TestClient, AccessToken](this) {
      override def verifyCredentials(
          username: String,
          password: String,
          parameters: Map[String, String]
      ): Future[AuthResult[Option[String]]] =
        Future.successful(Right(resourceOwners.find(p => p._1.id == username && p._2 == password).map(_._1.id)))
    }

  override val grants = List(refreshTokenGrant, authorizationCodeGrant, passwordGrant)

  // Fixed set of test clients covering the interesting configurations:
  // public vs. confidential, restricted scopes/grant types, and unusual redirect URIs.
  private lazy val clients = List(
    TestClient(
      id = "public",
      secret = None,
      redirectUris = "http://public/callback" :: Nil,
      grantTypes = grants.map(_.id).toSet,
      scopes = Set("admin", "read", "write"),
      refreshTokenLifetime = Some(30.days)
    ),
    TestClient(
      id = "private",
      secret = Some("private-secret"),
      redirectUris = "http://private/callback" :: Nil,
      grantTypes = grants.map(_.id).toSet,
      scopes = Set("admin", "read", "write"),
      refreshTokenLifetime = Some(30.days)
    ),
    TestClient(
      id = "limited-scopes",
      secret = None,
      redirectUris = "http://single/callback" :: Nil,
      grantTypes = grants.map(_.id).toSet,
      scopes = Set("read"),
      refreshTokenLifetime = Some(30.days)
    ),
    TestClient(
      id = "limited-grant-types",
      secret = None,
      redirectUris = "http://single/callback" :: Nil,
      grantTypes = Set("refresh_token", "authorization_code"),
      scopes = Set("admin", "read", "write"),
      refreshTokenLifetime = Some(30.days)
    ),
    TestClient(
      id = "queried-redirect-uri",
      secret = None,
      redirectUris = "http://queried/callback?foo=bar&foo=bar2" :: Nil,
      grantTypes = grants.map(_.id).toSet,
      scopes = Set("admin", "read", "write"),
      refreshTokenLifetime = Some(30.days)
    ),
    TestClient(
      id = "malformed-redirect-uri",
      secret = None,
      redirectUris = "com.company:/callback" :: Nil,
      grantTypes = grants.map(_.id).toSet,
      scopes = Set("admin", "read", "write"),
      refreshTokenLifetime = Some(30.days)
    )
  )

  override def findClient(id: String): Future[Option[TestClient]] = Future.successful(clients.find(_.id == id))

  override implicit def toClient(value: TestClient): Client =
    Client(id = value.id, secret = value.secret, grantTypes = value.grantTypes, scopes = value.scopes)

  // (owner, plaintext password) pairs; mutable so tests can tweak them.
  var resourceOwners = List(
    (TestResourceOwner(id = "user1", scopes = Set("admin", "read", "write"), team = Some("team1")), "pass1"),
    (TestResourceOwner(id = "user2", scopes = Set("read", "write"), team = Some("team1")), "pass2"),
    (TestResourceOwner(id = "user3", scopes = Set("read"), team = Some("team2")), "pass3"),
    (TestResourceOwner(id = "user4", scopes = Set("read"), team = Some("team2"), disabled = true), "pass4"),
    (TestResourceOwner(id = "user-special:%=", scopes = Set.empty, team = None, disabled = true), "pass-special")
  )

  override def findResourceOwner(id: String): Future[Option[TestResourceOwner]] =
    Future.successful(resourceOwners.find(_._1.id == id).map(_._1))

  override implicit def toResourceOwner(value: TestResourceOwner): ResourceOwner =
    ResourceOwner(id = value.id, scopes = value.scopes, disabled = value.disabled)

  // Issued access tokens, in creation order; mutable so tests can inspect them.
  var accessTokens = List.empty[AccessToken]

  // Mints a new access token with a random 16-byte hex id and records it.
  override def createAccessToken(
      scopes: Set[String],
      clientId: String,
      resourceOwnerId: String,
      payload: JsObject,
      expiresAt: Instant
  ): Future[AccessToken] = {
    val createdAccessToken = AccessToken(
      accessTokenId = SecretGenerator.generateHex(16),
      scopes = scopes,
      clientId = clientId,
      resourceOwnerId = resourceOwnerId,
      payload = payload,
      expiresAt = expiresAt
    )
    accessTokens = accessTokens :+ createdAccessToken
    Future.successful(createdAccessToken)
  }

  override implicit def toAccessToken(value: AccessToken): AccessToken = value

  // Issued refresh tokens; mutable so tests can seed or inspect them.
  var refreshTokens = List.empty[RefreshToken]

  override def findRefreshToken(refreshToken: String): Future[Option[RefreshToken]] =
    Future.successful(refreshTokens.find(_.refreshToken == refreshToken))

  // Upserts a refresh token: a missing token string gets a random 16-byte hex value,
  // and any existing entry with the same token string is replaced.
  override def createOrUpdateRefreshToken(
      refreshToken: Option[String],
      scopes: Set[String],
      clientId: String,
      resourceOwnerId: String,
      expiresAt: Option[Instant]
  ): Future[RefreshToken] = {
    val createdOrUpdatedRefreshToken = RefreshToken(
      refreshToken = refreshToken.getOrElse(SecretGenerator.generateHex(16)),
      scopes = scopes,
      clientId = clientId,
      resourceOwnerId = resourceOwnerId,
      expiresAt = expiresAt,
      revoked = false
    )
    refreshTokens = refreshTokens.filterNot(
      _.refreshToken == createdOrUpdatedRefreshToken.refreshToken
    ) :+ createdOrUpdatedRefreshToken
    Future.successful(createdOrUpdatedRefreshToken)
  }

  // Test helper: insert or replace a refresh token directly, bypassing the provider API.
  def createOrUpdateRefreshTokenTest(refreshToken: RefreshToken): RefreshToken = {
    refreshTokens = refreshTokens.filterNot(_.refreshToken == refreshToken.refreshToken) :+ refreshToken
    refreshToken
  }

  override implicit def toRefreshToken(value: RefreshToken): RefreshToken = value

  override implicit def toClientRefreshTokenLifetime(value: TestClient): ClientWithRefreshTokenLifetime =
    ClientWithRefreshTokenLifetime(value.refreshTokenLifetime)

  // Issued authorization codes; mutable so tests can seed or inspect them.
  var authorizationCodes = List.empty[AuthorizationCode]

  override def findAuthorizationCode(code: String): Future[Option[AuthorizationCode]] =
    Future.successful(authorizationCodes.find(_.code == code))

  // Mints a new (unused) authorization code with a random 32-byte hex value and records it.
  override def createAuthorizationCode(
      state: Option[String],
      scopes: Set[String],
      resourceOwnerId: String,
      clientId: String,
      challenge: Option[AuthorizationCodeChallenge],
      redirectUri: String,
      expiresAt: Instant
  ): Future[AuthorizationCode] = {
    val createdAuthorizationCode = AuthorizationCode(
      code = SecretGenerator.generateHex(32),
      state = state,
      scopes = scopes,
      resourceOwnerId = resourceOwnerId,
      clientId = clientId,
      challenge = challenge,
      redirectUri = redirectUri,
      expiresAt = expiresAt,
      used = false
    )
    authorizationCodes = authorizationCodes :+ createdAuthorizationCode
    Future.successful(createdAuthorizationCode)
  }

  // Marks the code as used so that it cannot be exchanged a second time.
  override def exchangeAuthorizationCode(code: String): Future[Done] = {
    authorizationCodes = authorizationCodes.map {
      case c if c.code == code => c.copy(used = true)
      case c => c
    }
    Future.successful(Done)
  }

  override implicit def toAuthorizationCode(value: AuthorizationCode): AuthorizationCode = value

  override implicit def toClientWithRedirectUris(value: TestClient): ClientWithRedirectUris =
    ClientWithRedirectUris(value.redirectUris)
}
| choffmeister/microservice-utils | microservice-utils-auth/src/test/scala/de/choffmeister/microserviceutils/auth/TestAuthProvider.scala | Scala | mit | 8,617 |
/*
* ecalogic: a tool for performing energy consumption analysis.
*
* Copyright (c) 2013, J. Neutelings, D. Peelen, M. Schoolderman
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice, this
* list of conditions and the following disclaimer in the documentation and/or
* other materials provided with the distribution.
*
* Neither the name of the Radboud University Nijmegen nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package nl.ru.cs.ecalogic
package config
import util.{Positional, Position}
import scala.collection.mutable
/*
Stub.
TODO: adds some code that reads cmdline flags here.
*/
/**
 * Mutable, global command-line option state for the tool. `apply` parses flags
 * out of an argument array (mutating the fields below) and returns the
 * remaining non-flag arguments.
 */
object Options {
  /* Whether to analyse a C-like language instead, more implications in the future */
  var clike = false
  /* The "main" function to analyze; should change to 'main' in the future */
  var entryPoint = "program"
  /* Should the output of our program be to the point or nice to read? */
  var terse = false
  /* Override re-aliassing giving on the commandline */
  var aliasOverrides = Seq.empty[String]
  /* NOTE: there may be some interplay between these options; also, some
     options may be unnecessary if you enable other options. */
  object Model {
    /* Should a delta-function always update the energy-aware
       state-information even if the component state did not change?
       Technical report: false */
    var alwaysUpdate = false
    /* Should, on a energy state-update, the component-update also take
       into account the time the function itself takes? I.e. should the
       timestamp be set to the most recent time (true) or the last
       state change (false)
       Technical report: false */
    var alwaysForwardTime = false
  }
  object Analysis {
    /* Should all component states be forwarded just before a decision
       in the control flow? (if/while statement). Setting this to false
       results in over-estimations.
       Technical report: false */
    var beforeSync = false
    /* In the original document, at the exit of a while-loop, all timestamps
       of components get reset to the time before entering the while loop,
       while the global timestamp gets set to a time *after*. This is consistent
       with the if-statement (similar problem), but causes a factor two over-
       estimation in even simple cases.
       Setting this to true causes much tighter bounds on if and while's, but
       the question is, is it correct?
       Technical report: false */
    var afterSync = false
    /* How long should we attempt to find fixpoint? Note that 10000 is a high setting */
    var fixPatience = 10000
    /* Should we use the while-rule mentioned in the tech-report, or the simplified one
       mentioned in the paper?
       Technical report: true */
    var techReport = false
  }
  /* Call the Options object as a function; this returns the actual arguments */
  def apply(args: Array[String]): Array[String] = {
    import Analysis._
    import Model._
    // Flags that consume a following value ("-e", "-P", "-I") enqueue a handler
    // here; the next bare argument is fed to the head of the queue.
    val argHandler: mutable.Queue[String=>Unit] = mutable.Queue.empty
    val newArgs = Array.newBuilder[String]
    args.foreach {
      case "-c" | "--clike"
           => clike = true
              entryPoint = "main"
              beforeSync = true
              afterSync = true
      case "-t" | "--terse"
           => terse = true
      case "-e" | "--entry"
           => argHandler += (s => entryPoint = s)
      case "-tr" | "--techReport"
           => techReport = true
      // NOTE(review): this `try` has no catch clause, so a malformed number
      // still propagates NumberFormatException — confirm whether that is intended.
      case "-P" | "--fixPatience"
           => argHandler += (s => try fixPatience = s.toInt)
      case "-I" | "--import"
           => argHandler += (s => aliasOverrides = s+:aliasOverrides)
      case "-s0" | "--beforeSync"
           => beforeSync = true
      case "-s1" | "--afterSync"
           => afterSync = true
      case "-s" | "--sync"
           => beforeSync = true; afterSync = true
      case "-u0" | "--alwaysUpdate"
           => alwaysUpdate = true
      case "-u1" | "--alwaysForwardTime"
           => alwaysForwardTime = true
      case "-u" | "--update"
           => alwaysUpdate = true; alwaysForwardTime = true
      case "-h" | "-?" | "--help"
           => friendlyHelpMsg(); return Array.empty
      case s if s.startsWith("-")
           => throw new ECAException(s"unknown flag: $s")
      case s if argHandler.nonEmpty
           => argHandler.dequeue()(s)
      case s
           => newArgs += s
    }
    newArgs.result()
  }
  /* Reset values to their default values */
  // Note: `clike` itself is not reset here; `entryPoint` is re-derived from
  // whatever `clike` currently is.
  def reset {
    if (!clike) entryPoint = "program"
    else entryPoint = "main"
    terse = false
    aliasOverrides = Seq.empty[String]
    Model.alwaysUpdate = false
    Model.alwaysForwardTime = false
    Analysis.beforeSync = false
    Analysis.afterSync = false
    Analysis.fixPatience = 10000
    Analysis.techReport = false
  }
  /* Prints usage information to stdout. */
  def friendlyHelpMsg() {
    println("""usage: ecalogic.jar [OPTIONS] file1.eca [[Alias=]file2.ecm ...]
Functionality options:
 -c   --clike                Analyses on a C-like syntax instead
 -t   --terse                Give only brief output of analysis
 -I   --import [W=]<uri>     Load a component from the Java classpath as W
 -h   --help                 This friendly help message
 -P   --fixPatience <N>      Num iters for finding fixpoints (def=10000)
 -e   --entry <main>         Perform analyss in the given function (def=program,
                             or main with -c)
If no Alias= is provided for an explicitly loaded component, it is determined
from the classpath/filename instead.
Options controlling analysis:
 -tr  --techReport           Use while loop as specified in Tech Report
 -s   --sync                 Synchronize all components to the global time
 -u   --update               Update component timestamps, even when no change
The options -s or -u should give stricter bounds. -u is probably
subsumed by -s, but since this is a research tool, more analysis is needed.
""")
  }
  /* For testing purposes only */
  def main(args: Array[String]) =
    println(apply(args).reduce(_+", "+_))
}
| jangroothuijse/ecalogic-c | src/main/scala/nl/ru/cs/ecalogic/config/Options.scala | Scala | bsd-3-clause | 7,159 |
package com.twitter.finagle.thrift
import com.twitter.silly.Silly
import com.twitter.util.TimeConversions._
import com.twitter.util.{Await, Promise, Return, Try}
import org.jboss.netty.bootstrap.{ClientBootstrap, ServerBootstrap}
import org.jboss.netty.channel._
import org.jboss.netty.channel.local._
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
/**
 * End-to-end test: wires a netty-local Thrift "bleep" server that reverses the
 * request string, connects a client over a local channel, sends one call and
 * asserts the reversed reply arrives.
 */
@RunWith(classOf[JUnitRunner])
class AsyncServerEndToEndTest extends FunSuite {
  val protocolFactory = Protocols.binaryFactory()

  test("async Thrift server should work"){
    // Set up the server.
    // Register the "bleep" method so the server decoder can deserialize it.
    ThriftTypes.add(new ThriftCallFactory[Silly.bleep_args, Silly.bleep_result](
      "bleep", classOf[Silly.bleep_args], classOf[Silly.bleep_result]))

    val serverBootstrap = new ServerBootstrap(new DefaultLocalServerChannelFactory())
    serverBootstrap.setPipelineFactory(new ChannelPipelineFactory {
      def getPipeline() = {
        val pipeline = Channels.pipeline()
        pipeline.addLast("framer", new ThriftFrameCodec)
        pipeline.addLast("decode", new ThriftServerDecoder(protocolFactory))
        pipeline.addLast("encode", new ThriftServerEncoder(protocolFactory))
        pipeline.addLast("handler", new SimpleChannelUpstreamHandler {
          override def messageReceived(ctx: ChannelHandlerContext, e: MessageEvent) {
            e.getMessage match {
              // NOTE: the generic type arguments are erased at runtime; the
              // `method.equals("bleep")` guard does the real dispatch here.
              case bleep: ThriftCall[Silly.bleep_args, Silly.bleep_result]
                if bleep.method.equals("bleep") =>
                // Reply with the reversed request string.
                val response = bleep.newReply
                response.setSuccess(bleep.arguments.request.reverse)
                Channels.write(ctx.getChannel, bleep.reply(response))
              case _ =>
                throw new IllegalArgumentException
            }
          }
        })
        pipeline
      }
    })

    // Completed by the client handler when the reply arrives.
    val callResults = new Promise[ThriftReply[Silly.bleep_result]]

    // Set up the client.
    val clientBootstrap = new ClientBootstrap(new DefaultLocalClientChannelFactory)
    clientBootstrap.setPipelineFactory(new ChannelPipelineFactory {
      def getPipeline() = {
        val pipeline = Channels.pipeline()
        pipeline.addLast("framer", new ThriftFrameCodec)
        pipeline.addLast("decode", new ThriftClientDecoder(protocolFactory))
        pipeline.addLast("encode", new ThriftClientEncoder(protocolFactory))
        pipeline.addLast("handler", new SimpleChannelUpstreamHandler {
          override def messageReceived(ctx: ChannelHandlerContext, e: MessageEvent) {
            callResults() = Return(e.getMessage.asInstanceOf[ThriftReply[Silly.bleep_result]])
          }
        })
        pipeline
      }
    })

    // Connect over an in-process ("local") address and fire the call once connected.
    val addr = new LocalAddress("thrift-async")
    val serverChannel = serverBootstrap.bind(addr)
    clientBootstrap.connect(addr).addListener(new ChannelFutureListener {
      override def operationComplete(f: ChannelFuture): Unit =
        if (f.isSuccess) {
          val ch = f.getChannel
          val thriftCall =
            new ThriftCall[Silly.bleep_args, Silly.bleep_result](
              "bleep",
              new Silly.bleep_args("heyhey"),
              classOf[Silly.bleep_result])
          Channels.write(ch, thriftCall)
        }
    })

    // Wait (bounded) for the reply and check the payload was reversed.
    val result = Try(Await.result(callResults, 1.second))
    assert(result.isReturn === true)
    assert(result().response.success === "yehyeh")

    serverChannel.close().awaitUninterruptibly()
    serverBootstrap.getFactory.releaseExternalResources()
  }
}
| lysu/finagle | finagle-thrift/src/test/scala/com/twitter/finagle/thrift/AsyncServerEndToEndTest.scala | Scala | apache-2.0 | 3,498 |
/*
* Copyright 2012 Emil Hellman
*
* This file is part of SortableChallenge.
*
* SortableChallenge is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* SortableChallenge is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with SortableChallenge. If not, see <http://www.gnu.org/licenses/>.
*/
package sortablechallenge.actors
import scala.actors.Actor
import scala.actors.Actor._
class Master[R](work: () => R) extends Actor {
def act {
}
}
| archevel/SortableCodingChallenge | src/main/scala/sortablechallenge/actors/Actors.scala | Scala | gpl-3.0 | 911 |
package jgo.tools.compiler
package parser.combinatorExten
import scala.util.parsing.combinator._
import scala.util.parsing.input.{Position, Positional}
/**
* Provides useful parser combinators (p-combinators) and other extras
* not implemented in the Scala parsing library.
*/
trait FancyParsers extends Parsers with ImplicitConversions {
  /**
   * A parser that returns the current position of the input and
   * consumes nothing.
   */
  object InPos extends Parser[Pos] {
    def apply(in: Input): ParseResult[Pos] = Success(in.pos, in)
  }

  /**
   * Converts the specified side-effectful action into a parser that performs
   * that action, consuming no input.
   * The intent of this conversion is to permit a clean syntax for semantic actions:
   * {{{
   * lazy val block =
   *   { pushScope() } ~> "{" ~> stmtList <~ "}" <~ { popScope() }
   * }}}
   */
  implicit def unit2parser(action: => Unit): Parser[Unit] = Parser { in =>
    Success(action, in)
  }

  /** Extra combinators pimped onto every [[Parser]] via [[parser2Fancy]]. */
  class FancyParserOps[+T](p: Parser[T]) {
    /** Produces a parser committed in the second parser which discards the first result. */
    def ~>! [U] (q: => Parser[U]): Parser[U] = p ~> commit(q)

    /** Produces a parser committed in the second parser which discards the second result. */
    def <~! [U] (q: => Parser[U]): Parser[T] = p <~ commit(q)

    /**
     * Produces a parser that optionally applies this parser, indicating via result whether
     * or not it was applied.
     */
    def ?? : Parser[Boolean] = p.? ^^ { _.isDefined }

    /** Infix alias for naming this parser (see [[nameize]]): `p &@ "name"`. */
    def &@ (name: String): Parser[T] = nameize(p, name)
  }
  implicit def parser2Fancy[T](p: Parser[T]): FancyParserOps[T] = new FancyParserOps(p)

  /**
   * Produces a parser that performs the same actions as the specified parser
   * but returns the position of the input before the given parser is applied
   * instead of whatever the provided parser would.
   */
  def pos(p: Parser[_]): Parser[Position] = Parser { in =>
    p(in) map { _ => in.pos }
  }

  /**
   * Produces a parser that applies the specified one and tuples the
   * result with the position of the input.
   */
  def withPos[T](p: Parser[T]): Parser[(T, Pos)] = Parser { in =>
    p(in) map { res => (res, in.pos) }
  }

  /**
   * Produces a parser which applies the specified parser and whose result
   * is the input before that application.
   */
  def inputAt[T](p: Parser[T]): Parser[Input] = Parser { in =>
    p(in) map { _ => in }
  }

  /**
   * Provides a $-based syntax for naming parsers.
   * Example:
   * {{{
   * lazy val returnKeyword =  "return keyword" $
   *   "return"
   * }}}
   */
  final class ParserName(val name: String) {
    /** Labels the specified parser with this name, returning that parser. */
    def $ [T] (p: Parser[T]): Parser[T] = nameize(p, name)
  }
  implicit def string2ParserName(name: String): ParserName = new ParserName(name)

  /** Attaches `name` to `p` for diagnostics. */
  //We override nameize in TracePrintingParsers and ExceptionTracing.
  protected def nameize[T](p: Parser[T], name: String): Parser[T] = p named name
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import org.apache.spark.SparkException
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.codegen.{UnsafeArrayWriter, UnsafeRowWriter, UnsafeWriter}
import org.apache.spark.sql.catalyst.util.ArrayData
import org.apache.spark.sql.types.{UserDefinedType, _}
import org.apache.spark.unsafe.Platform
/**
* An interpreted unsafe projection. This class reuses the [[UnsafeRow]] it produces, a consumer
* should copy the row if it is being buffered. This class is not thread safe.
*
* @param expressions that produces the resulting fields. These expressions must be bound
* to a schema.
*/
class InterpretedUnsafeProjection(expressions: Array[Expression]) extends UnsafeProjection {
  import InterpretedUnsafeProjection._

  /** Number of (top level) fields in the resulting row. */
  private[this] val numFields = expressions.length

  /** Array that expression results. */
  private[this] val values = new Array[Any](numFields)

  /** The row representing the expression results. */
  private[this] val intermediate = new GenericInternalRow(values)

  /* The row writer for UnsafeRow result */
  private[this] val rowWriter = new UnsafeRowWriter(numFields, numFields * 32)

  /** The writer that writes the intermediate result to the result row. */
  private[this] val writer: InternalRow => Unit = {
    val baseWriter = generateStructWriter(
      rowWriter,
      expressions.map(e => StructField("", e.dataType, e.nullable)))
    if (!expressions.exists(_.nullable)) {
      // No nullable fields. The top-level null bit mask will always be zeroed out.
      baseWriter
    } else {
      // Zero out the null bit mask before we write the row.
      row => {
        rowWriter.zeroOutNullBytes()
        baseWriter(row)
      }
    }
  }

  override def initialize(partitionIndex: Int): Unit = {
    // Propagate the partition index to any nondeterministic expressions.
    expressions.foreach(_.foreach {
      case n: Nondeterministic => n.initialize(partitionIndex)
      case _ =>
    })
  }

  override def apply(row: InternalRow): UnsafeRow = {
    // Put the expression results in the intermediate row.
    var i = 0
    while (i < numFields) {
      values(i) = expressions(i).eval(row)
      i += 1
    }

    // Write the intermediate row to an unsafe row.
    // NOTE: the returned UnsafeRow is reused across calls (see class doc);
    // callers that buffer rows must copy it.
    rowWriter.reset()
    writer(intermediate)
    rowWriter.getRow()
  }
}
/**
 * Helper functions for creating an [[InterpretedUnsafeProjection]].
 */
object InterpretedUnsafeProjection extends UnsafeProjectionCreator {
  /**
   * Returns an [[UnsafeProjection]] for given sequence of bound Expressions.
   */
  override protected def createProjection(exprs: Seq[Expression]): UnsafeProjection = {
    // We need to make sure that we do not reuse stateful expressions.
    val cleanedExpressions = exprs.map(_.transform {
      case s: Stateful => s.freshCopy()
    })
    new InterpretedUnsafeProjection(cleanedExpressions.toArray)
  }
  /**
   * Generate a struct writer function. The generated function writes an [[InternalRow]] to the
   * given buffer using the given [[UnsafeRowWriter]].
   */
  private def generateStructWriter(
      rowWriter: UnsafeRowWriter,
      fields: Array[StructField]): InternalRow => Unit = {
    val numFields = fields.length
    // Create one (null-safe) writer per field, specialized on the field's data type.
    val fieldWriters = fields.map { field =>
      generateFieldWriter(rowWriter, field.dataType, field.nullable)
    }
    // Create basic writer: write every field of the row, in order.
    row => {
      var i = 0
      while (i < numFields) {
        fieldWriters(i).apply(row, i)
        i += 1
      }
    }
  }
  /**
   * Generate a writer function for a struct field, array element, map key or map value. The
   * generated function writes the element at an index in a [[SpecializedGetters]] object (row
   * or array) to the given buffer using the given [[UnsafeWriter]].
   */
  private def generateFieldWriter(
      writer: UnsafeWriter,
      dt: DataType,
      nullable: Boolean): (SpecializedGetters, Int) => Unit = {
    // Create the basic writer.
    val unsafeWriter: (SpecializedGetters, Int) => Unit = dt match {
      // Fixed-width primitives are written directly into the fixed part of the buffer.
      case BooleanType =>
        (v, i) => writer.write(i, v.getBoolean(i))
      case ByteType =>
        (v, i) => writer.write(i, v.getByte(i))
      case ShortType =>
        (v, i) => writer.write(i, v.getShort(i))
      case IntegerType | DateType =>
        (v, i) => writer.write(i, v.getInt(i))
      case LongType | TimestampType =>
        (v, i) => writer.write(i, v.getLong(i))
      case FloatType =>
        (v, i) => writer.write(i, v.getFloat(i))
      case DoubleType =>
        (v, i) => writer.write(i, v.getDouble(i))
      case DecimalType.Fixed(precision, scale) =>
        (v, i) => writer.write(i, v.getDecimal(i, precision, scale), precision, scale)
      case CalendarIntervalType =>
        (v, i) => writer.write(i, v.getInterval(i))
      case BinaryType =>
        (v, i) => writer.write(i, v.getBinary(i))
      case StringType =>
        (v, i) => writer.write(i, v.getUTF8String(i))
      case StructType(fields) =>
        val numFields = fields.length
        val rowWriter = new UnsafeRowWriter(writer, numFields)
        val structWriter = generateStructWriter(rowWriter, fields)
        (v, i) => {
          val previousCursor = writer.cursor()
          v.getStruct(i, fields.length) match {
            case row: UnsafeRow =>
              // Already in serialized form: copy the bytes verbatim.
              writeUnsafeData(
                rowWriter,
                row.getBaseObject,
                row.getBaseOffset,
                row.getSizeInBytes)
            case row =>
              // Nested struct. We don't know where this will start because a row can be
              // variable length, so we need to update the offsets and zero out the bit mask.
              rowWriter.resetRowWriter()
              structWriter.apply(row)
          }
          writer.setOffsetAndSizeFromPreviousCursor(i, previousCursor)
        }
      case ArrayType(elementType, containsNull) =>
        val arrayWriter = new UnsafeArrayWriter(writer, getElementSize(elementType))
        val elementWriter = generateFieldWriter(
          arrayWriter,
          elementType,
          containsNull)
        (v, i) => {
          val previousCursor = writer.cursor()
          writeArray(arrayWriter, elementWriter, v.getArray(i))
          writer.setOffsetAndSizeFromPreviousCursor(i, previousCursor)
        }
      case MapType(keyType, valueType, valueContainsNull) =>
        // Map keys are never nullable by definition, hence `nullable = false` below.
        val keyArrayWriter = new UnsafeArrayWriter(writer, getElementSize(keyType))
        val keyWriter = generateFieldWriter(
          keyArrayWriter,
          keyType,
          nullable = false)
        val valueArrayWriter = new UnsafeArrayWriter(writer, getElementSize(valueType))
        val valueWriter = generateFieldWriter(
          valueArrayWriter,
          valueType,
          valueContainsNull)
        (v, i) => {
          val previousCursor = writer.cursor()
          v.getMap(i) match {
            case map: UnsafeMapData =>
              // Already serialized: copy key and value arrays as one opaque blob.
              writeUnsafeData(
                valueArrayWriter,
                map.getBaseObject,
                map.getBaseOffset,
                map.getSizeInBytes)
            case map =>
              // preserve 8 bytes to write the key array numBytes later.
              valueArrayWriter.grow(8)
              valueArrayWriter.increaseCursor(8)
              // Write the keys and write the numBytes of key array into the first 8 bytes.
              writeArray(keyArrayWriter, keyWriter, map.keyArray())
              Platform.putLong(
                valueArrayWriter.getBuffer,
                previousCursor,
                valueArrayWriter.cursor - previousCursor - 8
              )
              // Write the values.
              writeArray(valueArrayWriter, valueWriter, map.valueArray())
          }
          writer.setOffsetAndSizeFromPreviousCursor(i, previousCursor)
        }
      case udt: UserDefinedType[_] =>
        // A UDT is stored in its underlying SQL type's representation.
        generateFieldWriter(writer, udt.sqlType, nullable)
      case NullType =>
        (_, _) => {}
      case _ =>
        throw new SparkException(s"Unsupported data type $dt")
    }
    // Always wrap the writer with a null safe version. The setNullNBytes variant must
    // match the width of the slot the type occupies in the fixed-length region.
    dt match {
      case _: UserDefinedType[_] =>
        // The null wrapper depends on the sql type and not on the UDT.
        unsafeWriter
      case DecimalType.Fixed(precision, _) if precision > Decimal.MAX_LONG_DIGITS =>
        // We can't call setNullAt() for DecimalType with precision larger than 18, we call write
        // directly. We can use the unwrapped writer directly.
        unsafeWriter
      case BooleanType | ByteType =>
        (v, i) => {
          if (!v.isNullAt(i)) {
            unsafeWriter(v, i)
          } else {
            writer.setNull1Bytes(i)
          }
        }
      case ShortType =>
        (v, i) => {
          if (!v.isNullAt(i)) {
            unsafeWriter(v, i)
          } else {
            writer.setNull2Bytes(i)
          }
        }
      case IntegerType | DateType | FloatType =>
        (v, i) => {
          if (!v.isNullAt(i)) {
            unsafeWriter(v, i)
          } else {
            writer.setNull4Bytes(i)
          }
        }
      case _ =>
        (v, i) => {
          if (!v.isNullAt(i)) {
            unsafeWriter(v, i)
          } else {
            writer.setNull8Bytes(i)
          }
        }
    }
  }
  /**
   * Get the number of bytes elements of a data type will occupy in the fixed part of an
   * [[UnsafeArrayData]] object. Reference types are stored as an 8 byte combination of an
   * offset (upper 4 bytes) and a length (lower 4 bytes), these point to the variable length
   * portion of the array object. Primitives take up to 8 bytes, depending on the size of the
   * underlying data type.
   */
  private def getElementSize(dataType: DataType): Int = dataType match {
    case NullType | StringType | BinaryType | CalendarIntervalType |
         _: DecimalType | _: StructType | _: ArrayType | _: MapType => 8
    case _ => dataType.defaultSize
  }
  /**
   * Write an array to the buffer. If the array is already in serialized form (an instance of
   * [[UnsafeArrayData]]) then we copy the bytes directly, otherwise we do an element-by-element
   * copy.
   */
  private def writeArray(
      arrayWriter: UnsafeArrayWriter,
      elementWriter: (SpecializedGetters, Int) => Unit,
      array: ArrayData): Unit = array match {
    case unsafe: UnsafeArrayData =>
      writeUnsafeData(
        arrayWriter,
        unsafe.getBaseObject,
        unsafe.getBaseOffset,
        unsafe.getSizeInBytes)
    case _ =>
      val numElements = array.numElements()
      arrayWriter.initialize(numElements)
      var i = 0
      while (i < numElements) {
        elementWriter.apply(array, i)
        i += 1
      }
  }
  /**
   * Write an opaque block of data to the buffer. This is used to copy
   * [[UnsafeRow]], [[UnsafeArrayData]] and [[UnsafeMapData]] objects.
   */
  private def writeUnsafeData(
      writer: UnsafeWriter,
      baseObject: AnyRef,
      baseOffset: Long,
      sizeInBytes: Int) : Unit = {
    writer.grow(sizeInBytes)
    Platform.copyMemory(
      baseObject,
      baseOffset,
      writer.getBuffer,
      writer.cursor,
      sizeInBytes)
    writer.increaseCursor(sizeInBytes)
  }
}
| brad-kaiser/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/InterpretedUnsafeProjection.scala | Scala | apache-2.0 | 12,122 |
package twitter4s
import http.client.method.PostMethod
import http.client.response.HttpHeader
import org.scalatest._
import twitter4s.request.{TwitterAuthorizationHeader, TwitterTimelineRequest}
/**
 * Verifies OAuth 1.0a request signing for POST requests against the worked example
 * from Twitter's "Creating a signature" developer documentation. All credentials,
 * the nonce and the timestamp are fixed, so every intermediate value of the signing
 * pipeline (parameter string, signature base string, signing key, final header) is
 * deterministic and can be compared against the documented expected strings.
 */
class TwitterAuthorizationHeaderPostRequestSpec extends FlatSpec with Matchers with OptionValues with Inside with Inspectors {
  // Request under test: a status-update POST to the Twitter REST API.
  val _baseUrl = "https://api.twitter.com"
  val _method = PostMethod
  val _relativeUrl = "/1/statuses/update.json"
  val _headers = Seq(
    HttpHeader("Accept", "*/*"),
    HttpHeader("Connection", "close"),
    HttpHeader("User-Agent", "OAuth gem v0.4.4"),
    HttpHeader("Content-Type", "application/x-www-form-urlencoded"),
    HttpHeader("Content-Length", "76"),
    HttpHeader("Host", "api.twitter.com"))
  val _queryString = Map("include_entities" → Seq("true"))
  val _body = "status=Hello Ladies + Gentlemen, a signed OAuth request!"
  // Fixed OAuth credentials and one-time values, taken verbatim from the example.
  val oauthConsumerSecret = "kAcSOqF21Fu85e7zjz7ZN2U4ZRhfV3WpwPAoE3Z7kBw"
  val oauthConsumerKey = "xvz1evFS4wEEPTGEFPHBog"
  val oauthToken = "370773112-GmHxMAgYyLbNEtIKZeRNFsMKPR9EyMZeS9weJAEb"
  val oauthTokenSecret = "LswwdoUaIvS8ltyTt5jkRh4J50vUPVVHtR2YPi5kE"
  val oauthTimestamp = "1318622958"
  val oauthNonce = "kYjzVBB8Y0ZFabxSWbWovY3uYSQ2pTgmZeNu2VS4cg"
  // Expected intermediate and final values of the signing pipeline.
  val expectedSigningKey = "kAcSOqF21Fu85e7zjz7ZN2U4ZRhfV3WpwPAoE3Z7kBw&LswwdoUaIvS8ltyTt5jkRh4J50vUPVVHtR2YPi5kE"
  val expectedParameterString = "include_entities=true&oauth_consumer_key=xvz1evFS4wEEPTGEFPHBog&oauth_nonce=kYjzVBB8Y0ZFabxSWbWovY3uYSQ2pTgmZeNu2VS4cg&oauth_signature_method=HMAC-SHA1&oauth_timestamp=1318622958&oauth_token=370773112-GmHxMAgYyLbNEtIKZeRNFsMKPR9EyMZeS9weJAEb&oauth_version=1.0&status=Hello%20Ladies%20%2B%20Gentlemen%2C%20a%20signed%20OAuth%20request%21"
  val expectedSignatureBaseString = "POST&https%3A%2F%2Fapi.twitter.com%2F1%2Fstatuses%2Fupdate.json&include_entities%3Dtrue%26oauth_consumer_key%3Dxvz1evFS4wEEPTGEFPHBog%26oauth_nonce%3DkYjzVBB8Y0ZFabxSWbWovY3uYSQ2pTgmZeNu2VS4cg%26oauth_signature_method%3DHMAC-SHA1%26oauth_timestamp%3D1318622958%26oauth_token%3D370773112-GmHxMAgYyLbNEtIKZeRNFsMKPR9EyMZeS9weJAEb%26oauth_version%3D1.0%26status%3DHello%2520Ladies%2520%252B%2520Gentlemen%252C%2520a%2520signed%2520OAuth%2520request%2521"
  val expectedAuthHeaderValue = """OAuth oauth_consumer_key="xvz1evFS4wEEPTGEFPHBog", oauth_nonce="kYjzVBB8Y0ZFabxSWbWovY3uYSQ2pTgmZeNu2VS4cg", oauth_signature="tnnArxj06cWHq44gCs1OSKk%2FjLY%3D", oauth_signature_method="HMAC-SHA1", oauth_timestamp="1318622958", oauth_token="370773112-GmHxMAgYyLbNEtIKZeRNFsMKPR9EyMZeS9weJAEb", oauth_version="1.0""""
  val expectedAuthHeaderName = "Authorization"
  // Header generator with everything except the request fixed (partial application).
  val twAuthHeaderGen = TwitterAuthorizationHeader.generate(
    oauthConsumerKey = oauthConsumerKey,
    oauthToken = oauthToken,
    oauthConsumerSecret = oauthConsumerSecret,
    oauthTokenSecret = oauthTokenSecret,
    oauthNonce = oauthNonce,
    oauthTimestamp = oauthTimestamp)(_)
  val request = TwitterTimelineRequest(
    baseUrl = _baseUrl,
    relativeUrl = _relativeUrl,
    headers = _headers,
    method = PostMethod,
    queryString = _queryString,
    body = Some(_body.getBytes("utf-8")),
    paginated = false,
    authHeaderGen = twAuthHeaderGen)
  // Recomputes the OAuth parameter string for the request under test.
  private def _parameterString = {
    val fieldsWithoutSignature = TwitterAuthorizationHeader.createOauthFieldsWithoutSignature(
      oauthConsumerKey,
      oauthToken,
      oauthConsumerSecret,
      oauthTokenSecret,
      oauthNonce,
      oauthTimestamp)
    TwitterAuthorizationHeader.createParameterString(request, fieldsWithoutSignature)
  }
  "Twitter Auth Header" should "create a valid parameter string for POSTs" in {
    val parameterString = _parameterString
    assert(parameterString.equals(expectedParameterString))
  }
  it should "create a valid signature base string for POSTs" in {
    val signatureBaseString = TwitterAuthorizationHeader.createSignatureBaseString(request, _parameterString)
    assert(signatureBaseString.equals(expectedSignatureBaseString))
  }
  it should "create valid signing keys for POSTs" in {
    val mySigningKey = TwitterAuthorizationHeader.createSigningKey(oauthConsumerSecret, oauthTokenSecret)
    assert(mySigningKey.equals(expectedSigningKey))
  }
  it should "create valid authorization headers for POSTs " in {
    val authHeader = twAuthHeaderGen(request)
    assert(authHeader.name.equals(expectedAuthHeaderName))
    assert(authHeader.value.equals(expectedAuthHeaderValue))
  }
}
| SocialOrra/social4s | twitter4s/src/test/scala/twitter4s/TwitterAuthorizationHeaderPostRequestSpec.scala | Scala | apache-2.0 | 4,429 |
package com.giorgioinf.twtml.spark
import com.giorgioinf.twtml.web.WebClient
import com.typesafe.config.ConfigFactory
import org.apache.spark.{Logging, SparkConf}
import org.apache.spark.mllib.clustering.StreamingKMeans
import org.apache.spark.mllib.feature.{HashingTF,StandardScaler}
import org.apache.spark.mllib.linalg.{Vector,Vectors}
import org.apache.spark.streaming.{Seconds,StreamingContext}
import org.apache.spark.streaming.twitter.TwitterUtils
import org.viz.lightning.Lightning
import scala.util.{Random, Try}
import twitter4j.Status
/**
 * Spark Streaming job that clusters retweeted tweets with streaming k-means and
 * (optionally, currently commented out) visualizes the clusters with Lightning.
 */
object KMeans extends Logging {
  // Term-frequency hasher; only referenced by the commented-out text featurization below.
  val htf = new HashingTF(100)
  /**
   * Converts a status into a 2-dimensional feature vector:
   * (retweet count, follower count) of the *retweeted* status.
   * NOTE(review): assumes the status is a retweet (the stream is filtered with
   * `isRetweet` in main below); `getRetweetedStatus` would be null otherwise — confirm.
   */
  def featurize(status: Status): Vector = {
    val s = status.getRetweetedStatus
    val v = Vectors.dense(
      //s.getRetweetCount.toDouble/100000L,
      //s.getUser.getFollowersCount.toDouble/10000000L
      s.getRetweetCount.toDouble,
      s.getUser.getFollowersCount.toDouble
    )
    v
    //normalizer.transform(v)
    //s.getURLEntities.length.toDouble,
    //s.getUserMentionEntities.length.toDouble,
    //val v = htf.transform(s.getText.split("\\\\s+")).asInstanceOf[SparseVector]
    /*// sum 4 values to HashingTF vector
    val n:Int = v.size
    val indExt:Array[Int] = Array(n, n+1, n+2, n+3)
    val indices:Array[Int] = v.indices ++ indExt
    val values:Array[Double] = v.values ++ valExt
    Vectors.sparse(n+4, indices, values)*/
    //LabeledPoint(status.getRetweetCount.toDouble, v)
  }
  def main(args: Array[String]) {
    log.info("Loading application config...")
    val conf = ConfigFactory.load
    lazy val lgnHost = conf.getString("lightning")
    lazy val webHost = conf.getString("twtweb")
    lazy val web = WebClient(webHost)
    // 2-D features (see featurize) clustered into 3 groups.
    val numDimensions = 2
    val numClusters = 3
    log.info("Initializing Streaming Spark Context...")
    val sparkConf = new SparkConf()
      .setAppName("twitter-stream-ml-kmeans")
    val ssc = new StreamingContext(sparkConf, Seconds(5))
    log.info("Initializing Twitter stream...")
    val model = new StreamingKMeans()
      .setK(numClusters)
      //.setDecayFactor(1.0)
      .setHalfLife(5, "batches")
      .setRandomCenters(numDimensions, 0.0)
    val stream = TwitterUtils.createStream(ssc, None)
    // Only retweets carry the (retweet count, follower count) features we cluster on.
    val data = stream.filter(s => (
      //s.getLang=="en" &&
      s.isRetweet
      )).map(featurize)
    //data.print()
    log.info("Initializing Lightning graph session...")
    val lgn = Lightning(lgnHost)
    lgn.createSession("twitter-stream-ml-kmeans")
    //val scatter = lgn.scatterstreaming(Array(0.0), Array(0.0), size=Array(0.1))
    val black = Array.fill(numClusters, 3)(0)
    //val line = lgn.linestreaming(Array(Array(0.0), Array(0.0), Array(0.0)), size=size)
    //val line = lgn.linestreaming(Array.fill(numClusters , 1)(0.0), size=Array.fill(numClusters)(5.0))
    //Try(web.config(lgn.session, lgnHost, List(scatter.id)))
    var count:Long = 0
    data.foreachRDD { rdd =>
      // NOTE(review): rdd.count is an action and is computed twice here (the guard and
      // the accumulation); caching or reusing a single `val n = rdd.count` would avoid
      // recomputing the batch.
      if (rdd.count > 0) {
        count += rdd.count
        // Scale to unit variance (no mean centering), then fold the batch into the model.
        val scaledData = new StandardScaler(false, true).fit(rdd).transform(rdd)
        model.latestModel.update(scaledData, model.decayFactor, model.timeUnit)
        val datax = scaledData.map(_.apply(0)).toArray
        val datay = scaledData.map(_.apply(1)).toArray
        val centers = model.latestModel.clusterCenters
        val modelx = centers.map(_.apply(0))
        val modely = centers.map(_.apply(1))
        val modelline = centers.map(p => Array(p.apply(0)))
        val pred = model.latestModel.predict(scaledData).toArray
        //Try(line.append(modelline))
        //Try(web.stats(count))
        if (log.isDebugEnabled) {
          log.debug(
            "\\n\\tmodelx: " + modelx.deep +
            "\\n\\tmodely: " + modely.deep +
            "\\n\\tdatax: " + datax.deep +
            "\\n\\tdatay: " + datay.deep +
            "\\n\\tpred: " + pred.deep +
            "\\n\\tmodelline: " + modelline.deep
          )
        }
        //Try(scatter.append(datax, datay, label=pred))
        //Try(scatter.append(modelx, modely
        //, color=black
        //))
      }
    }
    //model.trainOn(data)
    //val predictions = model.predictOn(data)
    //val predictions = model.predictOn(data)
    // //predictions.print()
    // predictions.foreachRDD { rdd =>
    //   val centers = model.latestModel.clusterCenters
    //   val weights = model.latestModel.clusterWeights
    //   val modelx = centers.map(_.apply(0))
    //   val modely = centers.map(_.apply(1))
    //   val predn = rdd.count
    //   val pred = rdd.toArray
    //   println("modelx: " + modelx.deep)
    //   println("modely: " + modely.deep)
    //   println("predn: " + predn)
    //   println("pred: " + pred.deep)
    //   println("clusterWeights: " + weights.deep)
    //   println()
    // }
    // Start the streaming computation
    log.info("Initialization complete.")
    ssc.start()
    ssc.awaitTermination()
  }
}
| giorgioinf/twitter-stream-ml | spark/src/main/scala/com/giorgioinf/twtml/spark/KMeans.scala | Scala | gpl-3.0 | 5,473 |
/* _ _ _ *\\
** | (_) | | **
** ___| |_ __| | ___ clide 2 **
** / __| | |/ _` |/ _ \\ (c) 2012-2014 Martin Ring **
** | (__| | | (_| | __/ http://clide.flatmap.net **
** \\___|_|_|\\__,_|\\___| **
** **
** This file is part of Clide. **
** **
** Clide is free software: you can redistribute it and/or modify **
** it under the terms of the GNU Lesser General Public License as **
** published by the Free Software Foundation, either version 3 of **
** the License, or (at your option) any later version. **
** **
** Clide is distributed in the hope that it will be useful, **
** but WITHOUT ANY WARRANTY; without even the implied warranty of **
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the **
** GNU General Public License for more details. **
** **
** You should have received a copy of the GNU Lesser General Public **
** License along with Clide. **
** If not, see <http://www.gnu.org/licenses/>. **
\\* */
package clide.assistants
import akka.actor._
import clide.models._
import clide.actors.Events._
import clide.actors.Messages.{RequestSessionInfo,IdentifiedFor,WithUser,Talk}
import clide.collaboration.{Annotations,Operation,Document,AnnotationType}
import scala.collection.mutable.Map
import scala.collection.mutable.Set
import scala.concurrent.Future
import clide.actors.Messages._
import scala.util.Success
import scala.util.Failure
import scala.collection.mutable.Buffer
import scala.concurrent.Future
import scala.language.postfixOps
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.concurrent.Promise
import clide.actors.Messages
import clide.collaboration.Client
/**
 * @param owner The Session, this cursor belongs to
 * @param file The referenced file state
 * @param anchor The position of the cursor
 * @param head Optional value indicating the end of the selected range if something is selected. This might be before or after the anchor position.
 * @todo head is not implemented right now
 * @author Martin Ring <martin.ring@dfki.de>
 */
case class Cursor(owner: SessionInfo, file: OpenedFile, anchor: Int, head: Option[Int] = None) {
  // A cursor's identity is its (owner, file) pair: at most one cursor per session and
  // file, and moving it (different anchor/head) must not change its identity.
  override def equals(other: Any) = other match {
    case c: Cursor if c.owner == this.owner && c.file == this.file => true
    case _ => false
  }
  // Fix: keep hashCode consistent with the overridden equals. The synthesized
  // case-class hashCode also mixes in `anchor` and `head`, so two cursors comparing
  // equal could hash differently, breaking hash-based collections (e.g. the
  // per-file cursor maps in Assistant).
  override def hashCode: Int = (owner, file).##
}
/**
 * Actor driving an [[AssistBehavior]] within a project session. It mirrors the
 * server-side session state (collaborators, open files, cursors) and forwards the
 * relevant events to the behavior, serializing the behavior's asynchronous work by
 * buffering incoming edits/annotations while a task is running (see `working`).
 *
 * @author Martin Ring <martin.ring@dfki.de>
 */
private class Assistant(project: ProjectInfo, createBehavior: AssistantControl => AssistBehavior) extends Actor with AssistantControl with ActorLogging with Stash {
  // Server-side session actor; replaced as soon as the EventSocket arrives.
  var peer = context.system.deadLetters
  // Our own session info, filled in on SessionInit.
  var info: SessionInfo = null
  val collaborators = Set.empty[SessionInfo]
  val files = Map.empty[Long,OpenedFile]
  val clients = Map.empty[Long,Client[Char]]
  val behavior = createBehavior(this)
  // file id -> (session id -> cursor)
  val cursors = Map.empty[Long,Map[Long,Cursor]]
  val config = context.system.settings.config
  val assistantName = config.getString("assistant.username")
  val receiveOwnChatMessages = config.getBoolean("assistant.receiveOwnChatMessages")
  val automaticWorkingIndicator = config.getBoolean("assistant.automaticWorkingIndicator")
  val automaticFailureIndicator = config.getBoolean("assistant.automaticFailureIndicator")
  val workIndicatorDelay = config.getDuration("assistant.workIndicatorDelay", MILLISECONDS).millis
  val inputDelayMin = config.getDuration("assistant.inputDelayMin", MILLISECONDS).millis
  val inputDelayMax = config.getDuration("assistant.inputDelayMax", MILLISECONDS).millis
  /** Broadcasts a chat message to the whole session. */
  def chat(msg: String, tpe: Option[String] = None) = {
    peer ! Talk(None,msg,tpe)
  }
  /** Internal message: the behavior finished its current task; resume processing. */
  case object Continue
  var fileRequests = List.empty[(Long,Promise[OpenedFile])]
  /** Returns the file state, requesting it from the peer if it is not opened yet. */
  def openFile(id: Long): Future[OpenedFile] =
    files.get(id) match {
      case Some(file) => Future.successful(file)
      case None =>
        val promise = Promise[OpenedFile]
        fileRequests ::= (id -> promise)
        peer ! OpenFile(id)
        promise.future
    }
  var annotationDelays = Map.empty[Long,Cancellable]
  /** Sends annotations for the given file revision immediately. */
  def annotate(file: OpenedFile, name: String, annotations: Annotations): Unit = {
    annotationDelays.get(file.info.id).map(_.cancel())
    peer ! Annotate(file.info.id, file.revision, annotations, name)
  }
  /** Like [[annotate]] but debounced: a later call within `delay` supersedes this one. */
  def annotate(file: OpenedFile, name: String, annotations: Annotations, delay: FiniteDuration): Unit = {
    if (delay == Duration.Zero)
      annotate(file,name,annotations)
    else
      annotationDelays(file.info.id) = context.system.scheduler.scheduleOnce(delay)(annotate(file,name,annotations))
  }
  def edit(file: OpenedFile, edit: Operation[Char]): Future[Unit] = ???
  // Debounced "working" indicator state per file id (default: not working).
  val workStates: Map[Long, Boolean] = Map.empty.withDefaultValue(false)
  val workTimeouts: Map[Long, Cancellable] = Map.empty
  def workOnFile(file: OpenedFile): Unit = workOnFile(file.info.id)
  def workOnFile(file: Long): Unit = {
    if (workIndicatorDelay.length > 0) {
      if (workStates(file))
        workTimeouts.get(file).foreach(_.cancel())
      else workTimeouts(file) = context.system.scheduler.scheduleOnce(workIndicatorDelay) {
        // Fix: this must be a by-name block, not a `() => ...` lambda. The previous
        // code handed a Function0 to the by-name parameter, which only constructed
        // the function value without ever invoking it, so the indicator was never
        // sent. The Cancellable is now also remembered so the opposite transition
        // (doneWithFile) can actually cancel it.
        workStates(file) = true
        peer ! WorkingOnFile(file)
      }
    } else {
      peer ! WorkingOnFile(file)
    }
  }
  def doneWithFile(file: OpenedFile): Unit = doneWithFile(file.info.id)
  def doneWithFile(file: Long): Unit = {
    if (workIndicatorDelay.length > 0) {
      if (workStates(file) == false)
        workTimeouts.get(file).foreach(_.cancel())
      else workTimeouts(file) = context.system.scheduler.scheduleOnce(workIndicatorDelay) {
        // See workOnFile: by-name block + remembered Cancellable (same fix).
        workStates(file) = false
        peer ! DoneWithFile(file)
      }
    } else {
      peer ! DoneWithFile(file)
    }
  }
  def failedInFile(file: OpenedFile, message: Option[String]): Unit = failedInFile(file.info.id, message)
  def failedInFile(file: Long, message: Option[String]): Unit = peer ! FailureInFile(file, message)
  def offerAnnotations(file: OpenedFile, name: String, description: Option[String]) = peer ! OfferAnnotations(file.info.id, name, description)
  def stop() = self ! PoisonPill
  implicit val executionContext = context.dispatcher
  /** Internal marker for events we already folded into the buffered state. */
  case class Processed(e: Event)
  /**
   * Receive state while the behavior is busy: buffer (and compose) incoming edits
   * and annotations, stash everything else, and replay the buffered state to
   * ourselves once the behavior signals Continue.
   */
  def working: Receive = {
    val edits: Map[Long,Operation[Char]] = Map.empty
    val annotations: Map[Long,scala.collection.Map[(Long,String),Annotations]] = Map.empty
    {
      case Processed(Edited(file,operation)) =>
        edits(file) = if (edits.isDefinedAt(file))
          Operation.compose(edits(file), operation).get
        else operation
        if (annotations.isDefinedAt(file))
          annotations(file) = annotations(file).mapValues(_ transform operation get)
      case Edited(file,operation) if files.isDefinedAt(file) =>
        val prev = files(file)
        val next = OpenedFile(prev.info,new Document(prev.state).apply(operation).get.content.mkString, prev.revision + 1)
        files(file) = next
        edits(file) = if (edits.isDefinedAt(file)) Operation.compose(edits(file), operation).getOrElse {
          sys.error(s"${edits(file)} -> $operation")
        } else operation
        if (annotations.isDefinedAt(file))
          annotations(file) = annotations(file).mapValues(_ transform operation get)
      case Annotated(file,user,as,name) if files.isDefinedAt(file) =>
        // NOTE(review): `annotations` starts empty and entries are only added under
        // this isDefinedAt guard, so annotations received while working appear to be
        // dropped — verify whether that is intended.
        if (annotations.isDefinedAt(file))
          annotations(file) += (user,name) -> as
      case RefreshInterval =>
      case Continue =>
        context.become(initialized)
        for {
          (file, op) <- edits
        } self ! Processed(Edited(file,op))
        for {
          (file,as) <- annotations
          ((user,name),as) <- as
        } self ! Annotated(file,user,as,name)
        unstashAll()
      case Terminated(_) =>
        log.warning("peer terminated")
        context.stop(self)
      case _ => this.stash()
    }
  }
  /**
   * Runs a behavior task: switches to the buffering `working` state until the task
   * completes and (optionally) toggles the per-file working/failure indicators.
   */
  def doWork(file: Option[Long])(task: Future[Unit]) {
    // can be forced to block for tiny computations with Future.successful
    if (!task.isCompleted) {
      if (automaticWorkingIndicator) file.foreach(workOnFile(_))
      context.become(working)
      task.onComplete {
        case Success(_) =>
          self ! Continue
          if (automaticWorkingIndicator) file.foreach(doneWithFile(_))
        case Failure(e) =>
          log.error(e, "there is a problem with the behavior")
          self ! Continue
          if (automaticFailureIndicator) file.foreach(failedInFile(_,Some(e.getMessage())))
      }
    }
  }
  /** Main receive state after the session info arrived. */
  def initialized: Receive = {
    case FileOpened(file@OpenedFile(info,content,revision)) =>
      log.debug("file opened: {}", info)
      if (files.isDefinedAt(info.id)) {
        log.warning("file info has been renewed from server: {} (at revision {})", info, revision)
        files(info.id) = file
      } else if (behavior.mimeTypes.intersect(file.info.mimeType.toSet).nonEmpty) {
        files(info.id) = file
        doWork(Some(info.id))(for {
          _ <- behavior.fileOpened(file)
          _ <- behavior.fileActivated(file)
        } yield())
      }
    case FileClosed(file) if files.isDefinedAt(file) =>
      val f = files(file)
      files.remove(file)
      doWork(None)(for {
        _ <- behavior.fileInactivated(f)
        _ <- behavior.fileClosed(f)
      } yield ())
    case Processed(Edited(file,operation)) if files.isDefinedAt(file) =>
      doWork(Some(file))(behavior.fileChanged(files(file), operation, cursors.get(file).map(_.values.toSeq).getOrElse(Seq.empty)))
    case Edited(file,operation) if files.isDefinedAt(file) =>
      val prev = files(file)
      val next = OpenedFile(prev.info,new Document(prev.state).apply(operation).get.content.mkString, prev.revision + 1)
      files(file) = next
      doWork(Some(file))(behavior.fileChanged(next, operation, cursors.get(file).map(_.values.toSeq).getOrElse(Seq.empty)))
    case BroadcastEvent(who, when, Talk(to, msg, tpe)) if (who != info.id || receiveOwnChatMessages) =>
      doWork(None)(behavior.receiveChatMessage(collaborators.find(_.id == who).get,msg,tpe,when))
    case SessionChanged(session) =>
      val existing = collaborators.find(_.id == session.id)
      existing match {
        case Some(old) =>
          collaborators.remove(old)
          // TODO: Handle sesion changes
          if (!old.active && session.active)
            doWork(None)(behavior.collaboratorJoined(session))
          if (old.active && !session.active)
            doWork(None)(behavior.collaboratorLeft(session))
          collaborators.add(session)
        case None =>
          collaborators.add(session)
          if (session.active)
            doWork(None)(behavior.collaboratorJoined(session))
      }
    case SessionStopped(session) =>
      // Fix: `filter` returns a new collection and the old code discarded it, so the
      // stopped session was never removed; `retain` mutates the set in place. The
      // behavior notification is also routed through doWork for consistency with
      // the SessionChanged handler above.
      collaborators.retain(_.id != session.id)
      doWork(None)(behavior.collaboratorLeft(session))
    case Annotated(file, user, annotations, name) if files.isDefinedAt(file) =>
      // TODO: More universal approach on cursor positions etc.
      val ps = annotations.positions(AnnotationType.Class,"cursor")
      if (ps.nonEmpty) for {
        user <- collaborators.find(_.id == user)
        file <- files.get(file)
        pos <- ps
      } {
        if (!cursors.isDefinedAt(file.info.id))
          cursors(file.info.id) = Map.empty
        cursors(file.info.id) += user.id -> Cursor(user,file,pos)
        doWork(Some(file.info.id))(behavior.cursorMoved(Cursor(user,file,pos)))
      }
      val rs = annotations.positions(AnnotationType.HelpRequest)
      if (rs.nonEmpty) for {
        user <- collaborators.find(_.id == user)
        file <- files.get(file)
        (pos,req) <- rs
      } {
        // Help requests are encoded as "<request>:<id>".
        val r = req.split(":")
        if (r.length == 2) {
          val Array(request,id) = r
          doWork(Some(file.info.id))(behavior.helpRequest(user, file, pos, id, request))
        }
      }
    case AnnotationsRequested(file,name) if files.isDefinedAt(file) =>
      for {
        file <- files.get(file)
      } doWork(Some(file.info.id))(behavior.annotationsRequested(file, name))
    case AnnotationsDisregarded(file,name) if files.isDefinedAt(file) =>
      for {
        file <- files.get(file)
      } doWork(Some(file.info.id))(behavior.annotationsDisregarded(file, name))
    case BroadcastEvent(who, when, LookingAtFile(file)) =>
      for (who <- collaborators.find(_.id == who) if who.isHuman) {
        log.debug("{} is looking at file {}", who.user, file)
        if (!files.contains(file)) {
          peer ! OpenFile(file)
        }
      }
    case RefreshInterval =>
      behavior.refreshInterval()
    case Terminated(_) => context.stop(self)
  }
  private case object Initialized
  private case class InitializationFailed(cause: Throwable)
  /** Initial receive state: wait for the event socket, start the behavior, then request the session info. */
  def receive = {
    case EventSocket(ref,"session") =>
      log.debug("session started")
      peer = ref
      context.watch(peer)
      behavior.start(project).onComplete {
        case Success(()) => self ! Initialized
        case Failure(e) => self ! InitializationFailed(e)
      }
    case Initialized =>
      log.debug("requesting session info")
      peer ! RequestSessionInfo
    case InitializationFailed(e) =>
      context.stop(self)
    case SessionInit(info, collaborators, conversation) =>
      log.debug("session info received")
      this.info = info
      this.collaborators ++= collaborators
      context.become(initialized)
    case Terminated(_) => context.stop(self)
    case RefreshInterval =>
  }
  private object RefreshInterval
  override def preStart() {
    // Periodically ticks the behavior's refreshInterval() hook.
    context.system.scheduler.schedule(inputDelayMax, inputDelayMax)(self ! RefreshInterval)
  }
  override def postStop() {
    Await.ready(behavior.stop, 1.minute)
  }
}
| martinring/clide2 | modules/clide-core/src/main/scala/clide/assistants/Assistant.scala | Scala | lgpl-3.0 | 14,504 |
object bresenham_line {
  /** An integer (x, y) coordinate on the raster grid. */
  case class Point(x: Int, y: Int)
/**
* Uses the Bresenham Algorithm to calculate all points on a line from (x0, y0) to (x1, y1).
* The iterator returns all points in the interval [start, end[.
* @param x0 start point x
* @param y0 start point y
* @param x1 end point x
* @param y1 end point y
* @return the iterator containing all points on the line
*/
def bresenham(x0: Int, y0: Int, x1: Int, y1: Int): Iterator[Point] = {
import scala.math.abs
val dx = abs(x1 - x0)
val dy = abs(y1 - y0)
val sx = if (x0 < x1) 1 else -1
val sy = if (y0 < y1) 1 else -1
new Iterator[Point] {
var (x, y) = (x0, y0)
var err = dx - dy
def next = {
val point = Point(x, y)
val e2 = 2 * err
if (e2 > -dy) {
err -= dy
x += sx
}
if (e2 < dx) {
err += dx
y += sy
}
point
}
def hasNext = !(x == x1 && y == y1)
}
}
} | jiang42/Algorithm-Implementations | Bresenham_Line/Scala/lichtsprung/bresenham_line.scala | Scala | mit | 1,015 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import scala.collection.JavaConverters._
import scala.collection.mutable.ListBuffer
import scala.language.existentials
import org.apache.spark.sql.catalyst.analysis.UnresolvedRelation
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.execution.joins._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types.StructType
import org.apache.spark.TestUtils.{assertNotSpilled, assertSpilled}
class JoinSuite extends QueryTest with SharedSQLContext {
import testImplicits._
setupTestData()
  /** Returns the optimizer's estimated size-in-bytes statistic for the DataFrame's plan. */
  def statisticSizeInByte(df: DataFrame): BigInt = {
    df.queryExecution.optimizedPlan.stats(sqlConf).sizeInBytes
  }
test("equi-join is hash-join") {
val x = testData2.as("x")
val y = testData2.as("y")
val join = x.join(y, $"x.a" === $"y.a", "inner").queryExecution.optimizedPlan
val planned = spark.sessionState.planner.JoinSelection(join)
assert(planned.size === 1)
}
def assertJoin(pair: (String, Class[_])): Any = {
val (sqlString, c) = pair
val df = sql(sqlString)
val physical = df.queryExecution.sparkPlan
val operators = physical.collect {
case j: BroadcastHashJoinExec => j
case j: ShuffledHashJoinExec => j
case j: CartesianProductExec => j
case j: BroadcastNestedLoopJoinExec => j
case j: SortMergeJoinExec => j
}
assert(operators.size === 1)
if (operators.head.getClass != c) {
fail(s"$sqlString expected operator: $c, but got ${operators.head}\n physical: \n$physical")
}
}
test("join operator selection") {
spark.sharedState.cacheManager.clearCache()
withSQLConf("spark.sql.autoBroadcastJoinThreshold" -> "0",
SQLConf.CROSS_JOINS_ENABLED.key -> "true") {
Seq(
("SELECT * FROM testData LEFT SEMI JOIN testData2 ON key = a",
classOf[SortMergeJoinExec]),
("SELECT * FROM testData LEFT SEMI JOIN testData2", classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData JOIN testData2", classOf[CartesianProductExec]),
("SELECT * FROM testData JOIN testData2 WHERE key = 2", classOf[CartesianProductExec]),
("SELECT * FROM testData LEFT JOIN testData2", classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData RIGHT JOIN testData2", classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData FULL OUTER JOIN testData2", classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData LEFT JOIN testData2 WHERE key = 2",
classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData RIGHT JOIN testData2 WHERE key = 2",
classOf[CartesianProductExec]),
("SELECT * FROM testData FULL OUTER JOIN testData2 WHERE key = 2",
classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData JOIN testData2 WHERE key > a", classOf[CartesianProductExec]),
("SELECT * FROM testData FULL OUTER JOIN testData2 WHERE key > a",
classOf[CartesianProductExec]),
("SELECT * FROM testData JOIN testData2 ON key = a", classOf[SortMergeJoinExec]),
("SELECT * FROM testData JOIN testData2 ON key = a and key = 2",
classOf[SortMergeJoinExec]),
("SELECT * FROM testData JOIN testData2 ON key = a where key = 2",
classOf[SortMergeJoinExec]),
("SELECT * FROM testData LEFT JOIN testData2 ON key = a", classOf[SortMergeJoinExec]),
("SELECT * FROM testData RIGHT JOIN testData2 ON key = a where key = 2",
classOf[SortMergeJoinExec]),
("SELECT * FROM testData right join testData2 ON key = a and key = 2",
classOf[SortMergeJoinExec]),
("SELECT * FROM testData full outer join testData2 ON key = a",
classOf[SortMergeJoinExec]),
("SELECT * FROM testData left JOIN testData2 ON (key * a != key + a)",
classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData right JOIN testData2 ON (key * a != key + a)",
classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData full JOIN testData2 ON (key * a != key + a)",
classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData ANTI JOIN testData2 ON key = a", classOf[SortMergeJoinExec]),
("SELECT * FROM testData LEFT ANTI JOIN testData2", classOf[BroadcastNestedLoopJoinExec])
).foreach(assertJoin)
}
}
// ignore("SortMergeJoin shouldn't work on unsortable columns") {
// Seq(
// ("SELECT * FROM arrayData JOIN complexData ON data = a", classOf[ShuffledHashJoin])
// ).foreach { case (query, joinClass) => assertJoin(query, joinClass) }
// }
test("broadcasted hash join operator selection") {
spark.sharedState.cacheManager.clearCache()
sql("CACHE TABLE testData")
Seq(
("SELECT * FROM testData join testData2 ON key = a",
classOf[BroadcastHashJoinExec]),
("SELECT * FROM testData join testData2 ON key = a and key = 2",
classOf[BroadcastHashJoinExec]),
("SELECT * FROM testData join testData2 ON key = a where key = 2",
classOf[BroadcastHashJoinExec])
).foreach(assertJoin)
sql("UNCACHE TABLE testData")
}
test("broadcasted hash outer join operator selection") {
spark.sharedState.cacheManager.clearCache()
sql("CACHE TABLE testData")
sql("CACHE TABLE testData2")
Seq(
("SELECT * FROM testData LEFT JOIN testData2 ON key = a",
classOf[BroadcastHashJoinExec]),
("SELECT * FROM testData RIGHT JOIN testData2 ON key = a where key = 2",
classOf[BroadcastHashJoinExec]),
("SELECT * FROM testData right join testData2 ON key = a and key = 2",
classOf[BroadcastHashJoinExec])
).foreach(assertJoin)
sql("UNCACHE TABLE testData")
}
test("multiple-key equi-join is hash-join") {
val x = testData2.as("x")
val y = testData2.as("y")
val join = x.join(y, ($"x.a" === $"y.a") && ($"x.b" === $"y.b")).queryExecution.optimizedPlan
val planned = spark.sessionState.planner.JoinSelection(join)
assert(planned.size === 1)
}
test("inner join where, one match per row") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
checkAnswer(
upperCaseData.join(lowerCaseData).where('n === 'N),
Seq(
Row(1, "A", 1, "a"),
Row(2, "B", 2, "b"),
Row(3, "C", 3, "c"),
Row(4, "D", 4, "d")
))
}
}
test("inner join ON, one match per row") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
checkAnswer(
upperCaseData.join(lowerCaseData, $"n" === $"N"),
Seq(
Row(1, "A", 1, "a"),
Row(2, "B", 2, "b"),
Row(3, "C", 3, "c"),
Row(4, "D", 4, "d")
))
}
}
test("inner join, where, multiple matches") {
val x = testData2.where($"a" === 1).as("x")
val y = testData2.where($"a" === 1).as("y")
checkAnswer(
x.join(y).where($"x.a" === $"y.a"),
Row(1, 1, 1, 1) ::
Row(1, 1, 1, 2) ::
Row(1, 2, 1, 1) ::
Row(1, 2, 1, 2) :: Nil
)
}
test("inner join, no matches") {
val x = testData2.where($"a" === 1).as("x")
val y = testData2.where($"a" === 2).as("y")
checkAnswer(
x.join(y).where($"x.a" === $"y.a"),
Nil)
}
test("big inner join, 4 matches per row") {
val bigData = testData.union(testData).union(testData).union(testData)
val bigDataX = bigData.as("x")
val bigDataY = bigData.as("y")
checkAnswer(
bigDataX.join(bigDataY).where($"x.key" === $"y.key"),
testData.rdd.flatMap(row => Seq.fill(16)(Row.merge(row, row))).collect().toSeq)
}
test("cartesian product join") {
withSQLConf(SQLConf.CROSS_JOINS_ENABLED.key -> "true") {
checkAnswer(
testData3.join(testData3),
Row(1, null, 1, null) ::
Row(1, null, 2, 2) ::
Row(2, 2, 1, null) ::
Row(2, 2, 2, 2) :: Nil)
}
withSQLConf(SQLConf.CROSS_JOINS_ENABLED.key -> "false") {
val e = intercept[Exception] {
checkAnswer(
testData3.join(testData3),
Row(1, null, 1, null) ::
Row(1, null, 2, 2) ::
Row(2, 2, 1, null) ::
Row(2, 2, 2, 2) :: Nil)
}
assert(e.getMessage.contains("Detected cartesian product for INNER join " +
"between logical plans"))
}
}
test("left outer join") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
checkAnswer(
upperCaseData.join(lowerCaseData, $"n" === $"N", "left"),
Row(1, "A", 1, "a") ::
Row(2, "B", 2, "b") ::
Row(3, "C", 3, "c") ::
Row(4, "D", 4, "d") ::
Row(5, "E", null, null) ::
Row(6, "F", null, null) :: Nil)
checkAnswer(
upperCaseData.join(lowerCaseData, $"n" === $"N" && $"n" > 1, "left"),
Row(1, "A", null, null) ::
Row(2, "B", 2, "b") ::
Row(3, "C", 3, "c") ::
Row(4, "D", 4, "d") ::
Row(5, "E", null, null) ::
Row(6, "F", null, null) :: Nil)
checkAnswer(
upperCaseData.join(lowerCaseData, $"n" === $"N" && $"N" > 1, "left"),
Row(1, "A", null, null) ::
Row(2, "B", 2, "b") ::
Row(3, "C", 3, "c") ::
Row(4, "D", 4, "d") ::
Row(5, "E", null, null) ::
Row(6, "F", null, null) :: Nil)
checkAnswer(
upperCaseData.join(lowerCaseData, $"n" === $"N" && $"l" > $"L", "left"),
Row(1, "A", 1, "a") ::
Row(2, "B", 2, "b") ::
Row(3, "C", 3, "c") ::
Row(4, "D", 4, "d") ::
Row(5, "E", null, null) ::
Row(6, "F", null, null) :: Nil)
// Make sure we are choosing left.outputPartitioning as the
// outputPartitioning for the outer join operator.
checkAnswer(
sql(
"""
|SELECT l.N, count(*)
|FROM uppercasedata l LEFT OUTER JOIN allnulls r ON (l.N = r.a)
|GROUP BY l.N
""".stripMargin),
Row(
1, 1) ::
Row(2, 1) ::
Row(3, 1) ::
Row(4, 1) ::
Row(5, 1) ::
Row(6, 1) :: Nil)
checkAnswer(
sql(
"""
|SELECT r.a, count(*)
|FROM uppercasedata l LEFT OUTER JOIN allnulls r ON (l.N = r.a)
|GROUP BY r.a
""".stripMargin),
Row(null, 6) :: Nil)
}
}
test("right outer join") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
checkAnswer(
lowerCaseData.join(upperCaseData, $"n" === $"N", "right"),
Row(1, "a", 1, "A") ::
Row(2, "b", 2, "B") ::
Row(3, "c", 3, "C") ::
Row(4, "d", 4, "D") ::
Row(null, null, 5, "E") ::
Row(null, null, 6, "F") :: Nil)
checkAnswer(
lowerCaseData.join(upperCaseData, $"n" === $"N" && $"n" > 1, "right"),
Row(null, null, 1, "A") ::
Row(2, "b", 2, "B") ::
Row(3, "c", 3, "C") ::
Row(4, "d", 4, "D") ::
Row(null, null, 5, "E") ::
Row(null, null, 6, "F") :: Nil)
checkAnswer(
lowerCaseData.join(upperCaseData, $"n" === $"N" && $"N" > 1, "right"),
Row(null, null, 1, "A") ::
Row(2, "b", 2, "B") ::
Row(3, "c", 3, "C") ::
Row(4, "d", 4, "D") ::
Row(null, null, 5, "E") ::
Row(null, null, 6, "F") :: Nil)
checkAnswer(
lowerCaseData.join(upperCaseData, $"n" === $"N" && $"l" > $"L", "right"),
Row(1, "a", 1, "A") ::
Row(2, "b", 2, "B") ::
Row(3, "c", 3, "C") ::
Row(4, "d", 4, "D") ::
Row(null, null, 5, "E") ::
Row(null, null, 6, "F") :: Nil)
// Make sure we are choosing right.outputPartitioning as the
// outputPartitioning for the outer join operator.
checkAnswer(
sql(
"""
|SELECT l.a, count(*)
|FROM allnulls l RIGHT OUTER JOIN uppercasedata r ON (l.a = r.N)
|GROUP BY l.a
""".stripMargin),
Row(null,
6))
checkAnswer(
sql(
"""
|SELECT r.N, count(*)
|FROM allnulls l RIGHT OUTER JOIN uppercasedata r ON (l.a = r.N)
|GROUP BY r.N
""".stripMargin),
Row(1
, 1) ::
Row(2, 1) ::
Row(3, 1) ::
Row(4, 1) ::
Row(5, 1) ::
Row(6, 1) :: Nil)
}
}
test("full outer join") {
upperCaseData.where('N <= 4).createOrReplaceTempView("`left`")
upperCaseData.where('N >= 3).createOrReplaceTempView("`right`")
val left = UnresolvedRelation(TableIdentifier("left"))
val right = UnresolvedRelation(TableIdentifier("right"))
checkAnswer(
left.join(right, $"left.N" === $"right.N", "full"),
Row(1, "A", null, null) ::
Row(2, "B", null, null) ::
Row(3, "C", 3, "C") ::
Row(4, "D", 4, "D") ::
Row(null, null, 5, "E") ::
Row(null, null, 6, "F") :: Nil)
checkAnswer(
left.join(right, ($"left.N" === $"right.N") && ($"left.N" =!= 3), "full"),
Row(1, "A", null, null) ::
Row(2, "B", null, null) ::
Row(3, "C", null, null) ::
Row(null, null, 3, "C") ::
Row(4, "D", 4, "D") ::
Row(null, null, 5, "E") ::
Row(null, null, 6, "F") :: Nil)
checkAnswer(
left.join(right, ($"left.N" === $"right.N") && ($"right.N" =!= 3), "full"),
Row(1, "A", null, null) ::
Row(2, "B", null, null) ::
Row(3, "C", null, null) ::
Row(null, null, 3, "C") ::
Row(4, "D", 4, "D") ::
Row(null, null, 5, "E") ::
Row(null, null, 6, "F") :: Nil)
// Make sure we are UnknownPartitioning as the outputPartitioning for the outer join
// operator.
checkAnswer(
sql(
"""
|SELECT l.a, count(*)
|FROM allNulls l FULL OUTER JOIN upperCaseData r ON (l.a = r.N)
|GROUP BY l.a
""".
stripMargin),
Row(null, 10))
checkAnswer(
sql(
"""
|SELECT r.N, count(*)
|FROM allNulls l FULL OUTER JOIN upperCaseData r ON (l.a = r.N)
|GROUP BY r.N
""".stripMargin),
Row
(1, 1) ::
Row(2, 1) ::
Row(3, 1) ::
Row(4, 1) ::
Row(5, 1) ::
Row(6, 1) ::
Row(null, 4) :: Nil)
checkAnswer(
sql(
"""
|SELECT l.N, count(*)
|FROM upperCaseData l FULL OUTER JOIN allNulls r ON (l.N = r.a)
|GROUP BY l.N
""".stripMargin),
Row(1
, 1) ::
Row(2, 1) ::
Row(3, 1) ::
Row(4, 1) ::
Row(5, 1) ::
Row(6, 1) ::
Row(null, 4) :: Nil)
checkAnswer(
sql(
"""
|SELECT r.a, count(*)
|FROM upperCaseData l FULL OUTER JOIN allNulls r ON (l.N = r.a)
|GROUP BY r.a
""".
stripMargin),
Row(null, 10))
}
test("broadcasted existence join operator selection") {
spark.sharedState.cacheManager.clearCache()
sql("CACHE TABLE testData")
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> Long.MaxValue.toString) {
Seq(
("SELECT * FROM testData LEFT SEMI JOIN testData2 ON key = a",
classOf[BroadcastHashJoinExec]),
("SELECT * FROM testData ANT JOIN testData2 ON key = a", classOf[BroadcastHashJoinExec])
).foreach(assertJoin)
}
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
Seq(
("SELECT * FROM testData LEFT SEMI JOIN testData2 ON key = a",
classOf[SortMergeJoinExec]),
("SELECT * FROM testData LEFT ANTI JOIN testData2 ON key = a",
classOf[SortMergeJoinExec])
).foreach(assertJoin)
}
sql("UNCACHE TABLE testData")
}
test("cross join with broadcast") {
sql("CACHE TABLE testData")
val sizeInByteOfTestData = statisticSizeInByte(spark.table("testData"))
// we set the threshold is greater than statistic of the cached table testData
withSQLConf(
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> (sizeInByteOfTestData + 1).toString(),
SQLConf.CROSS_JOINS_ENABLED.key -> "true") {
assert(statisticSizeInByte(spark.table("testData2")) >
spark.conf.get(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD))
assert(statisticSizeInByte(spark.table("testData")) <
spark.conf.get(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD))
Seq(
("SELECT * FROM testData LEFT SEMI JOIN testData2 ON key = a",
classOf[SortMergeJoinExec]),
("SELECT * FROM testData LEFT SEMI JOIN testData2",
classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData JOIN testData2",
classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData JOIN testData2 WHERE key = 2",
classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData LEFT JOIN testData2",
classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData RIGHT JOIN testData2",
classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData FULL OUTER JOIN testData2",
classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData LEFT JOIN testData2 WHERE key = 2",
classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData RIGHT JOIN testData2 WHERE key = 2",
classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData FULL OUTER JOIN testData2 WHERE key = 2",
classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData JOIN testData2 WHERE key > a",
classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData FULL OUTER JOIN testData2 WHERE key > a",
classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData left JOIN testData2 WHERE (key * a != key + a)",
classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData right JOIN testData2 WHERE (key * a != key + a)",
classOf[BroadcastNestedLoopJoinExec]),
("SELECT * FROM testData full JOIN testData2 WHERE (key * a != key + a)",
classOf[BroadcastNestedLoopJoinExec])
).foreach(assertJoin)
checkAnswer(
sql(
"""
SELECT x.value, y.a, y.b FROM testData x JOIN testData2 y WHERE x.key = 2
""".stripMargin),
Row("2", 1, 1) ::
Row("2", 1, 2) ::
Row("2", 2, 1) ::
Row("2", 2, 2) ::
Row("2", 3, 1) ::
Row("2", 3, 2) :: Nil)
checkAnswer(
sql(
"""
SELECT x.value, y.a, y.b FROM testData x JOIN testData2 y WHERE x.key < y.a
""".stripMargin),
Row("1", 2, 1) ::
Row("1", 2, 2) ::
Row("1", 3, 1) ::
Row("1", 3, 2) ::
Row("2", 3, 1) ::
Row("2", 3, 2) :: Nil)
checkAnswer(
sql(
"""
SELECT x.value, y.a, y.b FROM testData x JOIN testData2 y ON x.key < y.a
""".stripMargin),
Row("1", 2, 1) ::
Row("1", 2, 2) ::
Row("1", 3, 1) ::
Row("1", 3, 2) ::
Row("2", 3, 1) ::
Row("2", 3, 2) :: Nil)
}
sql("UNCACHE TABLE testData")
}
test("left semi join") {
val df = sql("SELECT * FROM testData2 LEFT SEMI JOIN testData ON key = a")
checkAnswer(df,
Row(1, 1) ::
Row(1, 2) ::
Row(2, 1) ::
Row(2, 2) ::
Row(3, 1) ::
Row(3, 2) :: Nil)
}
test("cross join detection") {
testData.createOrReplaceTempView("A")
testData.createOrReplaceTempView("B")
testData2.createOrReplaceTempView("C")
testData3.createOrReplaceTempView("D")
upperCaseData.where('N >= 3).createOrReplaceTempView("`right`")
val cartesianQueries = Seq(
/** The following should error out since there is no explicit cross join */
"SELECT * FROM testData inner join testData2",
"SELECT * FROM testData left outer join testData2",
"SELECT * FROM testData right outer join testData2",
"SELECT * FROM testData full outer join testData2",
"SELECT * FROM testData, testData2",
"SELECT * FROM testData, testData2 where testData.key = 1 and testData2.a = 22",
/** The following should fail because after reordering there are cartesian products */
"select * from (A join B on (A.key = B.key)) join D on (A.key=D.a) join C",
"select * from ((A join B on (A.key = B.key)) join C) join D on (A.key = D.a)",
/** Cartesian product involving C, which is not involved in a CROSS join */
"select * from ((A join B on (A.key = B.key)) cross join D) join C on (A.key = D.a)");
def checkCartesianDetection(query: String): Unit = {
val e = intercept[Exception] {
checkAnswer(sql(query), Nil);
}
assert(e.getMessage.contains("Detected cartesian product"))
}
cartesianQueries.foreach(checkCartesianDetection)
}
test("test SortMergeJoin (without spill)") {
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "1",
"spark.sql.sortMergeJoinExec.buffer.spill.threshold" -> Int.MaxValue.toString) {
assertNotSpilled(sparkContext, "inner join") {
checkAnswer(
sql("SELECT * FROM testData JOIN testData2 ON key = a where key = 2"),
Row(2, "2", 2, 1) :: Row(2, "2", 2, 2) :: Nil
)
}
val expected = new ListBuffer[Row]()
expected.append(
Row(1, "1", 1, 1), Row(1, "1", 1, 2),
Row(2, "2", 2, 1), Row(2, "2", 2, 2),
Row(3, "3", 3, 1), Row(3, "3", 3, 2)
)
for (i <- 4 to 100) {
expected.append(Row(i, i.toString, null, null))
}
assertNotSpilled(sparkContext, "left outer join") {
checkAnswer(
sql(
"""
|SELECT
| big.key, big.value, small.a, small.b
|FROM
| testData big
|LEFT OUTER JOIN
| testData2 small
|ON
| big.key = small.a
""".stripMargin),
expected
)
}
assertNotSpilled(sparkContext, "right outer join") {
checkAnswer(
sql(
"""
|SELECT
| big.key, big.value, small.a, small.b
|FROM
| testData2 small
|RIGHT OUTER JOIN
| testData big
|ON
| big.key = small.a
""".stripMargin),
expected
)
}
}
}
test("test SortMergeJoin (with spill)") {
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "1",
"spark.sql.sortMergeJoinExec.buffer.in.memory.threshold" -> "0",
"spark.sql.sortMergeJoinExec.buffer.spill.threshold" -> "1") {
assertSpilled(sparkContext, "inner join") {
checkAnswer(
sql("SELECT * FROM testData JOIN testData2 ON key = a where key = 2"),
Row(2, "2", 2, 1) :: Row(2, "2", 2, 2) :: Nil
)
}
val expected = new ListBuffer[Row]()
expected.append(
Row(1, "1", 1, 1), Row(1, "1", 1, 2),
Row(2, "2", 2, 1), Row(2, "2", 2, 2),
Row(3, "3", 3, 1), Row(3, "3", 3, 2)
)
for (i <- 4 to 100) {
expected.append(Row(i, i.toString, null, null))
}
assertSpilled(sparkContext, "left outer join") {
checkAnswer(
sql(
"""
|SELECT
| big.key, big.value, small.a, small.b
|FROM
| testData big
|LEFT OUTER JOIN
| testData2 small
|ON
| big.key = small.a
""".stripMargin),
expected
)
}
assertSpilled(sparkContext, "right outer join") {
checkAnswer(
sql(
"""
|SELECT
| big.key, big.value, small.a, small.b
|FROM
| testData2 small
|RIGHT OUTER JOIN
| testData big
|ON
| big.key = small.a
""".stripMargin),
expected
)
}
// FULL OUTER JOIN still does not use [[ExternalAppendOnlyUnsafeRowArray]]
// so should not cause any spill
assertNotSpilled(sparkContext, "full outer join") {
checkAnswer(
sql(
"""
|SELECT
| big.key, big.value, small.a, small.b
|FROM
| testData2 small
|FULL OUTER JOIN
| testData big
|ON
| big.key = small.a
""".stripMargin),
expected
)
}
}
}
test("outer broadcast hash join should not throw NPE") {
withTempView("v1", "v2") {
withSQLConf(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "true") {
Seq(2 -> 2).toDF("x", "y").createTempView("v1")
spark.createDataFrame(
Seq(Row(1, "a")).asJava,
new StructType().add("i", "int", nullable = false).add("j", "string", nullable = false)
).createTempView("v2")
checkAnswer(
sql("select x, y, i, j from v1 left join v2 on x = i and y < length(j)"),
Row(2, 2, null, null)
)
}
}
}
}
| jlopezmalla/spark | sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala | Scala | apache-2.0 | 26,156 |
package org.igye.jfxutils.action
import javafx.scene.input.{KeyCode, KeyEvent}
/**
 * An immutable keyboard shortcut: a combination of modifier keys
 * (Ctrl / Alt / Shift) and regular keys.
 */
case class Shortcut(keys: KeyCode*) {
    // Keys treated as modifiers rather than as the "main" key of the shortcut.
    private val modifierKeys = Set(KeyCode.CONTROL, KeyCode.ALT, KeyCode.SHIFT)

    /**
     * True when the event represents exactly this shortcut: each modifier is
     * down if and only if it is listed in `keys`, the shortcut is non-empty,
     * and the event's key code equals every non-modifier key of the shortcut.
     */
    def matches(keyEvent: KeyEvent): Boolean = {
        val modifiersAgree =
            keys.contains(KeyCode.CONTROL) == keyEvent.isControlDown &&
                keys.contains(KeyCode.ALT) == keyEvent.isAltDown &&
                keys.contains(KeyCode.SHIFT) == keyEvent.isShiftDown
        modifiersAgree &&
            keys.nonEmpty &&
            keys.filterNot(modifierKeys.contains).forall(_ == keyEvent.getCode)
    }

    /** Human-readable form, e.g. "Ctrl+Shift+S". */
    override def toString: String = {
        keys.map {
            case KeyCode.CONTROL => "Ctrl"
            case KeyCode.ALT => "Alt"
            case KeyCode.SHIFT => "Shift"
            case other => other
        }.mkString("+")
    }
}
//package fbSpark
//
//import org.apache.spark.SparkConf
//import org.apache.spark.SparkContext
//import org.apache.spark.sql.DataFrame
//import org.apache.spark.sql.SQLContext
//import org.joda.time.DateTime
//
//object Test extends App {
//
// val sparkConf: SparkConf = Common.getSparkConf("Test")
// val sparkContext: SparkContext = new SparkContext(sparkConf)
// val sqlContext: SQLContext = new SQLContext(sparkContext)
//
//// import sqlContext.implicits._
//
// println("hello world")
//
//
//} | PredictionIO/open-academy | HayleySong/src/main/scala/fbSpark/Test.scala | Scala | apache-2.0 | 521 |
package controllers
import play.mvc.Controller
import play.data.validation._
import models._
import utils._
object Register extends Controller {
  /**
   * Handles a registration form submission.
   *
   * Play 1.x-style action: the `@Email @Required` annotations populate the
   * global `Validation` state before the body runs.
   *
   * @param jid the submitted XMPP/Jabber ID; must be present and email-shaped
   */
  def submitJid(@Email @Required jid:String) {
    if (Validation.hasErrors()) {
      flash.error("JID Missing or invalid format")
      // NOTE(review): in Play 1.x, redirecting via another action aborts the
      // current one by throwing, so the code below should be unreachable on
      // validation failure — confirm against the framework version in use.
      Application.index
    }
    // Persist the newly registered user.
    var u = new User
    u.xmppID = jid
    u.save
    // Send the XMPP invite to the supplied JID.
    XMPPSend.inviteUser(jid)
    // Render the action's default welcome template.
    render()
  }
}
| lstoll/twitter-chat | app/controllers/Register.scala | Scala | mit | 482 |
package xyz.discretezoo.web.db.v1
import xyz.discretezoo.web.db.ZooPostgresProfile.api._
/** Row type for the `graph_cvt` table (per-graph census metadata;
  * CVT presumably stands for "cubic vertex-transitive" — confirm). */
case class GraphCVT(
  zooid: Int, // matches the table's primary-key column "zooid"
  cvtIndex: Option[Int],
  symcubicIndex: Option[Int],
  isMoebiusLadder: Boolean,
  isPrism: Boolean,
  isSPX: Boolean,
  truncation: Option[Int] // foreign key for table graph
)
/** Slick table mapping for `graph_cvt`; each row materialises as a [[GraphCVT]]. */
class GraphsCVT(tag: Tag) extends Table[GraphCVT](tag, "graph_cvt") {
  // Primary key of the table.
  def zooid: Rep[Int] = column[Int]("zooid", O.PrimaryKey)
  def cvtIndex = column[Option[Int]]("cvt_index")
  def symcubicIndex = column[Option[Int]]("symcubic_index")
  def isMoebiusLadder = column[Boolean]("is_moebius_ladder")
  def isPrism = column[Boolean]("is_prism")
  def isSPX = column[Boolean]("is_spx")
  // NOTE(review): described as a foreign key to the graph table in the row
  // type, but no Slick foreignKey constraint is declared here — confirm intent.
  def truncation = column[Option[Int]]("truncation")
  // Default projection: column order must match GraphCVT's constructor order.
  def * = (
    zooid,
    cvtIndex,
    symcubicIndex,
    isMoebiusLadder,
    isPrism,
    isSPX,
    truncation
  ) <> ((GraphCVT.apply _).tupled, GraphCVT.unapply)
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import java.io.{InterruptedIOException, IOException, UncheckedIOException}
import java.nio.channels.ClosedByInterruptException
import java.util.UUID
import java.util.concurrent.{CountDownLatch, ExecutionException, TimeUnit}
import java.util.concurrent.atomic.AtomicReference
import java.util.concurrent.locks.{Condition, ReentrantLock}
import scala.collection.JavaConverters._
import scala.collection.mutable.{Map => MutableMap}
import scala.util.control.NonFatal
import com.google.common.util.concurrent.UncheckedExecutionException
import org.apache.hadoop.fs.Path
import org.apache.spark.{SparkContext, SparkException}
import org.apache.spark.internal.Logging
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.streaming.InternalOutputModes._
import org.apache.spark.sql.execution.QueryExecution
import org.apache.spark.sql.execution.command.StreamingExplainCommand
import org.apache.spark.sql.execution.datasources.v2.StreamWriterCommitProgress
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.sources.v2.{SupportsWrite, Table}
import org.apache.spark.sql.sources.v2.reader.streaming.{Offset => OffsetV2, SparkDataStream}
import org.apache.spark.sql.sources.v2.writer.SupportsTruncate
import org.apache.spark.sql.sources.v2.writer.streaming.StreamingWrite
import org.apache.spark.sql.streaming._
import org.apache.spark.sql.util.CaseInsensitiveStringMap
import org.apache.spark.util.{Clock, UninterruptibleThread, Utils}
/** States for [[StreamExecution]]'s lifecycle. */
trait State
/** Initial state before the query's fields are fully set up (see `isInitialized`). */
case object INITIALIZING extends State
case object ACTIVE extends State
/** Terminal state: the query is no longer active (see `isActive`). */
case object TERMINATED extends State
/** Not referenced in this chunk — presumably used while the stream switches
  * execution configuration; confirm against the concrete execution classes. */
case object RECONFIGURING extends State
/**
* Manages the execution of a streaming Spark SQL query that is occurring in a separate thread.
* Unlike a standard query, a streaming query executes repeatedly each time new data arrives at any
* [[Source]] present in the query plan. Whenever new data arrives, a [[QueryExecution]] is created
* and the results are committed transactionally to the given [[Sink]].
*
* @param deleteCheckpointOnStop whether to delete the checkpoint if the query is stopped without
* errors. Checkpoint deletion can be forced with the appropriate
* Spark configuration.
*/
abstract class StreamExecution(
override val sparkSession: SparkSession,
override val name: String,
private val checkpointRoot: String,
analyzedPlan: LogicalPlan,
val sink: Table,
val trigger: Trigger,
val triggerClock: Clock,
val outputMode: OutputMode,
deleteCheckpointOnStop: Boolean)
extends StreamingQuery with ProgressReporter with Logging {
import org.apache.spark.sql.streaming.StreamingQueryListener._
protected val pollingDelayMs: Long = sparkSession.sessionState.conf.streamingPollingDelay
protected val minLogEntriesToMaintain: Int = sparkSession.sessionState.conf.minBatchesToRetain
require(minLogEntriesToMaintain > 0, "minBatchesToRetain has to be positive")
/**
* A lock used to wait/notify when batches complete. Use a fair lock to avoid thread starvation.
*/
protected val awaitProgressLock = new ReentrantLock(true)
protected val awaitProgressLockCondition = awaitProgressLock.newCondition()
private val initializationLatch = new CountDownLatch(1)
private val startLatch = new CountDownLatch(1)
private val terminationLatch = new CountDownLatch(1)
val resolvedCheckpointRoot = {
val checkpointPath = new Path(checkpointRoot)
val fs = checkpointPath.getFileSystem(sparkSession.sessionState.newHadoopConf())
if (sparkSession.conf.get(SQLConf.STREAMING_CHECKPOINT_ESCAPED_PATH_CHECK_ENABLED)
&& StreamExecution.containsSpecialCharsInPath(checkpointPath)) {
// In Spark 2.4 and earlier, the checkpoint path is escaped 3 times (3 `Path.toUri.toString`
// calls). If this legacy checkpoint path exists, we will throw an error to tell the user how
// to migrate.
val legacyCheckpointDir =
new Path(new Path(checkpointPath.toUri.toString).toUri.toString).toUri.toString
val legacyCheckpointDirExists =
try {
fs.exists(new Path(legacyCheckpointDir))
} catch {
case NonFatal(e) =>
// We may not have access to this directory. Don't fail the query if that happens.
logWarning(e.getMessage, e)
false
}
if (legacyCheckpointDirExists) {
throw new SparkException(
s"""Error: we detected a possible problem with the location of your checkpoint and you
|likely need to move it before restarting this query.
|
|Earlier version of Spark incorrectly escaped paths when writing out checkpoints for
|structured streaming. While this was corrected in Spark 3.0, it appears that your
|query was started using an earlier version that incorrectly handled the checkpoint
|path.
|
|Correct Checkpoint Directory: $checkpointPath
|Incorrect Checkpoint Directory: $legacyCheckpointDir
|
|Please move the data from the incorrect directory to the correct one, delete the
|incorrect directory, and then restart this query. If you believe you are receiving
|this message in error, you can disable it with the SQL conf
|${SQLConf.STREAMING_CHECKPOINT_ESCAPED_PATH_CHECK_ENABLED.key}."""
.stripMargin)
}
}
val checkpointDir = checkpointPath.makeQualified(fs.getUri, fs.getWorkingDirectory)
fs.mkdirs(checkpointDir)
checkpointDir.toString
}
logInfo(s"Checkpoint root $checkpointRoot resolved to $resolvedCheckpointRoot.")
def logicalPlan: LogicalPlan
/**
* Tracks how much data we have processed and committed to the sink or state store from each
* input source.
* Only the scheduler thread should modify this field, and only in atomic steps.
* Other threads should make a shallow copy if they are going to access this field more than
* once, since the field's value may change at any time.
*/
@volatile
var committedOffsets = new StreamProgress
/**
* Tracks the offsets that are available to be processed, but have not yet be committed to the
* sink.
* Only the scheduler thread should modify this field, and only in atomic steps.
* Other threads should make a shallow copy if they are going to access this field more than
* once, since the field's value may change at any time.
*/
@volatile
var availableOffsets = new StreamProgress
@volatile
var sinkCommitProgress: Option[StreamWriterCommitProgress] = None
/** The current batchId or -1 if execution has not yet been initialized. */
protected var currentBatchId: Long = -1

/** Metadata associated with the whole query */
protected val streamMetadata: StreamMetadata = {
  val metadataPath = new Path(checkpointFile("metadata"))
  val hadoopConf = sparkSession.sessionState.newHadoopConf()
  // Reuse persisted metadata when restarting from an existing checkpoint; otherwise mint a
  // fresh query id and persist it so it is stable across future restarts.
  StreamMetadata.read(metadataPath, hadoopConf).getOrElse {
    val newMetadata = new StreamMetadata(UUID.randomUUID.toString)
    StreamMetadata.write(newMetadata, metadataPath, hadoopConf)
    newMetadata
  }
}

/** Metadata associated with the offset seq of a batch in the query. */
protected var offsetSeqMetadata = OffsetSeqMetadata(
  batchWatermarkMs = 0, batchTimestampMs = 0, sparkSession.conf)

/**
 * A map of current watermarks, keyed by the position of the watermark operator in the
 * physical plan.
 *
 * This state is 'soft state', which does not affect the correctness and semantics of watermarks
 * and is not persisted across query restarts.
 * The fault-tolerant watermark state is in offsetSeqMetadata.
 */
protected val watermarkMsMap: MutableMap[Int, Long] = MutableMap()

// `id` is stable across restarts (read from checkpoint metadata); `runId` is unique per run.
override val id: UUID = UUID.fromString(streamMetadata.id)
override val runId: UUID = UUID.randomUUID

/**
 * Pretty identified string of printing in logs. Format is
 * If name is set "queryName [id = xyz, runId = abc]" else "[id = xyz, runId = abc]"
 */
protected val prettyIdString =
  Option(name).map(_ + " ").getOrElse("") + s"[id = $id, runId = $runId]"

/**
 * A list of unique sources in the query plan. This will be set when generating logical plan.
 */
@volatile protected var uniqueSources: Seq[SparkDataStream] = Seq.empty

/** Defines the internal state of execution */
protected val state = new AtomicReference[State](INITIALIZING)

// Most recent IncrementalExecution; null until the first batch is planned.
@volatile
var lastExecution: IncrementalExecution = _

/** Holds the most recent input data for each source. */
protected var newData: Map[SparkDataStream, LogicalPlan] = _

// Set exactly once (in runStream's catch block) when the query dies with an error.
@volatile
protected var streamDeathCause: StreamingQueryException = null

/* Get the call site in the caller thread; will pass this into the micro batch thread */
private val callSite = Utils.getCallSite()

/** Used to report metrics to coda-hale. This uses id for easier tracking across restarts. */
lazy val streamMetrics = new MetricsReporter(
  this, s"spark.streaming.${Option(name).getOrElse(id)}")

/** Isolated spark session to run the batches with. */
private val sparkSessionForStream = sparkSession.cloneSession()

/**
 * The thread that runs the micro-batches of this stream. Note that this thread must be
 * [[org.apache.spark.util.UninterruptibleThread]] to workaround KAFKA-1894: interrupting a
 * running `KafkaConsumer` may cause endless loop.
 */
val queryExecutionThread: QueryExecutionThread =
  new QueryExecutionThread(s"stream execution thread for $prettyIdString") {
    override def run(): Unit = {
      // To fix call site like "run at <unknown>:0", we bridge the call site from the caller
      // thread to this micro batch thread
      sparkSession.sparkContext.setCallSite(callSite)
      runStream()
    }
  }

/**
 * A write-ahead-log that records the offsets that are present in each batch. In order to ensure
 * that a given batch will always consist of the same data, we write to this log *before* any
 * processing is done. Thus, the Nth record in this log indicated data that is currently being
 * processed and the N-1th entry indicates which offsets have been durably committed to the sink.
 */
val offsetLog = new OffsetSeqLog(sparkSession, checkpointFile("offsets"))

/**
 * A log that records the batch ids that have completed. This is used to check if a batch was
 * fully processed, and its output was committed to the sink, hence no need to process it again.
 * This is used (for instance) during restart, to help identify which batch to run next.
 */
val commitLog = new CommitLog(sparkSession, checkpointFile("commits"))

/** Whether all fields of the query have been initialized */
private def isInitialized: Boolean = state.get != INITIALIZING

/** Whether the query is currently active or not */
override def isActive: Boolean = state.get != TERMINATED

/** Returns the [[StreamingQueryException]] if the query was terminated by an exception. */
override def exception: Option[StreamingQueryException] = Option(streamDeathCause)

/** Returns the path of a file with `name` in the checkpoint directory. */
protected def checkpointFile(name: String): String =
  new Path(new Path(resolvedCheckpointRoot), name).toString
/**
 * Starts the execution. This returns only after the thread has started and [[QueryStartedEvent]]
 * has been posted to all the listeners.
 */
def start(): Unit = {
  logInfo(s"Starting $prettyIdString. Use $resolvedCheckpointRoot to store the query checkpoint.")
  // Daemon so a lingering stream thread never blocks JVM shutdown.
  queryExecutionThread.setDaemon(true)
  queryExecutionThread.start()
  startLatch.await() // Wait until thread started and QueryStart event has been posted
}

/**
 * Run the activated stream until stopped.
 */
protected def runActivatedStream(sparkSessionForStream: SparkSession): Unit
/**
 * Activate the stream and then wrap a callout to runActivatedStream, handling start and stop.
 *
 * Note that this method ensures that [[QueryStartedEvent]] and [[QueryTerminatedEvent]] are
 * posted such that listeners are guaranteed to get a start event before a termination.
 * Furthermore, this method also ensures that [[QueryStartedEvent]] event is posted before the
 * `start()` method returns.
 */
private def runStream(): Unit = {
  try {
    sparkSession.sparkContext.setJobGroup(runId.toString, getBatchDescriptionString,
      interruptOnCancel = true)
    sparkSession.sparkContext.setLocalProperty(StreamExecution.QUERY_ID_KEY, id.toString)
    if (sparkSession.sessionState.conf.streamingMetricsEnabled) {
      sparkSession.sparkContext.env.metricsSystem.registerSource(streamMetrics)
    }
    // `postEvent` does not throw non fatal exception.
    postEvent(new QueryStartedEvent(id, runId, name))
    // Unblock starting thread
    startLatch.countDown()
    // While active, repeatedly attempt to run batches.
    SparkSession.setActiveSession(sparkSession)
    updateStatusMessage("Initializing sources")
    // force initialization of the logical plan so that the sources can be created
    logicalPlan
    // Adaptive execution can change num shuffle partitions, disallow
    sparkSessionForStream.conf.set(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key, "false")
    // Disable cost-based join optimization as we do not want stateful operations to be rearranged
    sparkSessionForStream.conf.set(SQLConf.CBO_ENABLED.key, "false")
    // Re-derive offset metadata against the *stream's* isolated session config.
    offsetSeqMetadata = OffsetSeqMetadata(
      batchWatermarkMs = 0, batchTimestampMs = 0, sparkSessionForStream.conf)
    // CAS guards against a concurrent stop(): only transition to ACTIVE if still INITIALIZING.
    if (state.compareAndSet(INITIALIZING, ACTIVE)) {
      // Unblock `awaitInitialization`
      initializationLatch.countDown()
      runActivatedStream(sparkSessionForStream)
      updateStatusMessage("Stopped")
    } else {
      // `stop()` is already called. Let `finally` finish the cleanup.
    }
  } catch {
    case e if isInterruptedByStop(e, sparkSession.sparkContext) =>
      // interrupted by stop()
      updateStatusMessage("Stopped")
    case e: IOException if e.getMessage != null
      && e.getMessage.startsWith(classOf[InterruptedException].getName)
      && state.get == TERMINATED =>
      // This is a workaround for HADOOP-12074: `Shell.runCommand` converts `InterruptedException`
      // to `new IOException(ie.toString())` before Hadoop 2.8.
      updateStatusMessage("Stopped")
    case e: Throwable =>
      streamDeathCause = new StreamingQueryException(
        toDebugString(includeLogicalPlan = isInitialized),
        s"Query $prettyIdString terminated with exception: ${e.getMessage}",
        e,
        committedOffsets.toOffsetSeq(sources, offsetSeqMetadata).toString,
        availableOffsets.toOffsetSeq(sources, offsetSeqMetadata).toString)
      logError(s"Query $prettyIdString terminated with error", e)
      updateStatusMessage(s"Terminated with exception: ${e.getMessage}")
      // Rethrow the fatal errors to allow the user using `Thread.UncaughtExceptionHandler` to
      // handle them
      if (!NonFatal(e)) {
        throw e
      }
  } finally queryExecutionThread.runUninterruptibly {
    // The whole `finally` block must run inside `runUninterruptibly` to avoid being interrupted
    // when a query is stopped by the user. We need to make sure the following codes finish
    // otherwise it may throw `InterruptedException` to `UncaughtExceptionHandler` (SPARK-21248).
    // Release latches to unblock the user codes since exception can happen in any place and we
    // may not get a chance to release them
    startLatch.countDown()
    initializationLatch.countDown()
    try {
      stopSources()
      state.set(TERMINATED)
      currentStatus = status.copy(isTriggerActive = false, isDataAvailable = false)
      // Update metrics and status
      sparkSession.sparkContext.env.metricsSystem.removeSource(streamMetrics)
      // Notify others
      sparkSession.streams.notifyQueryTermination(StreamExecution.this)
      postEvent(
        new QueryTerminatedEvent(id, runId, exception.map(_.cause).map(Utils.exceptionString)))
      // Delete the temp checkpoint when either force delete enabled or the query didn't fail
      if (deleteCheckpointOnStop &&
          (sparkSession.sessionState.conf
            .getConf(SQLConf.FORCE_DELETE_TEMP_CHECKPOINT_LOCATION) || exception.isEmpty)) {
        val checkpointPath = new Path(resolvedCheckpointRoot)
        try {
          logInfo(s"Deleting checkpoint $checkpointPath.")
          val fs = checkpointPath.getFileSystem(sparkSession.sessionState.newHadoopConf())
          fs.delete(checkpointPath, true)
        } catch {
          case NonFatal(e) =>
            // Deleting temp checkpoint folder is best effort, don't throw non fatal exceptions
            // when we cannot delete them.
            logWarning(s"Cannot delete $checkpointPath", e)
        }
      }
    } finally {
      awaitProgressLock.lock()
      try {
        // Wake up any threads that are waiting for the stream to progress.
        awaitProgressLockCondition.signalAll()
      } finally {
        awaitProgressLock.unlock()
      }
      // Released last, so awaitTermination observes fully-updated terminal state.
      terminationLatch.countDown()
    }
  }
}
/**
 * True iff `e` is the interruption produced by `stop()`: the query must already have
 * transitioned to TERMINATED, and the throwable must be a recognized interruption exception.
 */
private def isInterruptedByStop(e: Throwable, sc: SparkContext): Boolean =
  state.get == TERMINATED && StreamExecution.isInterruptionException(e, sc)
/** Forwards a streaming listener event to the session's StreamingQueryManager bus. */
override protected def postEvent(event: StreamingQueryListener.Event): Unit = {
  sparkSession.streams.postListenerEvent(event)
}
/**
 * Stops every unique streaming source. A failure to stop one source is logged (resources may
 * leak) but never prevents the remaining sources from being stopped; fatal errors still
 * propagate.
 */
protected def stopSources(): Unit = {
  for (source <- uniqueSources) {
    try source.stop()
    catch {
      case NonFatal(e) =>
        logWarning(s"Failed to stop streaming source: $source. Resources may have leaked.", e)
    }
  }
}
/**
 * Blocks the current thread until processing for data from the given `source` has reached at
 * least the given `Offset`. This method is intended for use primarily when writing tests.
 */
private[sql] def awaitOffset(sourceIndex: Int, newOffset: OffsetV2, timeoutMs: Long): Unit = {
  assertAwaitThread()
  // Re-evaluated on every loop iteration; reads a snapshot of committedOffsets so the
  // contains/apply pair below sees a consistent map.
  def notDone = {
    val localCommittedOffsets = committedOffsets
    if (sources == null) {
      // sources might not be initialized yet; treated as "done" so this returns immediately
      false
    } else {
      val source = sources(sourceIndex)
      !localCommittedOffsets.contains(source) || localCommittedOffsets(source) != newOffset
    }
  }
  while (notDone) {
    awaitProgressLock.lock()
    try {
      // Timed wait: a missed signal can delay one iteration by at most timeoutMs.
      awaitProgressLockCondition.await(timeoutMs, TimeUnit.MILLISECONDS)
      if (streamDeathCause != null) {
        throw streamDeathCause
      }
    } finally {
      awaitProgressLock.unlock()
    }
  }
  logDebug(s"Unblocked at $newOffset for ${sources(sourceIndex)}")
}
/** A flag to indicate that a batch has completed with no new data available. */
@volatile protected var noNewData = false

/**
 * Assert that the await APIs should not be called in the stream thread. Otherwise, it may cause
 * dead-lock, e.g., calling any await APIs in `StreamingQueryListener.onQueryStarted` will block
 * the stream thread forever.
 */
private def assertAwaitThread(): Unit = {
  // Reference equality: the check is specifically about *this* query's execution thread.
  if (queryExecutionThread eq Thread.currentThread) {
    throw new IllegalStateException(
      "Cannot wait for a query state from the same thread that is running the query")
  }
}
/**
 * Await until all fields of the query have been initialized.
 */
def awaitInitialization(timeoutMs: Long): Unit = {
  assertAwaitThread()
  require(timeoutMs > 0, "Timeout has to be positive")
  // Checked both before and after the latch wait: the query may already have died, or may die
  // while we are waiting (the finally block in runStream releases the latch on failure too).
  if (streamDeathCause != null) {
    throw streamDeathCause
  }
  initializationLatch.await(timeoutMs, TimeUnit.MILLISECONDS)
  if (streamDeathCause != null) {
    throw streamDeathCause
  }
}
/**
 * Blocks until all currently-available data has been processed, the query stops, or the query
 * fails (in which case the failure is rethrown). Returns immediately if the query is inactive.
 */
override def processAllAvailable(): Unit = {
  assertAwaitThread()
  if (streamDeathCause != null) {
    throw streamDeathCause
  }
  if (!isActive) return
  awaitProgressLock.lock()
  try {
    noNewData = false
    while (true) {
      // Timed wait (10s) so progress/death is re-checked even if a signal is missed.
      awaitProgressLockCondition.await(10000, TimeUnit.MILLISECONDS)
      if (streamDeathCause != null) {
        throw streamDeathCause
      }
      if (noNewData || !isActive) {
        return
      }
    }
  } finally {
    awaitProgressLock.unlock()
  }
}
/** Blocks indefinitely until the query terminates; rethrows the terminating exception if any. */
override def awaitTermination(): Unit = {
  assertAwaitThread()
  terminationLatch.await()
  if (streamDeathCause != null) {
    throw streamDeathCause
  }
}

/**
 * Blocks up to `timeoutMs` for termination. Returns true iff the query actually terminated
 * within the timeout; rethrows the terminating exception if the query failed.
 */
override def awaitTermination(timeoutMs: Long): Boolean = {
  assertAwaitThread()
  require(timeoutMs > 0, "Timeout has to be positive")
  terminationLatch.await(timeoutMs, TimeUnit.MILLISECONDS)
  if (streamDeathCause != null) {
    throw streamDeathCause
  } else {
    !isActive
  }
}
/** Expose for tests */
def explainInternal(extended: Boolean): String = {
  if (lastExecution == null) {
    "No physical plan. Waiting for data."
  } else {
    val explain = StreamingExplainCommand(lastExecution, extended = extended)
    // NOTE(review): "\\n" here is a literal backslash-n, not a newline — presumably an
    // escaping artifact introduced when this snippet was extracted; confirm against upstream,
    // which joins plan lines with a real newline.
    sparkSession.sessionState.executePlan(explain).executedPlan.executeCollect()
      .map(_.getString(0)).mkString("\\n")
  }
}
/** Prints the streaming physical plan to stdout, optionally in extended form. */
override def explain(extended: Boolean): Unit = {
  // scalastyle:off println
  println(explainInternal(extended))
  // scalastyle:on println
}

/** Convenience overload: non-extended explain. */
override def explain(): Unit = explain(extended = false)

/** Human-readable identity plus current lifecycle state, for logs and debuggers. */
override def toString: String = {
  s"Streaming Query $prettyIdString [state = $state]"
}
/**
 * Builds the multi-line debug summary embedded in [[StreamingQueryException]] messages.
 * The logical plan is appended only when the query finished initializing, since forcing
 * `logicalPlan` earlier could itself fail.
 */
private def toDebugString(includeLogicalPlan: Boolean): String = {
  val debugString =
    s"""|=== Streaming Query ===
        |Identifier: $prettyIdString
        |Current Committed Offsets: $committedOffsets
        |Current Available Offsets: $availableOffsets
        |
        |Current State: $state
        |Thread State: ${queryExecutionThread.getState}""".stripMargin
  if (includeLogicalPlan) {
    // NOTE(review): "\\n" is a literal backslash-n here — likely an extraction artifact of
    // the original "\n" separators; confirm against upstream.
    debugString + s"\\n\\nLogical Plan:\\n$logicalPlan"
  } else {
    debugString
  }
}
/**
 * Builds the HTML job-group description shown in the Spark UI: optional query name, then
 * id, runId and the current batch ("init" before the first batch is assigned).
 */
protected def getBatchDescriptionString: String = {
  val batchLabel = if (currentBatchId < 0) "init" else currentBatchId.toString
  val namePrefix = Option(name).fold("")(_ + "<br/>")
  namePrefix + s"id = $id<br/>runId = $runId<br/>batch = $batchLabel"
}
/**
 * Builds a [[StreamingWrite]] for the given sink table, honoring the query's output mode.
 *
 * @param table     v2 sink table supporting writes
 * @param options   write options, wrapped case-insensitively
 * @param inputPlan logical plan whose schema is declared as the write's input schema
 */
protected def createStreamingWrite(
    table: SupportsWrite,
    options: Map[String, String],
    inputPlan: LogicalPlan): StreamingWrite = {
  val writeBuilder = table.newWriteBuilder(new CaseInsensitiveStringMap(options.asJava))
    .withQueryId(id.toString)
    .withInputDataSchema(inputPlan.schema)
  outputMode match {
    case Append =>
      writeBuilder.buildForStreaming()
    case Complete =>
      // TODO: we should do this check earlier when we have capability API.
      // Complete mode rewrites the full result each trigger, so the sink must support truncate.
      require(writeBuilder.isInstanceOf[SupportsTruncate],
        table.name + " does not support Complete mode.")
      writeBuilder.asInstanceOf[SupportsTruncate].truncate().buildForStreaming()
    case Update =>
      // No v2 sink really supports Update mode yet; during tests we let sinks pretend to,
      // treating Update the same as Append.
      if (Utils.isTesting) {
        writeBuilder.buildForStreaming()
      } else {
        throw new IllegalArgumentException(
          "Data source v2 streaming sinks does not support Update mode.")
      }
  }
}
/** Removes offset/commit log entries with batch ids strictly below `threshold`. */
protected def purge(threshold: Long): Unit = {
  logDebug(s"Purging metadata at threshold=$threshold")
  offsetLog.purge(threshold)
  commitLog.purge(threshold)
}
}
object StreamExecution {
  // Local property key used to tag jobs with the owning query's id.
  val QUERY_ID_KEY = "sql.streaming.queryId"
  val IS_CONTINUOUS_PROCESSING = "__is_continuous_processing"

  /**
   * Whether `e` (or, recursively, one of its causes) is an exception produced by interrupting
   * the stream's threads/jobs, as opposed to a genuine failure.
   */
  def isInterruptionException(e: Throwable, sc: SparkContext): Boolean = e match {
    // InterruptedIOException - thrown when an I/O operation is interrupted
    // ClosedByInterruptException - thrown when an I/O operation upon a channel is interrupted
    case _: InterruptedException | _: InterruptedIOException | _: ClosedByInterruptException =>
      true
    // The cause of the following exceptions may be one of the above exceptions:
    //
    // UncheckedIOException - thrown by codes that cannot throw a checked IOException, such as
    //                        BiFunction.apply
    // ExecutionException - thrown by codes running in a thread pool and these codes throw an
    //                      exception
    // UncheckedExecutionException - thrown by codes that cannot throw a checked
    //                               ExecutionException, such as BiFunction.apply
    case e2 @ (_: UncheckedIOException | _: ExecutionException | _: UncheckedExecutionException)
      if e2.getCause != null =>
      isInterruptionException(e2.getCause, sc)
    case se: SparkException =>
      // A SparkException counts as an interruption only if it reports cancellation of this
      // query's own job group (set by runStream), or if a recognized cause is nested inside.
      val jobGroup = sc.getLocalProperty("spark.jobGroup.id")
      if (jobGroup == null) return false
      val errorMsg = se.getMessage
      if (errorMsg.contains("cancelled") && errorMsg.contains(jobGroup) && se.getCause == null) {
        true
      } else if (se.getCause != null) {
        isInterruptionException(se.getCause, sc)
      } else {
        false
      }
    case _ =>
      false
  }

  /** Whether the path contains special chars that will be escaped when converting to a `URI`. */
  def containsSpecialCharsInPath(path: Path): Boolean = {
    path.toUri.getPath != new Path(path.toUri.toString).toUri.getPath
  }
}
/**
 * A special thread to run the stream query. Some codes require to run in the QueryExecutionThread
 * and will use `classOf[QueryExecutionThread]` to check.
 */
abstract class QueryExecutionThread(name: String) extends UninterruptibleThread(name)
| aosagie/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/StreamExecution.scala | Scala | apache-2.0 | 27,046 |
package io.latent.resilience.cache
import com.google.common.cache.{Cache => GCache, CacheBuilder}
import scala.concurrent.Future
import scala.concurrent.duration.Duration
/**
* A local, in-memory cache backed by guava.
*
* @see com.google.common.cache.CacheBuilder for detailed configuration documentation
*/
/**
 * A local, in-memory cache of futures, backed by a Guava cache.
 *
 * Guava's builder is typed `[AnyRef, AnyRef]` when no loader is supplied and cannot hold
 * Scala primitives or `null`, so keys and values are cast at the boundary; that ugliness is
 * fully encapsulated here and never leaks to callers.
 *
 * @see com.google.common.cache.CacheBuilder for detailed configuration documentation
 */
class SimpleCache[K, V](maxSize: Int = 100,
                        timeToLive: Duration = Duration.Inf,
                        timeToIdle: Duration = Duration.Inf,
                        softValues: Boolean = false,
                        weakValues: Boolean = false,
                        weakKeys: Boolean = false) extends Cache[K, V] {

  private val backing: GCache[AnyRef, AnyRef] = newBackingCache()

  /** Associates `key` with the (possibly pending) future value. */
  def put(key: K,
          value: Future[V]): Unit =
    backing.put(key.asInstanceOf[AnyRef], value.asInstanceOf[AnyRef])

  /** Looks up `key`, mapping Guava's `null`-means-absent convention onto Option. */
  def get(key: K): Option[Future[V]] =
    Option(backing.getIfPresent(key)).map(_.asInstanceOf[Future[V]])

  /** Drops any entry stored under `key`. */
  def invalidate(key: K): Unit =
    backing.invalidate(key.asInstanceOf[AnyRef])

  /** Translates the constructor parameters into a configured Guava cache. */
  private def newBackingCache(): GCache[AnyRef, AnyRef] = {
    val sized     = CacheBuilder.newBuilder().maximumSize(maxSize)
    val withTtl   = if (timeToLive.isFinite()) sized.expireAfterWrite(timeToLive.length, timeToLive.unit) else sized
    val withTti   = if (timeToIdle.isFinite()) withTtl.expireAfterAccess(timeToIdle.length, timeToIdle.unit) else withTtl
    val withSoft  = if (softValues) withTti.softValues() else withTti
    val withWeakV = if (weakValues) withSoft.weakValues() else withSoft
    val withWeakK = if (weakKeys) withWeakV.weakKeys() else withWeakV
    withWeakK.build()
  }
}
| ppat/resilience | src/main/scala/io/latent/resilience/cache/SimpleCache.scala | Scala | mit | 2,331 |
package us.feliscat.util
import java.io.{BufferedWriter, InputStream, OutputStream, OutputStreamWriter}
import java.nio.charset.{Charset, CodingErrorAction}
import us.feliscat.text.{StringNone, StringOption}
import scala.collection.mutable
import scala.collection.mutable.ListBuffer
import scala.concurrent.{Await, Future, Promise, TimeoutException}
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.ExecutionContext.Implicits.global
import scala.io.{Codec, Source}
import scala.sys.process.{Process, ProcessBuilder, ProcessIO}
import scala.util.{Failure, Success, Try}
/**
* @author K. Sakamoto
* Created on 2017/05/24
*/
package object process {
private val cache: mutable.WeakHashMap[String, Seq[String]] = mutable.WeakHashMap.empty[String, Seq[String]]
implicit class ProcessBuilderUtils(val repr: ProcessBuilder) extends AnyVal {
/**
 * Runs the process without feeding it stdin and returns its stdout lines, decoded with
 * `encoding` under the given malformed/unmappable policies. Errors yield an empty iterator.
 */
def lineStream(encoding: Charset,
               onMalformedInput: CodingErrorAction,
               onUnmappableCharacter: CodingErrorAction,
               replacementOpt: StringOption,
               timeout: FiniteDuration): Iterator[String] = {
  lineStreamTry(
    encoding,
    onMalformedInput,
    onUnmappableCharacter,
    replacementOpt,
    StringNone,
    needInputText = false,
    timeout)
}
/**
 * Runs the process, writing `inputText` to its stdin, and returns its stdout lines decoded
 * with `encoding`. Returns an empty iterator if `inputText` is empty or on failure.
 */
def lineStream(encoding: Charset,
               onMalformedInput: CodingErrorAction,
               onUnmappableCharacter: CodingErrorAction,
               replacementOpt: StringOption,
               inputText: StringOption,
               timeout: FiniteDuration): Iterator[String] = {
  lineStreamTry(
    encoding,
    onMalformedInput,
    onUnmappableCharacter,
    replacementOpt,
    inputText,
    needInputText = true,
    timeout)
}
/**
 * Wraps the raw `lineStream` call so that any failure (including a timeout) is logged to
 * stderr and converted into an empty iterator rather than propagated to the caller.
 */
private def lineStreamTry(encoding: Charset,
                          onMalformedInput: CodingErrorAction,
                          onUnmappableCharacter: CodingErrorAction,
                          replacementOpt: StringOption,
                          inputText: StringOption,
                          needInputText: Boolean,
                          timeout: FiniteDuration): Iterator[String] = {
  Try(
    lineStream(
      encoding,
      onMalformedInput,
      onUnmappableCharacter,
      replacementOpt,
      inputText,
      needInputText,
      timeout
    )
  ).recover {
    case err: Throwable =>
      err.printStackTrace(System.err)
      Iterator.empty
  }.get
}
/**
 * Runs the external command, optionally feeding `inputText` to its stdin, and returns its
 * stdout as lines decoded with `encoding` (honoring the malformed/unmappable policies and
 * optional replacement). Results are memoized in `cache`, keyed by the rendered command plus
 * (when stdin is used) the trimmed input text, so identical invocations skip the process run.
 *
 * Fix: the previous implementation copied the lines out of `promise` only inside an
 * asynchronous `onComplete` callback, into a local `var result`; `Await.result(result, ...)`
 * could then read the still-empty placeholder future before the callback executed, silently
 * returning an empty iterator even though the process succeeded. We now await
 * `promise.future` directly, which also makes the redundant second logging hook unnecessary.
 *
 * @throws TimeoutException if the process does not finish (or its output is not fully read)
 *                          within `timeout`
 */
@throws[TimeoutException]
private def lineStream(encoding: Charset,
                       onMalformedInput: CodingErrorAction,
                       onUnmappableCharacter: CodingErrorAction,
                       replacementOpt: StringOption,
                       inputText: StringOption,
                       needInputText: Boolean,
                       timeout: FiniteDuration): Iterator[String] = {
  if (needInputText && inputText.isEmpty) {
    return Iterator.empty
  }
  // Cache key: command line, plus the trimmed stdin payload when one is supplied.
  val key: String = {
    if (needInputText) {
      repr.toString.concat(inputText.get.trim)
    } else {
      repr.toString
    }
  }
  if (cache.contains(key)) {
    return cache(key).iterator
  }
  // Completed by readJob once the process's stdout has been fully consumed.
  val promise = Promise[Iterator[String]]
  implicit val codec: Codec = Codec(encoding).
    onMalformedInput(onMalformedInput).
    onUnmappableCharacter(onUnmappableCharacter)
  if (replacementOpt.nonEmpty) {
    codec.decodingReplaceWith(replacementOpt.get)
    codec.encodingReplaceWith(replacementOpt.get.getBytes)
  }
  // Writes the input text (if any) to the process's stdin, then closes it so the process
  // sees EOF.
  def writeJob(out: OutputStream): Unit = {
    if (needInputText) {
      inputText foreach { text: String =>
        val outputStreamWriter = new OutputStreamWriter(out, encoding)
        val writer = new BufferedWriter(outputStreamWriter)
        val correctText: String = Source.
          fromBytes(text.getBytes). //Codec
          getLines.
          mkString("\n")
        writer.write(correctText)
        writer.write('\n')
        writer.close()
        if (LibrariesConfig.runMode == RunModes.ProcessDetail) {
          println(correctText)
        }
        outputStreamWriter.close()
      }
    }
    out.close()
  }
  // Drains the process's stdout, caches the lines, and completes the promise.
  def readJob(in: InputStream): Unit = {
    if (!promise.isCompleted) {
      val lineBuffer = ListBuffer.empty[String]
      Source.
        fromInputStream(in). //Codec
        getLines foreach { line: String =>
        if (LibrariesConfig.runMode == RunModes.ProcessDetail) {
          println(line)
        }
        lineBuffer += line
      }
      in.close()
      val lines: Seq[String] = lineBuffer.result
      cache.put(key, lines)
      promise.success(lines.iterator)
    }
  }
  // Mirrors the process's stderr onto ours.
  def errorJob(err: InputStream): Unit = {
    Source.
      fromInputStream(err). //Codec
      getLines.
      foreach(System.err.println)
    err.close()
  }
  val io = new ProcessIO(
    writeJob,
    readJob,
    errorJob,
    daemonizeThreads = false)
  // Polls until stdout has been fully read, then yields the exit code.
  val processFuture: Future[Int] = Future {
    val process: Process = repr.run(io)
    while (!promise.isCompleted) {
      Thread.sleep(1000)
    }
    process.exitValue
  }
  processFuture.onComplete {
    case Success(_) => // lines were delivered through `promise`
    case Failure(err: Throwable) =>
      System.err.println(key)
      err.printStackTrace(System.err)
  }
  // Wait for the process (and hence the promise, which it polls) to finish, then hand the
  // lines straight from the promise — no racy intermediate var.
  Await.ready(processFuture, timeout)
  Await.result(promise.future, timeout)
}
}
}
| ktr-skmt/FelisCatusZero-multilingual | libraries/src/main/scala/us/feliscat/util/process/package.scala | Scala | apache-2.0 | 6,221 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.utils.index
import java.util.concurrent.ConcurrentHashMap
import com.typesafe.scalalogging.StrictLogging
import org.locationtech.jts.geom.{Envelope, Geometry}
import org.locationtech.geomesa.utils.geotools.GridSnap
import scala.annotation.tailrec
/**
* Creates an index suitable for geometries with extents. The index is broken up into tiers, where each
* geometry is assigned a tier based on its envelope size. When querying, each tier must be evaluated, so
* if possible tiers should be matched closely to the envelopes of the entries.
*
* Values are indexed by the centroid of their envelope. When querying, the bounding box is expanded based
* on the max envelope size of the tier, to ensure that all potential results are found. Thus, the false
* positive rate tends to go up with larger tiers, and post-filtering is recommended.
*
* @param sizes (x, y) max envelope size for each tier
* @param xBucketMultiplier multiplier for number of x buckets to create per tier
* @param yBucketMultiplier multiplier for number of y buckets to create per tier
* @param extents total area being indexed
* @tparam T item type
*/
class SizeSeparatedBucketIndex[T](sizes: Seq[(Double, Double)] = SizeSeparatedBucketIndex.DefaultTiers,
                                  xBucketMultiplier: Double = 1,
                                  yBucketMultiplier: Double = 1,
                                  extents: Envelope = new Envelope(-180.0, 180.0, -90.0, 90.0))
    extends SpatialIndex[T] with StrictLogging {

  // TODO https://geomesa.atlassian.net/browse/GEOMESA-2323 better anti-meridian handling

  require(sizes.nonEmpty, "No valid tier sizes specified")
  require(sizes.lengthCompare(1) == 0 ||
      sizes.sliding(2).forall { case Seq((x1, y1), (x2, y2)) => x1 <= x2 && y1 <= y2 },
    "Tiers must be ordered by increasing size")

  // note: for point ops, we always use the first (smallest) tier
  private val tiers = sizes.map { case (width, height) =>
    val xSize = math.ceil(extents.getWidth * xBucketMultiplier / width).toInt
    val ySize = math.ceil(extents.getHeight * yBucketMultiplier / height).toInt
    // create the buckets up front to avoid having to synchronize the whole array
    // we use a ConcurrentHashMap, which gives us iterators that aren't affected by modifications to the backing map
    val buckets = Array.fill(xSize, ySize)(new ConcurrentHashMap[String, T]())
    logger.debug(s"Creating tier for size ($width $height) with buckets [${xSize}x$ySize]")
    new Tier(width, height, buckets, new GridSnap(extents, xSize, ySize))
  }

  override def insert(geom: Geometry, key: String, value: T): Unit = {
    val envelope = geom.getEnvelopeInternal
    val tier = selectTier(envelope)
    // volatile reads should be cheaper than writes, so only update the variable if necessary
    if (tier.empty) {
      tier.empty = false
    }
    tier.bucket(envelope).put(key, value)
  }

  override def remove(geom: Geometry, key: String): T = {
    val envelope = geom.getEnvelopeInternal
    selectTier(envelope).bucket(envelope).remove(key)
  }

  override def get(geom: Geometry, key: String): T = {
    val envelope = geom.getEnvelopeInternal
    selectTier(envelope).bucket(envelope).get(key)
  }

  // Every tier must be consulted: a large geometry's centroid bucket may fall outside the
  // query box by up to that tier's max envelope size, hence the per-tier expansion below.
  override def query(xmin: Double, ymin: Double, xmax: Double, ymax: Double): Iterator[T] =
    tiers.iterator.flatMap(_.iterator(xmin, ymin, xmax, ymax))

  override def query(): Iterator[T] = query(extents.getMinX, extents.getMinY, extents.getMaxX, extents.getMaxY)

  override def size(): Int = {
    var size = 0
    tiers.foreach(tier => size += tier.size())
    size
  }

  override def clear(): Unit = tiers.foreach(_.clear())

  // Linear scan is fine: tier counts are tiny (4 by default).
  private def selectTier(envelope: Envelope): Tier = {
    val width = envelope.getWidth
    val height = envelope.getHeight
    tiers.find(t => t.maxSizeX >= width && t.maxSizeY >= height).getOrElse {
      throw new IllegalArgumentException(s"Envelope $envelope exceeds the max tier size ${sizes.last}")
    }
  }

  private class Tier(val maxSizeX: Double,
                     val maxSizeY: Double,
                     buckets: Array[Array[ConcurrentHashMap[String, T]]],
                     gridSnap: GridSnap) {

    // we can safely use volatile instead of synchronized here, as this is a primitive boolean whose
    // state doesn't depend on its own value
    // NOTE(review): this is a one-way latch — neither remove() nor clear() resets it to true,
    // so a tier that was ever written to is always scanned afterwards (a perf-only effect).
    @volatile
    var empty: Boolean = true

    private val maxX = buckets.length - 1
    private val maxY = buckets(0).length - 1

    def bucket(x: Double, y: Double): ConcurrentHashMap[String, T] = buckets(snapX(x))(snapY(y))

    // the bucket is selected based on the envelope centroid
    def bucket(envelope: Envelope): ConcurrentHashMap[String, T] =
      buckets(snapX((envelope.getMinX + envelope.getMaxX) / 2.0))(snapY((envelope.getMinY + envelope.getMaxY) / 2.0))

    def iterator(xmin: Double, ymin: Double, xmax: Double, ymax: Double): Iterator[T] =
      if (empty) { Iterator.empty } else { new TierIterator(xmin, ymin, xmax, ymax) }

    def size(): Int = {
      if (empty) { 0 } else {
        var size = 0
        var i = 0
        while (i <= maxX) {
          var j = 0
          while (j <= maxY) {
            size += buckets(i)(j).size()
            j += 1
          }
          i += 1
        }
        size
      }
    }

    def clear(): Unit = {
      var i = 0
      while (i <= maxX) {
        var j = 0
        while (j <= maxY) {
          buckets(i)(j).clear()
          j += 1
        }
        i += 1
      }
    }

    // gridSnap returns -1 when out of range; clamp to the nearest edge bucket.
    private def snapX(x: Double): Int = {
      val i = gridSnap.i(x)
      if (i != -1) { i } else if (x < extents.getMinX) { 0 } else { maxX }
    }

    private def snapY(y: Double): Int = {
      val j = gridSnap.j(y)
      if (j != -1) { j } else if (y < extents.getMinY) { 0 } else { maxY }
    }

    /**
     * Iterator over a range of buckets
     */
    class TierIterator (xmin: Double, ymin: Double, xmax: Double, ymax: Double) extends Iterator[T] {

      // Expand the query box by this tier's max envelope size so entries whose centroid bucket
      // lies just outside the box are still visited (callers should post-filter).
      private val maxi = snapX(xmax + maxSizeX)
      private val minj = snapY(ymin - maxSizeY)
      private val maxj = snapY(ymax + maxSizeY)

      private var i = snapX(xmin - maxSizeX)
      private var j = minj
      private var iter = buckets(i)(j).values.iterator() // note: `.values` is a cached view

      @tailrec
      override final def hasNext: Boolean = iter.hasNext || {
        if (i == maxi && j == maxj) { false } else {
          if (j < maxj) {
            j += 1
          } else {
            j = minj
            i += 1
          }
          iter = buckets(i)(j).values.iterator() // note: `.values` is a cached view
          hasNext
        }
      }

      override def next(): T = iter.next()
    }
  }
}
object SizeSeparatedBucketIndex {
  // Default tier sizes in (width, height) degrees; the last tier must cover the whole world
  // so every envelope fits somewhere.
  // TODO https://geomesa.atlassian.net/browse/GEOMESA-2322 these are somewhat arbitrary
  val DefaultTiers: Seq[(Double, Double)] = Seq((1, 1), (4, 4), (32, 32), (360, 180))
}
| locationtech/geomesa | geomesa-utils/src/main/scala/org/locationtech/geomesa/utils/index/SizeSeparatedBucketIndex.scala | Scala | apache-2.0 | 7,419 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.fs.storage
import com.typesafe.config._
import org.locationtech.geomesa.fs.storage.api.NamedOptions
import org.locationtech.geomesa.fs.storage.common.metadata.MetadataSerialization.Persistence.PartitionSchemeConfig
import org.locationtech.geomesa.utils.text.Suffixes.Memory
import org.opengis.feature.simple.SimpleFeatureType
import pureconfig.ConfigConvert
import pureconfig.generic.semiauto.deriveConvert
import scala.util.{Failure, Success, Try}
import scala.util.control.NonFatal
package object common {
// Shared typesafe-config render/parse settings: concise-but-formatted output, default parsing.
val RenderOptions: ConfigRenderOptions = ConfigRenderOptions.concise().setFormatted(true)
val ParseOptions: ConfigParseOptions = ConfigParseOptions.defaults()
// pureconfig codec for NamedOptions, derived once and reused by StorageSerialization.
implicit val NamedOptionsConvert: ConfigConvert[NamedOptions] = deriveConvert[NamedOptions]
object StorageSerialization {

  /**
   * Serialize configuration options as a typesafe config string
   *
   * @param options options
   * @return
   */
  def serialize(options: NamedOptions): String = NamedOptionsConvert.to(options).render(RenderOptions)

  /**
   * Deserialize configuration options, e.g. for partition schemes and metadata connections
   *
   * @param options options as a typesafe config string
   * @return
   */
  def deserialize(options: String): NamedOptions = {
    val config = ConfigFactory.parseString(options, ParseOptions)
    // On failure, fall back to the legacy partition-scheme layout; if that also fails,
    // surface the *original* error rather than the fallback's.
    try { pureconfig.loadConfigOrThrow[NamedOptions](config) } catch {
      case NonFatal(e) => Try(deserializeOldScheme(config)).getOrElse(throw e)
    }
  }

  // Legacy format: options nested under a PartitionSchemeConfig wrapper.
  private def deserializeOldScheme(config: Config): NamedOptions = {
    val parsed = pureconfig.loadConfigOrThrow[PartitionSchemeConfig](config)
    NamedOptions(parsed.scheme, parsed.options)
  }
}
/** User-data keys under which storage options are stashed on a SimpleFeatureType. */
object StorageKeys {
  val EncodingKey = "geomesa.fs.encoding"
  val LeafStorageKey = "geomesa.fs.leaf-storage"
  val MetadataKey = "geomesa.fs.metadata"
  val SchemeKey = "geomesa.fs.scheme"
  val FileSizeKey = "geomesa.fs.file-size"
  val ObserversKey = "geomesa.fs.observers"
}
/**
 * Implicit methods to set/retrieve storage configuration options in SimpleFeatureType user data.
 * The `removeX` methods both read and delete the key, returning the prior value if any.
 *
 * @param sft simple feature type
 */
implicit class RichSimpleFeatureType(val sft: SimpleFeatureType) extends AnyVal {

  import StorageKeys._
  import StorageSerialization.{deserialize, serialize}

  def setEncoding(encoding: String): Unit = sft.getUserData.put(EncodingKey, encoding)
  def removeEncoding(): Option[String] = remove(EncodingKey)

  def setLeafStorage(leafStorage: Boolean): Unit = sft.getUserData.put(LeafStorageKey, leafStorage.toString)
  def removeLeafStorage(): Option[Boolean] = remove(LeafStorageKey).map(_.toBoolean)

  def setScheme(name: String, options: Map[String, String] = Map.empty): Unit =
    sft.getUserData.put(SchemeKey, serialize(NamedOptions(name, options)))
  // also checks the pre-rename key for backwards compatibility
  // noinspection ScalaDeprecation
  def removeScheme(): Option[NamedOptions] =
    remove(SchemeKey).map(deserialize).orElse(remove("geomesa.fs.partition-scheme.config").map(deserialize))

  def setMetadata(name: String, options: Map[String, String] = Map.empty): Unit =
    sft.getUserData.put(MetadataKey, serialize(NamedOptions(name, options)))
  def removeMetadata(): Option[NamedOptions] = remove(MetadataKey).map(deserialize)

  def setTargetFileSize(size: String): Unit = {
    // validate input
    Memory.bytes(size).failed.foreach(e => throw new IllegalArgumentException("Invalid file size", e))
    sft.getUserData.put(FileSizeKey, size)
  }

  def removeTargetFileSize(): Option[Long] = {
    remove(FileSizeKey).map { s =>
      Memory.bytes(s) match {
        case Success(b) => b
        case Failure(e) => throw new IllegalArgumentException("Invalid file size", e)
      }
    }
  }

  // NOTE(review): observers are stored comma-joined, so a name containing ',' would round-trip
  // incorrectly — presumably observer names are class names and can't contain commas; confirm.
  def setObservers(names: Seq[String]): Unit = sft.getUserData.put(ObserversKey, names.mkString(","))
  def getObservers: Seq[String] = {
    val obs = sft.getUserData.get(ObserversKey).asInstanceOf[String]
    if (obs == null || obs.isEmpty) { Seq.empty } else { obs.split(",") }
  }

  // Removes the key and returns its previous value (null-safe).
  private def remove(key: String): Option[String] = Option(sft.getUserData.remove(key).asInstanceOf[String])
}
}
| locationtech/geomesa | geomesa-fs/geomesa-fs-storage/geomesa-fs-storage-common/src/main/scala/org/locationtech/geomesa/fs/storage/common/package.scala | Scala | apache-2.0 | 4,746 |
package com.swara.learn.genetic
import net.jcip.annotations.ThreadSafe
/**
* A recombination operator. Recombinators crossover the genetic makeup of two parent individuals
* in order to produce offspring that are representative of both parent genomes.
*
* @tparam T Type of genome.
*/
@ThreadSafe
trait Recombinator[T] {
def crossover(father: T, mother: T): T
}
| ashwin153/swara | swara-learn/src/main/scala/com/swara/learn/genetic/Recombinator.scala | Scala | mit | 373 |
package software.uncharted.salt.examples.hpie
import software.uncharted.salt.core.projection.Projection
import org.apache.spark.sql.Row
/**
 * Projects an input file path (./path/to/filename) onto each of its parent
 * path components (./, ./path, ./path/to). Here, 'tiles' are directories
 * containing a single bin, which aggregates the metadata of all contained files.
 */
class PathProjection(maxDepth: Int) extends Projection[String, String, Int] {

  // every depth we project a path onto, from the root (0) down to maxDepth
  private val depths = Seq.range(0, maxDepth + 1)

  override def project(dc: Option[String], maxBin: Int): Option[Seq[(String, Int)]] = {
    dc.map { path =>
      if (path.indexOf("/") < 0) {
        // a bare filename has no parent components; it projects only onto itself
        Seq((path, 0))
      } else {
        // NOTE(review): slice() past the end of the array returns the full path, so
        // files shallower than maxDepth yield the same ancestor path more than once
        // in the result — confirm whether the aggregation de-duplicates these
        val components = path.split("/")
        depths.map(depth => (components.slice(0, depth + 1).mkString("/"), 0))
      }
    }
  }

  // tiles hold a single bin, so the 1-D bin index is the bin itself
  override def binTo1D(bin: Int, maxBin: Int): Int = bin
}
| unchartedsoftware/salt-examples | hierarchical-pie-example/generation/src/main/scala/software/uncharted/salt/examples/hpie/PathProjection.scala | Scala | apache-2.0 | 1,114 |
/**
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.datasource.mongodb.query
import java.util.regex.Pattern
import com.mongodb.QueryBuilder
import com.mongodb.casbah.Imports
import com.mongodb.casbah.Imports._
import com.stratio.datasource.mongodb.sources.{NearSphere, Near}
import org.apache.spark.sql.sources._
import com.stratio.datasource.util.Config
import com.stratio.datasource.mongodb.config.MongodbConfig
object FilterSection {

  /**
   * Implicit conversion to pass from an array of [[Filter]] to [[FilterSection]] filter description object.
   *
   * @param sFilters Spark source filters to wrap (combined with AND)
   * @param config   access configuration (used e.g. to decide `_id` ObjectId handling)
   * @return [[FilterSection]] built from `sFilters`
   */
  implicit def srcFilArr2filSel(sFilters: Array[Filter])(implicit config: Config): FilterSection =
    new SourceFilters(sFilters)

  //Factory methods

  /** Builds a [[SourceFilters]] from Spark source filters. */
  def apply(sFilters: Array[Filter])(implicit config: Config): FilterSection =
    srcFilArr2filSel(sFilters)

  /** An empty filter section ([[NoFilters]]). */
  def apply(): FilterSection = NoFilters
}
/**
 * Describes the filter section of a MongoDB query: implementations produce the
 * [[DBObject]] restriction that is applied when reading a partition.
 */
trait FilterSection {

  /**
   * Builds the MongoDB filter document.
   *
   * @return a [[DBObject]] describing the filters to apply to a partition.
   */
  def filtersToDBObject(): DBObject

}
/**
 * A [[FilterSection]] whose restriction is supplied directly as a Casbah/MongoDB
 * [[DBObject]] (https://mongodb.github.io/casbah/).
 *
 * @param filterDesc the raw filter document to apply as-is
 */
case class RawFilter(filterDesc: DBObject) extends FilterSection {
  override def filtersToDBObject(): DBObject = filterDesc
}
/**
 * A [[FilterSection]] that imposes no restriction at all: it yields an empty
 * query document, matching every record.
 */
case object NoFilters extends FilterSection {
  override def filtersToDBObject(): DBObject = QueryBuilder.start().get()
}
/**
 * This [[FilterSection]] is described by an array of [[org.apache.spark.sql.sources.Filter]] where each
 * element is a restriction; all elements are combined with AND.
 *
 * @param sFilters All filters to be applied (AND)
 * @param parentFilterIsNot `true` iff the filter is negated: NOT (sFilters[0] AND ... AND sFilters[n-1])
 * @param config Access configuration
 */
case class SourceFilters(
  sFilters: Array[Filter],
  parentFilterIsNot: Boolean = false
)(implicit config: Config) extends FilterSection {

  // Folds every source filter into a single mutable QueryBuilder and returns the
  // resulting document. (Overridden without parens; Scala permits this against the
  // trait's `filtersToDBObject()` declaration.)
  override def filtersToDBObject: DBObject = {
    val queryBuilder: QueryBuilder = QueryBuilder.start

    // negation is applied to the builder up front, before any conditions are added
    if (parentFilterIsNot) queryBuilder.not()
    // NOTE(review): this is a partial function — a filter hitting none of the cases
    // below (e.g. And/Or/String* while parentFilterIsNot is true) throws a MatchError
    sFilters.foreach {
      case EqualTo(attribute, value) =>
        queryBuilder.put(attribute).is(checkObjectID(attribute, value))
      case GreaterThan(attribute, value) =>
        queryBuilder.put(attribute).greaterThan(checkObjectID(attribute, value))
      case GreaterThanOrEqual(attribute, value) =>
        queryBuilder.put(attribute).greaterThanEquals(checkObjectID(attribute, value))
      case In(attribute, values) =>
        queryBuilder.put(attribute).in(values.map(value => checkObjectID(attribute, value)))
      case LessThan(attribute, value) =>
        queryBuilder.put(attribute).lessThan(checkObjectID(attribute, value))
      case LessThanOrEqual(attribute, value) =>
        queryBuilder.put(attribute).lessThanEquals(checkObjectID(attribute, value))
      case IsNull(attribute) =>
        queryBuilder.put(attribute).is(null)
      case IsNotNull(attribute) =>
        queryBuilder.put(attribute).notEquals(null)
      case And(leftFilter, rightFilter) if !parentFilterIsNot =>
        queryBuilder.and(
          SourceFilters(Array(leftFilter)).filtersToDBObject(),
          SourceFilters(Array(rightFilter)).filtersToDBObject()
        )
      case Or(leftFilter, rightFilter) if !parentFilterIsNot =>
        queryBuilder.or(
          SourceFilters(Array(leftFilter)).filtersToDBObject(),
          SourceFilters(Array(rightFilter)).filtersToDBObject()
        )
      case StringStartsWith(attribute, value) if !parentFilterIsNot =>
        queryBuilder.put(attribute).regex(Pattern.compile("^" + value + ".*$"))
      case StringEndsWith(attribute, value) if !parentFilterIsNot =>
        queryBuilder.put(attribute).regex(Pattern.compile("^.*" + value + "$"))
      case StringContains(attribute, value) if !parentFilterIsNot =>
        queryBuilder.put(attribute).regex(Pattern.compile(".*" + value + ".*"))
      case Near(attribute, x, y, None) =>
        queryBuilder.put(attribute).near(x, y)
      case Near(attribute, x, y, Some(max)) =>
        queryBuilder.put(attribute).near(x, y, max)
      case NearSphere(attribute, longitude, latitude, None) =>
        queryBuilder.put(attribute).nearSphere(longitude, latitude)
      case NearSphere(attribute, longitude, latitude, Some(maxDistance)) =>
        queryBuilder.put(attribute).nearSphere(longitude, latitude, maxDistance)
      case Not(filter) =>
        // NOTE(review): the negated sub-filter document built here is never merged
        // into `queryBuilder` — foreach discards the expression's result — so a
        // top-level Not appears to have no effect on the outer query. Confirm
        // intent; a fix would look like
        // queryBuilder.and(SourceFilters(Array(filter), true).filtersToDBObject()).
        SourceFilters(Array(filter), true).filtersToDBObject()
    }
    queryBuilder.get
  }

  /**
   * Check if the field is "_id" and if the user wants to filter by this field as an ObjectId
   *
   * @param attribute Name of the file
   * @param value Value for the attribute
   * @return The value in the correct data type
   */
  private def checkObjectID(attribute: String, value: Any)(implicit config: Config) : Any = attribute match {
    case "_id" if idAsObjectId => new ObjectId(value.toString)
    case _ => value
  }

  // whether "_id" values should be coerced to ObjectId, per configuration
  // (defaults to MongodbConfig.DefaultIdAsObjectId)
  lazy val idAsObjectId: Boolean =
    config.getOrElse[String](MongodbConfig.IdAsObjectId, MongodbConfig.DefaultIdAsObjectId).equalsIgnoreCase("true")
} | darroyocazorla/spark-mongodb | spark-mongodb/src/main/scala/com/stratio/datasource/mongodb/query/FilterSection.scala | Scala | apache-2.0 | 5,992 |
package io.taig.android.soap
import io.circe.Encoder
import io.taig.android.soap.syntax.writer._
object Bundle {

  /** The shared, immutable empty bundle. */
  val empty = android.os.Bundle.EMPTY

  /** Creates a fresh bundle sized for `capacity` entries. */
  def apply(capacity: Int): Bundle = new Bundle(capacity)

  /** Creates a single-entry bundle with `value` encoded under `key`. */
  def apply[V: Encoder](key: String, value: V): Bundle = Bundle(1).write(key, value)
} | Taig/Soap | src/main/scala/io/taig/android/soap/Bundle.scala | Scala | mit | 331 |
package go.models
import io.apibuilder.spec.v0.models.Resource
import lib.generator.GeneratorUtil
import lib.Text._
object GoUtil {

  // Identifiers that may not be used verbatim as Go names. See
  // https://golang.org/ref/spec#Keywords
  // https://golang.org/ref/spec#Predeclared_identifiers
  private[this] val ReservedWords = Set(
    "break", "default", "func", "interface", "select", "case", "defer", "go",
    "map", "struct", "chan", "else", "goto", "package", "switch", "const", "fallthrough",
    "if", "range", "type", "continue", "for", "import", "return", "var",
    "bool", "byte", "complex64", "complex128", "error", "float32", "float64", "int", "int8",
    "int16", "int32", "int64", "rune", "string", "uint", "uint8", "uint16", "uint32", "uint64",
    "uintptr", "true", "false", "iota", "nil", "append", "cap", "close", "complex", "copy",
    "delete", "imag", "len", "make", "new", "panic", "print", "println", "real", "recover"
  )

  /**
   * Appends an underscore to `value` if it collides with a Go keyword or
   * predeclared identifier; returns it unchanged otherwise.
   */
  def quoteNameIfKeyword(value: String): String =
    if (ReservedWords.contains(value.trim)) s"${value}_" else value

  /** Renders optional text as a multi-line comment; empty string when absent. */
  def textToComment(text: Option[String]): String = {
    text match {
      case None => ""
      case Some(v) => textToComment(GeneratorUtil.splitIntoLines(v))
    }
  }

  /** Renders optional text as a single-line `//` comment with trailing newline; empty string when absent. */
  def textToSingleLineComment(text: Option[String]): String = {
    text match {
      case None => ""
      case Some(v) => s"// ${v.trim}\\n"
    }
  }

  /**
   * Returns comment, including a trailing newline
   */
  def textToComment(text: Seq[String]): String = {
    "/**\\n * " + text.mkString("\\n * ") + "\\n */\\n"
  }

  /** Surrounds `value` with double quotes. */
  def wrapInQuotes(value: String): String = {
    s""""$value""""
  }

  /**
   * returns a safe variable name with leading letter in upper case
   */
  def publicName(name: String): String = {
    quoteNameIfKeyword(
      safeName(
        if (name == name.toUpperCase) {
          // fully upper-cased input: lower-case the words before re-capitalizing,
          // so e.g. constants don't come out all-caps
          initCap(splitIntoWords(name).map(_.toLowerCase)).mkString("")
        } else {
          initCap(snakeToCamelCase(name))
        }
      )
    )
  }

  /**
   * returns a safe variable name with leading letter in lower case
   */
  def privateName(name: String): String = {
    quoteNameIfKeyword(
      initLowerCase(publicName(name))
    )
  }

  /** Returns a keyword-safe, all-lower-case package name. */
  def packageName(name: String): String = {
    privateName(name).toLowerCase
  }

  /** Method name for a resource: the public form of its plural. */
  def methodName(resource: Resource): String = {
    publicName(resource.plural)
  }

}
| mbryzek/apidoc-generator | go-generator/src/main/scala/models/GoUtil.scala | Scala | mit | 2,366 |
package mesosphere.marathon
package api.v2
import javax.servlet.http.HttpServletResponse
import akka.event.EventStream
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import mesosphere.AkkaUnitTest
import mesosphere.marathon.api.v2.json.Formats.TimestampFormat
import mesosphere.marathon.api.{ RestResource, TaskKiller, TestAuthFixture }
import mesosphere.marathon.core.appinfo.PodStatusService
import mesosphere.marathon.core.async.ExecutionContexts
import mesosphere.marathon.core.condition.Condition
import mesosphere.marathon.core.deployment.DeploymentPlan
import mesosphere.marathon.core.group.GroupManager
import mesosphere.marathon.core.instance.Instance
import mesosphere.marathon.core.instance.Instance.InstanceState
import mesosphere.marathon.core.plugin.PluginManager
import mesosphere.marathon.core.pod.impl.PodManagerImpl
import mesosphere.marathon.core.pod.{ MesosContainer, PodDefinition, PodManager }
import mesosphere.marathon.plugin.auth.{ Authenticator, Authorizer }
import mesosphere.marathon.raml.{ EnvVarSecret, ExecutorResources, FixedPodScalingPolicy, NetworkMode, Pod, PodSecretVolume, Raml, Resources }
import mesosphere.marathon.state.PathId._
import mesosphere.marathon.state.{ Timestamp, UnreachableStrategy }
import mesosphere.marathon.test.{ Mockito, SettableClock }
import mesosphere.marathon.util.SemanticVersion
import play.api.libs.json._
import scala.collection.immutable.Seq
import scala.concurrent.Future
import scala.concurrent.duration._
class PodsResourceTest extends AkkaUnitTest with Mockito {
// TODO(jdef) test findAll
// TODO(jdef) test status
// TODO(jdef) incorporate checks for firing pod events on C, U, D operations
val podSpecJson = """
| { "id": "/mypod", "networks": [ { "mode": "host" } ], "containers": [
| { "name": "webapp",
| "resources": { "cpus": 0.03, "mem": 64 },
| "image": { "kind": "DOCKER", "id": "busybox" },
| "exec": { "command": { "shell": "sleep 1" } } } ] }
""".stripMargin
val podSpecJsonWithBridgeNetwork = """
| { "id": "/mypod", "networks": [ { "mode": "container/bridge" } ], "containers": [
| { "name": "webapp",
| "resources": { "cpus": 0.03, "mem": 64 },
| "image": { "kind": "DOCKER", "id": "busybox" },
| "exec": { "command": { "shell": "sleep 1" } } } ] }
""".stripMargin
val podSpecJsonWithContainerNetworking = """
| { "id": "/mypod", "networks": [ { "mode": "container" } ], "containers": [
| { "name": "webapp",
| "resources": { "cpus": 0.03, "mem": 64 },
| "image": { "kind": "DOCKER", "id": "busybox" },
| "exec": { "command": { "shell": "sleep 1" } } } ] }
""".stripMargin
val podSpecJsonWithExecutorResources = """
| { "id": "/mypod", "networks": [ { "mode": "host" } ], "containers": [
| { "name": "webapp",
| "resources": { "cpus": 0.03, "mem": 64 },
| "image": { "kind": "DOCKER", "id": "busybox" },
| "exec": { "command": { "shell": "sleep 1" } } } ],
| "executorResources": { "cpus": 100, "mem": 100 } }
""".stripMargin
val podSpecJsonWithFileBasedSecret = """
| { "id": "/mypod", "networks": [ { "mode": "host" } ], "containers":
| [
| { "name": "webapp",
| "resources": { "cpus": 0.03, "mem": 64 },
| "image": { "kind": "DOCKER", "id": "busybox" },
| "exec": { "command": { "shell": "sleep 1" } },
| "volumeMounts": [ { "name": "vol", "mountPath": "mnt2" } ]
| }
| ],
| "volumes": [ { "name": "vol", "secret": "secret1" } ],
| "secrets": { "secret1": { "source": "/path/to/my/secret" } }
| }
""".stripMargin
val podSpecJsonWithEnvRefSecretOnContainerLevel = """
| { "id": "/mypod", "networks": [ { "mode": "host" } ], "containers":
| [
| { "name": "webapp",
| "resources": { "cpus": 0.03, "mem": 64 },
| "image": { "kind": "DOCKER", "id": "busybox" },
| "exec": { "command": { "shell": "sleep 1" } },
| "environment": { "vol": { "secret": "secret1" } }
| }
| ],
| "secrets": { "secret1": { "source": "/path/to/my/secret" } }
| }
""".stripMargin
val podSpecJsonWithEnvRefSecret = """
| { "id": "/mypod", "networks": [ { "mode": "host" } ], "containers":
| [
| { "name": "webapp",
| "resources": { "cpus": 0.03, "mem": 64 },
| "image": { "kind": "DOCKER", "id": "busybox" },
| "exec": { "command": { "shell": "sleep 1" } }
| }
| ],
| "environment": { "vol": { "secret": "secret1" } },
| "secrets": { "secret1": { "source": "/foo" } }
| }
""".stripMargin
"PodsResource" should {
"support pods" in {
val f = Fixture()
val response = f.podsResource.capability(f.auth.request)
response.getStatus should be(HttpServletResponse.SC_OK)
val body = Option(response.getEntity.asInstanceOf[String])
body should be(None)
}
"be able to create a simple single-container pod from docker image w/ shell command" in {
implicit val podSystem = mock[PodManager]
val f = Fixture(configArgs = Seq("--default_network_name", "blah")) // should not be injected into host network spec
podSystem.create(any, eq(false)).returns(Future.successful(DeploymentPlan.empty))
val response = f.podsResource.create(podSpecJson.getBytes(), force = false, f.auth.request)
withClue(s"response body: ${response.getEntity}") {
response.getStatus should be(HttpServletResponse.SC_CREATED)
val parsedResponse = Option(response.getEntity.asInstanceOf[String]).map(Json.parse)
parsedResponse should be (defined)
val maybePod = parsedResponse.map(_.as[Pod])
maybePod should be (defined) // validate that we DID get back a pod definition
val pod = maybePod.get
pod.networks(0).mode should be (NetworkMode.Host)
pod.networks(0).name should not be (defined)
pod.executorResources should be (defined) // validate that executor resources are defined
pod.executorResources.get should be (ExecutorResources()) // validate that the executor resources has default values
response.getMetadata.containsKey(RestResource.DeploymentHeader) should be(true)
}
}
"be able to create a simple single-container pod with bridge network" in {
implicit val podSystem = mock[PodManager]
val f = Fixture(configArgs = Seq("--default_network_name", "blah")) // should not be injected into host network spec
podSystem.create(any, eq(false)).returns(Future.successful(DeploymentPlan.empty))
val response = f.podsResource.create(podSpecJsonWithBridgeNetwork.getBytes(), force = false, f.auth.request)
withClue(s"response body: ${response.getEntity}") {
response.getStatus should be(HttpServletResponse.SC_CREATED)
val parsedResponse = Option(response.getEntity.asInstanceOf[String]).map(Json.parse)
parsedResponse should be (defined)
val maybePod = parsedResponse.map(_.as[Pod])
maybePod should be (defined) // validate that we DID get back a pod definition
val pod = maybePod.get
pod.networks(0).mode should be (NetworkMode.ContainerBridge)
pod.networks(0).name should not be (defined)
pod.executorResources should be (defined) // validate that executor resources are defined
pod.executorResources.get should be (ExecutorResources()) // validate that the executor resources has default values
response.getMetadata.containsKey(RestResource.DeploymentHeader) should be(true)
}
}
"The secrets feature is NOT enabled and create pod (that uses file base secrets) fails" in {
implicit val podSystem = mock[PodManager]
val f = Fixture(configArgs = Seq("--default_network_name", "blah")) // should not be injected into host network spec
podSystem.create(any, eq(false)).returns(Future.successful(DeploymentPlan.empty))
val response = f.podsResource.create(podSpecJsonWithFileBasedSecret.getBytes(), force = false, f.auth.request)
withClue(s"response body: ${response.getEntity}") {
response.getStatus should be(422)
response.getEntity.toString should include("Feature secrets is not enabled")
}
}
"The secrets feature is NOT enabled and create pod (that uses env secret refs) fails" in {
implicit val podSystem = mock[PodManager]
val f = Fixture(configArgs = Seq("--default_network_name", "blah")) // should not be injected into host network spec
podSystem.create(any, eq(false)).returns(Future.successful(DeploymentPlan.empty))
val response = f.podsResource.create(podSpecJsonWithEnvRefSecret.getBytes(), force = false, f.auth.request)
withClue(s"response body: ${response.getEntity}") {
response.getStatus should be(422)
response.getEntity.toString should include("Feature secrets is not enabled")
}
}
"The secrets feature is NOT enabled and create pod (that uses env secret refs on container level) fails" in {
implicit val podSystem = mock[PodManager]
val f = Fixture(configArgs = Seq("--default_network_name", "blah")) // should not be injected into host network spec
podSystem.create(any, eq(false)).returns(Future.successful(DeploymentPlan.empty))
val response = f.podsResource.create(podSpecJsonWithEnvRefSecretOnContainerLevel.getBytes(), force = false, f.auth.request)
withClue(s"response body: ${response.getEntity}") {
response.getStatus should be(422)
response.getEntity.toString should include("Feature secrets is not enabled")
}
}
"The secrets feature is enabled and create pod (that uses env secret refs on container level) succeeds" in {
implicit val podSystem = mock[PodManager]
val f = Fixture(configArgs = Seq("--default_network_name", "blah", "--enable_features", Features.SECRETS)) // should not be injected into host network spec
podSystem.create(any, eq(false)).returns(Future.successful(DeploymentPlan.empty))
val response = f.podsResource.create(podSpecJsonWithEnvRefSecretOnContainerLevel.getBytes(), force = false, f.auth.request)
withClue(s"response body: ${response.getEntity}") {
response.getStatus should be(201)
val parsedResponse = Option(response.getEntity.asInstanceOf[String]).map(Json.parse)
parsedResponse should be (defined)
val maybePod = parsedResponse.map(_.as[Pod])
maybePod should be (defined) // validate that we DID get back a pod definition
val pod = maybePod.get
pod.containers(0).environment("vol") shouldBe EnvVarSecret("secret1")
}
}
"The secrets feature is enabled and create pod (that uses file based secrets) succeeds" in {
implicit val podSystem = mock[PodManager]
val f = Fixture(configArgs = Seq("--default_network_name", "blah", "--enable_features", Features.SECRETS)) // should not be injected into host network spec
podSystem.create(any, eq(false)).returns(Future.successful(DeploymentPlan.empty))
val response = f.podsResource.create(podSpecJsonWithFileBasedSecret.getBytes(), force = false, f.auth.request)
withClue(s"response body: ${response.getEntity}") {
response.getStatus should be(201)
val parsedResponse = Option(response.getEntity.asInstanceOf[String]).map(Json.parse)
parsedResponse should be (defined)
val maybePod = parsedResponse.map(_.as[Pod])
maybePod should be (defined) // validate that we DID get back a pod definition
val pod = maybePod.get
pod.volumes(0) shouldBe PodSecretVolume("vol", "secret1")
}
}
"create a pod w/ container networking" in {
implicit val podSystem = mock[PodManager]
val f = Fixture(configArgs = Seq("--default_network_name", "blah")) // required since network name is missing from JSON
podSystem.create(any, eq(false)).returns(Future.successful(DeploymentPlan.empty))
val response = f.podsResource.create(podSpecJsonWithContainerNetworking.getBytes(), force = false, f.auth.request)
withClue(s"response body: ${response.getEntity}") {
response.getStatus should be(HttpServletResponse.SC_CREATED)
val parsedResponse = Option(response.getEntity.asInstanceOf[String]).map(Json.parse)
parsedResponse should be (defined)
val maybePod = parsedResponse.map(_.as[Pod])
maybePod should be (defined) // validate that we DID get back a pod definition
val pod = maybePod.get
pod.networks(0).mode should be (NetworkMode.Container)
pod.networks(0).name should be (Some("blah"))
pod.executorResources should be (defined) // validate that executor resources are defined
pod.executorResources.get should be (ExecutorResources()) // validate that the executor resources has default values
response.getMetadata.containsKey(RestResource.DeploymentHeader) should be(true)
}
}
"create a pod w/ container networking w/o default network name" in {
implicit val podSystem = mock[PodManager]
val f = Fixture()
podSystem.create(any, eq(false)).returns(Future.successful(DeploymentPlan.empty))
val ex = intercept[NormalizationException] {
f.podsResource.create(podSpecJsonWithContainerNetworking.getBytes(), force = false, f.auth.request)
}
ex.msg shouldBe NetworkNormalizationMessages.ContainerNetworkNameUnresolved
}
"create a pod with custom executor resource declaration" in {
implicit val podSystem = mock[PodManager]
val f = Fixture()
podSystem.create(any, eq(false)).returns(Future.successful(DeploymentPlan.empty))
val response = f.podsResource.create(podSpecJsonWithExecutorResources.getBytes(), force = false, f.auth.request)
withClue(s"response body: ${response.getEntity}") {
response.getStatus should be(HttpServletResponse.SC_CREATED)
val parsedResponse = Option(response.getEntity.asInstanceOf[String]).map(Json.parse)
parsedResponse should be (defined)
val maybePod = parsedResponse.map(_.as[Pod])
maybePod should be (defined) // validate that we DID get back a pod definition
val pod = maybePod.get
pod.executorResources should be (defined) // validate that executor resources are defined
pod.executorResources.get.cpus should be (100)
pod.executorResources.get.mem should be (100)
// disk is not assigned in the posted pod definition, therefore this should be the default value 10
pod.executorResources.get.disk should be (10)
response.getMetadata.containsKey(RestResource.DeploymentHeader) should be(true)
}
}
"update a simple single-container pod from docker image w/ shell command" in {
implicit val podSystem = mock[PodManager]
val f = Fixture()
podSystem.update(any, eq(false)).returns(Future.successful(DeploymentPlan.empty))
val postJson = """
| { "id": "/mypod", "networks": [ { "mode": "host" } ], "containers": [
| { "name": "webapp",
| "resources": { "cpus": 0.03, "mem": 64 },
| "image": { "kind": "DOCKER", "id": "busybox" },
| "exec": { "command": { "shell": "sleep 1" } } } ] }
""".stripMargin
val response = f.podsResource.update("/mypod", postJson.getBytes(), force = false, f.auth.request)
withClue(s"response body: ${response.getEntity}") {
response.getStatus should be(HttpServletResponse.SC_OK)
val parsedResponse = Option(response.getEntity.asInstanceOf[String]).map(Json.parse)
parsedResponse should not be None
parsedResponse.map(_.as[Pod]) should not be None // validate that we DID get back a pod definition
response.getMetadata.containsKey(RestResource.DeploymentHeader) should be(true)
}
}
"save pod with more than one instance" in {
implicit val podSystem = mock[PodManager]
val f = Fixture()
podSystem.update(any, eq(false)).returns(Future.successful(DeploymentPlan.empty))
val postJson = """
| { "id": "/mypod", "networks": [ { "mode": "host" } ],
| "scaling": { "kind": "fixed", "instances": 2 }, "containers": [
| { "name": "webapp",
| "resources": { "cpus": 0.03, "mem": 64 },
| "exec": { "command": { "shell": "sleep 1" } } } ] }
""".stripMargin
val response = f.podsResource.update("/mypod", postJson.getBytes(), force = false, f.auth.request)
withClue(s"response body: ${response.getEntity}") {
response.getStatus should be(HttpServletResponse.SC_OK)
val parsedResponse = Option(response.getEntity.asInstanceOf[String]).map(Json.parse)
parsedResponse should not be None
val podOption = parsedResponse.map(_.as[Pod])
podOption should not be None // validate that we DID get back a pod definition
response.getMetadata.containsKey(RestResource.DeploymentHeader) should be(true)
podOption.get.scaling should not be None
podOption.get.scaling.get shouldBe a[FixedPodScalingPolicy]
podOption.get.scaling.get.asInstanceOf[FixedPodScalingPolicy].instances should be (2)
}
}
"delete a pod" in {
implicit val podSystem = mock[PodManager]
val f = Fixture()
podSystem.find(any).returns(Some(PodDefinition()))
podSystem.delete(any, eq(false)).returns(Future.successful(DeploymentPlan.empty))
val response = f.podsResource.remove("/mypod", force = false, f.auth.request)
withClue(s"response body: ${response.getEntity}") {
response.getStatus should be(HttpServletResponse.SC_ACCEPTED)
val body = Option(response.getEntity.asInstanceOf[String])
body should be(None)
response.getMetadata.containsKey(RestResource.DeploymentHeader) should be(true)
}
}
"lookup a specific pod, and that pod does not exist" in {
implicit val podSystem = mock[PodManager]
val f = Fixture()
podSystem.find(any).returns(Option.empty[PodDefinition])
val response = f.podsResource.find("/mypod", f.auth.request)
withClue(s"response body: ${response.getEntity}") {
response.getStatus should be(HttpServletResponse.SC_NOT_FOUND)
val body = Option(response.getEntity.asInstanceOf[String])
body should not be None
body.foreach(_ should include("mypod does not exist"))
}
}
"Create a new pod with w/ Docker image and config.json" in {
implicit val podSystem = mock[PodManager]
val f = Fixture(configArgs = Seq("--enable_features", "secrets"))
podSystem.create(any, eq(false)).returns(Future.successful(DeploymentPlan.empty))
val podJson =
"""
|{
| "id": "/pod",
| "containers": [{
| "name": "container0",
| "resources": {
| "cpus": 0.1,
| "mem": 32
| },
| "image": {
| "kind": "DOCKER",
| "id": "private/image",
| "pullConfig": {
| "secret": "pullConfigSecret"
| }
| },
| "exec": {
| "command": {
| "shell": "sleep 1"
| }
| }
| }],
| "secrets": {
| "pullConfigSecret": {
| "source": "/config"
| }
| }
|}
""".stripMargin
val response = f.podsResource.create(podJson.getBytes(), force = false, f.auth.request)
withClue(s"response body: ${response.getEntity}") {
response.getStatus should be(HttpServletResponse.SC_CREATED)
val parsedResponse = Option(response.getEntity.asInstanceOf[String]).map(Json.parse)
parsedResponse should be (defined)
val maybePod = parsedResponse.map(_.as[Pod])
maybePod should be (defined) // validate that we DID get back a pod definition
val pod = maybePod.get
pod.containers.headOption should be (defined)
val container = pod.containers.head
container.image should be (defined)
val image = container.image.get
image.pullConfig should be (defined)
val pullConfig = image.pullConfig.get
pullConfig.secret should be ("pullConfigSecret")
response.getMetadata.containsKey(RestResource.DeploymentHeader) should be(true)
}
}
"Creating a new pod with w/ AppC image and config.json should fail" in {
implicit val podSystem = mock[PodManager]
val f = Fixture(configArgs = Seq("--enable_features", "secrets"))
podSystem.create(any, eq(false)).returns(Future.successful(DeploymentPlan.empty))
val podJson =
"""
|{
| "id": "/pod",
| "containers": [{
| "name": "container0",
| "resources": {
| "cpus": 0.1,
| "mem": 32
| },
| "image": {
| "kind": "APPC",
| "id": "private/image",
| "pullConfig": {
| "secret": "pullConfigSecret"
| }
| },
| "exec": {
| "command": {
| "shell": "sleep 1"
| }
| }
| }],
| "secrets": {
| "pullConfigSecret": {
| "source": "/config"
| }
| }
|}
""".stripMargin
val response = f.podsResource.create(podJson.getBytes(), force = false, f.auth.request)
withClue(s"response body: ${response.getEntity}") {
response.getStatus should be(422)
response.getEntity.toString should include("pullConfig is supported only with Docker images")
}
}
"Creating a new pod with w/ Docker image and non-existing secret should fail" in {
implicit val podSystem = mock[PodManager]
val f = Fixture(configArgs = Seq("--enable_features", "secrets"))
podSystem.create(any, eq(false)).returns(Future.successful(DeploymentPlan.empty))
val podJson =
"""
|{
| "id": "/pod",
| "containers": [{
| "name": "container0",
| "resources": {
| "cpus": 0.1,
| "mem": 32
| },
| "image": {
| "kind": "Docker",
| "id": "private/image",
| "pullConfig": {
| "secret": "pullConfigSecret"
| }
| },
| "exec": {
| "command": {
| "shell": "sleep 1"
| }
| }
| }],
| "secrets": {
| "pullConfigSecretA": {
| "source": "/config"
| }
| }
|}
""".stripMargin
val response = f.podsResource.create(podJson.getBytes(), force = false, f.auth.request)
withClue(s"response body: ${response.getEntity}") {
response.getStatus should be(422)
response.getEntity.toString should include("pullConfig.secret must refer to an existing secret")
}
}
"Create a new pod with w/ Docker image and config.json, but with secrets disabled" in {
implicit val podSystem = mock[PodManager]
val f = Fixture()
podSystem.create(any, eq(false)).returns(Future.successful(DeploymentPlan.empty))
val podJson =
"""
|{
| "id": "/pod",
| "containers": [{
| "name": "container0",
| "resources": {
| "cpus": 0.1,
| "mem": 32
| },
| "image": {
| "kind": "DOCKER",
| "id": "private/image",
| "pullConfig": {
| "secret": "pullConfigSecret"
| }
| },
| "exec": {
| "command": {
| "shell": "sleep 1"
| }
| }
| }]
|}
""".stripMargin
val response = f.podsResource.create(podJson.getBytes(), force = false, f.auth.request)
withClue(s"response body: ${response.getEntity}") {
response.getStatus should be(422)
response.getEntity.toString should include("must be empty")
response.getEntity.toString should include("Feature secrets is not enabled. Enable with --enable_features secrets)")
response.getEntity.toString should include("pullConfig.secret must refer to an existing secret")
}
}
"support versions" when {
implicit val ctx = ExecutionContexts.global
"there are no versions" when {
"list no versions" in {
val groupManager = mock[GroupManager]
groupManager.pod(any).returns(None)
implicit val podManager = PodManagerImpl(groupManager)
val f = Fixture()
val response = f.podsResource.versions("/id", f.auth.request)
withClue(s"response body: ${response.getEntity}") {
response.getStatus should be(HttpServletResponse.SC_NOT_FOUND)
}
}
"return 404 when asking for a version" in {
val groupManager = mock[GroupManager]
groupManager.pod(any).returns(None)
groupManager.podVersions(any).returns(Source.empty)
groupManager.podVersion(any, any).returns(Future.successful(None))
implicit val podManager = PodManagerImpl(groupManager)
val f = Fixture()
val response = f.podsResource.version("/id", "2008", f.auth.request)
withClue(s"response body: ${response.getEntity}") {
response.getStatus should be(HttpServletResponse.SC_NOT_FOUND)
response.getEntity.toString should be ("{\\"message\\":\\"Pod '/id' does not exist\\"}")
}
}
}
"there are versions" when {
import mesosphere.marathon.state.PathId._
val pod1 = PodDefinition("/id".toRootPath, containers = Seq(MesosContainer(name = "foo", resources = Resources())))
val pod2 = pod1.copy(version = pod1.version + 1.minute)
"list the available versions" in {
val groupManager = mock[GroupManager]
groupManager.pod(any).returns(Some(pod2))
groupManager.podVersions(pod1.id).returns(Source(Seq(pod1.version.toOffsetDateTime, pod2.version.toOffsetDateTime)))
implicit val podManager = PodManagerImpl(groupManager)
val f = Fixture()
val response = f.podsResource.versions("/id", f.auth.request)
withClue(s"response body: ${response.getEntity}") {
response.getStatus should be(HttpServletResponse.SC_OK)
val timestamps = Json.fromJson[Seq[Timestamp]](Json.parse(response.getEntity.asInstanceOf[String])).get
timestamps should contain theSameElementsAs Seq(pod1.version, pod2.version)
}
}
"get a specific version" in {
val groupManager = mock[GroupManager]
groupManager.pod(any).returns(Some(pod2))
groupManager.podVersions(pod1.id).returns(Source(Seq(pod1.version.toOffsetDateTime, pod2.version.toOffsetDateTime)))
groupManager.podVersion(pod1.id, pod1.version.toOffsetDateTime).returns(Future.successful(Some(pod1)))
groupManager.podVersion(pod1.id, pod2.version.toOffsetDateTime).returns(Future.successful(Some(pod2)))
implicit val podManager = PodManagerImpl(groupManager)
val f = Fixture()
val response = f.podsResource.version("/id", pod1.version.toString, f.auth.request)
withClue(s"reponse body: ${response.getEntity}") {
response.getStatus should be(HttpServletResponse.SC_OK)
val pod = Raml.fromRaml(Json.fromJson[Pod](Json.parse(response.getEntity.asInstanceOf[String])).get)
pod should equal(pod1)
}
}
}
"killing" when {
"attempting to kill a single instance" in {
implicit val killer = mock[TaskKiller]
val f = Fixture()
val instance = Instance(
Instance.Id.forRunSpec("/id1".toRootPath), Instance.AgentInfo("", None, Nil),
InstanceState(Condition.Running, Timestamp.now(), Some(Timestamp.now()), None),
Map.empty,
runSpecVersion = Timestamp.now(),
unreachableStrategy = UnreachableStrategy.default()
)
killer.kill(any, any, any)(any) returns Future.successful(Seq(instance))
val response = f.podsResource.killInstance("/id", instance.instanceId.toString, f.auth.request)
withClue(s"response body: ${response.getEntity}") {
response.getStatus should be(HttpServletResponse.SC_OK)
val killed = Json.fromJson[Instance](Json.parse(response.getEntity.asInstanceOf[String]))
killed.get should equal(instance)
}
}
"attempting to kill multiple instances" in {
implicit val killer = mock[TaskKiller]
val instances = Seq(
Instance(Instance.Id.forRunSpec("/id1".toRootPath), Instance.AgentInfo("", None, Nil),
InstanceState(Condition.Running, Timestamp.now(), Some(Timestamp.now()), None), Map.empty,
runSpecVersion = Timestamp.now(),
unreachableStrategy = UnreachableStrategy.default()
),
Instance(Instance.Id.forRunSpec("/id1".toRootPath), Instance.AgentInfo("", None, Nil),
InstanceState(Condition.Running, Timestamp.now(), Some(Timestamp.now()), None), Map.empty,
runSpecVersion = Timestamp.now(),
unreachableStrategy = UnreachableStrategy.default()))
val f = Fixture()
killer.kill(any, any, any)(any) returns Future.successful(instances)
val response = f.podsResource.killInstances(
"/id",
Json.stringify(Json.toJson(instances.map(_.instanceId.toString))).getBytes, f.auth.request)
withClue(s"response body: ${response.getEntity}") {
response.getStatus should be(HttpServletResponse.SC_OK)
val killed = Json.fromJson[Seq[Instance]](Json.parse(response.getEntity.asInstanceOf[String]))
killed.get should contain theSameElementsAs instances
}
}
}
}
"authentication and authorization is handled correctly" when {
"delete fails if not authorized" when {
"delete a pod without auth access" in {
implicit val podSystem = mock[PodManager]
val f = Fixture()
podSystem.find(any).returns(Some(PodDefinition()))
podSystem.delete(any, eq(false)).returns(Future.successful(DeploymentPlan.empty))
f.auth.authorized = false
val response = f.podsResource.remove("/mypod", force = false, f.auth.request)
response.getStatus should be(HttpServletResponse.SC_UNAUTHORIZED)
}
}
}
"access without authentication is denied" when {
class UnAuthorizedFixture(authorized: Boolean, authenticated: Boolean) {
implicit val podSystem = mock[PodManager]
val fixture = Fixture()
podSystem.findAll(any).returns(Seq.empty)
podSystem.find(any).returns(Some(PodDefinition()))
podSystem.delete(any, any).returns(Future.successful(DeploymentPlan.empty))
podSystem.ids().returns(Set.empty)
podSystem.version(any, any).returns(Future.successful(Some(PodDefinition())))
fixture.auth.authorized = authorized
fixture.auth.authenticated = authenticated
}
"An unauthorized but authenticated request" when {
val f = new UnAuthorizedFixture(authorized = false, authenticated = true).fixture
"create a pod" in {
val response = f.podsResource.create(podSpecJson.getBytes, force = false, f.auth.request)
response.getStatus should be(HttpServletResponse.SC_UNAUTHORIZED)
}
"update a pod" in {
val response = f.podsResource.update("mypod", podSpecJson.getBytes, force = false, f.auth.request)
response.getStatus should be(HttpServletResponse.SC_UNAUTHORIZED)
}
"find a pod" in {
val response = f.podsResource.find("mypod", f.auth.request)
response.getStatus should be(HttpServletResponse.SC_UNAUTHORIZED)
}
"remove a pod" in {
val response = f.podsResource.remove("mypod", force = false, f.auth.request)
response.getStatus should be(HttpServletResponse.SC_UNAUTHORIZED)
}
"status of a pod" in {
val response = f.podsResource.remove("mypod", force = false, f.auth.request)
response.getStatus should be(HttpServletResponse.SC_UNAUTHORIZED)
}
"versions of a pod" in {
val response = f.podsResource.versions("mypod", f.auth.request)
response.getStatus should be(HttpServletResponse.SC_UNAUTHORIZED)
}
"version of a pod" in {
val response = f.podsResource.version("mypod", Timestamp.now().toString, f.auth.request)
response.getStatus should be(HttpServletResponse.SC_UNAUTHORIZED)
}
}
"An unauthenticated (and therefore unauthorized) request" when {
val f = new UnAuthorizedFixture(authorized = false, authenticated = false).fixture
"create a pod" in {
val response = f.podsResource.create(podSpecJson.getBytes, force = false, f.auth.request)
response.getStatus should be(HttpServletResponse.SC_FORBIDDEN)
}
"update a pod" in {
val response = f.podsResource.update("mypod", podSpecJson.getBytes, force = false, f.auth.request)
response.getStatus should be(HttpServletResponse.SC_FORBIDDEN)
}
"find a pod" in {
val response = f.podsResource.find("mypod", f.auth.request)
response.getStatus should be(HttpServletResponse.SC_FORBIDDEN)
}
"remove a pod" in {
val response = f.podsResource.remove("mypod", force = false, f.auth.request)
response.getStatus should be(HttpServletResponse.SC_FORBIDDEN)
}
"status of a pod" in {
val response = f.podsResource.remove("mypod", force = false, f.auth.request)
response.getStatus should be(HttpServletResponse.SC_FORBIDDEN)
}
"versions of a pod" in {
val response = f.podsResource.versions("mypod", f.auth.request)
response.getStatus should be(HttpServletResponse.SC_FORBIDDEN)
}
"version of a pod" in {
val response = f.podsResource.version("mypod", Timestamp.now().toString, f.auth.request)
response.getStatus should be(HttpServletResponse.SC_FORBIDDEN)
}
}
}
}
  // Bundles the resource under test with its auth fixture and pod-manager mock.
  case class Fixture(
    podsResource: PodsResource,
    auth: TestAuthFixture,
    podSystem: PodManager
  )
  // Builds a PodsResource wired with mocks; callers can override any
  // collaborator through the implicit parameter list.
  object Fixture {
    def apply(
      configArgs: Seq[String] = Seq.empty[String],
      auth: TestAuthFixture = new TestAuthFixture()
    )(implicit
      podSystem: PodManager = mock[PodManager],
      podStatusService: PodStatusService = mock[PodStatusService],
      killService: TaskKiller = mock[TaskKiller],
      eventBus: EventStream = mock[EventStream],
      mat: Materializer = mock[Materializer],
      scheduler: MarathonScheduler = mock[MarathonScheduler]): Fixture = {
      val config = AllConf.withTestConfig(configArgs: _*)
      implicit val authz: Authorizer = auth.auth
      implicit val authn: Authenticator = auth.auth
      implicit val clock = new SettableClock()
      implicit val pluginManager: PluginManager = PluginManager.None
      // Some resource code asks the scheduler for the master version; stub it.
      scheduler.mesosMasterVersion() returns Some(SemanticVersion(0, 0, 0))
      new Fixture(
        new PodsResource(config),
        auth,
        podSystem
      )
    }
  }
}
| Caerostris/marathon | src/test/scala/mesosphere/marathon/api/v2/PodsResourceTest.scala | Scala | apache-2.0 | 37,753 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.rdd
import java.io.{File, IOException, ObjectInputStream, ObjectOutputStream}
import scala.collection.JavaConverters._
import scala.collection.mutable.{ArrayBuffer, HashMap}
import scala.reflect.ClassTag
import com.esotericsoftware.kryo.KryoException
import org.apache.hadoop.io.{LongWritable, Text}
import org.apache.hadoop.mapred.{FileSplit, TextInputFormat}
import org.apache.spark._
import org.apache.spark.api.java.{JavaRDD, JavaSparkContext}
import org.apache.spark.rdd.RDDSuiteUtils._
import org.apache.spark.util.{ThreadUtils, Utils}
class RDDSuite extends SparkFunSuite with SharedSparkContext {
  // Temp directory shared by tests that touch disk; created once per suite run.
  var tempDir: File = _
  override def beforeAll(): Unit = {
    super.beforeAll()
    tempDir = Utils.createTempDir()
  }
  override def afterAll(): Unit = {
    try {
      // Clean up the temp dir before the shared SparkContext is torn down.
      Utils.deleteRecursively(tempDir)
    } finally {
      super.afterAll()
    }
  }
test("basic operations") {
val nums = sc.makeRDD(Array(1, 2, 3, 4), 2)
assert(nums.getNumPartitions === 2)
assert(nums.collect().toList === List(1, 2, 3, 4))
assert(nums.toLocalIterator.toList === List(1, 2, 3, 4))
val dups = sc.makeRDD(Array(1, 1, 2, 2, 3, 3, 4, 4), 2)
assert(dups.distinct().count() === 4)
assert(dups.distinct().count === 4) // Can distinct and count be called without parentheses?
assert(dups.distinct().collect === dups.distinct().collect)
assert(dups.distinct(2).collect === dups.distinct().collect)
assert(nums.reduce(_ + _) === 10)
assert(nums.fold(0)(_ + _) === 10)
assert(nums.map(_.toString).collect().toList === List("1", "2", "3", "4"))
assert(nums.filter(_ > 2).collect().toList === List(3, 4))
assert(nums.flatMap(x => 1 to x).collect().toList === List(1, 1, 2, 1, 2, 3, 1, 2, 3, 4))
assert(nums.union(nums).collect().toList === List(1, 2, 3, 4, 1, 2, 3, 4))
assert(nums.glom().map(_.toList).collect().toList === List(List(1, 2), List(3, 4)))
assert(nums.collect({ case i if i >= 3 => i.toString }).collect().toList === List("3", "4"))
assert(nums.keyBy(_.toString).collect().toList === List(("1", 1), ("2", 2), ("3", 3), ("4", 4)))
assert(!nums.isEmpty())
assert(nums.max() === 4)
assert(nums.min() === 1)
val partitionSums = nums.mapPartitions(iter => Iterator(iter.sum))
assert(partitionSums.collect().toList === List(3, 7))
val partitionSumsWithSplit = nums.mapPartitionsWithIndex {
case(split, iter) => Iterator((split, iter.sum))
}
assert(partitionSumsWithSplit.collect().toList === List((0, 3), (1, 7)))
val partitionSumsWithIndex = nums.mapPartitionsWithIndex {
case(split, iter) => Iterator((split, iter.sum))
}
assert(partitionSumsWithIndex.collect().toList === List((0, 3), (1, 7)))
intercept[UnsupportedOperationException] {
nums.filter(_ > 5).reduce(_ + _)
}
}
test("serialization") {
val empty = new EmptyRDD[Int](sc)
val serial = Utils.serialize(empty)
val deserial: EmptyRDD[Int] = Utils.deserialize(serial)
assert(!deserial.toString().isEmpty())
}
test("countApproxDistinct") {
def error(est: Long, size: Long): Double = math.abs(est - size) / size.toDouble
val size = 1000
val uniformDistro = for (i <- 1 to 5000) yield i % size
val simpleRdd = sc.makeRDD(uniformDistro, 10)
assert(error(simpleRdd.countApproxDistinct(8, 0), size) < 0.2)
assert(error(simpleRdd.countApproxDistinct(12, 0), size) < 0.1)
assert(error(simpleRdd.countApproxDistinct(0.02), size) < 0.1)
assert(error(simpleRdd.countApproxDistinct(0.5), size) < 0.22)
}
test("SparkContext.union") {
val nums = sc.makeRDD(Array(1, 2, 3, 4), 2)
assert(sc.union(nums).collect().toList === List(1, 2, 3, 4))
assert(sc.union(nums, nums).collect().toList === List(1, 2, 3, 4, 1, 2, 3, 4))
assert(sc.union(Seq(nums)).collect().toList === List(1, 2, 3, 4))
assert(sc.union(Seq(nums, nums)).collect().toList === List(1, 2, 3, 4, 1, 2, 3, 4))
}
test("SparkContext.union parallel partition listing") {
val nums1 = sc.makeRDD(Array(1, 2, 3, 4), 2)
val nums2 = sc.makeRDD(Array(5, 6, 7, 8), 2)
val serialUnion = sc.union(nums1, nums2)
val expected = serialUnion.collect().toList
assert(serialUnion.asInstanceOf[UnionRDD[Int]].isPartitionListingParallel === false)
sc.conf.set("spark.rdd.parallelListingThreshold", "1")
val parallelUnion = sc.union(nums1, nums2)
val actual = parallelUnion.collect().toList
sc.conf.remove("spark.rdd.parallelListingThreshold")
assert(parallelUnion.asInstanceOf[UnionRDD[Int]].isPartitionListingParallel === true)
assert(expected === actual)
}
test("SparkContext.union creates UnionRDD if at least one RDD has no partitioner") {
val rddWithPartitioner = sc.parallelize(Seq(1 -> true)).partitionBy(new HashPartitioner(1))
val rddWithNoPartitioner = sc.parallelize(Seq(2 -> true))
val unionRdd = sc.union(rddWithNoPartitioner, rddWithPartitioner)
assert(unionRdd.isInstanceOf[UnionRDD[_]])
}
test("SparkContext.union creates PartitionAwareUnionRDD if all RDDs have partitioners") {
val rddWithPartitioner = sc.parallelize(Seq(1 -> true)).partitionBy(new HashPartitioner(1))
val unionRdd = sc.union(rddWithPartitioner, rddWithPartitioner)
assert(unionRdd.isInstanceOf[PartitionerAwareUnionRDD[_]])
}
test("PartitionAwareUnionRDD raises exception if at least one RDD has no partitioner") {
val rddWithPartitioner = sc.parallelize(Seq(1 -> true)).partitionBy(new HashPartitioner(1))
val rddWithNoPartitioner = sc.parallelize(Seq(2 -> true))
intercept[IllegalArgumentException] {
new PartitionerAwareUnionRDD(sc, Seq(rddWithNoPartitioner, rddWithPartitioner))
}
}
test("partitioner aware union") {
def makeRDDWithPartitioner(seq: Seq[Int]): RDD[Int] = {
sc.makeRDD(seq, 1)
.map(x => (x, null))
.partitionBy(new HashPartitioner(2))
.mapPartitions(_.map(_._1), true)
}
val nums1 = makeRDDWithPartitioner(1 to 4)
val nums2 = makeRDDWithPartitioner(5 to 8)
assert(nums1.partitioner == nums2.partitioner)
assert(new PartitionerAwareUnionRDD(sc, Seq(nums1)).collect().toSet === Set(1, 2, 3, 4))
val union = new PartitionerAwareUnionRDD(sc, Seq(nums1, nums2))
assert(union.collect().toSet === Set(1, 2, 3, 4, 5, 6, 7, 8))
val nums1Parts = nums1.collectPartitions()
val nums2Parts = nums2.collectPartitions()
val unionParts = union.collectPartitions()
assert(nums1Parts.length === 2)
assert(nums2Parts.length === 2)
assert(unionParts.length === 2)
assert((nums1Parts(0) ++ nums2Parts(0)).toList === unionParts(0).toList)
assert((nums1Parts(1) ++ nums2Parts(1)).toList === unionParts(1).toList)
assert(union.partitioner === nums1.partitioner)
}
test("UnionRDD partition serialized size should be small") {
val largeVariable = new Array[Byte](1000 * 1000)
val rdd1 = sc.parallelize(1 to 10, 2).map(i => largeVariable.length)
val rdd2 = sc.parallelize(1 to 10, 3)
val ser = SparkEnv.get.closureSerializer.newInstance()
val union = rdd1.union(rdd2)
// The UnionRDD itself should be large, but each individual partition should be small.
assert(ser.serialize(union).limit() > 2000)
assert(ser.serialize(union.partitions.head).limit() < 2000)
}
test("aggregate") {
val pairs = sc.makeRDD(Array(("a", 1), ("b", 2), ("a", 2), ("c", 5), ("a", 3)))
type StringMap = HashMap[String, Int]
val emptyMap = new StringMap {
override def default(key: String): Int = 0
}
val mergeElement: (StringMap, (String, Int)) => StringMap = (map, pair) => {
map(pair._1) += pair._2
map
}
val mergeMaps: (StringMap, StringMap) => StringMap = (map1, map2) => {
for ((key, value) <- map2) {
map1(key) += value
}
map1
}
val result = pairs.aggregate(emptyMap)(mergeElement, mergeMaps)
assert(result.toSet === Set(("a", 6), ("b", 2), ("c", 5)))
}
test("treeAggregate") {
val rdd = sc.makeRDD(-1000 until 1000, 10)
def seqOp: (Long, Int) => Long = (c: Long, x: Int) => c + x
def combOp: (Long, Long) => Long = (c1: Long, c2: Long) => c1 + c2
for (depth <- 1 until 10) {
val sum = rdd.treeAggregate(0L)(seqOp, combOp, depth)
assert(sum === -1000L)
}
}
test("treeReduce") {
val rdd = sc.makeRDD(-1000 until 1000, 10)
for (depth <- 1 until 10) {
val sum = rdd.treeReduce(_ + _, depth)
assert(sum === -1000)
}
}
test("basic caching") {
val rdd = sc.makeRDD(Array(1, 2, 3, 4), 2).cache()
assert(rdd.collect().toList === List(1, 2, 3, 4))
assert(rdd.collect().toList === List(1, 2, 3, 4))
assert(rdd.collect().toList === List(1, 2, 3, 4))
}
test("caching with failures") {
val onlySplit = new Partition { override def index: Int = 0 }
var shouldFail = true
val rdd = new RDD[Int](sc, Nil) {
override def getPartitions: Array[Partition] = Array(onlySplit)
override val getDependencies = List[Dependency[_]]()
override def compute(split: Partition, context: TaskContext): Iterator[Int] = {
throw new Exception("injected failure")
}
}.cache()
val thrown = intercept[Exception]{
rdd.collect()
}
assert(thrown.getMessage.contains("injected failure"))
}
test("empty RDD") {
val empty = new EmptyRDD[Int](sc)
assert(empty.count === 0)
assert(empty.collect().size === 0)
val thrown = intercept[UnsupportedOperationException]{
empty.reduce(_ + _)
}
assert(thrown.getMessage.contains("empty"))
val emptyKv = new EmptyRDD[(Int, Int)](sc)
val rdd = sc.parallelize(1 to 2, 2).map(x => (x, x))
assert(rdd.join(emptyKv).collect().size === 0)
assert(rdd.rightOuterJoin(emptyKv).collect().size === 0)
assert(rdd.leftOuterJoin(emptyKv).collect().size === 2)
assert(rdd.fullOuterJoin(emptyKv).collect().size === 2)
assert(rdd.cogroup(emptyKv).collect().size === 2)
assert(rdd.union(emptyKv).collect().size === 2)
}
test("repartitioned RDDs") {
val data = sc.parallelize(1 to 1000, 10)
intercept[IllegalArgumentException] {
data.repartition(0)
}
// Coalesce partitions
val repartitioned1 = data.repartition(2)
assert(repartitioned1.partitions.size == 2)
val partitions1 = repartitioned1.glom().collect()
assert(partitions1(0).length > 0)
assert(partitions1(1).length > 0)
assert(repartitioned1.collect().toSet === (1 to 1000).toSet)
// Split partitions
val repartitioned2 = data.repartition(20)
assert(repartitioned2.partitions.size == 20)
val partitions2 = repartitioned2.glom().collect()
assert(partitions2(0).length > 0)
assert(partitions2(19).length > 0)
assert(repartitioned2.collect().toSet === (1 to 1000).toSet)
}
test("repartitioned RDDs perform load balancing") {
// Coalesce partitions
val input = Array.fill(1000)(1)
val initialPartitions = 10
val data = sc.parallelize(input, initialPartitions)
val repartitioned1 = data.repartition(2)
assert(repartitioned1.partitions.size == 2)
val partitions1 = repartitioned1.glom().collect()
// some noise in balancing is allowed due to randomization
assert(math.abs(partitions1(0).length - 500) < initialPartitions)
assert(math.abs(partitions1(1).length - 500) < initialPartitions)
assert(repartitioned1.collect() === input)
def testSplitPartitions(input: Seq[Int], initialPartitions: Int, finalPartitions: Int) {
val data = sc.parallelize(input, initialPartitions)
val repartitioned = data.repartition(finalPartitions)
assert(repartitioned.partitions.size === finalPartitions)
val partitions = repartitioned.glom().collect()
// assert all elements are present
assert(repartitioned.collect().sortWith(_ > _).toSeq === input.toSeq.sortWith(_ > _).toSeq)
// assert no bucket is overloaded
for (partition <- partitions) {
val avg = input.size / finalPartitions
val maxPossible = avg + initialPartitions
assert(partition.length <= maxPossible)
}
}
testSplitPartitions(Array.fill(100)(1), 10, 20)
testSplitPartitions(Array.fill(10000)(1) ++ Array.fill(10000)(2), 20, 100)
}
test("coalesced RDDs") {
val data = sc.parallelize(1 to 10, 10)
intercept[IllegalArgumentException] {
data.coalesce(0)
}
val coalesced1 = data.coalesce(2)
assert(coalesced1.collect().toList === (1 to 10).toList)
assert(coalesced1.glom().collect().map(_.toList).toList ===
List(List(1, 2, 3, 4, 5), List(6, 7, 8, 9, 10)))
// Check that the narrow dependency is also specified correctly
assert(coalesced1.dependencies.head.asInstanceOf[NarrowDependency[_]].getParents(0).toList ===
List(0, 1, 2, 3, 4))
assert(coalesced1.dependencies.head.asInstanceOf[NarrowDependency[_]].getParents(1).toList ===
List(5, 6, 7, 8, 9))
val coalesced2 = data.coalesce(3)
assert(coalesced2.collect().toList === (1 to 10).toList)
assert(coalesced2.glom().collect().map(_.toList).toList ===
List(List(1, 2, 3), List(4, 5, 6), List(7, 8, 9, 10)))
val coalesced3 = data.coalesce(10)
assert(coalesced3.collect().toList === (1 to 10).toList)
assert(coalesced3.glom().collect().map(_.toList).toList ===
(1 to 10).map(x => List(x)).toList)
// If we try to coalesce into more partitions than the original RDD, it should just
// keep the original number of partitions.
val coalesced4 = data.coalesce(20)
assert(coalesced4.collect().toList === (1 to 10).toList)
assert(coalesced4.glom().collect().map(_.toList).toList ===
(1 to 10).map(x => List(x)).toList)
// we can optionally shuffle to keep the upstream parallel
val coalesced5 = data.coalesce(1, shuffle = true)
val isEquals = coalesced5.dependencies.head.rdd.dependencies.head.rdd.
asInstanceOf[ShuffledRDD[_, _, _]] != null
assert(isEquals)
// when shuffling, we can increase the number of partitions
val coalesced6 = data.coalesce(20, shuffle = true)
assert(coalesced6.partitions.size === 20)
assert(coalesced6.collect().toSet === (1 to 10).toSet)
}
test("coalesced RDDs with locality") {
val data3 = sc.makeRDD(List((1, List("a", "c")), (2, List("a", "b", "c")), (3, List("b"))))
val coal3 = data3.coalesce(3)
val list3 = coal3.partitions.flatMap(_.asInstanceOf[CoalescedRDDPartition].preferredLocation)
assert(list3.sorted === Array("a", "b", "c"), "Locality preferences are dropped")
// RDD with locality preferences spread (non-randomly) over 6 machines, m0 through m5
val data = sc.makeRDD((1 to 9).map(i => (i, (i to (i + 2)).map{ j => "m" + (j%6)})))
val coalesced1 = data.coalesce(3)
assert(coalesced1.collect().toList.sorted === (1 to 9).toList, "Data got *lost* in coalescing")
val splits = coalesced1.glom().collect().map(_.toList).toList
assert(splits.length === 3, "Supposed to coalesce to 3 but got " + splits.length)
assert(splits.forall(_.length >= 1) === true, "Some partitions were empty")
// If we try to coalesce into more partitions than the original RDD, it should just
// keep the original number of partitions.
val coalesced4 = data.coalesce(20)
val listOfLists = coalesced4.glom().collect().map(_.toList).toList
val sortedList = listOfLists.sortWith{ (x, y) => !x.isEmpty && (y.isEmpty || (x(0) < y(0))) }
assert(sortedList === (1 to 9).
map{x => List(x)}.toList, "Tried coalescing 9 partitions to 20 but didn't get 9 back")
}
test("coalesced RDDs with partial locality") {
// Make an RDD that has some locality preferences and some without. This can happen
// with UnionRDD
val data = sc.makeRDD((1 to 9).map(i => {
if (i > 4) {
(i, (i to (i + 2)).map { j => "m" + (j % 6) })
} else {
(i, Vector())
}
}))
val coalesced1 = data.coalesce(3)
assert(coalesced1.collect().toList.sorted === (1 to 9).toList, "Data got *lost* in coalescing")
val splits = coalesced1.glom().collect().map(_.toList).toList
assert(splits.length === 3, "Supposed to coalesce to 3 but got " + splits.length)
assert(splits.forall(_.length >= 1) === true, "Some partitions were empty")
// If we try to coalesce into more partitions than the original RDD, it should just
// keep the original number of partitions.
val coalesced4 = data.coalesce(20)
val listOfLists = coalesced4.glom().collect().map(_.toList).toList
val sortedList = listOfLists.sortWith{ (x, y) => !x.isEmpty && (y.isEmpty || (x(0) < y(0))) }
assert(sortedList === (1 to 9).
map{x => List(x)}.toList, "Tried coalescing 9 partitions to 20 but didn't get 9 back")
}
test("coalesced RDDs with locality, large scale (10K partitions)") {
// large scale experiment
import collection.mutable
val partitions = 10000
val numMachines = 50
val machines = mutable.ListBuffer[String]()
(1 to numMachines).foreach(machines += "m" + _)
val rnd = scala.util.Random
for (seed <- 1 to 5) {
rnd.setSeed(seed)
val blocks = (1 to partitions).map { i =>
(i, Array.fill(3)(machines(rnd.nextInt(machines.size))).toList)
}
val data2 = sc.makeRDD(blocks)
val coalesced2 = data2.coalesce(numMachines * 2)
// test that you get over 90% locality in each group
val minLocality = coalesced2.partitions
.map(part => part.asInstanceOf[CoalescedRDDPartition].localFraction)
.foldLeft(1.0)((perc, loc) => math.min(perc, loc))
assert(minLocality >= 0.90, "Expected 90% locality but got " +
(minLocality * 100.0).toInt + "%")
// test that the groups are load balanced with 100 +/- 20 elements in each
val maxImbalance = coalesced2.partitions
.map(part => part.asInstanceOf[CoalescedRDDPartition].parents.size)
.foldLeft(0)((dev, curr) => math.max(math.abs(100 - curr), dev))
assert(maxImbalance <= 20, "Expected 100 +/- 20 per partition, but got " + maxImbalance)
val data3 = sc.makeRDD(blocks).map(i => i * 2) // derived RDD to test *current* pref locs
val coalesced3 = data3.coalesce(numMachines * 2)
val minLocality2 = coalesced3.partitions
.map(part => part.asInstanceOf[CoalescedRDDPartition].localFraction)
.foldLeft(1.0)((perc, loc) => math.min(perc, loc))
assert(minLocality2 >= 0.90, "Expected 90% locality for derived RDD but got " +
(minLocality2 * 100.0).toInt + "%")
}
}
test("coalesced RDDs with partial locality, large scale (10K partitions)") {
// large scale experiment
import collection.mutable
val halfpartitions = 5000
val partitions = 10000
val numMachines = 50
val machines = mutable.ListBuffer[String]()
(1 to numMachines).foreach(machines += "m" + _)
val rnd = scala.util.Random
for (seed <- 1 to 5) {
rnd.setSeed(seed)
val firstBlocks = (1 to halfpartitions).map { i =>
(i, Array.fill(3)(machines(rnd.nextInt(machines.size))).toList)
}
val blocksNoLocality = (halfpartitions + 1 to partitions).map { i =>
(i, List())
}
val blocks = firstBlocks ++ blocksNoLocality
val data2 = sc.makeRDD(blocks)
// first try going to same number of partitions
val coalesced2 = data2.coalesce(partitions)
// test that we have 10000 partitions
assert(coalesced2.partitions.size == 10000, "Expected 10000 partitions, but got " +
coalesced2.partitions.size)
// test that we have 100 partitions
val coalesced3 = data2.coalesce(numMachines * 2)
assert(coalesced3.partitions.size == 100, "Expected 100 partitions, but got " +
coalesced3.partitions.size)
// test that the groups are load balanced with 100 +/- 20 elements in each
val maxImbalance3 = coalesced3.partitions
.map(part => part.asInstanceOf[CoalescedRDDPartition].parents.size)
.foldLeft(0)((dev, curr) => math.max(math.abs(100 - curr), dev))
assert(maxImbalance3 <= 20, "Expected 100 +/- 20 per partition, but got " + maxImbalance3)
}
}
  // Test for SPARK-2412 -- ensure that the second pass of the algorithm does not throw an exception
  test("coalesced RDDs with locality, fail first pass") {
    val initialPartitions = 1000
    val targetLen = 50
    // Coupon-collector sizing: enough "m1" partitions that the first pass cannot
    // cover all target groups, forcing the second pass to run.
    val couponCount = 2 * (math.log(targetLen)*targetLen + targetLen + 0.5).toInt // = 492
    val blocks = (1 to initialPartitions).map { i =>
      (i, List(if (i > couponCount) "m2" else "m1"))
    }
    val data = sc.makeRDD(blocks)
    val coalesced = data.coalesce(targetLen)
    assert(coalesced.partitions.length == targetLen)
  }
test("zipped RDDs") {
val nums = sc.makeRDD(Array(1, 2, 3, 4), 2)
val zipped = nums.zip(nums.map(_ + 1.0))
assert(zipped.glom().map(_.toList).collect().toList ===
List(List((1, 2.0), (2, 3.0)), List((3, 4.0), (4, 5.0))))
intercept[IllegalArgumentException] {
nums.zip(sc.parallelize(1 to 4, 1)).collect()
}
intercept[SparkException] {
nums.zip(sc.parallelize(1 to 5, 2)).collect()
}
}
test("partition pruning") {
val data = sc.parallelize(1 to 10, 10)
// Note that split number starts from 0, so > 8 means only 10th partition left.
val prunedRdd = new PartitionPruningRDD(data, splitNum => splitNum > 8)
assert(prunedRdd.partitions.size === 1)
val prunedData = prunedRdd.collect()
assert(prunedData.size === 1)
assert(prunedData(0) === 10)
}
test("collect large number of empty partitions") {
// Regression test for SPARK-4019
assert(sc.makeRDD(0 until 10, 1000).repartition(2001).collect().toSet === (0 until 10).toSet)
}
test("take") {
var nums = sc.makeRDD(Range(1, 1000), 1)
assert(nums.take(0).size === 0)
assert(nums.take(1) === Array(1))
assert(nums.take(3) === Array(1, 2, 3))
assert(nums.take(500) === (1 to 500).toArray)
assert(nums.take(501) === (1 to 501).toArray)
assert(nums.take(999) === (1 to 999).toArray)
assert(nums.take(1000) === (1 to 999).toArray)
nums = sc.makeRDD(Range(1, 1000), 2)
assert(nums.take(0).size === 0)
assert(nums.take(1) === Array(1))
assert(nums.take(3) === Array(1, 2, 3))
assert(nums.take(500) === (1 to 500).toArray)
assert(nums.take(501) === (1 to 501).toArray)
assert(nums.take(999) === (1 to 999).toArray)
assert(nums.take(1000) === (1 to 999).toArray)
nums = sc.makeRDD(Range(1, 1000), 100)
assert(nums.take(0).size === 0)
assert(nums.take(1) === Array(1))
assert(nums.take(3) === Array(1, 2, 3))
assert(nums.take(500) === (1 to 500).toArray)
assert(nums.take(501) === (1 to 501).toArray)
assert(nums.take(999) === (1 to 999).toArray)
assert(nums.take(1000) === (1 to 999).toArray)
nums = sc.makeRDD(Range(1, 1000), 1000)
assert(nums.take(0).size === 0)
assert(nums.take(1) === Array(1))
assert(nums.take(3) === Array(1, 2, 3))
assert(nums.take(500) === (1 to 500).toArray)
assert(nums.take(501) === (1 to 501).toArray)
assert(nums.take(999) === (1 to 999).toArray)
assert(nums.take(1000) === (1 to 999).toArray)
nums = sc.parallelize(1 to 2, 2)
assert(nums.take(2147483638).size === 2)
assert(nums.takeAsync(2147483638).get.size === 2)
}
test("top with predefined ordering") {
val nums = Array.range(1, 100000)
val ints = sc.makeRDD(scala.util.Random.shuffle(nums), 2)
val topK = ints.top(5)
assert(topK.size === 5)
assert(topK === nums.reverse.take(5))
}
test("top with custom ordering") {
val words = Vector("a", "b", "c", "d")
implicit val ord = implicitly[Ordering[String]].reverse
val rdd = sc.makeRDD(words, 2)
val topK = rdd.top(2)
assert(topK.size === 2)
assert(topK.sorted === Array("b", "a"))
}
test("takeOrdered with predefined ordering") {
val nums = Array(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
val rdd = sc.makeRDD(nums, 2)
val sortedLowerK = rdd.takeOrdered(5)
assert(sortedLowerK.size === 5)
assert(sortedLowerK === Array(1, 2, 3, 4, 5))
}
test("takeOrdered with limit 0") {
val nums = Array(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
val rdd = sc.makeRDD(nums, 2)
val sortedLowerK = rdd.takeOrdered(0)
assert(sortedLowerK.size === 0)
}
test("takeOrdered with custom ordering") {
val nums = Array(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
implicit val ord = implicitly[Ordering[Int]].reverse
val rdd = sc.makeRDD(nums, 2)
val sortedTopK = rdd.takeOrdered(5)
assert(sortedTopK.size === 5)
assert(sortedTopK === Array(10, 9, 8, 7, 6))
assert(sortedTopK === nums.sorted(ord).take(5))
}
test("isEmpty") {
assert(sc.emptyRDD.isEmpty())
assert(sc.parallelize(Seq[Int]()).isEmpty())
assert(!sc.parallelize(Seq(1)).isEmpty())
assert(sc.parallelize(Seq(1, 2, 3), 3).filter(_ < 0).isEmpty())
assert(!sc.parallelize(Seq(1, 2, 3), 3).filter(_ > 1).isEmpty())
}
test("sample preserves partitioner") {
val partitioner = new HashPartitioner(2)
val rdd = sc.parallelize(Seq((0, 1), (2, 3))).partitionBy(partitioner)
for (withReplacement <- Seq(true, false)) {
val sampled = rdd.sample(withReplacement, 1.0)
assert(sampled.partitioner === rdd.partitioner)
}
}
test("takeSample") {
val n = 1000000
val data = sc.parallelize(1 to n, 2)
for (num <- List(5, 20, 100)) {
val sample = data.takeSample(withReplacement = false, num = num)
assert(sample.size === num) // Got exactly num elements
assert(sample.toSet.size === num) // Elements are distinct
assert(sample.forall(x => 1 <= x && x <= n), s"elements not in [1, $n]")
}
for (seed <- 1 to 5) {
val sample = data.takeSample(withReplacement = false, 20, seed)
assert(sample.size === 20) // Got exactly 20 elements
assert(sample.toSet.size === 20) // Elements are distinct
assert(sample.forall(x => 1 <= x && x <= n), s"elements not in [1, $n]")
}
for (seed <- 1 to 5) {
val sample = data.takeSample(withReplacement = false, 100, seed)
assert(sample.size === 100) // Got only 100 elements
assert(sample.toSet.size === 100) // Elements are distinct
assert(sample.forall(x => 1 <= x && x <= n), s"elements not in [1, $n]")
}
for (seed <- 1 to 5) {
val sample = data.takeSample(withReplacement = true, 20, seed)
assert(sample.size === 20) // Got exactly 20 elements
assert(sample.forall(x => 1 <= x && x <= n), s"elements not in [1, $n]")
}
{
val sample = data.takeSample(withReplacement = true, num = 20)
assert(sample.size === 20) // Got exactly 20 elements
assert(sample.forall(x => 1 <= x && x <= n), s"elements not in [1, $n]")
}
{
val sample = data.takeSample(withReplacement = true, num = n)
assert(sample.size === n) // Got exactly n elements
// Chance of getting all distinct elements is astronomically low, so test we got < n
assert(sample.toSet.size < n, "sampling with replacement returned all distinct elements")
assert(sample.forall(x => 1 <= x && x <= n), s"elements not in [1, $n]")
}
for (seed <- 1 to 5) {
val sample = data.takeSample(withReplacement = true, n, seed)
assert(sample.size === n) // Got exactly n elements
// Chance of getting all distinct elements is astronomically low, so test we got < n
assert(sample.toSet.size < n, "sampling with replacement returned all distinct elements")
}
for (seed <- 1 to 5) {
val sample = data.takeSample(withReplacement = true, 2 * n, seed)
assert(sample.size === 2 * n) // Got exactly 2 * n elements
// Chance of getting all distinct elements is still quite low, so test we got < n
assert(sample.toSet.size < n, "sampling with replacement returned all distinct elements")
}
}
test("takeSample from an empty rdd") {
val emptySet = sc.parallelize(Seq.empty[Int], 2)
val sample = emptySet.takeSample(false, 20, 1)
assert(sample.length === 0)
}
test("randomSplit") {
val n = 600
val data = sc.parallelize(1 to n, 2)
for(seed <- 1 to 5) {
val splits = data.randomSplit(Array(1.0, 2.0, 3.0), seed)
assert(splits.size == 3, "wrong number of splits")
assert(splits.flatMap(_.collect()).sorted.toList == data.collect().toList,
"incomplete or wrong split")
val s = splits.map(_.count())
assert(math.abs(s(0) - 100) < 50) // std = 9.13
assert(math.abs(s(1) - 200) < 50) // std = 11.55
assert(math.abs(s(2) - 300) < 50) // std = 12.25
}
}
test("runJob on an invalid partition") {
intercept[IllegalArgumentException] {
sc.runJob(sc.parallelize(1 to 10, 2), {iter: Iterator[Int] => iter.size}, Seq(0, 1, 2))
}
}
test("sort an empty RDD") {
val data = sc.emptyRDD[Int]
assert(data.sortBy(x => x).collect() === Array.empty)
}
test("sortByKey") {
val data = sc.parallelize(Seq("5|50|A", "4|60|C", "6|40|B"))
val col1 = Array("4|60|C", "5|50|A", "6|40|B")
val col2 = Array("6|40|B", "5|50|A", "4|60|C")
val col3 = Array("5|50|A", "6|40|B", "4|60|C")
assert(data.sortBy(_.split("\\\\|")(0)).collect() === col1)
assert(data.sortBy(_.split("\\\\|")(1)).collect() === col2)
assert(data.sortBy(_.split("\\\\|")(2)).collect() === col3)
}
test("sortByKey ascending parameter") {
val data = sc.parallelize(Seq("5|50|A", "4|60|C", "6|40|B"))
val asc = Array("4|60|C", "5|50|A", "6|40|B")
val desc = Array("6|40|B", "5|50|A", "4|60|C")
assert(data.sortBy(_.split("\\\\|")(0), true).collect() === asc)
assert(data.sortBy(_.split("\\\\|")(0), false).collect() === desc)
}
test("sortByKey with explicit ordering") {
val data = sc.parallelize(Seq("Bob|Smith|50",
"Jane|Smith|40",
"Thomas|Williams|30",
"Karen|Williams|60"))
val ageOrdered = Array("Thomas|Williams|30",
"Jane|Smith|40",
"Bob|Smith|50",
"Karen|Williams|60")
// last name, then first name
val nameOrdered = Array("Bob|Smith|50",
"Jane|Smith|40",
"Karen|Williams|60",
"Thomas|Williams|30")
val parse = (s: String) => {
val split = s.split("\\\\|")
Person(split(0), split(1), split(2).toInt)
}
import scala.reflect.classTag
assert(data.sortBy(parse, true, 2)(AgeOrdering, classTag[Person]).collect() === ageOrdered)
assert(data.sortBy(parse, true, 2)(NameOrdering, classTag[Person]).collect() === nameOrdered)
}
test("repartitionAndSortWithinPartitions") {
val data = sc.parallelize(Seq((0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)), 2)
val partitioner = new Partitioner {
def numPartitions: Int = 2
def getPartition(key: Any): Int = key.asInstanceOf[Int] % 2
}
val repartitioned = data.repartitionAndSortWithinPartitions(partitioner)
val partitions = repartitioned.glom().collect()
assert(partitions(0) === Seq((0, 5), (0, 8), (2, 6)))
assert(partitions(1) === Seq((1, 3), (3, 8), (3, 8)))
}
test("intersection") {
val all = sc.parallelize(1 to 10)
val evens = sc.parallelize(2 to 10 by 2)
val intersection = Array(2, 4, 6, 8, 10)
// intersection is commutative
assert(all.intersection(evens).collect().sorted === intersection)
assert(evens.intersection(all).collect().sorted === intersection)
}
test("intersection strips duplicates in an input") {
val a = sc.parallelize(Seq(1, 2, 3, 3))
val b = sc.parallelize(Seq(1, 1, 2, 3))
val intersection = Array(1, 2, 3)
assert(a.intersection(b).collect().sorted === intersection)
assert(b.intersection(a).collect().sorted === intersection)
}
test("zipWithIndex") {
val n = 10
val data = sc.parallelize(0 until n, 3)
val ranked = data.zipWithIndex()
ranked.collect().foreach { x =>
assert(x._1 === x._2)
}
}
test("zipWithIndex with a single partition") {
val n = 10
val data = sc.parallelize(0 until n, 1)
val ranked = data.zipWithIndex()
ranked.collect().foreach { x =>
assert(x._1 === x._2)
}
}
test("zipWithIndex chained with other RDDs (SPARK-4433)") {
val count = sc.parallelize(0 until 10, 2).zipWithIndex().repartition(4).count()
assert(count === 10)
}
test("zipWithUniqueId") {
val n = 10
val data = sc.parallelize(0 until n, 3)
val ranked = data.zipWithUniqueId()
val ids = ranked.map(_._1).distinct().collect()
assert(ids.length === n)
}
test("retag with implicit ClassTag") {
val jsc: JavaSparkContext = new JavaSparkContext(sc)
val jrdd: JavaRDD[String] = jsc.parallelize(Seq("A", "B", "C").asJava)
jrdd.rdd.retag.collect()
}
test("parent method") {
val rdd1 = sc.parallelize(1 to 10, 2)
val rdd2 = rdd1.filter(_ % 2 == 0)
val rdd3 = rdd2.map(_ + 1)
val rdd4 = new UnionRDD(sc, List(rdd1, rdd2, rdd3))
assert(rdd4.parent(0).isInstanceOf[ParallelCollectionRDD[_]])
assert(rdd4.parent[Int](1) === rdd2)
assert(rdd4.parent[Int](2) === rdd3)
}
test("getNarrowAncestors") {
val rdd1 = sc.parallelize(1 to 100, 4)
val rdd2 = rdd1.filter(_ % 2 == 0).map(_ + 1)
val rdd3 = rdd2.map(_ - 1).filter(_ < 50).map(i => (i, i))
val rdd4 = rdd3.reduceByKey(_ + _)
val rdd5 = rdd4.mapValues(_ + 1).mapValues(_ + 2).mapValues(_ + 3)
val ancestors1 = rdd1.getNarrowAncestors
val ancestors2 = rdd2.getNarrowAncestors
val ancestors3 = rdd3.getNarrowAncestors
val ancestors4 = rdd4.getNarrowAncestors
val ancestors5 = rdd5.getNarrowAncestors
// Simple dependency tree with a single branch
assert(ancestors1.size === 0)
assert(ancestors2.size === 2)
assert(ancestors2.count(_ === rdd1) === 1)
assert(ancestors2.count(_.isInstanceOf[MapPartitionsRDD[_, _]]) === 1)
assert(ancestors3.size === 5)
assert(ancestors3.count(_.isInstanceOf[MapPartitionsRDD[_, _]]) === 4)
// Any ancestors before the shuffle are not considered
assert(ancestors4.size === 0)
assert(ancestors4.count(_.isInstanceOf[ShuffledRDD[_, _, _]]) === 0)
assert(ancestors5.size === 3)
assert(ancestors5.count(_.isInstanceOf[ShuffledRDD[_, _, _]]) === 1)
assert(ancestors5.count(_ === rdd3) === 0)
assert(ancestors5.count(_.isInstanceOf[MapPartitionsRDD[_, _]]) === 2)
}
test("getNarrowAncestors with multiple parents") {
val rdd1 = sc.parallelize(1 to 100, 5)
val rdd2 = sc.parallelize(1 to 200, 10).map(_ + 1)
val rdd3 = sc.parallelize(1 to 300, 15).filter(_ > 50)
val rdd4 = rdd1.map(i => (i, i))
val rdd5 = rdd2.map(i => (i, i))
val rdd6 = sc.union(rdd1, rdd2)
val rdd7 = sc.union(rdd1, rdd2, rdd3)
val rdd8 = sc.union(rdd6, rdd7)
val rdd9 = rdd4.join(rdd5)
val ancestors6 = rdd6.getNarrowAncestors
val ancestors7 = rdd7.getNarrowAncestors
val ancestors8 = rdd8.getNarrowAncestors
val ancestors9 = rdd9.getNarrowAncestors
// Simple dependency tree with multiple branches
assert(ancestors6.size === 3)
assert(ancestors6.count(_.isInstanceOf[ParallelCollectionRDD[_]]) === 2)
assert(ancestors6.count(_ === rdd2) === 1)
assert(ancestors7.size === 5)
assert(ancestors7.count(_.isInstanceOf[ParallelCollectionRDD[_]]) === 3)
assert(ancestors7.count(_ === rdd2) === 1)
assert(ancestors7.count(_ === rdd3) === 1)
// Dependency tree with duplicate nodes (e.g. rdd1 should not be reported twice)
assert(ancestors8.size === 7)
assert(ancestors8.count(_ === rdd2) === 1)
assert(ancestors8.count(_ === rdd3) === 1)
assert(ancestors8.count(_.isInstanceOf[UnionRDD[_]]) === 2)
assert(ancestors8.count(_.isInstanceOf[ParallelCollectionRDD[_]]) === 3)
assert(ancestors8.count(_ == rdd1) === 1)
assert(ancestors8.count(_ == rdd2) === 1)
assert(ancestors8.count(_ == rdd3) === 1)
// Any ancestors before the shuffle are not considered
assert(ancestors9.size === 2)
assert(ancestors9.count(_.isInstanceOf[CoGroupedRDD[_]]) === 1)
}
/**
* This tests for the pathological condition in which the RDD dependency graph is cyclical.
*
* Since RDD is part of the public API, applications may actually implement RDDs that allow
* such graphs to be constructed. In such cases, getNarrowAncestor should not simply hang.
*/
test("getNarrowAncestors with cycles") {
val rdd1 = new CyclicalDependencyRDD[Int]
val rdd2 = new CyclicalDependencyRDD[Int]
val rdd3 = new CyclicalDependencyRDD[Int]
val rdd4 = rdd3.map(_ + 1).filter(_ > 10).map(_ + 2).filter(_ % 5 > 1)
val rdd5 = rdd4.map(_ + 2).filter(_ > 20)
val rdd6 = sc.union(rdd1, rdd2, rdd3).map(_ + 4).union(rdd5).union(rdd4)
// Simple cyclical dependency
rdd1.addDependency(new OneToOneDependency[Int](rdd2))
rdd2.addDependency(new OneToOneDependency[Int](rdd1))
val ancestors1 = rdd1.getNarrowAncestors
val ancestors2 = rdd2.getNarrowAncestors
assert(ancestors1.size === 1)
assert(ancestors1.count(_ == rdd2) === 1)
assert(ancestors1.count(_ == rdd1) === 0)
assert(ancestors2.size === 1)
assert(ancestors2.count(_ == rdd1) === 1)
assert(ancestors2.count(_ == rdd2) === 0)
// Cycle involving a longer chain
rdd3.addDependency(new OneToOneDependency[Int](rdd4))
val ancestors3 = rdd3.getNarrowAncestors
val ancestors4 = rdd4.getNarrowAncestors
assert(ancestors3.size === 4)
assert(ancestors3.count(_.isInstanceOf[MapPartitionsRDD[_, _]]) === 4)
assert(ancestors3.count(_ == rdd3) === 0)
assert(ancestors4.size === 4)
assert(ancestors4.count(_.isInstanceOf[MapPartitionsRDD[_, _]]) === 3)
assert(ancestors4.count(_.isInstanceOf[CyclicalDependencyRDD[_]]) === 1)
assert(ancestors4.count(_ == rdd3) === 1)
assert(ancestors4.count(_ == rdd4) === 0)
// Cycles that do not involve the root
val ancestors5 = rdd5.getNarrowAncestors
assert(ancestors5.size === 6)
assert(ancestors5.count(_.isInstanceOf[MapPartitionsRDD[_, _]]) === 5)
assert(ancestors5.count(_.isInstanceOf[CyclicalDependencyRDD[_]]) === 1)
assert(ancestors4.count(_ == rdd3) === 1)
// Complex cyclical dependency graph (combination of all of the above)
val ancestors6 = rdd6.getNarrowAncestors
assert(ancestors6.size === 12)
assert(ancestors6.count(_.isInstanceOf[UnionRDD[_]]) === 2)
assert(ancestors6.count(_.isInstanceOf[MapPartitionsRDD[_, _]]) === 7)
assert(ancestors6.count(_.isInstanceOf[CyclicalDependencyRDD[_]]) === 3)
}
test("task serialization exception should not hang scheduler") {
class BadSerializable extends Serializable {
@throws(classOf[IOException])
private def writeObject(out: ObjectOutputStream): Unit =
throw new KryoException("Bad serialization")
@throws(classOf[IOException])
private def readObject(in: ObjectInputStream): Unit = {}
}
// Note that in the original bug, SPARK-4349, that this verifies, the job would only hang if
// there were more threads in the Spark Context than there were number of objects in this
// sequence.
intercept[Throwable] {
sc.parallelize(Seq(new BadSerializable, new BadSerializable)).collect()
}
// Check that the context has not crashed
sc.parallelize(1 to 100).map(x => x*2).collect
}
/** A contrived RDD that allows the manual addition of dependencies after creation. */
private class CyclicalDependencyRDD[T: ClassTag] extends RDD[T](sc, Nil) {
private val mutableDependencies: ArrayBuffer[Dependency[_]] = ArrayBuffer.empty
override def compute(p: Partition, c: TaskContext): Iterator[T] = Iterator.empty
override def getPartitions: Array[Partition] = Array.empty
override def getDependencies: Seq[Dependency[_]] = mutableDependencies
def addDependency(dep: Dependency[_]) {
mutableDependencies += dep
}
}
test("RDD.partitions() fails fast when partitions indicies are incorrect (SPARK-13021)") {
class BadRDD[T: ClassTag](prev: RDD[T]) extends RDD[T](prev) {
override def compute(part: Partition, context: TaskContext): Iterator[T] = {
prev.compute(part, context)
}
override protected def getPartitions: Array[Partition] = {
prev.partitions.reverse // breaks contract, which is that `rdd.partitions(i).index == i`
}
}
val rdd = new BadRDD(sc.parallelize(1 to 100, 100))
val e = intercept[IllegalArgumentException] {
rdd.partitions
}
assert(e.getMessage.contains("partitions"))
}
test("nested RDDs are not supported (SPARK-5063)") {
val rdd: RDD[Int] = sc.parallelize(1 to 100)
val rdd2: RDD[Int] = sc.parallelize(1 to 100)
val thrown = intercept[SparkException] {
val nestedRDD: RDD[RDD[Int]] = rdd.mapPartitions { x => Seq(rdd2.map(x => x)).iterator }
nestedRDD.count()
}
assert(thrown.getMessage.contains("SPARK-5063"))
}
test("actions cannot be performed inside of transformations (SPARK-5063)") {
val rdd: RDD[Int] = sc.parallelize(1 to 100)
val rdd2: RDD[Int] = sc.parallelize(1 to 100)
val thrown = intercept[SparkException] {
rdd.map(x => x * rdd2.count).collect()
}
assert(thrown.getMessage.contains("SPARK-5063"))
}
test("custom RDD coalescer") {
val maxSplitSize = 512
val outDir = new File(tempDir, "output").getAbsolutePath
sc.makeRDD(1 to 1000, 10).saveAsTextFile(outDir)
val hadoopRDD =
sc.hadoopFile(outDir, classOf[TextInputFormat], classOf[LongWritable], classOf[Text])
val coalescedHadoopRDD =
hadoopRDD.coalesce(2, partitionCoalescer = Option(new SizeBasedCoalescer(maxSplitSize)))
assert(coalescedHadoopRDD.partitions.size <= 10)
var totalPartitionCount = 0L
coalescedHadoopRDD.partitions.foreach(partition => {
var splitSizeSum = 0L
partition.asInstanceOf[CoalescedRDDPartition].parents.foreach(partition => {
val split = partition.asInstanceOf[HadoopPartition].inputSplit.value.asInstanceOf[FileSplit]
splitSizeSum += split.getLength
totalPartitionCount += 1
})
assert(splitSizeSum <= maxSplitSize)
})
assert(totalPartitionCount == 10)
}
test("SPARK-18406: race between end-of-task and completion iterator read lock release") {
val rdd = sc.parallelize(1 to 1000, 10)
rdd.cache()
rdd.mapPartitions { iter =>
ThreadUtils.runInNewThread("TestThread") {
// Iterate to the end of the input iterator, to cause the CompletionIterator completion to
// fire outside of the task's main thread.
while (iter.hasNext) {
iter.next()
}
iter
}
}.collect()
}
// NOTE
// Below tests calling sc.stop() have to be the last tests in this suite. If there are tests
// running after them and if they access sc those tests will fail as sc is already closed, because
// sc is shared (this suite mixins SharedSparkContext)
test("cannot run actions after SparkContext has been stopped (SPARK-5063)") {
val existingRDD = sc.parallelize(1 to 100)
sc.stop()
val thrown = intercept[IllegalStateException] {
existingRDD.count()
}
assert(thrown.getMessage.contains("shutdown"))
}
test("cannot call methods on a stopped SparkContext (SPARK-5063)") {
sc.stop()
def assertFails(block: => Any): Unit = {
val thrown = intercept[IllegalStateException] {
block
}
assert(thrown.getMessage.contains("stopped"))
}
assertFails { sc.parallelize(1 to 100) }
assertFails { sc.textFile("/nonexistent-path") }
}
}
/**
 * Coalesces partitions based on their size assuming that the parent RDD is a [[HadoopRDD]].
 * Took this class out of the test suite to prevent "Task not serializable" exceptions.
 *
 * Greedily packs input splits into groups so that each group's total split
 * size stays below `maxSize` (an oversized single split still gets its own group).
 */
class SizeBasedCoalescer(val maxSize: Int) extends PartitionCoalescer with Serializable {
  override def coalesce(maxPartitions: Int, parent: RDD[_]): Array[PartitionGroup] = {
    val partitions: Array[Partition] = parent.asInstanceOf[HadoopRDD[Any, Any]].getPartitions
    val groups = ArrayBuffer[PartitionGroup]()
    var currentGroup = new PartitionGroup()
    var currentSum = 0L
    var totalSum = 0L
    var index = 0
    // Sort partitions by the size of their input splits. NOTE: Array.sortWith
    // returns a NEW array; the original code discarded that result, so the
    // partitions were never actually visited in size order.
    val sortedPartitions = partitions.sortWith { (partition1, partition2) =>
      val partition1Size = partition1.asInstanceOf[HadoopPartition].inputSplit.value.getLength
      val partition2Size = partition2.asInstanceOf[HadoopPartition].inputSplit.value.getLength
      partition1Size < partition2Size
    }
    // Closes out the group under construction and starts a fresh, empty one.
    def updateGroups(): Unit = {
      groups += currentGroup
      currentGroup = new PartitionGroup()
      currentSum = 0
    }
    // Adds a partition to the current group and tracks the running sizes.
    def addPartition(partition: Partition, splitSize: Long): Unit = {
      currentGroup.partitions += partition
      currentSum += splitSize
      totalSum += splitSize
    }
    while (index < sortedPartitions.length) {
      val partition = sortedPartitions(index)
      val fileSplit =
        partition.asInstanceOf[HadoopPartition].inputSplit.value.asInstanceOf[FileSplit]
      val splitSize = fileSplit.getLength
      // A split joins the current group when it fits under maxSize, or when the
      // group is still empty (an oversized split must live somewhere). The
      // original version dropped the final group whenever an oversized split
      // was added last through the empty-group branch; checking for exhaustion
      // after every addition fixes that.
      if (currentSum + splitSize < maxSize || currentGroup.partitions.isEmpty) {
        addPartition(partition, splitSize)
        index += 1
        if (index == sortedPartitions.length) {
          updateGroups()
        }
      } else {
        updateGroups()
      }
    }
    groups.toArray
  }
}
| wangyixiaohuihui/spark2-annotation | core/src/test/scala/org/apache/spark/rdd/RDDSuite.scala | Scala | apache-2.0 | 47,863 |
package com.softwaremill.bootzooka.user.application
import java.util.UUID
import com.softwaremill.bootzooka.test.{FlatSpecWithDb, TestHelpers}
import com.typesafe.scalalogging.StrictLogging
import com.flowy.fomoapi.database.postgres.SqlUserDao
import com.flowy.common.models.User
import org.scalatest.Matchers
import scala.language.implicitConversions
/**
 * Integration spec for [[SqlUserDao]] backed by the test database supplied
 * by [[FlatSpecWithDb]].
 *
 * `beforeEach` reseeds three users whose emails are "1email@sml.com",
 * "2email@sml.com" and "3email@sml.com"; several tests below look these
 * seeded rows up by email, so the seeding loop and the lookups must stay
 * in sync.
 */
class UserDaoSpec extends FlatSpecWithDb with StrictLogging with TestHelpers with Matchers {
  behavior of "UserDao"
  // Execution context for the DAO's futures (resolved via .futureValue below).
  implicit val ec = scala.concurrent.ExecutionContext.Implicits.global
  val userDao = new SqlUserDao(sqlDatabase)
  // Stable ids for the three seeded users; lazy so they are generated exactly once
  // and reused across every beforeEach invocation.
  lazy val randomIds = List.fill(3)(UUID.randomUUID())
  override def beforeEach() {
    super.beforeEach()
    // Seed user i as (firstI, lastI, passI, saltI) with email "Iemail@sml.com".
    for (i <- 1 to randomIds.size) {
      val first = "first" + i
      val last = "last" + i
      val password = "pass" + i
      val salt = "salt" + i
      userDao
        .add(User(randomIds(i - 1), i + "email@sml.com", first, last, password, salt))
        .futureValue
    }
  }
  it should "add new user" in {
    // Given
    val first = "test_person"
    val last = "test_last"
    // NOTE(review): `login` is unused — likely left over from an older User model.
    val login = "newuser"
    val email = "test@sml.com"
    // When
    userDao.add(newUser(first, last, email, "pass", "salt")).futureValue
    // Then
    userDao.findByEmail(email).futureValue should be('defined)
  }
  it should "fail with exception when trying to add user with existing email" in {
    // Given
    val first = "test1"
    val last = "test2"
    val email = "anotherEmaill@sml.com"
    userDao.add(newUser(first, last, email, "somePass", "someSalt")).futureValue
    // When & then: the second insert with the same email must fail the future.
    userDao.add(newUser(first, last, email, "pass", "salt")).failed.futureValue
  }
  it should "find by email" in {
    // Given: email seeded in beforeEach
    val email = "1email@sml.com"
    // When
    val userOpt = userDao.findByEmail(email).futureValue
    // Then
    userOpt.map(_.email) should equal(Some(email))
  }
  it should "find by uppercase email" in {
    // Given: lookup is expected to be case-insensitive, storage lower-cased
    val email = "2email@sml.com".toUpperCase
    // When
    val userOpt = userDao.findByEmail(email).futureValue
    // Then
    userOpt.map(_.email) should equal(Some(email.toLowerCase))
  }
  it should "change password" in {
    // Given
    val email = "1email@sml.com"
    val password = User.encryptPassword("pass11", "salt1")
    val user = userDao.findByEmail(email).futureValue.get
    // When
    userDao.changePassword(user.id, password).futureValue
    val postModifyUserOpt = userDao.findByEmail(email).futureValue
    val u = postModifyUserOpt.get
    // Then: only the password hash may differ from the original row.
    u should be(user.copy(passwordHash = password))
  }
  it should "change email" in {
    // Given
    val newEmail = "newmail@sml.pl"
    val user = userDao.findByEmail("1email@sml.com").futureValue
    val u = user.get
    // When
    userDao.changeEmail(u.id, newEmail).futureValue
    // Then: the row is retrievable under the new email, all else unchanged.
    userDao.findByEmail(newEmail).futureValue should equal(Some(u.copy(email = newEmail)))
  }
}
| asciiu/fomo | api/src/test/scala/com/softwaremill/bootzooka/user/application/UserDaoSpec.scala | Scala | apache-2.0 | 2,989 |
package models.hms
import play.api.libs.json._
/**
 * Request payload for the HMS transcode endpoint.
 *
 * Field names are UpperCamelCase to mirror the JSON wire format one-to-one.
 * Boolean flags travel over the wire as the JSON *strings* "true"/"false",
 * which is why the custom Format below converts to and from String.
 *
 * author: cvandrei
 * since: 2016-02-17
 */
case class Transcode(SourceType: String,
                     Sources: List[Source],
                     Collapsed: Option[Boolean],
                     CollapsedName: Option[String],
                     DownloadProvision: String,
                     PushFinishedNotification: Boolean,
                     PushErrorNotification: Boolean,
                     PushStatusNotification: Boolean,
                     PushNotificationCallback: String
                    )
object Transcode {
  // Hand-rolled Format because the wire format encodes booleans as strings.
  implicit object TranscodeReads extends Format[Transcode] {
    override def reads(json: JsValue): JsResult[Transcode] = {
      // "Collapsed" is optional: only the exact strings "true"/"false" map to
      // Some(...); anything else (absent field, other values) becomes None.
      val collapsed = (json \\ "Collapsed").asOpt[String] match {
        case Some("true") => Some(true)
        case Some("false") => Some(false)
        case _ => None
      }
      // NOTE(review): the push-notification flags are parsed with
      // String#toBoolean, which throws for anything other than "true"/"false";
      // a JSON boolean (rather than a string) would also fail `as[String]`.
      // Confirm the HMS API always sends these as strings.
      val transcode = Transcode(
        (json \\ "SourceType").as[String],
        (json \\ "Sources").as[List[Source]],
        collapsed,
        (json \\ "CollapsedName").asOpt[String],
        (json \\ "DownloadProvision").as[String],
        (json \\ "PushFinishedNotification").as[String].toBoolean,
        (json \\ "PushErrorNotification").as[String].toBoolean,
        (json \\ "PushStatusNotification").as[String].toBoolean,
        (json \\ "PushNotificationCallback").as[String]
      )
      JsSuccess(transcode)
    }
    override def writes(t: Transcode): JsValue = {
      // Mirror of reads: booleans are rendered back as the strings "true"/"false".
      val collapsed: Option[String] = if (t.Collapsed.isDefined) Some(t.Collapsed.get.toString) else None
      Json.obj(
        "SourceType" -> t.SourceType,
        "Sources" -> t.Sources,
        "Collapsed" -> collapsed,
        "CollapsedName" -> t.CollapsedName,
        "DownloadProvision" -> t.DownloadProvision,
        "PushFinishedNotification" -> t.PushFinishedNotification.toString,
        "PushErrorNotification" -> t.PushErrorNotification.toString,
        "PushStatusNotification" -> t.PushStatusNotification.toString,
        "PushNotificationCallback" -> t.PushNotificationCallback
      )
    }
  }
}
| indarium/hbbTVPlugin | app/models/hms/Transcode.scala | Scala | agpl-3.0 | 2,105 |
// Copyright (c) 2015 Ben Zimmer. All rights reserved.
// Implicit classes for string extension methods.
package bdzimmer.util
object StringUtils {

  /** Path separator used by [[StringPath./]]. */
  val slash = "/"

  /**
   * Extension method for joining path segments.
   *
   * Implemented as a value class, so the wrapper object is erased at compile time.
   */
  implicit class StringPath(val s: String) extends AnyVal {

    /** Joins two path segments with a forward slash: `"a" / "b" == "a/b"`. */
    def /(x: String): String = {
      // Use the shared `slash` constant instead of repeating the literal
      // (the original declared the constant but never used it here).
      s + slash + x
    }
  }

  /**
   * Extension methods for converting strings to primitive types without
   * throwing on malformed input.
   */
  implicit class StringConvertSafe(val s: String) extends AnyVal {

    import scala.util.Try

    /** Returns true only for the exact string "true"; anything else is false. */
    def toBooleanSafe: Boolean = "true".equals(s)

    /** Parses an Int, returning `default` for any malformed input. */
    def toIntSafe(default: Int = 0): Int = {
      Try(s.toInt).getOrElse(default)
    }

    /** Parses a Double, returning `default` for any malformed input. */
    def toDoubleSafe(default: Double = 0.0): Double = {
      Try(s.toDouble).getOrElse(default)
    }
  }

}
| bdzimmer/util-scala | src/main/scala/bdzimmer/util/StringUtils.scala | Scala | bsd-3-clause | 845 |
package scalaTutorial
/**
 * A Scala programmer's sense of balance:
 * favour `val`, immutable objects and side-effect-free methods first, and
 * reach for `var`, mutable objects and side-effecting methods only after a
 * specific need and deliberate judgement.
 *
 * A companion object is a Scala singleton declared with `object` that shares
 * its name and source file with a `class`; that class is called the companion
 * class of the object, and vice versa.
 */
class HelloWorld {
}

object HelloWorld {

  /** Scaling factor used by [[scaleCalc]]. */
  def scale = 5

  /** 7 times [[scale]] — evaluates to 35. */
  def scaleCalc = 7 * scale

  /** Approximation of pi. */
  def pi = 3.141592653589793

  /** Radius used by [[piCalc]]. */
  def radius = 10

  /** Circumference of a circle with [[radius]]: 2 * pi * r. */
  def piCalc = 2 * pi * radius

  /** Returns the larger of `x` and `y`. */
  def max(x: Int, y: Int) = if (x > y) x else y

  /** Prints a fixed greeting to stdout. */
  def greet() = println("Hello, world!")

  /** Prints the given message to stdout. */
  def greet_1(str: String) = println(str)

  /** Prints each character of `args` on its own line (a String is a sequence of Chars). */
  def printStr(args: String) = args.foreach(arg => println(arg))

  def main(args: Array[String]): Unit = {
    println("Hello world!")
    // Everything is an object: numbers are objects, functions are objects.
    var x = 10
    // BUG FIX: the original wrote "1 + 2 * 3 / x =" + 1 + 2 * 3 / x, which
    // string-concatenates the 1 and only then appends 2 * 3 / x (integer
    // division, = 0), printing "...=10". Interpolating the whole expression
    // evaluates it numerically (1 + 6 / 10 = 1).
    println(s"1 + 2 * 3 / x = ${1 + 2 * 3 / x}")
    var msg = "Goodbye cruel world!" // a var can be reassigned later
    val msgd = "Goodbye cruel world!" // a val is immutable
    val name: String = "test"
    println("isLower:" + name.exists(_.isLower))
    println("isLD: " + name.exists(_.isLetterOrDigit))
    // val 1 = 2  -- would not compile: a literal cannot appear on the left of a val
    println(piCalc)
    println(scaleCalc)
  }
}
| slieer/scala-tutorials | src/main/scala/scalaTutorial/Ch01_HelloWorld.scala | Scala | apache-2.0 | 1,517 |
package org.scalacheck.ops.time.joda
import org.joda.time.{LocalDateTime, ReadableDuration}
import org.scalacheck.ops.time.joda.ChronologyOps._
/**
 * ScalaCheck generator support for Joda-Time [[LocalDateTime]], defaulting to
 * the UTC time zone and the ISO chronology (via the mixed-in defaults).
 *
 * Arithmetic that would overflow the chronology's representable range is
 * clamped to the chronology's minimum/maximum instant instead of failing.
 */
sealed trait JodaLocalDateTimeGenerators extends JodaAbstractDateTimeGenerators
  with UTCTimeZoneDefault
  with ISOChronologyDefault {
  override type InstantType = LocalDateTime
  // Interprets the epoch millis in the chronology carried by the params.
  override protected[time] def asInstant(millis: Long)(implicit params: JodaTimeParams): LocalDateTime =
    new LocalDateTime(millis, params.chronology)
  // Adds the duration, clamping to the chronology's maximum on overflow
  // (Joda signals overflow with ArithmeticException).
  override protected[time] def addToCeil(instant: LocalDateTime, duration: ReadableDuration)
    (implicit params: JodaTimeParams): LocalDateTime = {
    try instant plus duration
    catch {
      case tooLarge: ArithmeticException => params.chronology.maxLocalDateTime
    }
  }
  // Subtracts the duration, clamping to the chronology's minimum on underflow.
  override protected[time] def subtractToFloor(instant: LocalDateTime, duration: ReadableDuration)
    (implicit params: JodaTimeParams): LocalDateTime = {
    try instant minus duration
    catch {
      case tooSmall: ArithmeticException => params.chronology.minLocalDateTime
    }
  }
  // Converts back to epoch millis by anchoring the local time in the
  // configured time zone.
  override protected[time] def asLong(instant: LocalDateTime)(implicit params: JodaTimeParams): Long = {
    instant.toDateTime(params.dateTimeZone).getMillis
  }
}
object JodaLocalDateTimeGenerators extends JodaLocalDateTimeGenerators
| gloriousfutureio/scalacheck-ops | joda/src/main/scala/org/scalacheck/ops/time/joda/JodaLocalDateTimeGenerators.scala | Scala | apache-2.0 | 1,290 |
import java.io.{PrintWriter, File}
import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
/**
* Created by pnagarjuna on 06/11/15.
*/
object Main {

  def main(args: Array[String]): Unit = {
    // Validate BEFORE reading args(0): the original indexed args(0) first and
    // crashed with ArrayIndexOutOfBoundsException instead of printing usage.
    if (args.length != 1) {
      println("Provide filename as the commandline argument")
      sys.exit(1)
    }
    val filename = args(0)
    // Column header (also echoed to stdout in the original).
    println("Version Make Model")
    val years = (2004 to 2015).toList
    // NOTE(review): the Future combinators below need an implicit
    // ExecutionContext in scope; presumably supplied transitively — confirm.
    store(getPath(filename), getPath(s"$filename-errors")) { (writer, errors) =>
      // Process one model year at a time, blocking on each before starting the
      // next (mirrors the original's per-year Await).
      years.foreach { year =>
        val f = getMakesList(year).flatMap { makes =>
          getModelsList(year, makes)
        }.map { eitherList =>
          eitherList.flatMap {
            case Right(models) => models
            case Left((y, make)) =>
              // Record the failed request instead of dropping it silently;
              // the errors writer was opened but never used in the original.
              errors.println(s"failed to fetch models for year $y, make ${make.Value}")
              List.empty[Atomic]
          }
        }.flatMap { models =>
          getVersionsList(year, models)
        }.map { versionResults =>
          versionResults.foreach {
            case Right(versions) =>
              // BUG FIX: the original line was writer.println(s"$year ${make.}"),
              // which does not compile (`make` is out of scope and the trailing
              // dot is dangling). Emit one row per fetched version instead.
              // NOTE(review): Atomic only exposes `Value` in this file — confirm
              // how make/model/version map onto the "Version Make Model" header.
              versions.foreach(version => writer.println(s"$year ${version.Value}"))
            case Left((y, model)) =>
              errors.println(s"failed to fetch versions for year $y, model ${model.Value}")
          }
        }
        Await.result(f, 100.minutes)
      }
    }
  }

  /**
   * Resolves the output file `<user.name>/<filename>.csv`.
   *
   * BUG FIX: the original interpolated `sys.props.get(...)` — an Option —
   * directly into the path, producing names like "Some(alice)/data.csv".
   * TODO(review): "user.name" is the account name, not a directory;
   * "user.home" was probably intended — confirm before changing the key.
   */
  def getPath(filename: String): File = {
    val base = sys.props.getOrElse("user.name", ".")
    new File(s"$base/$filename.csv")
  }

  /**
   * Opens writers for the data and error files, hands them to `f`, and
   * guarantees both are flushed and closed even when `f` throws
   * (the original leaked both writers on failure).
   */
  def store(dataFile: File, errorsFile: File)(f: (PrintWriter, PrintWriter) => Unit): Unit = {
    val writer = new PrintWriter(dataFile)
    val errors = new PrintWriter(errorsFile)
    try {
      f(writer, errors)
    } finally {
      writer.flush()
      errors.flush()
      errors.close()
      writer.close()
    }
  }

  /**
   * Fetches and parses the list of makes for the given model year.
   * The returned future fails if the HTTP call or the parsing fails.
   */
  def getMakesList(year: Int): Future[List[Atomic]] = {
    Utils.getMakes(year).flatMap { res =>
      Utils.parse(res.body.toString)
    }
  }

  /**
   * Fetches the models for every make of the given year. Each element is
   * Right(models) on success or Left((year, make)) identifying the request
   * that failed, so one failed make does not fail the whole batch.
   */
  def getModelsList(year: Int, makes: List[Atomic]): Future[List[Either[(Int, Atomic), List[Atomic]]]] = {
    Future.sequence {
      makes.map { make =>
        val f = Utils.getModels(year, make.Value).flatMap { res =>
          Utils.parse(res.body.toString)
        }.map { models => Right(models) }
        // NOTE(review): recovers from every Throwable, as the original did;
        // consider restricting to NonFatal.
        f.recover { case th => Left((year, make)) }
      }
    }
  }

  /** Same shape as [[getModelsList]], but resolves the versions of each model. */
  def getVersionsList(year: Int, models: List[Atomic]): Future[List[Either[(Int, Atomic), List[Atomic]]]] = {
    Future.sequence {
      models.map { model =>
        val f = Utils.getVersions(year, model.Value).flatMap { res =>
          Utils.parse(res.body.toString)
        }.map { versions => Right(versions) }
        f.recover { case th => Left((year, model)) }
      }
    }
  }
}
| pamu/make-model-versoin | src/main/scala/Main.scala | Scala | apache-2.0 | 2,393 |
/******************************************************************************
* Copyright (c) 2014, Equal Experts Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation
* are those of the authors and should not be interpreted as representing
* official policies, either expressed or implied, of the Midas Project.
******************************************************************************/
package com.ee.midas
import org.specs2.mutable.Specification
import org.junit.runner.RunWith
import org.specs2.runner.JUnitRunner
import org.specs2.mock.Mockito
import java.net._
import com.ee.midas.model.Configuration
import scala.util.Try
import java.io.{IOException, File}
import java.util.concurrent.TimeUnit
import java.util.concurrent.TimeUnit._
import org.specs2.specification.Scope
/**
 * Unit spec for MidasServer using mocked sockets and configuration.
 *
 * Each test subclasses MidasServer to stub out parsing and socket creation;
 * the server's accept loop is terminated by making the mocked
 * ServerSocket.accept() throw an IOException once the scripted clients
 * have been handed out.
 */
@RunWith(classOf[JUnitRunner])
class MidasServerSpecs extends Specification with Mockito {
  // Fresh mocks per example; errorLog collects messages captured via logError.
  trait Setup extends Scope {
    val mockConfiguration = mock[Configuration]
    val mockServer = mock[ServerSocket]
    val mockClient = mock[Socket]
    val mockMongoSocket = mock[Socket]
    val cmdConfig = CmdConfig(baseDeltasDir = deltasDir.toURI, mongoPort = 27050, midasPort = 27021)
    var errorLog: String = ""
  }
  val deltasDir = new File("deltas")
  "Midas Server" should {
    "start the configuration" in new Setup {
      //given: a server whose parsing and server socket are stubbed
      val server = new MidasServer(cmdConfig) {
        override def parse(url: URL, configFileName: String) = Try {
          mockConfiguration
        }
        override def createServerSocket: Try[ServerSocket] = Try {
          mockServer
        }
      }
      // accept() throws immediately so the accept loop exits at once
      mockServer.accept() throws new IOException("stop accepting clients.")
      //when
      server.start
      //then
      there was one(mockConfiguration).start
    }
    "accept a new connection if mongo is available" in new Setup {
      //given: mongo socket creation succeeds
      val server = new MidasServer(cmdConfig) {
        override def parse(url: URL, configFileName: String) = Try {
          mockConfiguration
        }
        override def createServerSocket: Try[ServerSocket] = Try {
          mockServer
        }
        override def createMongoSocket: Try[Socket] = Try {
          mockMongoSocket
        }
      }
      // first accept yields a client, second terminates the loop
      mockServer.accept() returns mockClient thenThrows new IOException("stop accepting clients.")
      //when
      server.start
      //then
      there was one(mockConfiguration).processNewConnection(mockClient, mockMongoSocket)
    }
    "reject an incoming connection if mongo is not reachable" in new Setup {
      //given: mongo socket creation fails, error messages are captured
      val server = new MidasServer(cmdConfig) {
        override def parse(url: URL, configFileName: String) = Try {
          mockConfiguration
        }
        override def logError(msg: String) = {
          errorLog = s"$errorLog, $msg"
        }
        override def createServerSocket: Try[ServerSocket] = Try {
          mockServer
        }
        override def createMongoSocket: Try[Socket] = Try {
          throw new IOException("mongo not available.")
        }
      }
      mockServer.accept() returns mockClient thenThrows new IOException("stop accepting clients.")
      //when
      server.start
      //then: the client is rejected (closed) and the failure is logged
      errorLog must contain (s"MongoDB on ${cmdConfig.mongoHost}:${cmdConfig.mongoPort} is not available!")
      there was one(mockClient).close()
    }
    "terminate the configuration when the server is stopped" in new Setup {
      //given: accept never fails, so only stop() can end the loop
      val server = new MidasServer(cmdConfig) {
        override def parse(url: URL, configFileName: String) = Try {
          mockConfiguration
        }
        override def createServerSocket: Try[ServerSocket] = Try {
          mockServer
        }
      }
      mockServer.accept() returns mockClient thenReturn mock[Socket]
      //when: a helper thread stops the server after 200ms
      MidasTerminator(server, stopAfter = 200, MILLISECONDS).start()
      server.start
      //then
      there was one(mockConfiguration).stop
      there was one(mockServer).close()
      server.isActive must beFalse
    }
  }
}
case class MidasTerminator(server: MidasServer, stopAfter: Int, unit: TimeUnit) extends Thread {
override def run() = {
unit.sleep(stopAfter)
server.stop
}
} | EqualExperts/Midas | src/test/scala/com/ee/midas/MidasServerSpecs.scala | Scala | bsd-2-clause | 5,498 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.