code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package com.github.alexadewit.scala_oauth2
import scalaz._, scalaz.syntax.either._, scalaz.concurrent._
import org.http4s.Uri
// Marker trait associating a provider type `P` with its wire format.
// NOTE(review): no members are visible in this file — presumably instances
// are defined elsewhere (typeclass style); confirm against the rest of the repo.
trait ProviderFormat[P]
| AlexaDeWit/scala-oauth2 | src/main/scala/Provider.scala | Scala | bsd-3-clause | 152 |
// @SOURCE:/home/eva/digits/conf/routes
// @HASH:191b1e11d2641e11fc2fa287beb6243a788da730
// @DATE:Mon Oct 14 20:53:38 HST 2013
import Routes.{prefix => _prefix, defaultPrefix => _defaultPrefix}
import play.core._
import play.core.Router._
import play.core.j._
import play.api.mvc._
import play.libs.F
import Router.queryString
// @LINE:10
// @LINE:7
// @LINE:6
// Generated by the Play routes compiler from conf/routes (see header hash/date).
// Reverse routers: build server-side `Call`s (HTTP method + URL) for each route.
// Do not edit by hand — regenerate by recompiling the routes file.
package controllers {

  // @LINE:10
  class ReverseAssets {

    // @LINE:10
    // Reverse route for `GET /assets/*file` -> controllers.Assets.at
    def at(file:String): Call = {
      Call("GET", _prefix + { _defaultPrefix } + "assets/" + implicitly[PathBindable[String]].unbind("file", file))
    }

  }

  // @LINE:7
  // @LINE:6
  class ReverseApplication {

    // @LINE:7
    // Reverse route for `GET /page1` -> controllers.Application.page1
    def page1(): Call = {
      Call("GET", _prefix + { _defaultPrefix } + "page1")
    }

    // @LINE:6
    // Reverse route for `GET /` -> controllers.Application.index
    def index(): Call = {
      Call("GET", _prefix)
    }

  }

}
// @LINE:10
// @LINE:7
// @LINE:6
// Generated by the Play routes compiler. JavaScript reverse routers: each
// member emits a JS function (as a string) that client code can call to build
// the URL for the corresponding route. The quadruple-quote sequences splice
// Scala values into JS string literals — do not edit by hand.
package controllers.javascript {

  // @LINE:10
  class ReverseAssets {

    // @LINE:10
    def at : JavascriptReverseRoute = JavascriptReverseRoute(
      "controllers.Assets.at",
      """
      function(file) {
      return _wA({method:"GET", url:"""" + _prefix + { _defaultPrefix } + """" + "assets/" + (""" + implicitly[PathBindable[String]].javascriptUnbind + """)("file", file)})
      }
      """
    )

  }

  // @LINE:7
  // @LINE:6
  class ReverseApplication {

    // @LINE:7
    def page1 : JavascriptReverseRoute = JavascriptReverseRoute(
      "controllers.Application.page1",
      """
      function() {
      return _wA({method:"GET", url:"""" + _prefix + { _defaultPrefix } + """" + "page1"})
      }
      """
    )

    // @LINE:6
    def index : JavascriptReverseRoute = JavascriptReverseRoute(
      "controllers.Application.index",
      """
      function() {
      return _wA({method:"GET", url:"""" + _prefix + """"})
      }
      """
    )

  }

}
// @LINE:10
// @LINE:7
// @LINE:6
// Generated by the Play routes compiler. Ref reverse routers: wrap each
// controller action in a `HandlerRef` (used mainly by the Java test API).
// Do not edit by hand.
package controllers.ref {

  // @LINE:10
  class ReverseAssets {

    // @LINE:10
    def at(path:String, file:String): play.api.mvc.HandlerRef[_] = new play.api.mvc.HandlerRef(
      controllers.Assets.at(path, file), HandlerDef(this, "controllers.Assets", "at", Seq(classOf[String], classOf[String]), "GET", """ Map static resources from the /public folder to the /assets URL path""", _prefix + """assets/$file<.+>""")
    )

  }

  // @LINE:7
  // @LINE:6
  class ReverseApplication {

    // @LINE:7
    def page1(): play.api.mvc.HandlerRef[_] = new play.api.mvc.HandlerRef(
      controllers.Application.page1(), HandlerDef(this, "controllers.Application", "page1", Seq(), "GET", """""", _prefix + """page1""")
    )

    // @LINE:6
    def index(): play.api.mvc.HandlerRef[_] = new play.api.mvc.HandlerRef(
      controllers.Application.index(), HandlerDef(this, "controllers.Application", "index", Seq(), "GET", """ Home page""", _prefix + """""")
    )

  }

}
| evashek/digits | target/scala-2.10/src_managed/main/routes_reverseRouting.scala | Scala | mit | 3,197 |
package org.transkop.ast
import org.objectweb.asm.MethodVisitor
import org.objectweb.asm.Opcodes._
import org.transkop.SymbolTable
/**
 * AST node for a string literal operand.
 *
 * `generate` emits a single LDC instruction pushing the constant onto the
 * JVM operand stack; the symbol table is not consulted for constants.
 */
case class StringNode(value: String) extends OperandNode {
  def generate(mv: MethodVisitor, symbolTable: SymbolTable) {
    mv.visitLdcInsn(value)
  }
}
| dzinot/transkOP | src/main/scala/org/transkop/ast/StringNode.scala | Scala | mit | 287 |
import leon.annotation._
import leon.lang._
// Leon verification exercise (insertion sort over a cons-list of BigInt).
// The pattern-match shapes and `ensuring` clauses are what the Leon verifier
// consumes — keep the code in this exact form.
object InsertionSort {
  sealed abstract class Option[T]
  case class Some[T](value: T) extends Option[T]
  case class None[T]() extends Option[T]

  sealed abstract class List {
    // Number of elements (non-negative by construction).
    def size: BigInt = (this match {
      case Nil() => 0
      case Cons(_, t) => 1 + t.size
    })

    // Set of element values (duplicates collapse).
    def content: Set[BigInt] = this match {
      case Nil() => Set()
      case Cons(h, t) => Set(h) ++ t.content
    }

    // Smallest element, or None for the empty list.
    def min: Option[BigInt] = this match {
      case Nil() => None()
      case Cons(h, t) => t.min match {
        case None() => Some(h)
        case Some(m) => if(h < m) Some(h) else Some(m)
      }
    }

    // Non-decreasing order check over adjacent pairs.
    def isSorted: Boolean = this match {
      case Nil() => true
      case Cons(h, Nil()) => true
      case Cons(h1, t1 @ Cons(h2, t2)) => h1 <= h2 && t1.isSorted
    }

    /* Inserting element 'e' into a sorted list 'l' produces a sorted list with
     * the expected content and size */
    // NOTE(review): no `ensuring` clause here — presumably this is the part
    // the exercise asks the student to supply so that `sort`'s postcondition
    // verifies; confirm against the exercise statement before adding one.
    def insert(e: BigInt): List = {
      require(isSorted)
      this match {
        case Nil() => Cons(e, Nil())
        case Cons(h, t) =>
          if (h <= e) {
            Cons(h, t.insert(e))
          } else {
            Cons(e, this)
          }
      }
    }

    /* Insertion sort yields a sorted list of same size and content as the input
     * list */
    def sort: List = (this match {
      case Nil() => Nil()
      case Cons(h, t) => t.sort.insert(h)
    }) ensuring(res => res.content == this.content
                    && res.isSorted
                    && res.size == this.size
    )
  }
  case class Cons(h: BigInt, t: List) extends List
  case class Nil() extends List
}
| ericpony/scala-examples | testcases/web/sav15/04_Exercise4.scala | Scala | mit | 1,659 |
package com.querydsl.scala.sql
import com.querydsl.scala._
import com.querydsl.sql.codegen._
import org.junit._
object MetaDataExporterTest {
  // Shared in-memory H2 connection, opened once before the test class runs
  // and closed after it. The unique URL avoids clashes between runs.
  private var connection: java.sql.Connection = _

  @BeforeClass
  def setUp() {
    Class.forName("org.h2.Driver")
    val url = "jdbc:h2:mem:testdb" + System.currentTimeMillis()
    connection = java.sql.DriverManager.getConnection(url, "sa", "")
    // Schema fixtures exercising the exporter's corner cases: reserved words,
    // underscores, quoted identifiers, single/multi-column keys, dates, etc.
    val ddl = Seq(
      "create table reserved (id int, while int)",
      "create table underscore (e_id int, c_id int)",
      "create table beangen1 (\"SEP_Order\" int)",
      "create table definstance (id int, definstance int, definstance1 int)",
      "create table pkfk (id int primary key, pk int, fk int)",
      "create table \"camelCase\" (id int)",
      "create table \"vwServiceName\" (id int)",
      "create table date_test (d date)",
      "create table date_time_test (dt datetime)",
      "create table survey (id int, name varchar(30))",
      "create table typetest (type int, constraint pk_typetest primary key(type))",
      """create table employee(
id INT, firstname VARCHAR(50), lastname VARCHAR(50), salary DECIMAL(10, 2),
datefield DATE, timefield TIME,
superior_id int, survey_id int, survey_name varchar(30),
CONSTRAINT PK_employee PRIMARY KEY (id),
CONSTRAINT FK_superior FOREIGN KEY (superior_id) REFERENCES employee(id))""",
      // table with count column
      "create table count_table(count int)",
      // multi primary key
      "create table multikey(id INT, id2 VARCHAR, id3 INT," +
        " CONSTRAINT pk_multikey PRIMARY KEY (id, id2, id3) )",
      // multi foreign key
      "create table multikey2(id INT, id2 INT, id3 INT, id4 INT, id5 INT, id6 INT," +
        " CONSTRAINT pk_multikey2 FOREIGN KEY (id4, id5, id6) REFERENCES multikey2(id, id2, id3) )"
    )
    val stmt = connection.createStatement()
    try {
      ddl.foreach(stmt.execute)
    } finally {
      stmt.close()
    }
  }

  @AfterClass
  def tearDown() {
    connection.close()
  }
}
class MetaDataExporterTest {
  import MetaDataExporterTest._

  /**
   * Runs a metadata export against the shared H2 connection and asserts the
   * generated Scala sources compile. Extracted because the four test cases
   * differed only in target folder, bean-serializer use, and schema flag.
   *
   * @param targetDir       output folder for the generated sources
   * @param withBeans       also generate bean classes via ScalaBeanSerializer
   * @param schemaToPackage append the DB schema name to the target package
   */
  private def exportAndAssertCompiles(
      targetDir: String,
      withBeans: Boolean = false,
      schemaToPackage: Boolean = false) {
    val directory = new java.io.File(targetDir)
    val exporter = new MetaDataExporter()
    exporter.setNamePrefix("Q")
    exporter.setPackageName("com.querydsl")
    exporter.setSchemaPattern("PUBLIC")
    if (schemaToPackage) {
      exporter.setSchemaToPackage(true)
    }
    exporter.setTargetFolder(directory)
    exporter.setSerializerClass(classOf[ScalaMetaDataSerializer])
    if (withBeans) {
      exporter.setBeanSerializerClass(classOf[ScalaBeanSerializer])
    }
    exporter.setCreateScalaSources(true)
    exporter.setTypeMappings(ScalaTypeMappings.create)
    exporter.export(connection.getMetaData)
    CompileTestUtils.assertCompileSuccess(directory)
  }

  @Test
  def Generate_Without_BeanTypes() {
    exportAndAssertCompiles("target/jdbcgen1")
  }

  @Test
  def Generate_With_BeanTypes() {
    exportAndAssertCompiles("target/jdbcgen2", withBeans = true)
  }

  @Test
  def Generate_With_Schema() {
    exportAndAssertCompiles("target/jdbcgen3", schemaToPackage = true)
  }

  @Test
  def Generate_With_BeanTypes_And_Schema() {
    exportAndAssertCompiles("target/jdbcgen4", withBeans = true, schemaToPackage = true)
  }
}
package ch.descabato.core.util
import akka.util.ByteString
import ch.descabato.utils.{BytesWrapper, Hash}
import com.fasterxml.jackson.core.{JsonFactory, JsonGenerator, JsonParser, Version}
import com.fasterxml.jackson.databind.deser.std.StdDeserializer
import com.fasterxml.jackson.databind.module.SimpleModule
import com.fasterxml.jackson.databind.ser.std.StdSerializer
import com.fasterxml.jackson.databind.{DeserializationContext, JavaType, ObjectMapper, SerializerProvider}
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import com.fasterxml.jackson.module.scala.experimental.ScalaObjectMapper
object Json {
  // Application-wide Jackson mapper, pre-configured in CustomObjectMapper
  // (Scala module plus binary codecs for BytesWrapper and Hash).
  val mapper = new CustomObjectMapper()

  // Manual round-trip scratch code kept for debugging the codecs; not compiled.
  /*
  def main(args: Array[String]): Unit = {
  tryMap
  }
  private def tryMap = {
  var map: Map[Hash, StoredChunk] = Map.empty
  val hash = Hash("Hello".getBytes("UTF-8"))
  map += hash -> StoredChunk("Hello", hash, 0, 500)
  roundTrip(map.toSeq)
  }
  private def tryHash = {
  val hash = (5, Hash("Hello".getBytes("UTF-8")))
  roundTrip(hash)
  }
  private def roundTrip[T: Manifest](map: T) = {
  println(map)
  val json = mapper.writeValueAsString(map)
  println(json)
  val clazz = manifest[T].runtimeClass
  println(s"Class is $clazz")
  val deserialized = mapper.readValue[T](json)
  println(deserialized)
  }
  */
}
/**
 * Jackson ObjectMapper pre-configured for this codebase: registers the Scala
 * module, binary (de)serializers for [[BytesWrapper]] and [[Hash]], and a
 * special-cased type construction for akka.util.ByteString.
 */
class CustomObjectMapper(val jsonFactory: JsonFactory = new JsonFactory()) extends ObjectMapper(jsonFactory) with ScalaObjectMapper {
  // Force ByteString to be treated as its declared class — presumably its
  // runtime class is a private subclass that confuses Jackson's type
  // resolution; confirm against akka.util.ByteString internals.
  override def constructType[T](implicit m: Manifest[T]): JavaType = {
    // Stable identifier (capitalized val) so the match compares by name.
    val ByteStringName = classOf[ByteString].getName
    m.runtimeClass.getName match {
      case ByteStringName => constructType(classOf[ByteString])
      case _ => super.constructType[T]
    }
  }

  registerModule(DefaultScalaModule)

  // Reads a BytesWrapper from a JSON binary (base64) value.
  class BaWrapperDeserializer extends StdDeserializer[BytesWrapper](classOf[BytesWrapper]) {
    def deserialize(jp: JsonParser, ctx: DeserializationContext): BytesWrapper = {
      val bytes = jp.readValueAs(classOf[Array[Byte]])
      BytesWrapper(bytes)
    }
  }

  // Writes only the live slice [offset, offset+length) of the wrapper.
  class BaWrapperSerializer extends StdSerializer[BytesWrapper](classOf[BytesWrapper]) {
    def serialize(ba: BytesWrapper, jg: JsonGenerator, prov: SerializerProvider): Unit = {
      jg.writeBinary(ba.array, ba.offset, ba.length)
    }
  }

  // Reads a Hash from a JSON binary value.
  class HashWrapperDeserializer extends StdDeserializer[Hash](classOf[Hash]) {
    def deserialize(jp: JsonParser, ctx: DeserializationContext): Hash = {
      val bytes = jp.readValueAs(classOf[Array[Byte]])
      Hash(bytes)
    }
  }

  // Writes a Hash via its wrapped byte slice.
  class HashWrapperSerializer extends StdSerializer[Hash](classOf[Hash]) {
    def serialize(ba: Hash, jg: JsonGenerator, prov: SerializerProvider): Unit = {
      val wrap = ba.wrap()
      jg.writeBinary(wrap.array, wrap.offset, wrap.length)
    }
  }

  // Register the custom codecs after the Scala module so they take precedence
  // for BytesWrapper/Hash.
  val testModule = new SimpleModule("DeScaBaTo", new Version(0, 5, 0, null, "ch.descabato", "core"))
  testModule.addDeserializer(classOf[BytesWrapper], new BaWrapperDeserializer())
  testModule.addSerializer(classOf[BytesWrapper], new BaWrapperSerializer())
  testModule.addDeserializer(classOf[Hash], new HashWrapperDeserializer())
  testModule.addSerializer(classOf[Hash], new HashWrapperSerializer())
  registerModule(testModule)
}
| Stivo/DeScaBaTo | core/src/main/scala/ch/descabato/core/util/Json.scala | Scala | gpl-3.0 | 3,216 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming
import java.io.File
import java.nio.ByteBuffer
import java.util.concurrent.{Semaphore, TimeUnit}
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import org.scalatest.concurrent.{Signaler, ThreadSignaler, TimeLimits}
import org.scalatest.concurrent.Eventually._
import org.scalatest.time.SpanSugar._
import org.apache.spark.SparkConf
import org.apache.spark.internal.config.UI._
import org.apache.spark.storage.StorageLevel
import org.apache.spark.storage.StreamBlockId
import org.apache.spark.streaming.receiver._
import org.apache.spark.streaming.receiver.WriteAheadLogBasedBlockHandler._
import org.apache.spark.util.Utils
/** Testsuite for testing the network receiver behavior */
class ReceiverSuite extends TestSuiteBase with TimeLimits with Serializable {

  // Necessary to make ScalaTest 3.x interrupt a thread on the JVM like ScalaTest 2.2.x
  implicit val signaler: Signaler = ThreadSignaler

  // Exercises start / store / reportError / restart / stop of a receiver
  // through a fake supervisor that records everything pushed to it.
  test("receiver life cycle") {

    val receiver = new FakeReceiver
    val executor = new FakeReceiverSupervisor(receiver)
    val executorStarted = new Semaphore(0)

    assert(executor.isAllEmpty)

    // Thread that runs the executor
    val executingThread = new Thread() {
      override def run() {
        executor.start()
        executorStarted.release(1)
        executor.awaitTermination()
      }
    }

    // Start the receiver
    executingThread.start()

    // Verify that the receiver
    intercept[Exception] {
      failAfter(200 millis) {
        executingThread.join()
      }
    }

    // Ensure executor is started
    executorStarted.acquire()

    // Verify that receiver was started
    assert(receiver.onStartCalled)
    assert(executor.isReceiverStarted)
    assert(receiver.isStarted)
    assert(!receiver.isStopped())
    assert(receiver.otherThread.isAlive)
    eventually(timeout(100 millis), interval(10 millis)) {
      assert(receiver.receiving)
    }

    // Verify whether the data stored by the receiver was sent to the executor
    val byteBuffer = ByteBuffer.allocate(100)
    val arrayBuffer = new ArrayBuffer[Int]()
    val iterator = arrayBuffer.iterator
    receiver.store(1)
    receiver.store(byteBuffer)
    receiver.store(arrayBuffer)
    receiver.store(iterator)
    assert(executor.singles.size === 1)
    assert(executor.singles.head === 1)
    assert(executor.byteBuffers.size === 1)
    assert(executor.byteBuffers.head.eq(byteBuffer))
    assert(executor.iterators.size === 1)
    assert(executor.iterators.head.eq(iterator))
    assert(executor.arrayBuffers.size === 1)
    assert(executor.arrayBuffers.head.eq(arrayBuffer))

    // Verify whether the exceptions reported by the receiver was sent to the executor
    val exception = new Exception
    receiver.reportError("Error", exception)
    assert(executor.errors.size === 1)
    assert(executor.errors.head.eq(exception))

    // Verify restarting actually stops and starts the receiver
    receiver.restart("restarting", null, 600)
    eventually(timeout(300 millis), interval(10 millis)) {
      // receiver will be stopped async
      assert(receiver.isStopped)
      assert(receiver.onStopCalled)
    }
    eventually(timeout(1000 millis), interval(10 millis)) {
      // receiver will be started async
      assert(receiver.onStartCalled)
      assert(executor.isReceiverStarted)
      assert(receiver.isStarted)
      assert(!receiver.isStopped)
      assert(receiver.receiving)
    }

    // Verify that stopping actually stops the thread
    failAfter(100 millis) {
      receiver.stop("test")
      assert(receiver.isStopped)
      assert(!receiver.otherThread.isAlive)

      // The thread that started the executor should complete
      // as stop() stops everything
      executingThread.join()
    }
  }

  // Disabled (timing-sensitive): checks BlockGenerator rate limiting against
  // configured maxRate with a proportional error margin.
  ignore("block generator throttling") {
    val blockGeneratorListener = new FakeBlockGeneratorListener
    val blockIntervalMs = 100
    val maxRate = 1001
    val conf = new SparkConf().set("spark.streaming.blockInterval", s"${blockIntervalMs}ms").
      set("spark.streaming.receiver.maxRate", maxRate.toString)
    val blockGenerator = new BlockGenerator(blockGeneratorListener, 1, conf)
    val expectedBlocks = 20
    val waitTime = expectedBlocks * blockIntervalMs
    val expectedMessages = maxRate * waitTime / 1000
    val expectedMessagesPerBlock = maxRate * blockIntervalMs / 1000
    val generatedData = new ArrayBuffer[Int]

    // Generate blocks
    val startTimeNs = System.nanoTime()
    blockGenerator.start()
    var count = 0
    while(System.nanoTime() - startTimeNs < TimeUnit.MILLISECONDS.toNanos(waitTime)) {
      blockGenerator.addData(count)
      generatedData += count
      count += 1
    }
    blockGenerator.stop()

    val recordedBlocks = blockGeneratorListener.arrayBuffers
    val recordedData = recordedBlocks.flatten
    assert(blockGeneratorListener.arrayBuffers.size > 0, "No blocks received")
    assert(recordedData.toSet === generatedData.toSet, "Received data not same")

    // recordedData size should be close to the expected rate; use an error margin proportional to
    // the value, so that rate changes don't cause a brittle test
    val minExpectedMessages = expectedMessages - 0.05 * expectedMessages
    val maxExpectedMessages = expectedMessages + 0.05 * expectedMessages
    val numMessages = recordedData.size
    assert(
      numMessages >= minExpectedMessages && numMessages <= maxExpectedMessages,
      s"#records received = $numMessages, not between $minExpectedMessages and $maxExpectedMessages"
    )

    // XXX Checking every block would require an even distribution of messages across blocks,
    // which throttling code does not control. Therefore, test against the average.
    val minExpectedMessagesPerBlock = expectedMessagesPerBlock - 0.05 * expectedMessagesPerBlock
    val maxExpectedMessagesPerBlock = expectedMessagesPerBlock + 0.05 * expectedMessagesPerBlock
    val receivedBlockSizes = recordedBlocks.map { _.size }.mkString(",")

    // the first and last block may be incomplete, so we slice them out
    val validBlocks = recordedBlocks.drop(1).dropRight(1)
    val averageBlockSize = validBlocks.map(block => block.size).sum / validBlocks.size

    assert(
      averageBlockSize >= minExpectedMessagesPerBlock &&
        averageBlockSize <= maxExpectedMessagesPerBlock,
      s"# records in received blocks = [$receivedBlockSizes], not between " +
        s"$minExpectedMessagesPerBlock and $maxExpectedMessagesPerBlock, on average"
    )
  }

  /**
   * Test whether write ahead logs are generated by received,
   * and automatically cleaned up. The clean up must be aware of the
   * remember duration of the input streams. E.g., input streams on which window()
   * has been applied must remember the data for longer, and hence corresponding
   * WALs should be cleaned later.
   */
  test("write ahead log - generating and cleaning") {
    val sparkConf = new SparkConf()
      .setMaster("local[4]")  // must be at least 3 as we are going to start 2 receivers
      .setAppName(framework)
      .set(UI_ENABLED, true)
      .set("spark.streaming.receiver.writeAheadLog.enable", "true")
      .set("spark.streaming.receiver.writeAheadLog.rollingIntervalSecs", "1")
    val batchDuration = Milliseconds(500)
    val tempDirectory = Utils.createTempDir()
    val logDirectory1 = new File(checkpointDirToLogDir(tempDirectory.getAbsolutePath, 0))
    val logDirectory2 = new File(checkpointDirToLogDir(tempDirectory.getAbsolutePath, 1))
    val allLogFiles1 = new mutable.HashSet[String]()
    val allLogFiles2 = new mutable.HashSet[String]()
    logInfo("Temp checkpoint directory = " + tempDirectory)

    def getBothCurrentLogFiles(): (Seq[String], Seq[String]) = {
      (getCurrentLogFiles(logDirectory1), getCurrentLogFiles(logDirectory2))
    }

    // Best-effort listing: races with the WAL roller, so IO errors are
    // swallowed and treated as "no files right now".
    def getCurrentLogFiles(logDirectory: File): Seq[String] = {
      try {
        if (logDirectory.exists()) {
          logDirectory.listFiles().filter { _.getName.startsWith("log") }.map { _.toString }
        } else {
          Seq.empty
        }
      } catch {
        case e: Exception =>
          Seq.empty
      }
    }

    def printLogFiles(message: String, files: Seq[String]) {
      logInfo(s"$message (${files.size} files):\\n" + files.mkString("\\n"))
    }

    withStreamingContext(new StreamingContext(sparkConf, batchDuration)) { ssc =>
      val receiver1 = new FakeReceiver(sendData = true)
      val receiver2 = new FakeReceiver(sendData = true)
      val receiverStream1 = ssc.receiverStream(receiver1)
      val receiverStream2 = ssc.receiverStream(receiver2)
      receiverStream1.register()
      receiverStream2.window(batchDuration * 6).register()  // 3 second window
      ssc.checkpoint(tempDirectory.getAbsolutePath())
      ssc.start()

      // Run until sufficient WAL files have been generated and
      // the first WAL files has been deleted
      eventually(timeout(20 seconds), interval(batchDuration.milliseconds millis)) {
        val (logFiles1, logFiles2) = getBothCurrentLogFiles()
        allLogFiles1 ++= logFiles1
        allLogFiles2 ++= logFiles2
        if (allLogFiles1.size > 0) {
          assert(!logFiles1.contains(allLogFiles1.toSeq.sorted.head))
        }
        if (allLogFiles2.size > 0) {
          assert(!logFiles2.contains(allLogFiles2.toSeq.sorted.head))
        }
        assert(allLogFiles1.size >= 7)
        assert(allLogFiles2.size >= 7)
      }
      ssc.stop(stopSparkContext = true, stopGracefully = true)

      val sortedAllLogFiles1 = allLogFiles1.toSeq.sorted
      val sortedAllLogFiles2 = allLogFiles2.toSeq.sorted
      val (leftLogFiles1, leftLogFiles2) = getBothCurrentLogFiles()

      printLogFiles("Receiver 0: all", sortedAllLogFiles1)
      printLogFiles("Receiver 0: left", leftLogFiles1)
      printLogFiles("Receiver 1: all", sortedAllLogFiles2)
      printLogFiles("Receiver 1: left", leftLogFiles2)

      // Verify that necessary latest log files are not deleted
      //   receiverStream1 needs to retain just the last batch = 1 log file
      //   receiverStream2 needs to retain 3 seconds (3-seconds window) = 3 log files
      assert(sortedAllLogFiles1.takeRight(1).forall(leftLogFiles1.contains))
      assert(sortedAllLogFiles2.takeRight(3).forall(leftLogFiles2.contains))
    }
  }

  /**
   * An implementation of NetworkReceiverExecutor used for testing a NetworkReceiver.
   * Instead of storing the data in the BlockManager, it stores all the data in a local buffer
   * that can used for verifying that the data has been forwarded correctly.
   */
  class FakeReceiverSupervisor(receiver: FakeReceiver)
    extends ReceiverSupervisor(receiver, new SparkConf()) {
    val singles = new ArrayBuffer[Any]
    val byteBuffers = new ArrayBuffer[ByteBuffer]
    val iterators = new ArrayBuffer[Iterator[_]]
    val arrayBuffers = new ArrayBuffer[ArrayBuffer[_]]
    val errors = new ArrayBuffer[Throwable]

    /** Check if all data structures are clean */
    def isAllEmpty: Boolean = {
      singles.isEmpty && byteBuffers.isEmpty && iterators.isEmpty &&
        arrayBuffers.isEmpty && errors.isEmpty
    }

    def pushSingle(data: Any) {
      singles += data
    }

    def pushBytes(
        bytes: ByteBuffer,
        optionalMetadata: Option[Any],
        optionalBlockId: Option[StreamBlockId]) {
      byteBuffers += bytes
    }

    def pushIterator(
        iterator: Iterator[_],
        optionalMetadata: Option[Any],
        optionalBlockId: Option[StreamBlockId]) {
      iterators += iterator
    }

    def pushArrayBuffer(
        arrayBuffer: ArrayBuffer[_],
        optionalMetadata: Option[Any],
        optionalBlockId: Option[StreamBlockId]) {
      arrayBuffers += arrayBuffer
    }

    def reportError(message: String, throwable: Throwable) {
      errors += throwable
    }

    override protected def onReceiverStart(): Boolean = true

    override def createBlockGenerator(
        blockGeneratorListener: BlockGeneratorListener): BlockGenerator = {
      null
    }
  }

  /**
   * An implementation of BlockGeneratorListener that is used to test the BlockGenerator.
   */
  // NOTE(review): `pushDelay` is never read — onPushBlock sleeps 0 — so the
  // parameter is dead; confirm before removing, callers may pass it.
  class FakeBlockGeneratorListener(pushDelay: Long = 0) extends BlockGeneratorListener {
    // buffer of data received as ArrayBuffers
    val arrayBuffers = new ArrayBuffer[ArrayBuffer[Int]]
    val errors = new ArrayBuffer[Throwable]

    def onAddData(data: Any, metadata: Any) { }
    def onGenerateBlock(blockId: StreamBlockId) { }
    def onPushBlock(blockId: StreamBlockId, arrayBuffer: ArrayBuffer[_]) {
      val bufferOfInts = arrayBuffer.map(_.asInstanceOf[Int])
      arrayBuffers += bufferOfInts
      Thread.sleep(0)
    }
    def onError(message: String, throwable: Throwable) {
      errors += throwable
    }
  }
}
/**
 * An implementation of Receiver that is used for testing a receiver's life cycle.
 */
// When `sendData` is set, a background thread stores an incrementing counter
// every 10 ms until the receiver is stopped.
class FakeReceiver(sendData: Boolean = false) extends Receiver[Int](StorageLevel.MEMORY_ONLY) {
  // @volatile: flags are written by the receiver thread and read by the test thread.
  @volatile var otherThread: Thread = null
  @volatile var receiving = false
  @volatile var onStartCalled = false
  @volatile var onStopCalled = false

  def onStart() {
    otherThread = new Thread() {
      override def run() {
        receiving = true
        var count = 0
        while(!isStopped()) {
          if (sendData) {
            store(count)
            count += 1
          }
          Thread.sleep(10)
        }
      }
    }
    onStartCalled = true
    otherThread.start()
  }

  def onStop() {
    onStopCalled = true
    // Blocks until the sender thread observes isStopped() and exits.
    otherThread.join()
  }

  // Resets the observable flags between restarts (does not touch the thread).
  def reset() {
    receiving = false
    onStartCalled = false
    onStopCalled = false
  }
}
| yanboliang/spark | streaming/src/test/scala/org/apache/spark/streaming/ReceiverSuite.scala | Scala | apache-2.0 | 14,444 |
package scorex.transaction
import scorex.block.Block
import scorex.block.Block.BlockId
import scorex.transaction.History.BlockchainScore
import scorex.utils.ScorexLogging
// Linear (single-branch) blockchain view over History; heights are 1-based.
trait BlockChain extends History with ScorexLogging {

  // Block at the given 1-based height, if present.
  def blockAt(height: Int): Option[Block]

  def genesisBlock: Option[Block] = blockAt(1)

  // Ancestor `back` steps up the reference (parent) chain, resolved by height.
  override def parent(block: Block, back: Int = 1): Option[Block] = {
    require(back > 0)
    heightOf(block.referenceField.value).flatMap(referenceHeight => blockAt(referenceHeight - back + 1))
  }

  // Drops the newest block; package-private so only the transaction layer can roll back.
  private[transaction] def discardBlock(): BlockChain

  // Up to `howMany` most recent blocks, newest first.
  override def lastBlocks(howMany: Int): Seq[Block] =
    (Math.max(1, height() - howMany + 1) to height()).flatMap(blockAt).reverse

  // Ids of up to `howMany` blocks following `parentSignature`, oldest first;
  // empty when the parent is not in this chain.
  def lookForward(parentSignature: BlockId, howMany: Int): Seq[BlockId] =
    heightOf(parentSignature).map { h =>
      (h + 1).to(Math.min(height(), h + howMany: Int)).flatMap(blockAt).map(_.uniqueId)
    }.getOrElse(Seq())

  def children(block: Block): Seq[Block]

  // NOTE(review): duplicates `genesisBlock` and uses `.get`, so it throws on
  // an empty chain — presumably only evaluated after genesis exists; being a
  // lazy val it also caches the first result permanently. Confirm both.
  override lazy val genesis: Block = blockAt(1).get
}
| ScorexProject/Scorex | scorex-basics/src/main/scala/scorex/transaction/BlockChain.scala | Scala | cc0-1.0 | 1,033 |
package io.vamp.http_api
import akka.http.scaladsl.model.StatusCodes.{ NotFound, OK }
import akka.util.Timeout
import io.vamp.common.Namespace
import io.vamp.common.http.HttpApiDirectives
import io.vamp.operation.controller.MetricsController
// GET /metrics/... endpoints. Every branch follows the same pattern: resolve
// a metric through MetricsController and answer 200 with the value, or 404
// when the controller yields no result.
trait MetricsRoute extends AbstractRoute with MetricsController {
  this: HttpApiDirectives ⇒

  def metricsRoutes(implicit namespace: Namespace, timeout: Timeout) = pathPrefix("metrics") {
    get {
      // /metrics/gateways/{gateway}/{metric}
      path("gateways" / Segment / Segment) { (gateway, metrics) ⇒
        pathEndOrSingleSlash {
          onSuccess(gatewayMetrics(gateway, metrics)) {
            case Some(result) ⇒ respondWith(OK, result)
            case _            ⇒ respondWith(NotFound, None)
          }
        }
      } ~ path("gateways" / Segment / "routes" / Segment / Segment) { (gateway, route, metrics) ⇒
        // /metrics/gateways/{gateway}/routes/{route}/{metric}
        pathEndOrSingleSlash {
          onSuccess(routeMetrics(gateway, route, metrics)) {
            case Some(result) ⇒ respondWith(OK, result)
            case _            ⇒ respondWith(NotFound, None)
          }
        }
      } ~ path("deployments" / Segment / "clusters" / Segment / "ports" / Segment / Segment) { (deployment, cluster, port, metrics) ⇒
        // /metrics/deployments/{d}/clusters/{c}/ports/{p}/{metric}
        pathEndOrSingleSlash {
          onSuccess(clusterMetrics(deployment, cluster, port, metrics)) {
            case Some(result) ⇒ respondWith(OK, result)
            case _            ⇒ respondWith(NotFound, None)
          }
        }
      } ~ path("deployments" / Segment / "clusters" / Segment / "services" / Segment / "ports" / Segment / Segment) { (deployment, cluster, service, port, metrics) ⇒
        // /metrics/deployments/{d}/clusters/{c}/services/{s}/ports/{p}/{metric}
        pathEndOrSingleSlash {
          onSuccess(serviceMetrics(deployment, cluster, service, port, metrics)) {
            case Some(result) ⇒ respondWith(OK, result)
            case _            ⇒ respondWith(NotFound, None)
          }
        }
      } ~ path("deployments" / Segment / "clusters" / Segment / "services" / Segment / "instances" / Segment / "ports" / Segment / Segment) { (deployment, cluster, service, instance, port, metrics) ⇒
        // /metrics/deployments/{d}/clusters/{c}/services/{s}/instances/{i}/ports/{p}/{metric}
        pathEndOrSingleSlash {
          onSuccess(instanceMetrics(deployment, cluster, service, instance, port, metrics)) {
            case Some(result) ⇒ respondWith(OK, result)
            case _            ⇒ respondWith(NotFound, None)
          }
        }
      }
    }
  }
}
| dragoslav/vamp | http_api/src/main/scala/io/vamp/http_api/MetricsRoute.scala | Scala | apache-2.0 | 2,373 |
// 1EC Graph Parser
// Copyright (c) University of California
// Copyright (c) Jonathan Kummerfeld
//
// This software is covered by a license. See the LICENSE.txt file in the
// top-level directory of this distribution or at
// https://github.com/jkkummerfeld/1ec-graph-parser for the full text of the
// license.
package edu.berkeley.nlp.graphparser
import scala.util.hashing.MurmurHash3
import java.lang.Long.{ rotateLeft => rotl64 }
import java.lang.Integer.{ rotateLeft => rotl }
/** A few useful functions for computing hash values.
*/
object Hash {
  // Based on the public domain MurmurHash3_x64_128:
  // https://code.google.com/p/smhasher/source/browse/trunk/MurmurHash3.cpp
  // 64-bit mixing constants from the MurmurHash3_x64_128 reference.
  final val c1 : Long = 0x87c37b91114253d5L
  final val c2 : Long = 0x4cf5ad432745937fL
@inline final def fmix64(oh: Long) = {
var h = oh ^ (oh >>> 33);
h *= 0xff51afd7ed558ccdL
h = h ^ (h >>> 33);
h *= 0xc4ceb9fe1a85ec53L
h ^ (h >>> 33);
}
  // Hashes a sequence of Ints following the MurmurHash3_x64_128 scheme: two
  // Ints per 128-bit block (each widened to a 64-bit lane), an odd trailing
  // Int handled by the tail, then the standard finalization. Only h2 — one
  // half of the 128-bit result — is returned.
  @inline final def hashToLong(nums: Int*) : Long = {
    val len : Long = nums.length * 8
    var h1 : Long = 0L
    var h2 : Long = 1L

    // body: consume pairs (nums(i-1), nums(i))
    var i = 1
    while (i < nums.length) {
      var k1 : Long = nums(i - 1)
      var k2 : Long = nums(i)
      i += 2

      k1 *= c1
      k1 = rotl64(k1,31)
      k1 *= c2
      h1 = h1 ^ k1

      h1 = rotl64(h1,27)
      h1 += h2
      h1 = h1*5+0x52dce729

      k2 *= c2
      k2 = rotl64(k2,33)
      k2 *= c1
      h2 = h2 ^ k2

      h2 = rotl64(h2,31)
      h2 += h1
      h2 = h2*5+0x38495ab5
    }

    // tail: odd-length input leaves one unconsumed element
    if ((nums.length & 1) == 1) {
      var k1 : Long = nums.last
      k1 *= c1
      k1 = rotl64(k1,31)
      k1 *= c2
      h1 = h1 ^ k1
    }

    // finalization
    h1 = h1 ^ len
    h2 = h2 ^ len
    h1 += h2
    h2 += h1
    h1 = fmix64(h1)
    h2 = fmix64(h2)
    h1 += h2
    h2 += h1
    h2
  }
  // Hashes a sequence of Longs: each Long is split into its low and high
  // 32-bit halves, which feed the two 64-bit lanes of a MurmurHash3_x64_128
  // block. Only h2 is returned.
  //
  // NOTE(review): the loop starts at i = 1 but consumes ONE element per
  // iteration, so nums(0) never contributes to the hash. This looks like an
  // off-by-one carried over from the pairwise loop in hashToLong — confirm
  // intent before changing it, since a fix would alter every produced value.
  @inline final def hashLongsToLong(nums: Long*) : Long = {
    val len : Long = nums.length * 16
    var h1 : Long = 0L
    var h2 : Long = 1L

    // body
    var i = 1
    while (i < nums.length) {
      var k1 : Long = nums(i) & 0xffffffffL
      var k2 : Long = nums(i) >>> 32
      i += 1

      k1 *= c1
      k1 = rotl64(k1,31)
      k1 *= c2
      h1 = h1 ^ k1

      h1 = rotl64(h1,27)
      h1 += h2
      h1 = h1*5+0x52dce729

      k2 *= c2
      k2 = rotl64(k2,33)
      k2 *= c1
      h2 = h2 ^ k2

      h2 = rotl64(h2,31)
      h2 += h1
      h2 = h2*5+0x38495ab5
    }

    // finalization
    // TODO: Are these XORs correct? Seems very odd
    // (review: `h ^= len` on both lanes matches the MurmurHash3_x64_128
    // reference finalization, so the XORs themselves look right)
    h1 = h1 ^ len
    h2 = h2 ^ len
    h1 += h2
    h2 += h1
    h1 = fmix64(h1)
    h2 = fmix64(h2)
    h1 += h2
    h2 += h1
    h2
  }
@inline final def hashLongToInt(num: Long) = {
val data1 : Int = (num & 0xffffffffL).toInt
val data2 : Int = (num >>> 32).toInt
// Mix data1
var k : Int = data1
k *= 0xcc9e2d51
k = rotl(k, 15)
k *= 0x1b873593
var h : Int = k
h = rotl(h, 13)
h = h * 5 + 0xe6546b64
// Mix data2
k = data2
k *= 0xcc9e2d51
k = rotl(k, 15)
k *= 0x1b873593
h ^= k
h = rotl(h, 13)
h = h * 5 + 0xe6546b64
// Finalize
h ^= 2
h ^= h >>> 16
h *= 0x85ebca6b
h ^= h >>> 13
h *= 0xc2b2ae35
h ^= h >>> 16
h
}
  // Int hash, using scala built-ins
  // Seed for the incremental Int hash below.
  final val initHash = 83007 // == "Seq".hashCode
  // Mixes one value into a running hash; pair with `finishHash` when done.
  @inline final def progressHash(num: Int, curHash: Int) =
    MurmurHash3.mix(curHash, num)
  // Finalizes a running hash; `curHashCount` is the number of values mixed in.
  @inline final def finishHash(curHash: Int, curHashCount: Int) =
    MurmurHash3.finalizeHash(curHash, curHashCount)
@inline final def hash(nums: Int*) = {
var curHash = initHash
var count = 0
while (count < nums.length) {
curHash = progressHash(nums(count), curHash)
count += 1
}
finishHash(curHash, count)
}
  // Fixed-arity variants of `hash`, manually unrolled — presumably to avoid
  // the varargs Seq allocation on hot paths (TODO confirm). Each must stay
  // consistent with `hash(nums: Int*)` for the same arguments.
  @inline final def hashN(num0: Int) = {
    var curHash = initHash
    curHash = progressHash(num0, curHash)
    finishHash(curHash, 1)
  }

  @inline final def hashN(num0: Int, num1: Int) = {
    var curHash = initHash
    curHash = progressHash(num0, curHash)
    curHash = progressHash(num1, curHash)
    finishHash(curHash, 2)
  }

  @inline final def hashN(num0: Int, num1: Int, num2: Int) = {
    var curHash = initHash
    curHash = progressHash(num0, curHash)
    curHash = progressHash(num1, curHash)
    curHash = progressHash(num2, curHash)
    finishHash(curHash, 3)
  }

  @inline final def hashN(num0: Int, num1: Int, num2: Int, num3: Int) = {
    var curHash = initHash
    curHash = progressHash(num0, curHash)
    curHash = progressHash(num1, curHash)
    curHash = progressHash(num2, curHash)
    curHash = progressHash(num3, curHash)
    finishHash(curHash, 4)
  }

  @inline final def hashN(
    num0: Int, num1: Int, num2: Int, num3: Int, num4: Int
  ) = {
    var curHash = initHash
    curHash = progressHash(num0, curHash)
    curHash = progressHash(num1, curHash)
    curHash = progressHash(num2, curHash)
    curHash = progressHash(num3, curHash)
    curHash = progressHash(num4, curHash)
    finishHash(curHash, 5)
  }

  @inline final def hashN(
    num0: Int, num1: Int, num2: Int, num3: Int, num4: Int, num5: Int
  ) = {
    var curHash = initHash
    curHash = progressHash(num0, curHash)
    curHash = progressHash(num1, curHash)
    curHash = progressHash(num2, curHash)
    curHash = progressHash(num3, curHash)
    curHash = progressHash(num4, curHash)
    curHash = progressHash(num5, curHash)
    finishHash(curHash, 6)
  }

  @inline final def hashN(
    num0: Int, num1: Int, num2: Int, num3: Int, num4: Int, num5: Int, num6: Int
  ) = {
    var curHash = initHash
    curHash = progressHash(num0, curHash)
    curHash = progressHash(num1, curHash)
    curHash = progressHash(num2, curHash)
    curHash = progressHash(num3, curHash)
    curHash = progressHash(num4, curHash)
    curHash = progressHash(num5, curHash)
    curHash = progressHash(num6, curHash)
    finishHash(curHash, 7)
  }
@inline final def hashN(
num0: Int, num1: Int, num2: Int, num3: Int, num4: Int, num5: Int, num6: Int,
num7: Int
) = {
var curHash = initHash
curHash = progressHash(num0, curHash)
curHash = progressHash(num1, curHash)
curHash = progressHash(num2, curHash)
curHash = progressHash(num3, curHash)
curHash = progressHash(num4, curHash)
curHash = progressHash(num5, curHash)
curHash = progressHash(num6, curHash)
curHash = progressHash(num7, curHash)
finishHash(curHash, 8)
}
@inline final def hashN(
num0: Int, num1: Int, num2: Int, num3: Int, num4: Int, num5: Int, num6: Int,
num7: Int, num8: Int
) = {
var curHash = initHash
curHash = progressHash(num0, curHash)
curHash = progressHash(num1, curHash)
curHash = progressHash(num2, curHash)
curHash = progressHash(num3, curHash)
curHash = progressHash(num4, curHash)
curHash = progressHash(num5, curHash)
curHash = progressHash(num6, curHash)
curHash = progressHash(num7, curHash)
curHash = progressHash(num8, curHash)
finishHash(curHash, 9)
}
// Convenience methods for packing (possibly with loss) multiple int keys into
// a single Long. Each key is truncated to its bit-field width before packing,
// so values wider than the field are not fully represented.

/** Packs two ints into a Long: key1 in bits 32-63, key2 in bits 0-31 (lossless). */
@inline final def combineInLong(key1: Int, key2: Int) = {
  val hi = key1.toLong & 0xffffffffL
  val lo = key2.toLong & 0xffffffffL
  (hi << 32) | lo
}
/** Packs three ints into a Long using 21-bit fields (keys truncated to 21 bits). */
@inline final def combineInLong(key1: Int, key2: Int, key3: Int) = {
  val f1 = key1.toLong & 0x1fffffL
  val f2 = key2.toLong & 0x1fffffL
  val f3 = key3.toLong & 0x1fffffL
  (f1 << 42) | (f2 << 21) | f3
}
/** Packs four ints into a Long using 16-bit fields (keys truncated to 16 bits). */
@inline final def combineInLong(key1: Int, key2: Int, key3: Int, key4: Int) = {
  val f1 = key1.toLong & 0xffffL
  val f2 = key2.toLong & 0xffffL
  val f3 = key3.toLong & 0xffffL
  val f4 = key4.toLong & 0xffffL
  (f1 << 48) | (f2 << 32) | (f3 << 16) | f4
}
/** Tuple variant of the two-key overload. */
@inline final def combineInLong(key: (Int, Int)) =
  combineInLong(key._1, key._2)
/** Tuple variant of the three-key overload. */
@inline final def combineInLong(key: (Int, Int, Int)) =
  combineInLong(key._1, key._2, key._3)
/** Tuple variant of the four-key overload. */
@inline final def combineInLong(key: (Int, Int, Int, Int)) =
  combineInLong(key._1, key._2, key._3, key._4)
/** Packs two ints into an Int using 16-bit fields (keys truncated to 16 bits). */
@inline final def combineInInt(key1: Int, key2: Int) = {
  val f1 = key1 & 0xffff
  val f2 = key2 & 0xffff
  (f1 << 16) | f2
}
/** Packs three ints into an Int using 10-bit fields (keys truncated to 10 bits). */
@inline final def combineInInt(key1: Int, key2: Int, key3: Int) = {
  val f1 = key1 & 0x3ff
  val f2 = key2 & 0x3ff
  val f3 = key3 & 0x3ff
  (f1 << 20) | (f2 << 10) | f3
}
/** Packs four ints into an Int using 8-bit fields (keys truncated to 8 bits). */
@inline final def combineInInt(key1: Int, key2: Int, key3: Int, key4: Int) = {
  val f1 = key1 & 0xff
  val f2 = key2 & 0xff
  val f3 = key3 & 0xff
  val f4 = key4 & 0xff
  (f1 << 24) | (f2 << 16) | (f3 << 8) | f4
}
/** Tuple variant of the two-key overload. */
@inline final def combineInInt(key: (Int, Int)) =
  combineInInt(key._1, key._2)
/** Tuple variant of the three-key overload. */
@inline final def combineInInt(key: (Int, Int, Int)) =
  combineInInt(key._1, key._2, key._3)
/** Tuple variant of the four-key overload. */
@inline final def combineInInt(key: (Int, Int, Int, Int)) =
  combineInInt(key._1, key._2, key._3, key._4)
// Corresponding methods for reversing the process.
// Shifts are *logical* (>>>) and the extracted fields are masked, so each
// method is an exact inverse of the matching combineIn{Long,Int} overload even
// when the topmost field has its high bit set. (The previous arithmetic >>
// sign-extended such fields: e.g. splitFromInt2 of combineInInt(0x8000, 2)
// returned (-32768, 2) instead of (0x8000, 2).)

/** Inverse of combineInLong(Int, Int): recovers the two 32-bit fields. */
@inline final def splitFromLong2(num: Long) = {
  val num1 = (num >>> 32).toInt
  val num2 = (num & 0xffffffffL).toInt
  (num1, num2)
}
/** Inverse of the three-key combineInLong: recovers the 21-bit fields. */
@inline final def splitFromLong3(num: Long) = {
  val num1 = ((num >>> 42) & 0x1fffffL).toInt
  val num2 = ((num >>> 21) & 0x1fffffL).toInt
  val num3 = (num & 0x1fffffL).toInt
  (num1, num2, num3)
}
/** Inverse of the four-key combineInLong: recovers the 16-bit fields. */
@inline final def splitFromLong4(num: Long) = {
  val num1 = ((num >>> 48) & 0xffffL).toInt
  val num2 = ((num >>> 32) & 0xffffL).toInt
  val num3 = ((num >>> 16) & 0xffffL).toInt
  val num4 = (num & 0xffffL).toInt
  (num1, num2, num3, num4)
}
/** Inverse of combineInInt(Int, Int): recovers the two 16-bit fields. */
@inline final def splitFromInt2(num: Int) = {
  val num1 = num >>> 16
  val num2 = num & 0xffff
  (num1, num2)
}
/** Inverse of the three-key combineInInt: recovers the 10-bit fields. */
@inline final def splitFromInt3(num: Int) = {
  val num1 = (num >>> 20) & 0x3ff
  val num2 = (num >>> 10) & 0x3ff
  val num3 = num & 0x3ff
  (num1, num2, num3)
}
/** Inverse of the four-key combineInInt: recovers the 8-bit fields. */
@inline final def splitFromInt4(num: Int) = {
  val num1 = num >>> 24
  val num2 = (num >>> 16) & 0xff
  val num3 = (num >>> 8) & 0xff
  val num4 = num & 0xff
  (num1, num2, num3, num4)
}
}
| jkkummerfeld/1ec-graph-parser | parser/src/main/scala/hash.scala | Scala | isc | 10,936 |
/*
* Copyright 2016 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600e.v3
import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.ct600e.v3.retriever.CT600EBoxRetriever
/** CT600E (v3) box E60 — an optional integer input box for income from
  * UK land and buildings, excluding amounts already included on form CT600.
  */
case class E60(value: Option[Int]) extends CtBoxIdentifier("Income UK land and buildings – excluding any amounts included on form CT600") with CtOptionalInteger with Input
object E60 extends ValidatableBox[CT600EBoxRetriever] {
  /** Fails validation when box E100 holds a positive value but E60 does not:
    * orZero appears to treat an absent E60 as zero, so None also triggers the
    * error (TODO confirm orZero semantics against CtOptionalInteger).
    */
  override def validate(boxRetriever: CT600EBoxRetriever): Set[CtValidation] = {
    import boxRetriever._
    (retrieveE100(), retrieveE60()) match {
      case (E100(Some(e100)), e60) if e100 > 0 && e60.orZero <= 0 =>
        Set(CtValidation(boxId = Some("E60"), errorMessageKey = "error.E60.must.be.positive.when.E100.positive"))
      case _ => Set.empty
    }
  }
}
} | ahudspith-equalexperts/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600e/v3/E60.scala | Scala | apache-2.0 | 1,323 |
package net.machinemuse.utils.render
import org.lwjgl.opengl._
import org.lwjgl.opengl.ARBShaderObjects._
import java.net.URL
import net.machinemuse.powersuits.common.CommonProxy
import scala.io.Source
import net.machinemuse.numina.general.MuseLogger
import java.nio.FloatBuffer
/**
* Author: MachineMuse (Claire Semple)
* Created: 3:31 AM, 5/16/13
*/
/** Wraps an OpenGL shader program built (via the ARB shader-object extension)
  * from a vertex and a fragment shader, whose GLSL sources are loaded from the
  * given classpath resource paths. Compilation and linking happen eagerly in
  * the constructor.
  *
  * @param vertSource classpath path of the vertex shader source
  * @param fragSource classpath path of the fragment shader source
  */
class ShaderProgram(vertSource: String, fragSource: String) {
  var compiled: Boolean = false
  val program: Int = ARBShaderObjects.glCreateProgramObjectARB
  if (program == 0) MuseLogger.logError("No shader program ID")
  val vert = mk(vertSource, ARBVertexShader.GL_VERTEX_SHADER_ARB)
  val frag = mk(fragSource, ARBFragmentShader.GL_FRAGMENT_SHADER_ARB)
  glAttachObjectARB(program, vert)
  glAttachObjectARB(program, frag)
  compile()

  /** Links and validates the program; logs (but does not throw) on failure.
    * NOTE(review): sets compiled = true even when link/validate reported
    * GL_FALSE — confirm whether that is intended. */
  def compile() {
    glLinkProgramARB(program)
    if (glGetObjectParameteriARB(program, GL_OBJECT_LINK_STATUS_ARB) == GL11.GL_FALSE) {
      MuseLogger.logError(getLogInfo(program))
    }
    glValidateProgramARB(program)
    if (glGetObjectParameteriARB(program, GL_OBJECT_VALIDATE_STATUS_ARB) == GL11.GL_FALSE) {
      MuseLogger.logError(getLogInfo(program))
    }
    compiled = true
  }

  /** Makes this program current for subsequent draw calls (no-op if not compiled). */
  def bind() {
    if (compiled) {
      ARBShaderObjects.glUseProgramObjectARB(program)
    }
  }

  /** Restores the fixed-function pipeline (program 0). */
  def unbind() {
    if (compiled) {
      ARBShaderObjects.glUseProgramObjectARB(0)
    }
  }

  /** Sets a vec2 uniform; logs if the uniform name is not found (location < 0). */
  def setUniform2f(name: String, f1: Float, f2: Float) {
    val pointer = GL20.glGetUniformLocation(program, name)
    if (pointer < 0) MuseLogger.logError("UNABLE TO ACCESS FLOATS " + name + " !!!")
    GL20.glUniform2f(pointer, f1, f2)
  }

  /** Sets a vec3 uniform; logs if the uniform name is not found. */
  def setUniform3f(name: String, f1:Float, f2:Float, f3:Float) {
    val pointer = GL20.glGetUniformLocation(program, name)
    if (pointer < 0) MuseLogger.logError("UNABLE TO ACCESS FLOATS " + name + " !!!")
    GL20.glUniform3f(pointer, f1, f2, f3)
  }

  /** Sets a mat4 uniform from a FloatBuffer (column-major, no transpose). */
  def setUniformMatrix4(name: String, fb:FloatBuffer) {
    val pointer = GL20.glGetUniformLocation(program, name)
    if (pointer < 0) MuseLogger.logError("UNABLE TO ACCESS FLOATS " + name + " !!!")
    GL20.glUniformMatrix4(pointer, false, fb)
  }

  /** Binds a sampler uniform to texture unit i. */
  def setTexUnit(name: String, i: Int) {
    val pointer = GL20.glGetUniformLocation(program, name)
    if (pointer < 0) MuseLogger.logError("UNABLE TO ACCESS TEX UNIT " + name + " !!!")
    GL20.glUniform1i(pointer, i)
  }

  /** Returns the GL info log for a shader or program object. */
  def getLogInfo(obj: Int): String = {
    glGetInfoLogARB(obj, glGetObjectParameteriARB(obj, GL_OBJECT_INFO_LOG_LENGTH_ARB))
  }

  /** Resolves a classpath resource relative to CommonProxy. */
  def getResource(url: String): URL = {
    classOf[CommonProxy].getResource(url)
  }

  /** Loads, creates and compiles a single shader object of the given type;
    * throws RuntimeException with the GL info log on compile failure. */
  def mk(filename: String, shaderType: Int): Int = {
    val resource: URL = getResource(filename)
    val shader = glCreateShaderObjectARB(shaderType)
    val shaderProg = Source.fromURL(resource).mkString
    // MuseLogger.logDebug("Created shader object with ID " + shader + " and text: \n" + shaderProg)
    glShaderSourceARB(shader, shaderProg)
    glCompileShaderARB(shader)
    if (ARBShaderObjects.glGetObjectParameteriARB(shader, GL_OBJECT_COMPILE_STATUS_ARB) == GL11.GL_FALSE) throw new RuntimeException("Error creating shader: " + getLogInfo(shader))
    shader
  }
}
| QMXTech/MachineMusePowersuits | src/main/scala/net/machinemuse/utils/render/ShaderProgram.scala | Scala | bsd-3-clause | 3,161 |
package bubblewrap
import java.net.URL
import java.util.concurrent.TimeUnit
import io.netty.handler.codec.http.HttpHeaders.Names._
import io.netty.handler.codec.http.HttpHeaders.Values._
import io.netty.handler.codec.http.cookie.ClientCookieDecoder
import io.netty.handler.ssl.util.InsecureTrustManagerFactory
import io.netty.handler.ssl.{SslContextBuilder, SslProvider}
import io.netty.util.HashedWheelTimer
import org.asynchttpclient.filter.ThrottleRequestFilter
import org.asynchttpclient.proxy.ProxyServer
import org.asynchttpclient.{DefaultAsyncHttpClient, DefaultAsyncHttpClientConfig, Realm}
import scala.collection.JavaConverters._
import scala.concurrent.{ExecutionContext, Promise}
/** Async HTTP client for crawling, built on AsyncHttpClient/Netty.
  * TLS verification is deliberately disabled (insecure trust manager) so
  * crawls do not fail on self-signed or invalid certificates.
  */
class HttpClient(clientSettings: ClientSettings = ClientSettings()) {
  // Trust-all SSL context: accepts any certificate chain
  val lenientSSLContext = SslContextBuilder.forClient().sslProvider(SslProvider.JDK).trustManager(InsecureTrustManagerFactory.INSTANCE).build()
  // Shared timer for timeouts (100ms tick, 100 slots)
  val timer = new HashedWheelTimer(100, TimeUnit.MILLISECONDS, 100)
  private val asyncHttpClientConfigBuilder: DefaultAsyncHttpClientConfig.Builder = new DefaultAsyncHttpClientConfig.Builder()
    .setConnectTimeout(clientSettings.connectionTimeout)
    .setRequestTimeout(clientSettings.requestTimeout)
    .setReadTimeout(clientSettings.readTimeout)
    .setSslContext(lenientSSLContext)
    .setUseInsecureTrustManager(true)
    .setMaxRequestRetry(clientSettings.retries)
    .setFollowRedirect(false) // redirects are surfaced to the caller, not followed
    .setKeepAlive(clientSettings.keepAlive).setNettyTimer(timer)
  // Optional global connection throttle (disabled when maxTotalConnections <= 0)
  if(clientSettings.maxTotalConnections > 0) asyncHttpClientConfigBuilder.addRequestFilter(new ThrottleRequestFilter(clientSettings.maxTotalConnections, clientSettings.requestThrottleTimeout))
  val client = new DefaultAsyncHttpClient(asyncHttpClientConfigBuilder.build())

  /** Issues an async GET for the given URL using the per-crawl config
    * (proxy, user agent, cookies, custom headers) and returns the response
    * future completed by HttpHandler.
    */
  def get(url: WebUrl, config: CrawlConfig)(implicit ec: ExecutionContext) = {
    val httpResponse = Promise[HttpResponse]()
    val handler = new HttpHandler(config, url, httpResponse)
    val request = client.prepareGet(url.toString)
    // Configure proxy; authenticated proxies set the realm on both the proxy
    // server and the request (preemptive auth)
    config.proxy.foreach {
      case PlainProxy(host, port) => request.setProxyServer(new ProxyServer.Builder(host, port).build())
      case proxy@ProxyWithAuth(host, port, user, pass, scheme) => {
        val proxyServerBuilder = new ProxyServer.Builder(host, port)
        val realm = new Realm.Builder(user, pass).setScheme(scheme.toNingScheme).setUsePreemptiveAuth(true).build()
        proxyServerBuilder.setRealm(realm)
        request.setProxyServer(proxyServerBuilder.build())
        request.setRealm(realm)
      }
    }
    // Default to gzip unless the caller supplied its own Accept-Encoding
    if (!config.customHeaders.headers.contains(ACCEPT_ENCODING))
      request.addHeader(ACCEPT_ENCODING, GZIP)
    request
      .addHeader(USER_AGENT, config.userAgent)
      .setCookies(HttpClient.cookies(config, url.toString).asJava)
    config.customHeaders.headers.foreach(header => request.addHeader(header._1.trim(), header._2))
    request.execute(handler)
    val responseFuture = httpResponse.future
    // HashedWheelTimer maintains NettyResponseFutures till timeout task reaps them.
    // AsyncHandlers are designed in a way to just store code, but storing HTTPResponse inside handler will bloat up the heap.
    responseFuture.onComplete(_ => handler.httpResponse = null)
    responseFuture
  }

  /** Closes the underlying AsyncHttpClient.
    * NOTE(review): the HashedWheelTimer is not stopped here — confirm whether
    * client.close() releases it or the timer thread leaks. */
  def shutDown() = {
    client.close()
  }
}
object HttpClient {
  // NOTE(review): 360 days (not 365) expressed in milliseconds; Netty's
  // Cookie.setMaxAge is documented in *seconds*, so this value is likely far
  // larger than intended (though still "effectively forever") — confirm.
  val oneYear = 360l * 24 * 60 * 60 * 1000

  /** Converts the crawl config's name/value cookie pairs into Netty client
    * cookies scoped to the given URL's host, path "/", http-only, and secure
    * when the URL is https.
    */
  def cookies(config: CrawlConfig, url: String) = {
    config.cookies.cookies
      .map(cookie => {
        val _cookie = ClientCookieDecoder.LAX.decode(s"${cookie._1}=${cookie._2}")
        _cookie.setHttpOnly(true)
        _cookie.setSecure(url.startsWith("https"))
        _cookie.setMaxAge(oneYear)
        _cookie.setWrap(false)
        _cookie.setDomain(host(url))
        _cookie.setPath("/")
        _cookie
      })
      .toList
  }

  /** Host component of the given URL (throws MalformedURLException on bad input). */
  def host(url: String) = new URL(url).getHost
}
| ind9/bubblewrap | src/main/scala/bubblewrap/HttpClient.scala | Scala | apache-2.0 | 3,847 |
import akka.actor.Actor
import akka.testkit.TestActorRef
import scala.concurrent.Future
import org.specs2.concurrent.{ ExecutionEnv => EE }
import reactivemongo.core.actors.StandardDBSystem
import reactivemongo.core.nodeset.{ Authenticate, Connection, Node }
import reactivemongo.core.protocol.Response
import reactivemongo.api.{
MongoConnection,
MongoConnectionOptions,
MongoDriver,
ReadPreference
}
/** Integration spec for the driver's node-set monitor: exercises channel
  * closing and actor-restart recovery against a live single-node DB system.
  * The numbered sections (#1, #2, ...) are ordered assertions on the evolving
  * node-set state; monitorRefreshMS is set very high so refreshAll/connectAll
  * cannot interfere with the scripted scenario.
  */
class MonitorSpec extends org.specs2.mutable.Specification {
  "Monitor" title

  import reactivemongo.api.tests._
  import Common.{ timeout, tryUntil }

  "Monitor" should {
    "manage a single node DB system" in { implicit ee: EE =>
      val expectFactor = 3L
      val opts = Common.DefaultOptions.copy(
        nbChannelsPerNode = 3,
        monitorRefreshMS = 3600000 // disable refreshAll/connectAll during test
      )
      withConAndSys(options = opts) { (con, sysRef) =>
        @inline def dbsystem = sysRef.underlyingActor
        waitIsAvailable(con, Common.failoverStrategy).map { _ =>
          Thread.sleep(250) // let the node set settle after availability
          val history1 = history(dbsystem)
          val nodeset1 = nodeSet(dbsystem)
          val primary1 = nodeset1.primary
          val authCon1 = primary1.toVector.flatMap {
            _.authenticatedConnections.subject
          }
          var chanId1 = -1
          // #1: a primary exists with 2..nbChannelsPerNode authenticated connections
          history1 aka "history #1" must not(beEmpty) and {
            primary1 aka "primary #1" must beSome[Node]
          } and {
            authCon1.size aka "authed connections #1" must beLike[Int] {
              case number => number must beGreaterThan(1) and (
                number must beLessThanOrEqualTo(opts.nbChannelsPerNode)
              )
            }
          } and { // #2: picking a connection does not change the primary
            nodeset1.pick(ReadPreference.Primary).
              aka("channel #1") must beSome[(Node, Connection)].like {
                case (node, chan) =>
                  val primary2 = nodeSet(dbsystem).primary
                  val authCon2 = primary2.toVector.flatMap {
                    _.authenticatedConnections.subject
                  }
                  node.name aka "node #1" must_== Common.primaryHost and {
                    // After one node is picked up
                    primary2.map(_.name) aka "primary #2" must beSome(
                      primary1.get.name
                    )
                  } and {
                    // After one connection is picked up...
                    chanId1 = chan.channel.getId
                    authCon2.size aka "authed connections #2" must beLike[Int] {
                      case number => number must beGreaterThan(1) and (
                        number must beLessThanOrEqualTo(opts.nbChannelsPerNode)
                      )
                    }
                  }
              }
          } and { // #3: closing the picked channel keeps the primary; next pick uses another channel
            chanId1 aka "channel ID #1" must not(beEqualTo(-1)) and {
              dbsystem.receive(channelClosed(chanId1)) must_== {}
            } and {
              val nodeSet3 = nodeSet(dbsystem)
              val primary3 = nodeSet3.primary
              primary3.map(_.name) aka "primary #3 (after ChannelClosed)" must (
                beSome(primary1.get.name)
              ) and {
                nodeSet3.pick(ReadPreference.Primary).
                  aka("channel #2") must beSome[(Node, Connection)].like {
                    case (_, chan) =>
                      val chanId2 = chan.channel.getId
                      chanId2 must not(beEqualTo(-1)) and (
                        chanId2 must not(beEqualTo(chanId1))
                      )
                  }
              }
            }
          }
        }
      }.await(0, timeout * expectFactor)
    }

    "manage unhandled Actor exception and Akka Restart" in { implicit ee: EE =>
      val expectFactor = 5L
      val opts = Common.DefaultOptions.copy(
        nbChannelsPerNode = 3,
        monitorRefreshMS = 3600000 // disable refreshAll/connectAll during test
      )
      withConAndSys(options = opts) { (con, sysRef) =>
        @inline def dbsystem = sysRef.underlyingActor
        waitIsAvailable(con, Common.failoverStrategy).map { _ =>
          Thread.sleep(250)
          val nodeset1 = nodeSet(dbsystem)
          val primary1 = nodeset1.primary
          val authCon1 = primary1.toVector.flatMap {
            _.authenticatedConnections.subject
          }
          // #1: healthy primary with connections before the fault is injected
          primary1 aka "primary #1" must beSome[Node] and {
            authCon1 aka "connections #1" must not(beEmpty)
          } and {
            nodeset1.pick(ReadPreference.Primary).
              aka("channel #1") must beSome[(Node, Connection)]
          } and { // #2: a malformed Response makes the actor throw (NPE)
            val respWithNulls = Response(null, null, null, null)
            dbsystem.receive(respWithNulls).
              aka("invalid response") must throwA[NullPointerException] and {
                sysRef.tell(respWithNulls, Actor.noSender) must_== {}
              }
          } and { // #3
            // Akka Restart on unhandled exception (see issue 558)
            tryUntil[Traversable[(Long, String)]](
              List(125, 250, 500, 1000, 2125)
            )(
              history(dbsystem), _.exists(_._2.startsWith("Restart("))
            ) aka "history #3" must beTrue
          } and { // #4 (see issue 558)
            tryUntil[Option[Node]](List(125, 250, 500, 1000, 2125))(
              nodeSet(dbsystem).primary, _.isDefined
            ) aka "primary #4" must beTrue
          } and { // #5: the node set recovered to the same primary after restart
            val nodeSet5 = nodeSet(dbsystem)
            val primary5 = nodeSet5.primary
            primary5.map(_.name) aka "primary #5 (after Akka Restart)" must (
              beSome(primary1.get.name)
            ) and {
              nodeSet5.pick(ReadPreference.Primary).
                aka("channel #5") must beSome[(Node, Connection)]
            }
          }
        }
      }.await(0, timeout * expectFactor)
    }
  }

  // ---

  /** Builds a fresh DB system actor + connection (names keyed off identity
    * hashes to stay unique per test) and runs f with them, always closing the
    * connection afterwards. */
  def withConAndSys[T](nodes: Seq[String] = Seq(Common.primaryHost), options: MongoConnectionOptions = Common.DefaultOptions, drv: MongoDriver = Common.driver, authentications: Seq[Authenticate] = Seq.empty[Authenticate])(f: (MongoConnection, TestActorRef[StandardDBSystem]) => Future[T])(implicit ee: EE): Future[T] = {
    // See MongoDriver#connection
    val supervisorName = s"Supervisor-${System identityHashCode ee}"
    val poolName = s"Connection-${System identityHashCode f}"
    implicit def sys: akka.actor.ActorSystem = drv.system
    lazy val mongosystem = TestActorRef[StandardDBSystem](
      standardDBSystem(
        supervisorName, poolName, nodes, authentications, options
      ), poolName
    )
    def connection = addConnection(
      drv,
      poolName, nodes, options, mongosystem
    ).mapTo[MongoConnection]
    connection.flatMap { con =>
      f(con, mongosystem).andThen { case _ => con.close() }
    }
  }
}
| maxime-gautre/ReactiveMongo | driver/src/test/scala/MonitorSpec.scala | Scala | apache-2.0 | 6,908 |
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package rx.lang.scala.subjects
import rx.lang.scala.Subject
/**
* Subject that emits the most recent item it has observed and all subsequent observed items to each subscribed
* `Observer`.
* <p>
* <img width="640" height="405" src="https://raw.githubusercontent.com/wiki/ReactiveX/RxJava/images/rx-operators/S.BehaviorSubject.png" alt="" />
* <p>
* @example
{{{
// observer will receive all events.
val subject = BehaviorSubject[String]("default")
subject.subscribe(observer)
subject.onNext("one")
subject.onNext("two")
subject.onNext("three")
// observer will receive the "one", "two" and "three" events, but not "zero"
val subject = BehaviorSubject[String]("default")
subject.onNext("zero")
subject.onNext("one")
subject.subscribe(observer)
subject.onNext("two")
subject.onNext("three")
// observer will receive only onCompleted
val subject = BehaviorSubject[String]("default")
subject.onNext("zero")
subject.onNext("one")
subject.onCompleted()
subject.subscribe(observer)
// observer will receive only onError
val subject = BehaviorSubject[String]("default")
subject.onNext("zero")
subject.onNext("one")
subject.onError(new RuntimeException("error"))
subject.subscribe(observer)
}}}
*/
object BehaviorSubject {
  /**
   * Creates a `BehaviorSubject` without a default item.
   *
   * @return the constructed `BehaviorSubject`
   */
  def apply[T](): BehaviorSubject[T] = {
    // Delegates to the underlying RxJava subject
    new BehaviorSubject[T](rx.subjects.BehaviorSubject.create())
  }

  /**
   * Creates a `BehaviorSubject` that emits the last item it observed and all subsequent items to each
   * `Observer` that subscribes to it.
   *
   * @param defaultValue the item that will be emitted first to any `Observer` as long as the
   * `BehaviorSubject` has not yet observed any items from its source `Observable`
   * @return the constructed `BehaviorSubject`
   */
  def apply[T](defaultValue: T): BehaviorSubject[T] = {
    new BehaviorSubject[T](rx.subjects.BehaviorSubject.create(defaultValue))
  }
}
/** Scala wrapper around the RxJava `BehaviorSubject`; construct via the companion's `apply`. */
class BehaviorSubject[T] private[scala] (val asJavaSubject: rx.subjects.BehaviorSubject[T]) extends Subject[T] {}
| joohnnie/RxScala | src/main/scala/rx/lang/scala/subjects/BehaviorSubject.scala | Scala | apache-2.0 | 2,747 |
/*
* Copyright (c) 2015-2016 Joseph Earl & contributors.
* All rights reserved.
*
* Copyright (c) 2010-2014 Joachim Hofer & contributors
* All rights reserved.
*
* This program and the accompanying materials are made available under the terms of the Eclipse
* Public License v1.0 which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html.
*/
package uk.co.josephearl.sbt.findbugs
import java.io.File
/** Pairs an XSLT stylesheet with the output file its transformed FindBugs report is written to. */
case class FindBugsXSLTTransformation(xslt: File, output: File)
| lenioapp/sbt-findbugs-plugin | src/main/scala/uk/co/josephearl/sbt/findbugs/FindBugsXSLTTransformation.scala | Scala | epl-1.0 | 519 |
/**
* Copyright (C) 2011 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms
import org.apache.commons.pool.BasePoolableObjectFactory
import org.orbeon.exception.OrbeonFormatter
import org.orbeon.oxf.util._
import java.io.ByteArrayInputStream
import java.io.ByteArrayOutputStream
import java.io.OutputStream
import java.util.zip.Deflater
import java.util.zip.GZIPInputStream
import java.util.zip.GZIPOutputStream
import scala.util.control.NonFatal
/** GZIP compression utilities backed by a pool of reusable `Deflater`s
  * (creating a Deflater is expensive, so instances are pooled and reset
  * between uses).
  */
object XFormsCompressor extends Logging {

  private implicit val Logger = Loggers.getIndentedLogger("utils")

  // Use a Deflater pool as creating deflaters is expensive
  private val deflaterPool = new SoftReferenceObjectPool(new DeflaterPoolableObjectFactory)

  private val BUFFER_SIZE = 1024 * 8
  private val TRAILER_SIZE = 8 // GZIP trailer: CRC-32 (4 bytes) + input size (4 bytes)

  /** GZIP-compresses the bytes at the given deflate level, using a pooled Deflater. */
  def compressBytes(bytesToEncode: Array[Byte], level: Int) = {
    val deflater = deflaterPool.borrowObject
    try {
      deflater.setLevel(level)
      val os = new ByteArrayOutputStream
      val gzipOS = new DeflaterGZIPOutputStream(deflater, os, BUFFER_SIZE)
      gzipOS.write(bytesToEncode)
      gzipOS.close()
      os.toByteArray
    } finally {
      // Always return the deflater so the pool can reset and reuse it
      deflaterPool.returnObject(deflater)
    }
  }

  // Compress using BEST_SPEED as serializing state quickly has been determined to be more important than saving extra
  // memory. Even this way compression typically is more than 10X.
  def compressBytes(bytesToEncode: Array[Byte]): Array[Byte] = compressBytes(bytesToEncode, Deflater.BEST_SPEED)

  // Example of effective compression ratios and speeds for XML inputs:
  //
  // Sizes in bytes:
  //
  //   Input   |  SPEED  | DEFAULT | COMPRESSION
  // ----------+---------+---------+------------
  // 1,485,020 | 130,366 |  90,323 |      85,118
  //   955,373 | 117,509 |  78,538 |      76,461
  //   511,776 |  49,751 |  35,309 |      33,858
  //   178,796 |  17,321 |  12,234 |      11,973
  //
  // Times in ms per compression:
  //
  //   Input   |  SPEED  | DEFAULT | COMPRESSION
  // ----------+---------+---------+------------
  // 1,485,020 |      15 |      37 |         122
  //   955,373 |      12 |      31 |         108
  //   511,776 |       6 |      13 |          42
  //   178,796 |       2 |       5 |          12

  /** Benchmarking helper: compresses 100 times at each level (timed via
    * withDebug), then returns the BEST_SPEED result. Not for production paths. */
  def compressBytesMeasurePerformance(bytesToEncode: Array[Byte]): Array[Byte] = {
    val settings = Map(
      Deflater.BEST_SPEED          → "BEST_SPEED",
      Deflater.DEFAULT_COMPRESSION → "DEFAULT_COMPRESSION",
      Deflater.BEST_COMPRESSION    → "BEST_COMPRESSION"
    )
    for ((level, description) ← settings)
      withDebug(description) {
        for (v ← 1 to 100)
          compressBytes(bytesToEncode, level)
      }
    compressBytes(bytesToEncode, Deflater.BEST_SPEED)
  }

  /** Inverse of compressBytes: gunzips the given bytes. */
  def uncompressBytes(bytesToDecode: Array[Byte]) = {
    val is = new GZIPInputStream(new ByteArrayInputStream(bytesToDecode))
    val os = new ByteArrayOutputStream(BUFFER_SIZE)
    NetUtils.copyStream(is, os)
    os.toByteArray
  }

  /** Pool lifecycle for Deflaters: create with BEST_SPEED/nowrap, reset on return. */
  private class DeflaterPoolableObjectFactory extends BasePoolableObjectFactory[Deflater] {
    def makeObject = {
      debug("creating new Deflater")
      // Use BEST_SPEED as profiler shows that DEFAULT_COMPRESSION is slower
      new Deflater(Deflater.BEST_SPEED, true)
    }

    override def passivateObject(o: Deflater): Unit =
      try o.reset()
      catch {
        case NonFatal(t) ⇒
          error("exception while passivating Deflater", Seq("throwable" → OrbeonFormatter.format(t)))
      }
  }

  // GZIPOutputStream which uses a custom Deflater
  private class DeflaterGZIPOutputStream(deflater: Deflater, out: OutputStream, size: Int) extends GZIPOutputStream(out, size) {
    // Super creates deflater, but doesn't yet do anything with it so we override it here
    // (`def` is the protected Deflater field inherited from DeflaterOutputStream)
    `def` = deflater

    private var closed = false

    // Override because default implementation calls def.close()
    override def close() =
      if (!closed) {
        finish()
        out.close()
        closed = true
      }

    // Override because IBM implementation calls def.end()
    override def finish(): Unit = {
      // Appends the GZIP trailer (little-endian CRC-32 then total input size)
      // at the given offset into buf
      def writeTrailer(buf: Array[Byte], offset: Int): Unit = {
        def writeInt(i: Int, offset: Int): Unit = {
          def writeShort(s: Int, offset: Int): Unit = {
            buf(offset) = (s & 0xff).asInstanceOf[Byte]
            buf(offset + 1) = ((s >> 8) & 0xff).asInstanceOf[Byte]
          }
          writeShort(i & 0xffff, offset)
          writeShort((i >> 16) & 0xffff, offset + 2)
        }
        writeInt(crc.getValue.toInt, offset)     // CRC-32 of uncompr. data
        writeInt(deflater.getTotalIn, offset + 4) // Number of uncompr. bytes
      }
      if (!deflater.finished) {
        deflater.finish()
        while (!deflater.finished) {
          var len = deflater.deflate(buf, 0, buf.length)
          if (deflater.finished && len <= buf.length - TRAILER_SIZE) {
            // last deflater buffer. Fit trailer at the end
            writeTrailer(buf, len)
            len = len + TRAILER_SIZE
            out.write(buf, 0, len)
            return
          }
          if (len > 0)
            out.write(buf, 0, len)
        }
        // if we can't fit the trailer at the end of the last
        // deflater buffer, we write it separately
        val trailer = new Array[Byte](TRAILER_SIZE)
        writeTrailer(trailer, 0)
        out.write(trailer)
      }
    }
  }
}
| ajw625/orbeon-forms | src/main/scala/org/orbeon/oxf/xforms/XFormsCompressor.scala | Scala | lgpl-2.1 | 6,370 |
package net.fwbrasil.smirror
import scala.reflect.runtime.currentMirror
/** Fixture: an outer class with a member inner class, used to exercise
  * reflection on inner-class constructors and vals. */
class SClassInnerSpecTestClass {
  // Inner class instances are bound to an outer SClassInnerSpecTestClass instance
  class InnerClass(val m1: String)
  def newInner = new InnerClass("1")
}
/** Verifies that smirror handles inner classes: invoking an inner-class
  * constructor (which needs the outer instance as first argument) and reading
  * an inner-class val. */
class SClassInnerSpec extends SMirrorSpec {

  val instance = new SClassInnerSpecTestClass
  val inner = instance.newInner

  "A inner SClass" should "invoke constructor" in
    test[SClassInnerSpecTestClass#InnerClass] { (sClass, jClass) =>
      // The outer instance is passed as the implicit first constructor argument
      sClass.constructors.head.invoke(instance, "2").m1 should
        equal("2")
    }

  "A inner SClass" should "return val" in
    test[SClassInnerSpecTestClass#InnerClass] { (sClass, jClass) =>
      sClass.vals.head.get(inner) should
        equal("1")
    }
}
} | fwbrasil/smirror | src/test/scala/net/fwbrasil/smirror/SClassInnerSpec.scala | Scala | lgpl-2.1 | 747 |
package com.btcontract.wallet
import android.os.Bundle
import android.view.View
import scala.util.Try
/** Screen shown after an uncaught crash; lets the user share the stored error report. */
class EmergencyActivity extends BaseActivity {
  override def START(state: Bundle): Unit = setContentView(R.layout.activity_emergency)

  // onClick handler from the layout: shares the crash report captured by UncaughtHandler.
  // NOTE(review): if the ERROR_REPORT extra is absent, getStringExtra returns null and
  // Try still succeeds, so share(null) would be called — confirm share handles null.
  def shareErrorReport(view: View): Unit = Try(getIntent getStringExtra UncaughtHandler.ERROR_REPORT).foreach(share)
}
| btcontract/wallet | app/src/main/java/com/btcontract/wallet/EmergencyActivity.scala | Scala | apache-2.0 | 359 |
import sbt._
import Keys._
import play.Project._
/** sbt (Play 2.x) build definition for the comicserver application. */
object ApplicationBuild extends Build {

  val appName = "comicserver"
  val appVersion = "1.0-SNAPSHOT"

  // Library dependencies resolved in addition to the Play defaults
  val appDependencies = Seq(
    // Add your project dependencies here,
    jdbc,
    anorm,
    "com.top10" %% "scala-redis-client" % "1.13.0",
    "org.scalaj" % "scalaj-http_2.10" % "0.3.9" exclude("junit", "junit"),
    "com.github.nscala-time" %% "nscala-time" % "0.4.2"
  )

  val main = play.Project(appName, appVersion, appDependencies).settings(
    // Add your own project settings here
    // resolvers += "Sonatype" at "https://oss.sonatype.org/service/local/staging/deploy/maven2/"
    resolvers += "Sonatype OSS Releases" at "http://oss.sonatype.org/content/repositories/releases/"
  )
}
| kypeli/scala-comic-server | project/Build.scala | Scala | gpl-2.0 | 782 |
package toguru.api
import toguru.impl.{All, AlwaysOffCondition, AlwaysOnCondition, UuidDistributionCondition, Attribute => Att}
/** Predicate deciding whether a feature toggle applies for a given client. */
trait Condition {
  def applies(clientInfo: ClientInfo): Boolean
}
object Condition {

  /** Condition that always applies. */
  val On: Condition = AlwaysOnCondition

  /** Condition that never applies. */
  val Off: Condition = AlwaysOffCondition

  /** Condition based on projecting the client's UUID into the given range. */
  def UuidRange(range: Range): Condition =
    UuidDistributionCondition(List(range), UuidDistributionCondition.defaultUuidToIntProjection)

  /** Condition on a client attribute matching any of the given values. */
  def Attribute(name: String, values: String*): Condition = Att(name, values)

  /** Combines the given conditions: none yields the always-on condition, a
    * single condition is returned as-is, several are conjoined via All. */
  def apply(conditions: Condition*): Condition =
    if (conditions.isEmpty) On
    else if (conditions.size == 1) conditions.head
    else All(conditions.toSet)
}
| AutoScout24/toguru-scala-client | core/src/main/scala/toguru/api/Condition.scala | Scala | mit | 694 |
/**
* Copyright 2015, deepsense.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.deepsense.deeplang.params.exceptions
/** Validation failure raised when a parameter value cannot be converted to the expected type.
  *
  * @param source         the value whose runtime class could not be converted
  * @param targetTypeName name of the type the conversion targeted
  */
case class TypeConversionException(source: Any, targetTypeName: String)
  extends ValidationException(s"Cannot convert ${source.getClass} to $targetTypeName.")
| deepsense-io/seahorse-workflow-executor | deeplang/src/main/scala/io/deepsense/deeplang/params/exceptions/TypeConversionException.scala | Scala | apache-2.0 | 808 |
/** Demo of (deliberately pathological) type-class-style behavior.
  * K breaks the equals contract on purpose; do not imitate. */
object TypeClassDemo extends App {
  // equals always answers false — even for the same instance (DTTAH:
  // deliberately violates the reflexivity requirement of the equals contract)
  class K {
    override def equals (that: Any): Boolean = false } // DTTAH

  val o = new K()
  // == delegates to the broken equals, while eq is reference identity
  println(( o == o, o != o, o eq o )) // (false,true,true)

  // Companion object doubling as an Ordering[K] whose compare is the constant
  // -601, i.e. every value is "less than" every other ("1.6 bits used" ≈
  // log2(601/256) of the Int is meaningful). NOTE(review): relies on this
  // companion being picked up for the < comparisons below — confirm which
  // implicit makes o < k compile.
  object K extends Ordering[K] {
    override def compare (x: K, y: K): Int = -601 } // 1.6 bits used

  val k = o
  // compare is constant-negative, so both directions report "less than"
  println(( o < k, k < o )) // (true,true)

  // Cross product of the two ranges, multiplied pointwise
  println( for (x <- -1 to 1; y <- 4 to 6) yield x * y )
  // Vector(-4, -5, -6, 0, 0, 0, 4, 5, 6)
}
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package org.scalajs.linker.testutils
import scala.concurrent._
import scala.concurrent.ExecutionContext.Implicits.global
import org.scalajs.linker.StandardImpl
import org.scalajs.linker.interface.IRFile
/** Lazily-loaded collections of IR files used by linker tests. */
object TestIRRepo {

  /** IR of the minimal test stdlib. */
  val minilib: Future[Seq[IRFile]] = load(StdlibHolder.minilib)

  /** IR of the full stdlib. */
  val fulllib: Future[Seq[IRFile]] = load(StdlibHolder.fulllib)

  /** An empty IR repository. */
  val empty: Future[Seq[IRFile]] = Future.successful(Nil)

  /** IR of previously published library versions, keyed like StdlibHolder.previousLibs. */
  val previousLibs: Map[String, Future[Seq[IRFile]]] =
    StdlibHolder.previousLibs.map { case (version, path) => version -> load(path) }

  /** Loads the jar at `stdlibPath` and caches its IR files. */
  private def load(stdlibPath: String) = {
    val cache = StandardImpl.irFileCache().newCache
    Platform.loadJar(stdlibPath).flatMap(files => cache.cached(files))
  }
}
| scala-js/scala-js | linker/shared/src/test/scala/org/scalajs/linker/testutils/TestIRRepo.scala | Scala | apache-2.0 | 982 |
package com.twitter.finagle.http
import com.twitter.util.{Await, Promise, Future}
import com.twitter.finagle.client.Transporter
import com.twitter.finagle.{Service, ServiceFactory, Stack}
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class TlsFilterTest extends FunSuite {
  import Version._
  import Method._

  // A service that captures the incoming request into `p` and never replies,
  // letting each test inspect exactly what the filter sent downstream.
  def svc(p: Promise[Request]) = Service.mk { (req: Request) =>
    p.setValue(req)
    Future.never
  }

  test("filter") {
    val host = "test.host"
    val tls = new TlsFilter(host)
    val req = Request(Http11, Get, "/")
    val p = new Promise[Request]
    // Applying the filter should stamp the configured hostname into the
    // request's Host header before it reaches the service.
    (tls andThen svc(p))(req)
    assert(Await.result(p).headerMap.get("Host") === Some(host))
  }

  test("module") {
    val host = "test.host"
    val p = new Promise[Request]
    // Same expectation, but the filter is built through its Stack module and
    // configured via the TLSHostname stack parameter.
    val stk = TlsFilter.module.toStack(
      Stack.Leaf(TlsFilter.role, ServiceFactory.const(svc(p))))
    val fac = stk.make(Stack.Params.empty + Transporter.TLSHostname(Some(host)))
    Await.result(fac())(Request(Http11, Get, "/"))
    assert(Await.result(p).headerMap.get("Host") === Some(host))
  }
}
| zfy0701/finagle | finagle-http/src/test/scala/com/twitter/finagle/http/TlsFilterTest.scala | Scala | apache-2.0 | 1,144 |
/*******************************************************************************
* Copyright (c) 2019. Carl Minden
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
******************************************************************************/
package com.anathema_roguelike
package entities.characters.player.perks.skills
import com.anathema_roguelike.entities.characters.perks.{PassivePerk, PassthroughPerk}
/** The Attunement skill: a [[Skill]] whose perk is passive (not yet implemented). */
class Attunement() extends Skill[PassivePerk] {
  // Calling this currently throws scala.NotImplementedError.
  override protected def createPerk: PassivePerk = ??? // TODO Auto-generated method stub
} | carlminden/anathema-roguelike | src/com/anathema_roguelike/entities/characters/player/perks/skills/Attunement.scala | Scala | gpl-3.0 | 1,154 |
/*
* Copyright 2015 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ibm.spark.communication.actors
import akka.actor.{Actor, ActorRef}
import akka.util.ByteString
import com.ibm.spark.communication.{ZMQMessage, SocketManager}
import com.ibm.spark.utils.LogLike
import org.zeromq.ZMQ
/**
* Represents an actor containing a dealer socket.
*
* @param connection The address to connect to
* @param listener The actor to send incoming messages back to
*/
class DealerSocketActor(connection: String, listener: ActorRef)
  extends Actor with LogLike
{
  logger.debug(s"Initializing dealer socket actor for $connection")

  private val manager: SocketManager = new SocketManager

  // Open the dealer socket eagerly at actor construction; every incoming
  // multi-frame message is forwarded to `listener` as a ZMQMessage.
  private val socket = manager.newDealerSocket(connection, (message: Seq[String]) => {
    listener ! ZMQMessage(message.map(ByteString.apply): _*)
  })

  // Release the underlying socket when the actor stops.
  override def postStop(): Unit = {
    manager.closeSocket(socket)
  }

  // Outgoing path: decode each frame with the ZMQ charset and send all
  // frames over the dealer socket.
  override def receive: Actor.Receive = {
    case zmqMessage: ZMQMessage =>
      val frames = zmqMessage.frames.map(byteString =>
        new String(byteString.toArray, ZMQ.CHARSET))
      socket.send(frames: _*)
  }
}
| yeghishe/spark-kernel | communication/src/main/scala/com/ibm/spark/communication/actors/DealerSocketActor.scala | Scala | apache-2.0 | 1,671 |
package org.garage.guru.domain
import scala.util.Try
/**
 * Persistence operations for parking lots and the vehicles parked in them.
 *
 * @tparam FreeLot   representation of a lot with free space
 * @tparam TakenLot  representation of an occupied lot
 * @tparam Vehicle   the vehicle type being parked
 * @tparam VehicleId identifier used to look up a parked vehicle
 */
trait Repository[FreeLot, TakenLot, Vehicle, VehicleId] {

  /** Finds a free lot suitable for the given vehicle. */
  def findFreeLot(vehicle: Vehicle): Try[FreeLot]

  /** Finds the occupied lot holding the vehicle with the given id. */
  def findTakenLot(vehicleId: VehicleId): Try[TakenLot]

  /** Persists the given parking lot and returns it. */
  def save[L <: ParkingLot](parkingLot: L): Try[L]

  /** Returns the currently free parking lots. */
  def freeLots(): Try[FreeParkingLots]
}
| ddd-fun/garage-guru-fun | src/main/scala/org/garage/guru/domain/Repository.scala | Scala | apache-2.0 | 323 |
package com.example
// Message protocol for the device manager.
object DeviceManager {
  // Request to register and track a device identified by group and device id.
  final case class RequestTrackDevice(groupId: String, deviceId: String)
  // Acknowledgement that the requested device has been registered.
  case object DeviceRegistered
}
// Device manager implementation; behavior not yet implemented.
class DeviceManager {
}
| matija94/show-me-the-code | iot_device_manager/src/main/scala/com/example/DeviceManager.scala | Scala | mit | 176 |
// Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
// Licensed under the Apache License, Version 2.0 (see LICENSE).
package org.pantsbuild.testproject.non_exports;
/**
 * Test fixture extending [[A]], used to exercise compilation against a parent
 * class that its defining target does not re-export.
 */
class B extends A {
  /** Intentionally empty member so B contributes code of its own. */
  def foo_b(): Unit = {
    // Deprecated procedure syntax (`def foo_b() { }`) replaced with an
    // explicit `: Unit =` result type; the signature is unchanged.
  }
}
| pombredanne/pants | testprojects/tests/scala/org/pantsbuild/testproject/non_exports/B.scala | Scala | apache-2.0 | 225 |
package mesosphere.marathon
package raml
import org.apache.mesos.{ Protos => Mesos }
import scala.collection.immutable.Map
/** RAML <-> internal-state conversions for environment variables and secret refs. */
trait EnvVarConversion {

  // state -> RAML: plain string values become EnvVarValue; secret refs keep
  // their secret id.
  implicit val envVarRamlWrites: Writes[Map[String, state.EnvVarValue], Map[String, EnvVarValueOrSecret]] =
    Writes {
      _.map {
        case (name, state.EnvVarString(v)) => name -> EnvVarValue(v)
        case (name, state.EnvVarSecretRef(v)) => name -> EnvVarSecretRef(v)
      }
    }

  // RAML -> state: the exact inverse of envVarRamlWrites.
  implicit val envVarReads: Reads[Map[String, EnvVarValueOrSecret], Map[String, state.EnvVarValue]] =
    Reads {
      _.map {
        case (name, EnvVarValue(v)) => name -> state.EnvVarString(v)
        case (name, EnvVarSecretRef(v)) => name -> state.EnvVarSecretRef(v)
      }
    }

  // Protobuf -> RAML: merges plain Mesos environment variables with
  // secret-typed EnvVarReferences into a single map. A secret reference with
  // the same name as a plain variable overwrites the plain entry.
  implicit val envProtoRamlWrites: Writes[(Seq[Mesos.Environment.Variable], Seq[Protos.EnvVarReference]), Map[String, EnvVarValueOrSecret]] =
    Writes {
      case (env, refs) =>
        val vanillaEnv: Map[String, EnvVarValueOrSecret] = env.map { item =>
          item.getName -> EnvVarValue(item.getValue)
        }(collection.breakOut)
        // Only references typed SECRET contribute secret entries.
        vanillaEnv ++ refs.withFilter(_.getType == Protos.EnvVarReference.Type.SECRET).map { secretRef =>
          secretRef.getName -> EnvVarSecretRef(secretRef.getSecretRef.getSecretId)
        }
    }
}
/** Importable instance of the conversions (alternative to mixing in the trait). */
object EnvVarConversion extends EnvVarConversion
| natemurthy/marathon | src/main/scala/mesosphere/marathon/raml/EnvVarConversion.scala | Scala | apache-2.0 | 1,334 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.joins
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.BindReferences.bindReferences
import org.apache.spark.sql.catalyst.expressions.codegen._
import org.apache.spark.sql.catalyst.expressions.codegen.Block._
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.execution._
import org.apache.spark.sql.execution.metric.{SQLMetric, SQLMetrics}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.BooleanType
import org.apache.spark.util.collection.BitSet
/**
* Performs a sort merge join of two child relations.
*/
case class SortMergeJoinExec(
leftKeys: Seq[Expression],
rightKeys: Seq[Expression],
joinType: JoinType,
condition: Option[Expression],
left: SparkPlan,
right: SparkPlan,
isSkewJoin: Boolean = false) extends ShuffledJoin {
override lazy val metrics = Map(
"numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"))
  // Output ordering of this join, derived from the children's sort order on
  // the join keys.
  override def outputOrdering: Seq[SortOrder] = joinType match {
    // For inner join, orders of both sides keys should be kept.
    case _: InnerLike =>
      val leftKeyOrdering = getKeyOrdering(leftKeys, left.outputOrdering)
      val rightKeyOrdering = getKeyOrdering(rightKeys, right.outputOrdering)
      leftKeyOrdering.zip(rightKeyOrdering).map { case (lKey, rKey) =>
        // Also add expressions from right side sort order
        val sameOrderExpressions = ExpressionSet(lKey.sameOrderExpressions ++ rKey.children)
        SortOrder(lKey.child, Ascending, sameOrderExpressions.toSeq)
      }
    // For left and right outer joins, the output is ordered by the streamed input's join keys.
    case LeftOuter => getKeyOrdering(leftKeys, left.outputOrdering)
    case RightOuter => getKeyOrdering(rightKeys, right.outputOrdering)
    // There are null rows in both streams, so there is no order.
    case FullOuter => Nil
    case LeftExistence(_) => getKeyOrdering(leftKeys, left.outputOrdering)
    case x =>
      throw new IllegalArgumentException(
        s"${getClass.getSimpleName} should not take $x as the JoinType")
  }
/**
* The utility method to get output ordering for left or right side of the join.
*
* Returns the required ordering for left or right child if childOutputOrdering does not
* satisfy the required ordering; otherwise, which means the child does not need to be sorted
* again, returns the required ordering for this child with extra "sameOrderExpressions" from
* the child's outputOrdering.
*/
  private def getKeyOrdering(keys: Seq[Expression], childOutputOrdering: Seq[SortOrder])
    : Seq[SortOrder] = {
    val requiredOrdering = requiredOrders(keys)
    if (SortOrder.orderingSatisfies(childOutputOrdering, requiredOrdering)) {
      // Child is already suitably sorted: reuse its ordering, carrying over
      // extra same-order expressions (excluding the key itself).
      keys.zip(childOutputOrdering).map { case (key, childOrder) =>
        val sameOrderExpressionsSet = ExpressionSet(childOrder.children) - key
        SortOrder(key, Ascending, sameOrderExpressionsSet.toSeq)
      }
    } else {
      // Child needs an explicit ascending sort on the join keys.
      requiredOrdering
    }
  }
  // Both children must be sorted ascending on their respective join keys.
  override def requiredChildOrdering: Seq[Seq[SortOrder]] =
    requiredOrders(leftKeys) :: requiredOrders(rightKeys) :: Nil
  // Ascending sort orders for the given join keys.
  private def requiredOrders(keys: Seq[Expression]): Seq[SortOrder] = {
    // This must be ascending in order to agree with the `keyOrdering` defined in `doExecute()`.
    keys.map(SortOrder(_, Ascending))
  }
  // Projection extracting the join keys from a left-side row.
  private def createLeftKeyGenerator(): Projection =
    UnsafeProjection.create(leftKeys, left.output)
  // Projection extracting the join keys from a right-side row.
  private def createRightKeyGenerator(): Projection =
    UnsafeProjection.create(rightKeys, right.output)
  // Configured number of buffered match rows after which the buffer spills.
  private def getSpillThreshold: Int = {
    conf.sortMergeJoinExecBufferSpillThreshold
  }
  // Flag to only buffer first matched row, to avoid buffering unnecessary rows.
  // True for left-existence joins (semi/anti/existence) without an extra
  // condition: a single matching buffered row is enough to decide the output.
  private val onlyBufferFirstMatchedRow = (joinType, condition) match {
    case (LeftExistence(_), None) => true
    case _ => false
  }
  // Maximum rows held in memory by the match buffer; a single row suffices
  // when only the first matched row is buffered.
  private def getInMemoryThreshold: Int = {
    if (onlyBufferFirstMatchedRow) {
      1
    } else {
      conf.sortMergeJoinExecBufferInMemoryThreshold
    }
  }
protected override def doExecute(): RDD[InternalRow] = {
val numOutputRows = longMetric("numOutputRows")
val spillThreshold = getSpillThreshold
val inMemoryThreshold = getInMemoryThreshold
left.execute().zipPartitions(right.execute()) { (leftIter, rightIter) =>
val boundCondition: (InternalRow) => Boolean = {
condition.map { cond =>
Predicate.create(cond, left.output ++ right.output).eval _
}.getOrElse {
(r: InternalRow) => true
}
}
// An ordering that can be used to compare keys from both sides.
val keyOrdering = RowOrdering.createNaturalAscendingOrdering(leftKeys.map(_.dataType))
val resultProj: InternalRow => InternalRow = UnsafeProjection.create(output, output)
joinType match {
case _: InnerLike =>
new RowIterator {
private[this] var currentLeftRow: InternalRow = _
private[this] var currentRightMatches: ExternalAppendOnlyUnsafeRowArray = _
private[this] var rightMatchesIterator: Iterator[UnsafeRow] = null
private[this] val smjScanner = new SortMergeJoinScanner(
createLeftKeyGenerator(),
createRightKeyGenerator(),
keyOrdering,
RowIterator.fromScala(leftIter),
RowIterator.fromScala(rightIter),
inMemoryThreshold,
spillThreshold,
cleanupResources
)
private[this] val joinRow = new JoinedRow
if (smjScanner.findNextInnerJoinRows()) {
currentRightMatches = smjScanner.getBufferedMatches
currentLeftRow = smjScanner.getStreamedRow
rightMatchesIterator = currentRightMatches.generateIterator()
}
override def advanceNext(): Boolean = {
while (rightMatchesIterator != null) {
if (!rightMatchesIterator.hasNext) {
if (smjScanner.findNextInnerJoinRows()) {
currentRightMatches = smjScanner.getBufferedMatches
currentLeftRow = smjScanner.getStreamedRow
rightMatchesIterator = currentRightMatches.generateIterator()
} else {
currentRightMatches = null
currentLeftRow = null
rightMatchesIterator = null
return false
}
}
joinRow(currentLeftRow, rightMatchesIterator.next())
if (boundCondition(joinRow)) {
numOutputRows += 1
return true
}
}
false
}
override def getRow: InternalRow = resultProj(joinRow)
}.toScala
case LeftOuter =>
val smjScanner = new SortMergeJoinScanner(
streamedKeyGenerator = createLeftKeyGenerator(),
bufferedKeyGenerator = createRightKeyGenerator(),
keyOrdering,
streamedIter = RowIterator.fromScala(leftIter),
bufferedIter = RowIterator.fromScala(rightIter),
inMemoryThreshold,
spillThreshold,
cleanupResources
)
val rightNullRow = new GenericInternalRow(right.output.length)
new LeftOuterIterator(
smjScanner, rightNullRow, boundCondition, resultProj, numOutputRows).toScala
case RightOuter =>
val smjScanner = new SortMergeJoinScanner(
streamedKeyGenerator = createRightKeyGenerator(),
bufferedKeyGenerator = createLeftKeyGenerator(),
keyOrdering,
streamedIter = RowIterator.fromScala(rightIter),
bufferedIter = RowIterator.fromScala(leftIter),
inMemoryThreshold,
spillThreshold,
cleanupResources
)
val leftNullRow = new GenericInternalRow(left.output.length)
new RightOuterIterator(
smjScanner, leftNullRow, boundCondition, resultProj, numOutputRows).toScala
case FullOuter =>
val leftNullRow = new GenericInternalRow(left.output.length)
val rightNullRow = new GenericInternalRow(right.output.length)
val smjScanner = new SortMergeFullOuterJoinScanner(
leftKeyGenerator = createLeftKeyGenerator(),
rightKeyGenerator = createRightKeyGenerator(),
keyOrdering,
leftIter = RowIterator.fromScala(leftIter),
rightIter = RowIterator.fromScala(rightIter),
boundCondition,
leftNullRow,
rightNullRow)
new FullOuterIterator(
smjScanner,
resultProj,
numOutputRows).toScala
case LeftSemi =>
new RowIterator {
private[this] var currentLeftRow: InternalRow = _
private[this] val smjScanner = new SortMergeJoinScanner(
createLeftKeyGenerator(),
createRightKeyGenerator(),
keyOrdering,
RowIterator.fromScala(leftIter),
RowIterator.fromScala(rightIter),
inMemoryThreshold,
spillThreshold,
cleanupResources,
onlyBufferFirstMatchedRow
)
private[this] val joinRow = new JoinedRow
override def advanceNext(): Boolean = {
while (smjScanner.findNextInnerJoinRows()) {
val currentRightMatches = smjScanner.getBufferedMatches
currentLeftRow = smjScanner.getStreamedRow
if (currentRightMatches != null && currentRightMatches.length > 0) {
val rightMatchesIterator = currentRightMatches.generateIterator()
while (rightMatchesIterator.hasNext) {
joinRow(currentLeftRow, rightMatchesIterator.next())
if (boundCondition(joinRow)) {
numOutputRows += 1
return true
}
}
}
}
false
}
override def getRow: InternalRow = currentLeftRow
}.toScala
case LeftAnti =>
new RowIterator {
private[this] var currentLeftRow: InternalRow = _
private[this] val smjScanner = new SortMergeJoinScanner(
createLeftKeyGenerator(),
createRightKeyGenerator(),
keyOrdering,
RowIterator.fromScala(leftIter),
RowIterator.fromScala(rightIter),
inMemoryThreshold,
spillThreshold,
cleanupResources,
onlyBufferFirstMatchedRow
)
private[this] val joinRow = new JoinedRow
override def advanceNext(): Boolean = {
while (smjScanner.findNextOuterJoinRows()) {
currentLeftRow = smjScanner.getStreamedRow
val currentRightMatches = smjScanner.getBufferedMatches
if (currentRightMatches == null || currentRightMatches.length == 0) {
numOutputRows += 1
return true
}
var found = false
val rightMatchesIterator = currentRightMatches.generateIterator()
while (!found && rightMatchesIterator.hasNext) {
joinRow(currentLeftRow, rightMatchesIterator.next())
if (boundCondition(joinRow)) {
found = true
}
}
if (!found) {
numOutputRows += 1
return true
}
}
false
}
override def getRow: InternalRow = currentLeftRow
}.toScala
case j: ExistenceJoin =>
new RowIterator {
private[this] var currentLeftRow: InternalRow = _
private[this] val result: InternalRow = new GenericInternalRow(Array[Any](null))
private[this] val smjScanner = new SortMergeJoinScanner(
createLeftKeyGenerator(),
createRightKeyGenerator(),
keyOrdering,
RowIterator.fromScala(leftIter),
RowIterator.fromScala(rightIter),
inMemoryThreshold,
spillThreshold,
cleanupResources,
onlyBufferFirstMatchedRow
)
private[this] val joinRow = new JoinedRow
override def advanceNext(): Boolean = {
while (smjScanner.findNextOuterJoinRows()) {
currentLeftRow = smjScanner.getStreamedRow
val currentRightMatches = smjScanner.getBufferedMatches
var found = false
if (currentRightMatches != null && currentRightMatches.length > 0) {
val rightMatchesIterator = currentRightMatches.generateIterator()
while (!found && rightMatchesIterator.hasNext) {
joinRow(currentLeftRow, rightMatchesIterator.next())
if (boundCondition(joinRow)) {
found = true
}
}
}
result.setBoolean(0, found)
numOutputRows += 1
return true
}
false
}
override def getRow: InternalRow = resultProj(joinRow(currentLeftRow, result))
}.toScala
case x =>
throw new IllegalArgumentException(
s"SortMergeJoin should not take $x as the JoinType")
}
}
}
  // Selects which side is streamed vs. buffered for codegen: the left side
  // streams for all supported join types except right outer, where the right
  // side streams.
  private lazy val ((streamedPlan, streamedKeys), (bufferedPlan, bufferedKeys)) = joinType match {
    case _: InnerLike | LeftOuter | FullOuter | LeftExistence(_) =>
      ((left, leftKeys), (right, rightKeys))
    case RightOuter => ((right, rightKeys), (left, leftKeys))
    case x =>
      throw new IllegalArgumentException(
        s"SortMergeJoin.streamedPlan/bufferedPlan should not take $x as the JoinType")
  }
private lazy val streamedOutput = streamedPlan.output
private lazy val bufferedOutput = bufferedPlan.output
  // Whole-stage codegen support; full outer and existence joins are gated
  // behind dedicated SQL configuration flags.
  override def supportCodegen: Boolean = joinType match {
    case FullOuter => conf.getConf(SQLConf.ENABLE_FULL_OUTER_SORT_MERGE_JOIN_CODEGEN)
    case _: ExistenceJoin => conf.getConf(SQLConf.ENABLE_EXISTENCE_SORT_MERGE_JOIN_CODEGEN)
    case _ => true
  }
  // Codegen inputs: index 0 is the streamed side, index 1 the buffered side.
  override def inputRDDs(): Seq[RDD[InternalRow]] = {
    streamedPlan.execute() :: bufferedPlan.execute() :: Nil
  }
  // Generates code that evaluates the given join keys against `row`
  // (bound to `input` attributes).
  private def createJoinKey(
      ctx: CodegenContext,
      row: String,
      keys: Seq[Expression],
      input: Seq[Attribute]): Seq[ExprCode] = {
    ctx.INPUT_ROW = row
    ctx.currentVars = null
    bindReferences(keys, input).map(_.genCode(ctx))
  }
  // Copies the evaluated key values into buffered mutable state so they
  // survive across generated-iterator calls.
  private def copyKeys(ctx: CodegenContext, vars: Seq[ExprCode]): Seq[ExprCode] = {
    vars.zipWithIndex.map { case (ev, i) =>
      ctx.addBufferedState(leftKeys(i).dataType, "value", ev.value)
    }
  }
  // Generates code that lexicographically compares the key tuples `a` and `b`,
  // leaving the result in the pre-declared `comp` variable.
  private def genComparison(ctx: CodegenContext, a: Seq[ExprCode], b: Seq[ExprCode]): String = {
    val comparisons = a.zip(b).zipWithIndex.map { case ((l, r), i) =>
      s"""
         |if (comp == 0) {
         |  comp = ${ctx.genComp(leftKeys(i).dataType, l.value, r.value)};
         |}
       """.stripMargin.trim
    }
    s"""
       |comp = 0;
       |${comparisons.mkString("\\n")}
     """.stripMargin
  }
/**
* Generate a function to scan both sides to find a match, returns:
* 1. the function name
* 2. the term for matched one row from streamed side
* 3. the term for buffered rows from buffered side
*/
private def genScanner(ctx: CodegenContext): (String, String, String) = {
// Create class member for next row from both sides.
// Inline mutable state since not many join operations in a task
val streamedRow = ctx.addMutableState("InternalRow", "streamedRow", forceInline = true)
val bufferedRow = ctx.addMutableState("InternalRow", "bufferedRow", forceInline = true)
// Create variables for join keys from both sides.
val streamedKeyVars = createJoinKey(ctx, streamedRow, streamedKeys, streamedOutput)
val streamedAnyNull = streamedKeyVars.map(_.isNull).mkString(" || ")
val bufferedKeyTmpVars = createJoinKey(ctx, bufferedRow, bufferedKeys, bufferedOutput)
val bufferedAnyNull = bufferedKeyTmpVars.map(_.isNull).mkString(" || ")
// Copy the buffered key as class members so they could be used in next function call.
val bufferedKeyVars = copyKeys(ctx, bufferedKeyTmpVars)
// A list to hold all matched rows from buffered side.
val clsName = classOf[ExternalAppendOnlyUnsafeRowArray].getName
val spillThreshold = getSpillThreshold
val inMemoryThreshold = getInMemoryThreshold
// Inline mutable state since not many join operations in a task
val matches = ctx.addMutableState(clsName, "matches",
v => s"$v = new $clsName($inMemoryThreshold, $spillThreshold);", forceInline = true)
// Copy the streamed keys as class members so they could be used in next function call.
val matchedKeyVars = copyKeys(ctx, streamedKeyVars)
// Handle the case when streamed rows has any NULL keys.
val handleStreamedAnyNull = joinType match {
case _: InnerLike | LeftSemi =>
// Skip streamed row.
s"""
|$streamedRow = null;
|continue;
""".stripMargin
case LeftOuter | RightOuter | LeftAnti | ExistenceJoin(_) =>
// Eagerly return streamed row. Only call `matches.clear()` when `matches.isEmpty()` is
// false, to reduce unnecessary computation.
s"""
|if (!$matches.isEmpty()) {
| $matches.clear();
|}
|return false;
""".stripMargin
case x =>
throw new IllegalArgumentException(
s"SortMergeJoin.genScanner should not take $x as the JoinType")
}
// Handle the case when streamed keys has no match with buffered side.
val handleStreamedWithoutMatch = joinType match {
case _: InnerLike | LeftSemi =>
// Skip streamed row.
s"$streamedRow = null;"
case LeftOuter | RightOuter | LeftAnti | ExistenceJoin(_) =>
// Eagerly return with streamed row.
"return false;"
case x =>
throw new IllegalArgumentException(
s"SortMergeJoin.genScanner should not take $x as the JoinType")
}
val addRowToBuffer =
if (onlyBufferFirstMatchedRow) {
s"""
|if ($matches.isEmpty()) {
| $matches.add((UnsafeRow) $bufferedRow);
|}
""".stripMargin
} else {
s"$matches.add((UnsafeRow) $bufferedRow);"
}
// Generate a function to scan both streamed and buffered sides to find a match.
// Return whether a match is found.
//
// `streamedIter`: the iterator for streamed side.
// `bufferedIter`: the iterator for buffered side.
// `streamedRow`: the current row from streamed side.
// When `streamedIter` is empty, `streamedRow` is null.
// `matches`: the rows from buffered side already matched with `streamedRow`.
// `matches` is buffered and reused for all `streamedRow`s having same join keys.
// If there is no match with `streamedRow`, `matches` is empty.
// `bufferedRow`: the current matched row from buffered side.
//
// The function has the following step:
// - Step 1: Find the next `streamedRow` with non-null join keys.
// For `streamedRow` with null join keys (`handleStreamedAnyNull`):
// 1. Inner and Left Semi join: skip the row. `matches` will be cleared later when
// hitting the next `streamedRow` with non-null join
// keys.
// 2. Left/Right Outer, Left Anti and Existence join: clear the previous `matches`
// if needed, keep the row, and
// return false.
//
// - Step 2: Find the `matches` from buffered side having same join keys with `streamedRow`.
// Clear `matches` if we hit a new `streamedRow`, as we need to find new matches.
// Use `bufferedRow` to iterate buffered side to put all matched rows into
// `matches` (`addRowToBuffer`). Return true when getting all matched rows.
// For `streamedRow` without `matches` (`handleStreamedWithoutMatch`):
// 1. Inner and Left Semi join: skip the row.
// 2. Left/Right Outer, Left Anti and Existence join: keep the row and return false
// (with `matches` being empty).
val findNextJoinRowsFuncName = ctx.freshName("findNextJoinRows")
ctx.addNewFunction(findNextJoinRowsFuncName,
s"""
|private boolean $findNextJoinRowsFuncName(
| scala.collection.Iterator streamedIter,
| scala.collection.Iterator bufferedIter) {
| $streamedRow = null;
| int comp = 0;
| while ($streamedRow == null) {
| if (!streamedIter.hasNext()) return false;
| $streamedRow = (InternalRow) streamedIter.next();
| ${streamedKeyVars.map(_.code).mkString("\\n")}
| if ($streamedAnyNull) {
| $handleStreamedAnyNull
| }
| if (!$matches.isEmpty()) {
| ${genComparison(ctx, streamedKeyVars, matchedKeyVars)}
| if (comp == 0) {
| return true;
| }
| $matches.clear();
| }
|
| do {
| if ($bufferedRow == null) {
| if (!bufferedIter.hasNext()) {
| ${matchedKeyVars.map(_.code).mkString("\\n")}
| return !$matches.isEmpty();
| }
| $bufferedRow = (InternalRow) bufferedIter.next();
| ${bufferedKeyTmpVars.map(_.code).mkString("\\n")}
| if ($bufferedAnyNull) {
| $bufferedRow = null;
| continue;
| }
| ${bufferedKeyVars.map(_.code).mkString("\\n")}
| }
| ${genComparison(ctx, streamedKeyVars, bufferedKeyVars)}
| if (comp > 0) {
| $bufferedRow = null;
| } else if (comp < 0) {
| if (!$matches.isEmpty()) {
| ${matchedKeyVars.map(_.code).mkString("\\n")}
| return true;
| } else {
| $handleStreamedWithoutMatch
| }
| } else {
| $addRowToBuffer
| $bufferedRow = null;
| }
| } while ($streamedRow != null);
| }
| return false; // unreachable
|}
""".stripMargin, inlineToOuterClass = true)
(findNextJoinRowsFuncName, streamedRow, matches)
}
/**
* Creates variables and declarations for streamed part of result row.
*
* In order to defer the access after condition and also only access once in the loop,
* the variables should be declared separately from accessing the columns, we can't use the
* codegen of BoundReference here.
*/
private def createStreamedVars(
ctx: CodegenContext,
streamedRow: String): (Seq[ExprCode], Seq[String]) = {
ctx.INPUT_ROW = streamedRow
streamedPlan.output.zipWithIndex.map { case (a, i) =>
val value = ctx.freshName("value")
val valueCode = CodeGenerator.getValue(streamedRow, a.dataType, i.toString)
val javaType = CodeGenerator.javaType(a.dataType)
val defaultValue = CodeGenerator.defaultValue(a.dataType)
if (a.nullable) {
val isNull = ctx.freshName("isNull")
val code =
code"""
|$isNull = $streamedRow.isNullAt($i);
|$value = $isNull ? $defaultValue : ($valueCode);
""".stripMargin
val streamedVarsDecl =
s"""
|boolean $isNull = false;
|$javaType $value = $defaultValue;
""".stripMargin
(ExprCode(code, JavaCode.isNullVariable(isNull), JavaCode.variable(value, a.dataType)),
streamedVarsDecl)
} else {
val code = code"$value = $valueCode;"
val streamedVarsDecl = s"""$javaType $value = $defaultValue;"""
(ExprCode(code, FalseLiteral, JavaCode.variable(value, a.dataType)), streamedVarsDecl)
}
}.unzip
}
/**
* Splits variables based on whether it's used by condition or not, returns the code to create
* these variables before the condition and after the condition.
*
* Only a few columns are used by condition, then we can skip the accessing of those columns
* that are not used by condition also filtered out by condition.
*/
  private def splitVarsByCondition(
      attributes: Seq[Attribute],
      variables: Seq[ExprCode]): (String, String) = {
    if (condition.isDefined) {
      val condRefs = condition.get.references
      // Variables referenced by the join condition must be materialized before
      // evaluating it; the rest can be deferred until after the filter passes.
      val (used, notUsed) = attributes.zip(variables).partition{ case (a, ev) =>
        condRefs.contains(a)
      }
      val beforeCond = evaluateVariables(used.map(_._2))
      val afterCond = evaluateVariables(notUsed.map(_._2))
      (beforeCond, afterCond)
    } else {
      // No condition: evaluate everything up front.
      (evaluateVariables(variables), "")
    }
  }
override def needCopyResult: Boolean = true
override def doProduce(ctx: CodegenContext): String = {
// Specialize `doProduce` code for full outer join, because full outer join needs to
// buffer both sides of join.
if (joinType == FullOuter) {
return codegenFullOuter(ctx)
}
// Inline mutable state since not many join operations in a task
val streamedInput = ctx.addMutableState("scala.collection.Iterator", "streamedInput",
v => s"$v = inputs[0];", forceInline = true)
val bufferedInput = ctx.addMutableState("scala.collection.Iterator", "bufferedInput",
v => s"$v = inputs[1];", forceInline = true)
val (findNextJoinRowsFuncName, streamedRow, matches) = genScanner(ctx)
// Create variables for row from both sides.
val (streamedVars, streamedVarDecl) = createStreamedVars(ctx, streamedRow)
val bufferedRow = ctx.freshName("bufferedRow")
val setDefaultValue = joinType == LeftOuter || joinType == RightOuter
val bufferedVars = genOneSideJoinVars(ctx, bufferedRow, bufferedPlan, setDefaultValue)
// Create variable name for Existence join.
val existsVar = joinType match {
case ExistenceJoin(_) => Some(ctx.freshName("exists"))
case _ => None
}
val iterator = ctx.freshName("iterator")
val numOutput = metricTerm(ctx, "numOutputRows")
val resultVars = joinType match {
case _: InnerLike | LeftOuter =>
streamedVars ++ bufferedVars
case RightOuter =>
bufferedVars ++ streamedVars
case LeftSemi | LeftAnti =>
streamedVars
case ExistenceJoin(_) =>
streamedVars ++ Seq(ExprCode.forNonNullValue(
JavaCode.variable(existsVar.get, BooleanType)))
case x =>
throw new IllegalArgumentException(
s"SortMergeJoin.doProduce should not take $x as the JoinType")
}
val (streamedBeforeLoop, condCheck, loadStreamed) = if (condition.isDefined) {
// Split the code of creating variables based on whether it's used by condition or not.
val loaded = ctx.freshName("loaded")
val (streamedBefore, streamedAfter) = splitVarsByCondition(streamedOutput, streamedVars)
val (bufferedBefore, bufferedAfter) = splitVarsByCondition(bufferedOutput, bufferedVars)
// Generate code for condition
ctx.currentVars = streamedVars ++ bufferedVars
val cond = BindReferences.bindReference(
condition.get, streamedPlan.output ++ bufferedPlan.output).genCode(ctx)
// Evaluate the columns those used by condition before loop
val before = joinType match {
case LeftAnti =>
// No need to initialize `loaded` variable for Left Anti join.
streamedBefore.trim
case _ =>
s"""
|boolean $loaded = false;
|$streamedBefore
""".stripMargin
}
val loadStreamedAfterCondition = joinType match {
case LeftAnti =>
// No need to evaluate columns not used by condition from streamed side, as for Left Anti
// join, streamed row with match is not outputted.
""
case _ =>
s"""
|if (!$loaded) {
| $loaded = true;
| $streamedAfter
|}
""".stripMargin
}
val loadBufferedAfterCondition = joinType match {
case LeftExistence(_) =>
// No need to evaluate columns not used by condition from buffered side
""
case _ => bufferedAfter
}
val checking =
s"""
|$bufferedBefore
|if ($bufferedRow != null) {
| ${cond.code}
| if (${cond.isNull} || !${cond.value}) {
| continue;
| }
|}
|$loadStreamedAfterCondition
|$loadBufferedAfterCondition
""".stripMargin
(before, checking.trim, streamedAfter.trim)
} else {
(evaluateVariables(streamedVars), "", "")
}
val beforeLoop =
s"""
|${streamedVarDecl.mkString("\\n")}
|${streamedBeforeLoop.trim}
|scala.collection.Iterator<UnsafeRow> $iterator = $matches.generateIterator();
""".stripMargin
val outputRow =
s"""
|$numOutput.add(1);
|${consume(ctx, resultVars)}
""".stripMargin
val findNextJoinRows = s"$findNextJoinRowsFuncName($streamedInput, $bufferedInput)"
val thisPlan = ctx.addReferenceObj("plan", this)
val eagerCleanup = s"$thisPlan.cleanupResources();"
joinType match {
case _: InnerLike =>
codegenInner(findNextJoinRows, beforeLoop, iterator, bufferedRow, condCheck, outputRow,
eagerCleanup)
case LeftOuter | RightOuter =>
codegenOuter(streamedInput, findNextJoinRows, beforeLoop, iterator, bufferedRow, condCheck,
ctx.freshName("hasOutputRow"), outputRow, eagerCleanup)
case LeftSemi =>
codegenSemi(findNextJoinRows, beforeLoop, iterator, bufferedRow, condCheck,
ctx.freshName("hasOutputRow"), outputRow, eagerCleanup)
case LeftAnti =>
codegenAnti(streamedInput, findNextJoinRows, beforeLoop, iterator, bufferedRow, condCheck,
loadStreamed, ctx.freshName("hasMatchedRow"), outputRow, eagerCleanup)
case ExistenceJoin(_) =>
codegenExistence(streamedInput, findNextJoinRows, beforeLoop, iterator, bufferedRow,
condCheck, loadStreamed, existsVar.get, outputRow, eagerCleanup)
case x =>
throw new IllegalArgumentException(
s"SortMergeJoin.doProduce should not take $x as the JoinType")
}
}
/**
* Generates the code for Inner join.
*/
private def codegenInner(
findNextJoinRows: String,
beforeLoop: String,
matchIterator: String,
bufferedRow: String,
conditionCheck: String,
outputRow: String,
eagerCleanup: String): String = {
s"""
|while ($findNextJoinRows) {
| $beforeLoop
| while ($matchIterator.hasNext()) {
| InternalRow $bufferedRow = (InternalRow) $matchIterator.next();
| $conditionCheck
| $outputRow
| }
| if (shouldStop()) return;
|}
|$eagerCleanup
""".stripMargin
}
/**
* Generates the code for Left or Right Outer join.
*/
private def codegenOuter(
streamedInput: String,
findNextJoinRows: String,
beforeLoop: String,
matchIterator: String,
bufferedRow: String,
conditionCheck: String,
hasOutputRow: String,
outputRow: String,
eagerCleanup: String): String = {
s"""
|while ($streamedInput.hasNext()) {
| $findNextJoinRows;
| $beforeLoop
| boolean $hasOutputRow = false;
|
| // the last iteration of this loop is to emit an empty row if there is no matched rows.
| while ($matchIterator.hasNext() || !$hasOutputRow) {
| InternalRow $bufferedRow = $matchIterator.hasNext() ?
| (InternalRow) $matchIterator.next() : null;
| $conditionCheck
| $hasOutputRow = true;
| $outputRow
| }
| if (shouldStop()) return;
|}
|$eagerCleanup
""".stripMargin
}
/**
* Generates the code for Left Semi join.
*/
private def codegenSemi(
findNextJoinRows: String,
beforeLoop: String,
matchIterator: String,
bufferedRow: String,
conditionCheck: String,
hasOutputRow: String,
outputRow: String,
eagerCleanup: String): String = {
s"""
|while ($findNextJoinRows) {
| $beforeLoop
| boolean $hasOutputRow = false;
|
| while (!$hasOutputRow && $matchIterator.hasNext()) {
| InternalRow $bufferedRow = (InternalRow) $matchIterator.next();
| $conditionCheck
| $hasOutputRow = true;
| $outputRow
| }
| if (shouldStop()) return;
|}
|$eagerCleanup
""".stripMargin
}
/**
* Generates the code for Left Anti join.
*/
private def codegenAnti(
streamedInput: String,
findNextJoinRows: String,
beforeLoop: String,
matchIterator: String,
bufferedRow: String,
conditionCheck: String,
loadStreamed: String,
hasMatchedRow: String,
outputRow: String,
eagerCleanup: String): String = {
s"""
|while ($streamedInput.hasNext()) {
| $findNextJoinRows;
| $beforeLoop
| boolean $hasMatchedRow = false;
|
| while (!$hasMatchedRow && $matchIterator.hasNext()) {
| InternalRow $bufferedRow = (InternalRow) $matchIterator.next();
| $conditionCheck
| $hasMatchedRow = true;
| }
|
| if (!$hasMatchedRow) {
| // load all values of streamed row, because the values not in join condition are not
| // loaded yet.
| $loadStreamed
| $outputRow
| }
| if (shouldStop()) return;
|}
|$eagerCleanup
""".stripMargin
}
/**
* Generates the code for Existence join.
*/
private def codegenExistence(
streamedInput: String,
findNextJoinRows: String,
beforeLoop: String,
matchIterator: String,
bufferedRow: String,
conditionCheck: String,
loadStreamed: String,
exists: String,
outputRow: String,
eagerCleanup: String): String = {
s"""
|while ($streamedInput.hasNext()) {
| $findNextJoinRows;
| $beforeLoop
| boolean $exists = false;
|
| while (!$exists && $matchIterator.hasNext()) {
| InternalRow $bufferedRow = (InternalRow) $matchIterator.next();
| $conditionCheck
| $exists = true;
| }
|
| if (!$exists) {
| // load all values of streamed row, because the values not in join condition are not
| // loaded yet.
| $loadStreamed
| }
| $outputRow
|
| if (shouldStop()) return;
|}
|$eagerCleanup
""".stripMargin
}
/**
* Generates the code for Full Outer join.
*/
private def codegenFullOuter(ctx: CodegenContext): String = {
// Inline mutable state since not many join operations in a task.
// Create class member for input iterator from both sides.
val leftInput = ctx.addMutableState("scala.collection.Iterator", "leftInput",
v => s"$v = inputs[0];", forceInline = true)
val rightInput = ctx.addMutableState("scala.collection.Iterator", "rightInput",
v => s"$v = inputs[1];", forceInline = true)
// Create class member for next input row from both sides.
val leftInputRow = ctx.addMutableState("InternalRow", "leftInputRow", forceInline = true)
val rightInputRow = ctx.addMutableState("InternalRow", "rightInputRow", forceInline = true)
// Create variables for join keys from both sides.
val leftKeyVars = createJoinKey(ctx, leftInputRow, leftKeys, left.output)
val leftAnyNull = leftKeyVars.map(_.isNull).mkString(" || ")
val rightKeyVars = createJoinKey(ctx, rightInputRow, rightKeys, right.output)
val rightAnyNull = rightKeyVars.map(_.isNull).mkString(" || ")
val matchedKeyVars = copyKeys(ctx, leftKeyVars)
val leftMatchedKeyVars = createJoinKey(ctx, leftInputRow, leftKeys, left.output)
val rightMatchedKeyVars = createJoinKey(ctx, rightInputRow, rightKeys, right.output)
// Create class member for next output row from both sides.
val leftOutputRow = ctx.addMutableState("InternalRow", "leftOutputRow", forceInline = true)
val rightOutputRow = ctx.addMutableState("InternalRow", "rightOutputRow", forceInline = true)
// Create class member for buffers of rows with same join keys from both sides.
val bufferClsName = "java.util.ArrayList<InternalRow>"
val leftBuffer = ctx.addMutableState(bufferClsName, "leftBuffer",
v => s"$v = new $bufferClsName();", forceInline = true)
val rightBuffer = ctx.addMutableState(bufferClsName, "rightBuffer",
v => s"$v = new $bufferClsName();", forceInline = true)
val matchedClsName = classOf[BitSet].getName
val leftMatched = ctx.addMutableState(matchedClsName, "leftMatched",
v => s"$v = new $matchedClsName(1);", forceInline = true)
val rightMatched = ctx.addMutableState(matchedClsName, "rightMatched",
v => s"$v = new $matchedClsName(1);", forceInline = true)
val leftIndex = ctx.freshName("leftIndex")
val rightIndex = ctx.freshName("rightIndex")
// Generate code for join condition
val leftResultVars = genOneSideJoinVars(
ctx, leftOutputRow, left, setDefaultValue = true)
val rightResultVars = genOneSideJoinVars(
ctx, rightOutputRow, right, setDefaultValue = true)
val resultVars = leftResultVars ++ rightResultVars
val (_, conditionCheck, _) =
getJoinCondition(ctx, leftResultVars, left, right, Some(rightOutputRow))
// Generate code for result output in separate function, as we need to output result from
// multiple places in join code.
val consumeFullOuterJoinRow = ctx.freshName("consumeFullOuterJoinRow")
ctx.addNewFunction(consumeFullOuterJoinRow,
s"""
|private void $consumeFullOuterJoinRow() throws java.io.IOException {
| ${metricTerm(ctx, "numOutputRows")}.add(1);
| ${consume(ctx, resultVars)}
|}
""".stripMargin)
// Handle the case when input row has no match.
val outputLeftNoMatch =
s"""
|$leftOutputRow = $leftInputRow;
|$rightOutputRow = null;
|$leftInputRow = null;
|$consumeFullOuterJoinRow();
""".stripMargin
val outputRightNoMatch =
s"""
|$rightOutputRow = $rightInputRow;
|$leftOutputRow = null;
|$rightInputRow = null;
|$consumeFullOuterJoinRow();
""".stripMargin
// Generate a function to scan both sides to find rows with matched join keys.
// The matched rows from both sides are copied in buffers separately. This function assumes
// either non-empty `leftIter` and `rightIter`, or non-null `leftInputRow` and `rightInputRow`.
//
// The function has the following steps:
// - Step 1: Find the next `leftInputRow` and `rightInputRow` with non-null join keys.
// Output row with null join keys (`outputLeftNoMatch` and `outputRightNoMatch`).
//
// - Step 2: Compare and find next same join keys from between `leftInputRow` and
// `rightInputRow`.
// Output row with smaller join keys (`outputLeftNoMatch` and `outputRightNoMatch`).
//
// - Step 3: Buffer rows with same join keys from both sides into `leftBuffer` and
// `rightBuffer`. Reset bit sets for both buffers accordingly (`leftMatched` and
// `rightMatched`).
val findNextJoinRowsFuncName = ctx.freshName("findNextJoinRows")
ctx.addNewFunction(findNextJoinRowsFuncName,
s"""
|private void $findNextJoinRowsFuncName(
| scala.collection.Iterator leftIter,
| scala.collection.Iterator rightIter) throws java.io.IOException {
| int comp = 0;
| $leftBuffer.clear();
| $rightBuffer.clear();
|
| if ($leftInputRow == null) {
| $leftInputRow = (InternalRow) leftIter.next();
| }
| if ($rightInputRow == null) {
| $rightInputRow = (InternalRow) rightIter.next();
| }
|
| ${leftKeyVars.map(_.code).mkString("\\n")}
| if ($leftAnyNull) {
| // The left row join key is null, join it with null row
| $outputLeftNoMatch
| return;
| }
|
| ${rightKeyVars.map(_.code).mkString("\\n")}
| if ($rightAnyNull) {
| // The right row join key is null, join it with null row
| $outputRightNoMatch
| return;
| }
|
| ${genComparison(ctx, leftKeyVars, rightKeyVars)}
| if (comp < 0) {
| // The left row join key is smaller, join it with null row
| $outputLeftNoMatch
| return;
| } else if (comp > 0) {
| // The right row join key is smaller, join it with null row
| $outputRightNoMatch
| return;
| }
|
| ${matchedKeyVars.map(_.code).mkString("\\n")}
| $leftBuffer.add($leftInputRow.copy());
| $rightBuffer.add($rightInputRow.copy());
| $leftInputRow = null;
| $rightInputRow = null;
|
| // Buffer rows from both sides with same join key
| while (leftIter.hasNext()) {
| $leftInputRow = (InternalRow) leftIter.next();
| ${leftMatchedKeyVars.map(_.code).mkString("\\n")}
| ${genComparison(ctx, leftMatchedKeyVars, matchedKeyVars)}
| if (comp == 0) {
|
| $leftBuffer.add($leftInputRow.copy());
| $leftInputRow = null;
| } else {
| break;
| }
| }
| while (rightIter.hasNext()) {
| $rightInputRow = (InternalRow) rightIter.next();
| ${rightMatchedKeyVars.map(_.code).mkString("\\n")}
| ${genComparison(ctx, rightMatchedKeyVars, matchedKeyVars)}
| if (comp == 0) {
| $rightBuffer.add($rightInputRow.copy());
| $rightInputRow = null;
| } else {
| break;
| }
| }
|
| // Reset bit sets of buffers accordingly
| if ($leftBuffer.size() <= $leftMatched.capacity()) {
| $leftMatched.clearUntil($leftBuffer.size());
| } else {
| $leftMatched = new $matchedClsName($leftBuffer.size());
| }
| if ($rightBuffer.size() <= $rightMatched.capacity()) {
| $rightMatched.clearUntil($rightBuffer.size());
| } else {
| $rightMatched = new $matchedClsName($rightBuffer.size());
| }
|}
""".stripMargin)
// Scan the left and right buffers to find all matched rows.
val matchRowsInBuffer =
s"""
|int $leftIndex;
|int $rightIndex;
|
|for ($leftIndex = 0; $leftIndex < $leftBuffer.size(); $leftIndex++) {
| $leftOutputRow = (InternalRow) $leftBuffer.get($leftIndex);
| for ($rightIndex = 0; $rightIndex < $rightBuffer.size(); $rightIndex++) {
| $rightOutputRow = (InternalRow) $rightBuffer.get($rightIndex);
| $conditionCheck {
| $consumeFullOuterJoinRow();
| $leftMatched.set($leftIndex);
| $rightMatched.set($rightIndex);
| }
| }
|
| if (!$leftMatched.get($leftIndex)) {
|
| $rightOutputRow = null;
| $consumeFullOuterJoinRow();
| }
|}
|
|$leftOutputRow = null;
|for ($rightIndex = 0; $rightIndex < $rightBuffer.size(); $rightIndex++) {
| if (!$rightMatched.get($rightIndex)) {
| // The right row has never matched any left row, join it with null row
| $rightOutputRow = (InternalRow) $rightBuffer.get($rightIndex);
| $consumeFullOuterJoinRow();
| }
|}
""".stripMargin
s"""
|while (($leftInputRow != null || $leftInput.hasNext()) &&
| ($rightInputRow != null || $rightInput.hasNext())) {
| $findNextJoinRowsFuncName($leftInput, $rightInput);
| $matchRowsInBuffer
| if (shouldStop()) return;
|}
|
|// The right iterator has no more rows, join left row with null
|while ($leftInputRow != null || $leftInput.hasNext()) {
| if ($leftInputRow == null) {
| $leftInputRow = (InternalRow) $leftInput.next();
| }
| $outputLeftNoMatch
| if (shouldStop()) return;
|}
|
|// The left iterator has no more rows, join right row with null
|while ($rightInputRow != null || $rightInput.hasNext()) {
| if ($rightInputRow == null) {
| $rightInputRow = (InternalRow) $rightInput.next();
| }
| $outputRightNoMatch
| if (shouldStop()) return;
|}
""".stripMargin
}
override protected def withNewChildrenInternal(
newLeft: SparkPlan, newRight: SparkPlan): SortMergeJoinExec =
copy(left = newLeft, right = newRight)
}
/**
* Helper class that is used to implement [[SortMergeJoinExec]].
*
* To perform an inner (outer) join, users of this class call [[findNextInnerJoinRows()]]
* ([[findNextOuterJoinRows()]]), which returns `true` if a result has been produced and `false`
* otherwise. If a result has been produced, then the caller may call [[getStreamedRow]] to return
* the matching row from the streamed input and may call [[getBufferedMatches]] to return the
* sequence of matching rows from the buffered input (in the case of an outer join, this will return
* an empty sequence if there are no matches from the buffered input). For efficiency, both of these
* methods return mutable objects which are re-used across calls to the `findNext*JoinRows()`
* methods.
*
* @param streamedKeyGenerator a projection that produces join keys from the streamed input.
* @param bufferedKeyGenerator a projection that produces join keys from the buffered input.
* @param keyOrdering an ordering which can be used to compare join keys.
* @param streamedIter an input whose rows will be streamed.
* @param bufferedIter an input whose rows will be buffered to construct sequences of rows that
* have the same join key.
* @param inMemoryThreshold Threshold for number of rows guaranteed to be held in memory by
* internal buffer
* @param spillThreshold Threshold for number of rows to be spilled by internal buffer
* @param eagerCleanupResources the eager cleanup function to be invoked when no join row found
* @param onlyBufferFirstMatch [[bufferMatchingRows]] should buffer only the first matching row
*/
private[joins] class SortMergeJoinScanner(
streamedKeyGenerator: Projection,
bufferedKeyGenerator: Projection,
keyOrdering: Ordering[InternalRow],
streamedIter: RowIterator,
bufferedIter: RowIterator,
inMemoryThreshold: Int,
spillThreshold: Int,
eagerCleanupResources: () => Unit,
onlyBufferFirstMatch: Boolean = false) {
private[this] var streamedRow: InternalRow = _
private[this] var streamedRowKey: InternalRow = _
private[this] var bufferedRow: InternalRow = _
// Note: this is guaranteed to never have any null columns:
private[this] var bufferedRowKey: InternalRow = _
/**
* The join key for the rows buffered in `bufferedMatches`, or null if `bufferedMatches` is empty
*/
private[this] var matchJoinKey: InternalRow = _
/** Buffered rows from the buffered side of the join. This is empty if there are no matches. */
private[this] val bufferedMatches: ExternalAppendOnlyUnsafeRowArray =
new ExternalAppendOnlyUnsafeRowArray(inMemoryThreshold, spillThreshold)
// Initialization (note: do _not_ want to advance streamed here).
advancedBufferedToRowWithNullFreeJoinKey()
// --- Public methods ---------------------------------------------------------------------------
def getStreamedRow: InternalRow = streamedRow
def getBufferedMatches: ExternalAppendOnlyUnsafeRowArray = bufferedMatches
/**
* Advances both input iterators, stopping when we have found rows with matching join keys. If no
* join rows found, try to do the eager resources cleanup.
* @return true if matching rows have been found and false otherwise. If this returns true, then
* [[getStreamedRow]] and [[getBufferedMatches]] can be called to construct the join
* results.
*/
final def findNextInnerJoinRows(): Boolean = {
while (advancedStreamed() && streamedRowKey.anyNull) {
// Advance the streamed side of the join until we find the next row whose join key contains
// no nulls or we hit the end of the streamed iterator.
}
val found = if (streamedRow == null) {
// We have consumed the entire streamed iterator, so there can be no more matches.
matchJoinKey = null
bufferedMatches.clear()
false
} else if (matchJoinKey != null && keyOrdering.compare(streamedRowKey, matchJoinKey) == 0) {
// The new streamed row has the same join key as the previous row, so return the same matches.
true
} else if (bufferedRow == null) {
// The streamed row's join key does not match the current batch of buffered rows and there are
// no more rows to read from the buffered iterator, so there can be no more matches.
matchJoinKey = null
bufferedMatches.clear()
false
} else {
// Advance both the streamed and buffered iterators to find the next pair of matching rows.
var comp = keyOrdering.compare(streamedRowKey, bufferedRowKey)
do {
if (streamedRowKey.anyNull) {
advancedStreamed()
} else {
assert(!bufferedRowKey.anyNull)
comp = keyOrdering.compare(streamedRowKey, bufferedRowKey)
if (comp > 0) advancedBufferedToRowWithNullFreeJoinKey()
else if (comp < 0) advancedStreamed()
}
} while (streamedRow != null && bufferedRow != null && comp != 0)
if (streamedRow == null || bufferedRow == null) {
// We have either hit the end of one of the iterators, so there can be no more matches.
matchJoinKey = null
bufferedMatches.clear()
false
} else {
// The streamed row's join key matches the current buffered row's join, so walk through the
// buffered iterator to buffer the rest of the matching rows.
assert(comp == 0)
bufferMatchingRows()
true
}
}
if (!found) eagerCleanupResources()
found
}
/**
* Advances the streamed input iterator and buffers all rows from the buffered input that
* have matching keys. If no join rows found, try to do the eager resources cleanup.
* @return true if the streamed iterator returned a row, false otherwise. If this returns true,
* then [[getStreamedRow]] and [[getBufferedMatches]] can be called to produce the outer
* join results.
*/
final def findNextOuterJoinRows(): Boolean = {
val found = if (!advancedStreamed()) {
// We have consumed the entire streamed iterator, so there can be no more matches.
matchJoinKey = null
bufferedMatches.clear()
false
} else {
if (matchJoinKey != null && keyOrdering.compare(streamedRowKey, matchJoinKey) == 0) {
// Matches the current group, so do nothing.
} else {
// The streamed row does not match the current group.
matchJoinKey = null
bufferedMatches.clear()
if (bufferedRow != null && !streamedRowKey.anyNull) {
// The buffered iterator could still contain matching rows, so we'll need to walk through
// it until we either find matches or pass where they would be found.
var comp = 1
do {
comp = keyOrdering.compare(streamedRowKey, bufferedRowKey)
} while (comp > 0 && advancedBufferedToRowWithNullFreeJoinKey())
if (comp == 0) {
// We have found matches, so buffer them (this updates matchJoinKey)
bufferMatchingRows()
} else {
// We have overshot the position where the row would be found, hence no matches.
}
}
}
// If there is a streamed input then we always return true
true
}
if (!found) eagerCleanupResources()
found
}
// --- Private methods --------------------------------------------------------------------------
/**
* Advance the streamed iterator and compute the new row's join key.
* @return true if the streamed iterator returned a row and false otherwise.
*/
private def advancedStreamed(): Boolean = {
if (streamedIter.advanceNext()) {
streamedRow = streamedIter.getRow
streamedRowKey = streamedKeyGenerator(streamedRow)
true
} else {
streamedRow = null
streamedRowKey = null
false
}
}
/**
* Advance the buffered iterator until we find a row with join key that does not contain nulls.
* @return true if the buffered iterator returned a row and false otherwise.
*/
private def advancedBufferedToRowWithNullFreeJoinKey(): Boolean = {
var foundRow: Boolean = false
while (!foundRow && bufferedIter.advanceNext()) {
bufferedRow = bufferedIter.getRow
bufferedRowKey = bufferedKeyGenerator(bufferedRow)
foundRow = !bufferedRowKey.anyNull
}
if (!foundRow) {
bufferedRow = null
bufferedRowKey = null
false
} else {
true
}
}
/**
* Called when the streamed and buffered join keys match in order to buffer the matching rows.
*/
private def bufferMatchingRows(): Unit = {
assert(streamedRowKey != null)
assert(!streamedRowKey.anyNull)
assert(bufferedRowKey != null)
assert(!bufferedRowKey.anyNull)
assert(keyOrdering.compare(streamedRowKey, bufferedRowKey) == 0)
// This join key may have been produced by a mutable projection, so we need to make a copy:
matchJoinKey = streamedRowKey.copy()
bufferedMatches.clear()
do {
if (!onlyBufferFirstMatch || bufferedMatches.isEmpty) {
bufferedMatches.add(bufferedRow.asInstanceOf[UnsafeRow])
}
advancedBufferedToRowWithNullFreeJoinKey()
} while (bufferedRow != null && keyOrdering.compare(streamedRowKey, bufferedRowKey) == 0)
}
}
/**
* An iterator for outputting rows in left outer join.
*/
private class LeftOuterIterator(
smjScanner: SortMergeJoinScanner,
rightNullRow: InternalRow,
boundCondition: InternalRow => Boolean,
resultProj: InternalRow => InternalRow,
numOutputRows: SQLMetric)
extends OneSideOuterIterator(
smjScanner, rightNullRow, boundCondition, resultProj, numOutputRows) {
protected override def setStreamSideOutput(row: InternalRow): Unit = joinedRow.withLeft(row)
protected override def setBufferedSideOutput(row: InternalRow): Unit = joinedRow.withRight(row)
}
/**
* An iterator for outputting rows in right outer join.
*/
private class RightOuterIterator(
smjScanner: SortMergeJoinScanner,
leftNullRow: InternalRow,
boundCondition: InternalRow => Boolean,
resultProj: InternalRow => InternalRow,
numOutputRows: SQLMetric)
extends OneSideOuterIterator(smjScanner, leftNullRow, boundCondition, resultProj, numOutputRows) {
protected override def setStreamSideOutput(row: InternalRow): Unit = joinedRow.withRight(row)
protected override def setBufferedSideOutput(row: InternalRow): Unit = joinedRow.withLeft(row)
}
/**
* An abstract iterator for sharing code between [[LeftOuterIterator]] and [[RightOuterIterator]].
*
* Each [[OneSideOuterIterator]] has a streamed side and a buffered side. Each row on the
* streamed side will output 0 or many rows, one for each matching row on the buffered side.
* If there are no matches, then the buffered side of the joined output will be a null row.
*
* In left outer join, the left is the streamed side and the right is the buffered side.
* In right outer join, the right is the streamed side and the left is the buffered side.
*
* @param smjScanner a scanner that streams rows and buffers any matching rows
* @param bufferedSideNullRow the default row to return when a streamed row has no matches
* @param boundCondition an additional filter condition for buffered rows
* @param resultProj how the output should be projected
* @param numOutputRows an accumulator metric for the number of rows output
*/
private abstract class OneSideOuterIterator(
smjScanner: SortMergeJoinScanner,
bufferedSideNullRow: InternalRow,
boundCondition: InternalRow => Boolean,
resultProj: InternalRow => InternalRow,
numOutputRows: SQLMetric) extends RowIterator {
// A row to store the joined result, reused many times
protected[this] val joinedRow: JoinedRow = new JoinedRow()
// Index of the buffered rows, reset to 0 whenever we advance to a new streamed row
private[this] var rightMatchesIterator: Iterator[UnsafeRow] = null
// This iterator is initialized lazily so there should be no matches initially
assert(smjScanner.getBufferedMatches.length == 0)
// Set output methods to be overridden by subclasses
protected def setStreamSideOutput(row: InternalRow): Unit
protected def setBufferedSideOutput(row: InternalRow): Unit
/**
* Advance to the next row on the stream side and populate the buffer with matches.
* @return whether there are more rows in the stream to consume.
*/
private def advanceStream(): Boolean = {
rightMatchesIterator = null
if (smjScanner.findNextOuterJoinRows()) {
setStreamSideOutput(smjScanner.getStreamedRow)
if (smjScanner.getBufferedMatches.isEmpty) {
// There are no matching rows in the buffer, so return the null row
setBufferedSideOutput(bufferedSideNullRow)
} else {
// Find the next row in the buffer that satisfied the bound condition
if (!advanceBufferUntilBoundConditionSatisfied()) {
setBufferedSideOutput(bufferedSideNullRow)
}
}
true
} else {
// Stream has been exhausted
false
}
}
/**
* Advance to the next row in the buffer that satisfies the bound condition.
* @return whether there is such a row in the current buffer.
*/
private def advanceBufferUntilBoundConditionSatisfied(): Boolean = {
var foundMatch: Boolean = false
if (rightMatchesIterator == null) {
rightMatchesIterator = smjScanner.getBufferedMatches.generateIterator()
}
while (!foundMatch && rightMatchesIterator.hasNext) {
setBufferedSideOutput(rightMatchesIterator.next())
foundMatch = boundCondition(joinedRow)
}
foundMatch
}
override def advanceNext(): Boolean = {
val r = advanceBufferUntilBoundConditionSatisfied() || advanceStream()
if (r) numOutputRows += 1
r
}
override def getRow: InternalRow = resultProj(joinedRow)
}
private class SortMergeFullOuterJoinScanner(
leftKeyGenerator: Projection,
rightKeyGenerator: Projection,
keyOrdering: Ordering[InternalRow],
leftIter: RowIterator,
rightIter: RowIterator,
boundCondition: InternalRow => Boolean,
leftNullRow: InternalRow,
rightNullRow: InternalRow) {
private[this] val joinedRow: JoinedRow = new JoinedRow()
private[this] var leftRow: InternalRow = _
private[this] var leftRowKey: InternalRow = _
private[this] var rightRow: InternalRow = _
private[this] var rightRowKey: InternalRow = _
private[this] var leftIndex: Int = 0
private[this] var rightIndex: Int = 0
private[this] val leftMatches: ArrayBuffer[InternalRow] = new ArrayBuffer[InternalRow]
private[this] val rightMatches: ArrayBuffer[InternalRow] = new ArrayBuffer[InternalRow]
private[this] var leftMatched: BitSet = new BitSet(1)
private[this] var rightMatched: BitSet = new BitSet(1)
advancedLeft()
advancedRight()
// --- Private methods --------------------------------------------------------------------------
/**
* Advance the left iterator and compute the new row's join key.
* @return true if the left iterator returned a row and false otherwise.
*/
private def advancedLeft(): Boolean = {
if (leftIter.advanceNext()) {
leftRow = leftIter.getRow
leftRowKey = leftKeyGenerator(leftRow)
true
} else {
leftRow = null
leftRowKey = null
false
}
}
/**
* Advance the right iterator and compute the new row's join key.
* @return true if the right iterator returned a row and false otherwise.
*/
private def advancedRight(): Boolean = {
if (rightIter.advanceNext()) {
rightRow = rightIter.getRow
rightRowKey = rightKeyGenerator(rightRow)
true
} else {
rightRow = null
rightRowKey = null
false
}
}
/**
* Populate the left and right buffers with rows matching the provided key.
* This consumes rows from both iterators until their keys are different from the matching key.
*/
private def findMatchingRows(matchingKey: InternalRow): Unit = {
leftMatches.clear()
rightMatches.clear()
leftIndex = 0
rightIndex = 0
while (leftRowKey != null && keyOrdering.compare(leftRowKey, matchingKey) == 0) {
leftMatches += leftRow.copy()
advancedLeft()
}
while (rightRowKey != null && keyOrdering.compare(rightRowKey, matchingKey) == 0) {
rightMatches += rightRow.copy()
advancedRight()
}
if (leftMatches.size <= leftMatched.capacity) {
leftMatched.clearUntil(leftMatches.size)
} else {
leftMatched = new BitSet(leftMatches.size)
}
if (rightMatches.size <= rightMatched.capacity) {
rightMatched.clearUntil(rightMatches.size)
} else {
rightMatched = new BitSet(rightMatches.size)
}
}
/**
* Scan the left and right buffers for the next valid match.
*
* Note: this method mutates `joinedRow` to point to the latest matching rows in the buffers.
* If a left row has no valid matches on the right, or a right row has no valid matches on the
* left, then the row is joined with the null row and the result is considered a valid match.
*
* @return true if a valid match is found, false otherwise.
*/
private def scanNextInBuffered(): Boolean = {
while (leftIndex < leftMatches.size) {
while (rightIndex < rightMatches.size) {
joinedRow(leftMatches(leftIndex), rightMatches(rightIndex))
if (boundCondition(joinedRow)) {
leftMatched.set(leftIndex)
rightMatched.set(rightIndex)
rightIndex += 1
return true
}
rightIndex += 1
}
rightIndex = 0
if (!leftMatched.get(leftIndex)) {
// the left row has never matched any right row, join it with null row
joinedRow(leftMatches(leftIndex), rightNullRow)
leftIndex += 1
return true
}
leftIndex += 1
}
while (rightIndex < rightMatches.size) {
if (!rightMatched.get(rightIndex)) {
// the right row has never matched any left row, join it with null row
joinedRow(leftNullRow, rightMatches(rightIndex))
rightIndex += 1
return true
}
rightIndex += 1
}
// There are no more valid matches in the left and right buffers
false
}
// --- Public methods --------------------------------------------------------------------------
def getJoinedRow(): JoinedRow = joinedRow
def advanceNext(): Boolean = {
// If we already buffered some matching rows, use them directly
if (leftIndex < leftMatches.size || rightIndex < rightMatches.size) {
if (scanNextInBuffered()) {
return true
}
}
if (leftRow != null && (leftRowKey.anyNull || rightRow == null)) {
joinedRow(leftRow.copy(), rightNullRow)
advancedLeft()
true
} else if (rightRow != null && (rightRowKey.anyNull || leftRow == null)) {
joinedRow(leftNullRow, rightRow.copy())
advancedRight()
true
} else if (leftRow != null && rightRow != null) {
// Both rows are present and neither have null values.
val comp = keyOrdering.compare(leftRowKey, rightRowKey)
if (comp < 0) {
joinedRow(leftRow.copy(), rightNullRow)
advancedLeft()
} else if (comp > 0) {
joinedRow(leftNullRow, rightRow.copy())
advancedRight()
} else {
// Populate the buffers with rows matching the next key.
findMatchingRows(leftRowKey.copy())
scanNextInBuffered()
}
true
} else {
// Both iterators have been consumed
false
}
}
}
private class FullOuterIterator(
smjScanner: SortMergeFullOuterJoinScanner,
resultProj: InternalRow => InternalRow,
numRows: SQLMetric) extends RowIterator {
private[this] val joinedRow: JoinedRow = smjScanner.getJoinedRow()
override def advanceNext(): Boolean = {
val r = smjScanner.advanceNext()
if (r) numRows += 1
r
}
override def getRow: InternalRow = resultProj(joinedRow)
}
| nchammas/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/joins/SortMergeJoinExec.scala | Scala | apache-2.0 | 67,309 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.cloudml.zen.ml.util
import org.apache.hadoop.fs._
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.ScalaReflection
import org.apache.spark.sql.types.{DataType, StructField, StructType}
import org.json4s._
import org.json4s.jackson.JsonMethods._
import scala.reflect.ClassTag
import scala.reflect.runtime.universe.TypeTag
// copy form Spark MLlib
/**
* Helper methods for loading models from files.
*/
private[ml] object LoaderUtils {

  /** Returns URI for path/data using the Hadoop filesystem */
  def dataPath(path: String): String = new Path(path, "data").toUri.toString

  /** Returns URI for path/metadata using the Hadoop filesystem */
  def metadataPath(path: String): String = new Path(path, "metadata").toUri.toString

  /**
   * Check the schema of loaded model data.
   *
   * This checks every field in the expected schema to make sure that a field with the same
   * name and DataType appears in the loaded schema. Note that this does NOT check metadata
   * or containsNull.
   *
   * @param loadedSchema Schema for model data loaded from file.
   * @tparam Data Expected data type from which an expected schema can be derived.
   */
  def checkSchema[Data: TypeTag](loadedSchema: StructType): Unit = {
    // Check schema explicitly since erasure makes it hard to use match-case for checking.
    val expectedFields: Array[StructField] =
      ScalaReflection.schemaFor[Data].dataType.asInstanceOf[StructType].fields
    val loadedFields: Map[String, DataType] =
      loadedSchema.map(field => field.name -> field.dataType).toMap
    expectedFields.foreach { field =>
      assert(loadedFields.contains(field.name), s"Unable to parse model data." +
        s" Expected field with name ${field.name} was missing in loaded schema:" +
        s" ${loadedFields.mkString(", ")}")
    }
  }

  /**
   * Load metadata from the given path.
   * @return (class name, version, metadata)
   */
  def loadMetadata(sc: SparkContext, path: String): (String, String, JValue) = {
    implicit val formats = DefaultFormats
    val metadata = parse(sc.textFile(metadataPath(path)).first())
    val clazz = (metadata \ "class").extract[String]
    val version = (metadata \ "version").extract[String]
    (clazz, version, metadata)
  }

  /**
   * Save an RDD to one HDFS file
   * @param sc SparkContext
   * @param rdd The RDD to save
   * @param outPathStr The HDFS file path of String
   * @param header Header line of HDFS file, used for storing some metadata
   * @param mapEle The function mapping each element of RDD to a line of String
   */
  def RDD2HDFSFile[T](sc: SparkContext,
    rdd: RDD[T],
    outPathStr: String,
    header: => String,
    mapEle: T => String): Unit = {
    val hdpconf = sc.hadoopConfiguration
    val fs = FileSystem.get(hdpconf)
    val outPath = new Path(outPathStr)
    if (fs.exists(outPath)) {
      throw new InvalidPathException("Output path %s already exists.".format(outPathStr))
    }
    val fout = fs.create(outPath)
    // Close the stream even if a write fails, so we don't leak the HDFS handle
    // or leave the file locked on error.
    try {
      fout.write(header.getBytes)
      fout.write("\n".getBytes)
      rdd.toLocalIterator.foreach { e =>
        fout.write(mapEle(e).getBytes)
        fout.write("\n".getBytes)
      }
    } finally {
      fout.close()
    }
  }

  /**
   * Load an RDD from one HDFS file
   * @param sc SparkContext
   * @param inPathStr The HDFS file path of String
   * @param init_f The function used for initialization after reading header
   * @param lineParser The function parses each line in HDFS file to an element of RDD
   */
  def HDFSFile2RDD[T: ClassTag, M](sc: SparkContext,
    inPathStr: String,
    init_f: String => M,
    lineParser: (M, String) => T): (M, RDD[T]) = {
    val rawrdd = sc.textFile(inPathStr)
    val header = rawrdd.first()
    val meta = init_f(header)
    val rdd: RDD[T] = rawrdd.mapPartitions { iter =>
      // Guard against empty partitions: the previous implementation called
      // iter.next() unconditionally, which throws NoSuchElementException when
      // a partition contains no lines.
      val content =
        if (!iter.hasNext) {
          iter
        } else {
          val first = iter.next()
          // Drop the header line (it only appears in the first partition);
          // otherwise keep the consumed line.
          if (first == header) iter else Iterator.single(first) ++ iter
        }
      content.map(lineParser(meta, _))
    }
    (meta, rdd)
  }
}
| lenovor/zen | ml/src/main/scala/com/github/cloudml/zen/ml/util/modelSaveLoad.scala | Scala | apache-2.0 | 4,838 |
package org.jetbrains.plugins.scala
package lang
package surroundWith
package surrounders
package expression
/**
* @author AlexanderPodkhalyuzin
* Date: 28.04.2008
*/
import com.intellij.psi.PsiElement
import com.intellij.lang.ASTNode
import com.intellij.openapi.util.TextRange
import lang.psi.api.expr._
import psi.ScalaPsiUtil
import com.intellij.psi.PsiWhiteSpace;
class ScalaWithMatchSurrounder extends ScalaExpressionSurrounder {
override def isApplicable(elements: Array[PsiElement]): Boolean = {
if (elements.length > 1) return false
for (element <- elements)
if (!isApplicable(element)) return false
return true
}
override def isApplicable(element: PsiElement): Boolean = {
element match {
case _: ScBlockExpr => true
case _: ScBlock => false
case _: ScExpression | _: PsiWhiteSpace => true
case e => ScalaPsiUtil.isLineTerminator(e)
}
}
private def needBraces(expr: PsiElement): Boolean = {
expr match {
case _: ScDoStmt | _: ScIfStmt | _: ScTryStmt | _: ScForStatement
| _: ScWhileStmt | _: ScThrowStmt | _: ScReturnStmt => true
case _ => false
}
}
override def getTemplateAsString(elements: Array[PsiElement]): String = {
return (if (elements.length == 1 && !needBraces(elements(0))) super.getTemplateAsString(elements)
else "(" + super.getTemplateAsString(elements) + ")")+ " match {\\ncase a =>\\n}"
}
override def getTemplateDescription = "match"
override def getSurroundSelectionRange(withMatchNode: ASTNode): TextRange = {
val element: PsiElement = withMatchNode.getPsi match {
case x: ScParenthesisedExpr => x.expr match {
case Some(y) => y
case _ => return x.getTextRange
}
case x => x
}
val whileStmt = element.asInstanceOf[ScMatchStmt]
val patternNode: ASTNode = whileStmt.getNode.getLastChildNode.getTreePrev.getTreePrev.getFirstChildNode.getFirstChildNode.getTreeNext.getTreeNext
val offset = patternNode.getTextRange.getStartOffset
patternNode.getTreeParent.removeChild(patternNode)
return new TextRange(offset, offset);
}
} | consulo/consulo-scala | src/org/jetbrains/plugins/scala/lang/surroundWith/surrounders/expression/ScalaWithMatchSurrounder.scala | Scala | apache-2.0 | 2,138 |
package com.github.shadowsocks.preferences
import android.content.Context
import android.content.res.TypedArray
import android.os.Bundle
import android.preference.DialogPreference
import android.util.AttributeSet
import android.view.{ViewGroup, WindowManager}
import android.widget.NumberPicker
import com.github.shadowsocks.R
/**
 * A [[DialogPreference]] that lets the user pick an integer with a
 * [[NumberPicker]] widget. The allowed range comes from the custom XML
 * attributes `min`/`max` (R.styleable.NumberPickerPreference); the chosen
 * value is persisted as an int and shown in the summary via
 * [[SummaryPreference]].
 *
 * @author Mygod
 */
final class NumberPickerPreference(context: Context, attrs: AttributeSet = null)
  extends DialogPreference(context, attrs) with SummaryPreference {
  // Single picker instance, re-parented into each dialog (see onCreateDialogView).
  private val picker = new NumberPicker(context)
  // Currently selected/persisted value; initialized through onSetInitialValue.
  private var value: Int = _

  {
    // Read the min/max bounds from the styled XML attributes, then release the TypedArray.
    val a: TypedArray = context.obtainStyledAttributes(attrs, R.styleable.NumberPickerPreference)
    setMin(a.getInt(R.styleable.NumberPickerPreference_min, 0))
    setMax(a.getInt(R.styleable.NumberPickerPreference_max, Int.MaxValue - 1))
    a.recycle
  }

  def getValue = value
  // null check: presumably getMin can be invoked (via onGetDefaultValue) from the
  // superclass constructor before `picker` is assigned — TODO confirm.
  def getMin = if (picker == null) 0 else picker.getMinValue
  def getMax = picker.getMaxValue
  // Persists `i` (clamped by the picker to [min, max]) unless unchanged or vetoed
  // by the change listener.
  def setValue(i: Int) {
    if (i == getValue || !callChangeListener(i)) return
    picker.setValue(i)
    value = picker.getValue
    persistInt(value)
    notifyChanged
  }
  def setMin(value: Int) = picker.setMinValue(value)
  def setMax(value: Int) = picker.setMaxValue(value)

  override protected def showDialog(state: Bundle) {
    super.showDialog(state)
    // Always show the soft keyboard so the value can be typed directly.
    getDialog.getWindow.setSoftInputMode(WindowManager.LayoutParams.SOFT_INPUT_STATE_ALWAYS_VISIBLE)
  }

  // Detach the shared picker from any previous dialog before handing it to the new one.
  override protected def onCreateDialogView = {
    val parent = picker.getParent.asInstanceOf[ViewGroup]
    if (parent != null) parent.removeView(picker)
    picker
  }
  override protected def onDialogClosed(positiveResult: Boolean) {
    picker.clearFocus // commit changes
    super.onDialogClosed(positiveResult) // forward compatibility
    // OK: persist the picked value; Cancel: restore the picker to the saved value.
    if (positiveResult) setValue(picker.getValue) else picker.setValue(value)
  }

  override protected def onGetDefaultValue(a: TypedArray, index: Int) = a.getInt(index, getMin).asInstanceOf[AnyRef]
  override protected def onSetInitialValue(restorePersistedValue: Boolean, defaultValue: Any) {
    val default = defaultValue.asInstanceOf[Int]
    setValue(if (restorePersistedValue) getPersistedInt(default) else default)
  }

  // Value rendered into the summary template by SummaryPreference.
  protected def getSummaryValue: AnyRef = getValue.asInstanceOf[AnyRef]
}
| magic282/shadowsocks-android | src/main/scala/com/github/shadowsocks/preferences/NumberPickerPreference.scala | Scala | gpl-3.0 | 2,281 |
package example
import common._
object Lists {
  /**
   * Computes the sum of all elements in the list `xs`.
   *
   * Implemented with a tail-recursive accumulator so arbitrarily long lists
   * cannot overflow the call stack.
   *
   * @param xs A list of natural numbers
   * @return The sum of all elements in `xs` (0 for the empty list)
   */
  def sum(xs: List[Int]): Int = {
    @scala.annotation.tailrec
    def loop(rest: List[Int], acc: Int): Int =
      if (rest.isEmpty) acc
      else loop(rest.tail, acc + rest.head)
    loop(xs, 0)
  }

  /**
   * Returns the largest element in a list of integers.
   *
   * The tail maximum is computed exactly once per recursion level; the
   * previous version evaluated `max(xs.tail)` twice (once in the condition
   * and once in the result), which made the running time exponential, O(2^n).
   *
   * @param xs A list of natural numbers
   * @return The largest element in `xs`
   * @throws java.util.NoSuchElementException if `xs` is an empty list
   */
  def max(xs: List[Int]): Int = {
    if (xs.isEmpty) throw new NoSuchElementException()
    else if (xs.tail.isEmpty) xs.head
    else {
      val tailMax = max(xs.tail)
      if (xs.head > tailMax) xs.head else tailMax
    }
  }
}
| marcospereira/progfun-003 | assignments/example/src/main/scala/example/Lists.scala | Scala | mit | 1,663 |
/*
* Copyright 2017 Nicolas Rinaudo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kantan.mongodb
package libra
import _root_.libra.Quantity
import arbitrary._
import laws.discipline._
import shapeless.HNil
/**
 * Law-checking suite for the kantan.mongodb codec instances provided for
 * libra's `Quantity` type: verifies that the decoder and encoder instances
 * are serializable and that the codecs satisfy the round-trip codec laws,
 * for both `Quantity[Double, HNil]` and `Quantity[Int, HNil]`.
 */
class LibraCodecTests extends DisciplineSuite {

  // Double-based quantities.
  checkAll(
    "BsonValueDecoder[Quantity[Double, HNil]]",
    SerializableTests[BsonValueDecoder[Quantity[Double, HNil]]].serializable
  )
  checkAll(
    "BsonValueEncoder[Quantity[Double, HNil]]",
    SerializableTests[BsonValueEncoder[Quantity[Double, HNil]]].serializable
  )
  checkAll("BsonValueCodec[Quantity[Double, HNil]]", BsonValueCodecTests[Quantity[Double, HNil]].codec[String, Float])

  // Int-based quantities.
  checkAll(
    "BsonValueDecoder[Quantity[Int, HNil]]",
    SerializableTests[BsonValueDecoder[Quantity[Int, HNil]]].serializable
  )
  checkAll(
    "BsonValueEncoder[Quantity[Int, HNil]]",
    SerializableTests[BsonValueEncoder[Quantity[Int, HNil]]].serializable
  )
  checkAll("BsonValueCodec[Quantity[Int, HNil]]", BsonValueCodecTests[Quantity[Int, HNil]].codec[String, Float])
}
| nrinaudo/kantan.mongodb | libra/src/test/scala/kantan/mongodb/libra/LibraCodecTests.scala | Scala | apache-2.0 | 1,571 |
/*
* Copyright (c) 2013, Hidekatsu Hirose
* Copyright (c) 2013, Hirose-Zouen
* This file is subject to the terms and conditions defined in
* This file is subject to the terms and conditions defined in
* file 'LICENSE.txt', which is part of this source code package.
*/
package org.hirosezouen.hznet
import scala.actors._
import scala.actors.Actor._
import scala.util.control.Exception._
import org.hirosezouen.hzutil.HZActor._
import org.hirosezouen.hzutil.HZIO._
import org.hirosezouen.hzutil.HZLog._
import HZSocketClient._
/**
 * Console echo client: connects to `host port` given on the command line,
 * forwards each line typed on stdin to the server, and logs every reply.
 * Type "q" or "Q" to quit. Built on scala.actors: one socket-client actor,
 * one stdin-reader actor, and the main thread acting as supervisor.
 */
object HZEchoClient {
    implicit val logger = getLogger(this.getClass.getName)

    def main(args: Array[String]) {
        log_info("HZEchoClient:Start")

        // Expect exactly two arguments: host/IP and port.
        if(args.length < 2) {
            log_error("error : Argument required.")
            sys.exit(0)
        }
        val ip = args(0)
        // Reject a non-numeric port; catching() turns the parse failure into None.
        val port = catching(classOf[NumberFormatException]) opt args(1).toInt match {
            case Some(p) => p
            case None => {
                log_error("error : Port number.")
                sys.exit(1)
            }
        }

        // All child actors we supervise; emptied as they terminate.
        var actors: Set[Actor] = Set.empty

        // Socket client: strings sent to it go to the server, received bytes are logged.
        val soClient = startSocketClient(HZSoClientConf(ip,port,10000,0,false),
                                         SocketIOStaticDataBuilder,
                                         self)
        {
            case (_,s: String) => {
                self ! HZDataSending(s.getBytes)
            }
            case (_,HZDataReceived(receivedData)) => {
                log_info(new String(receivedData))
            }
        }
        actors += soClient

        // Stdin reader: "q"/"Q" quits, anything else is sent to the server.
        actors += startInputActor(System.in) {
            case "q" | "Q" => {
                exit(HZNormalStoped())
            }
            case s => {
                soClient ! HZDataSending(s.getBytes)
            }
        }

        // Receive Exit messages from children instead of dying with them.
        self.trapExit = true

        var loopFlag = true
        var mf: () => Unit = null

        // Phase 1: first child exited; ask the remaining children to stop.
        def mainFun1() = receive {
            case Exit(stopedActor: Actor, reason) => {
                log_debug("main:mainFun1:Exit(%s,%s)".format(stopedActor,reason))
                actors -= stopedActor
                if(actors.isEmpty) {
                    loopFlag = false
                } else {
                    actors.foreach(_ ! HZStop())
                    System.in.close()   /* the input actor stops via the exception thrown by close() */
                    mf = mainFun2
                }
            }
        }

        // Phase 2: drain the remaining Exit messages until all children are gone.
        def mainFun2() = receive {
            case Exit(stopedActor: Actor, reason) => {
                log_debug("main:mainFun2:Exit(%s,%s)".format(stopedActor,reason))
                actors -= stopedActor
                if(actors.isEmpty)
                    loopFlag = false
            }
        }

        /*
         * Main processing loop.
         */
        mf = mainFun1
        while(loopFlag) {
            mf()
        }

        log_info("HZEchoClient:end")
    }
}
| chokopapashi/HZUtils1.6.x_Scala2.10.5 | src/main/scala/org/hirosezouen/hznet/HZEchoClient.scala | Scala | bsd-3-clause | 3,021 |
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.utils.bin
import java.nio.ByteBuffer
/**
 * Callback invoked once per record when emitting the binary ("BIN") output
 * format. Implementations receive either the reduced (16-byte) or the
 * expanded (24-byte) record form and can serialize it with the `put` helpers.
 */
trait BinaryOutputCallback {
  /**
   * Callback for reduced (16-byte) values
   */
  def apply(trackId: Int, lat: Float, lon: Float, dtg: Long): Unit
  /**
   * Callback for expanded (24-byte) values
   */
  def apply(trackId: Int, lat: Float, lon: Float, dtg: Long, label: Long): Unit
  /**
   * Fills in basic values
   *
   * Layout written (16 bytes): trackId (4) | dtg as whole seconds (4) |
   * lat (4) | lon (4). Note `dtg` arrives in milliseconds and is truncated
   * to seconds and narrowed to an Int before being stored.
   */
  protected def put(buffer: ByteBuffer, trackId: Int, lat: Float, lon: Float, dtg: Long): Unit = {
    buffer.putInt(trackId)
    buffer.putInt((dtg / 1000).toInt)
    buffer.putFloat(lat)
    buffer.putFloat(lon)
  }
  /**
   * Fills in extended values
   *
   * Same 16-byte prefix as the basic form, followed by the 8-byte label
   * (24 bytes total).
   */
  protected def put(buffer: ByteBuffer, trackId: Int, lat: Float, lon: Float, dtg: Long, label: Long): Unit = {
    put(buffer, trackId, lat, lon, dtg)
    buffer.putLong(label)
  }
}
| ronq/geomesa | geomesa-utils/src/main/scala/org/locationtech/geomesa/utils/bin/BinaryOutputCallback.scala | Scala | apache-2.0 | 1,320 |
/*
*************************************************************************************
* Copyright 2016 Normation SAS
*************************************************************************************
*
* This file is part of Rudder.
*
* Rudder is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU General Public License version 3, the copyright holders add
* the following Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU General
* Public License version 3, when you create a Related Module, this
* Related Module is not considered as a part of the work and may be
* distributed under the license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* Rudder is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Rudder. If not, see <http://www.gnu.org/licenses/>.
*
*************************************************************************************
*/
package com.normation.cfclerk.domain
import java.security.NoSuchAlgorithmException
import java.security.SecureRandom

import javax.crypto.SecretKeyFactory
import javax.crypto.spec.PBEKeySpec

import scala.util.Random

import net.liftweb.common.Box
import net.liftweb.common.EmptyBox
import net.liftweb.common.Failure
import net.liftweb.common.Full
import net.liftweb.common.Loggable

import org.apache.commons.codec.digest.Md5Crypt
/*
* This file contains the implementation of the 4 supported algorithmes in
* AIX /etc/security/passwd file (with example hash string result for
* the password "secret"):
* - salted md5 : {smd5}tyiOfoE4$r5HleyKHVdL3dg9ouzcZ80
* - salted sha1 : {ssha1}12$tyiOfoE4WXucUfh/$1olYn48enIIKGOOs0ve/GE.k.sF
* - salted ssha256: {ssha256}12$tyiOfoE4WXucUfh/$YDkcqbY5oKk4lwQ4pVKPy8o4MqcfVpp1ZxxvSfP0.wS
* - salted ssha512: {ssha512}10$tyiOfoE4WXucUfh/$qaLbOhKx3fwIu93Hkh4Z89Vr.otLYEhRGN3b3SAZFD3mtxhqWZmY2iJKf0KB/5fuwlERv14pIN9h4XRAZtWH..
*
* Appart for md5, which is the standard unix implementation and differs only for the
* prefix ({smd5} in place of "$1", the other implementations differ SIGNIFICANTLY from
* standard Unix crypt described at https://www.akkadia.org/drepper/SHA-crypt.txt. In fact,
* they only kept:
* - the number of bytes (and so chars) for the hash: 20 for ssha1, 32 for ssha256, 64 for ssh512
* - the base64 encoding table (which is not the standard one but starts with "./012" etc
*
* What changed is:
* - they use PBKDF2 HMAC-(sha1, sha256, sha512) in place of Sha-Crypt,
* - they use a different padding table
* - the number of iterations, named "rounds" in Unix crypt vocabulary, is not the number N
* found at the begining of the hash string (after the algo name). The number of iteration
* is actually 2^N, and N is called in /etc/security/pwdalg.cfg the "cost"
*
* Hope this decription may help other people find there way to generate AIX hash string.
*/
object AixPasswordHashAlgo extends Loggable {
  import java.lang.{ StringBuilder => JStringBuilder }

  /**
   * Table with characters for Sha-Crypt Base64 transformation,
   * and used by Aix even if they DON'T use sha-crypt.
   */
  final val SCB64Table = "./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";

  /*
   * Sha-Crypt Base64 byte encoding into chars.
   *
   * Convert 3 bytes into `outNumChars` (0 to 4) chars from the B64 table.
   * Output is appended to the buffer given in input.
   *
   * This code is a Scala adaptation from org.apache.commons.codec.digest.B64
   */
  def b64from24bit(b2: Byte, b1: Byte, b0: Byte, outNumChars: Int, buffer: JStringBuilder) {
    // The bit masking is necessary because the JVM byte type is signed!
    var w = ((b2 << 16) & 0x00ffffff) | ((b1 << 8) & 0x00ffff) | (b0 & 0xff)
    // It's effectively a "for" loop but kept to resemble the original C code.
    val n = outNumChars
    for(i <- 0 until n) {
      buffer.append(SCB64Table.charAt(w & 0x3f))
      w >>= 6
    }
  }

  /*
   * AIX {smd5} implementation: standard Unix md5-crypt with the "{smd5}"
   * prefix in place of "$1$".
   * @param pwd the string password, in UTF-8, to hash
   * @param salt the salt to use; a random 8-char salt is generated when None
   */
  def smd5(pwd: String, salt: Option[String] = None): String = {
    val s = salt.getOrElse(getRandomSalt(8))
    val hash = Md5Crypt.md5Crypt(pwd.getBytes("UTF-8"), s, "")
    val sb = new JStringBuilder()
    sb.append("{smd5}")
    sb.append(hash)
    sb.toString
  }

  /*
   * Generic implementation of AIX {ssha*} hash scheme.
   * The hash format is {ssha*}NN$saltsalt$sha.crypt.base.64.encoded.hash
   * where NN is the cost (cost_num in /etc/security/pwdalg.cfg vocabulary),
   * a 0 padded int between 1 and 31, and saltsalt is the salt string,
   * by default 16 chars long.
   * Fails when the required PBKDF2 SecretKeyFactory is unavailable in the JVM.
   */
  def ssha(pwd: String, salt: Option[String], cost: Int, sha: ShaSpec): Box[String] = {
    for {
      skf <- getSecretKeFactory(sha)
    } yield {
      doSsha(sha, skf)(pwd, salt, cost)
    }
  }

  /*
   * Create an AIX {ssha1} hash string from pwd. Optionally, specify the
   * salt string to use (default to a random 16 chars string), and the "num_cost"
   * (defaults to 10, i.e 1024 iterations).
   *
   * If PBKDF2WithHmacSHA1 crypto algorithm is not available in the current JVM,
   * revert back to {smd5}.
   */
  def ssha1(pwd: String, salt: Option[String] = None, cost: Int = 10): String = {
    ssha1impl(pwd, salt, cost)
  }

  /*
   * Create an AIX {ssha256} hash string from pwd. Optionally, specify the
   * salt string to use (default to a random 16 chars string), and the "num_cost"
   * (defaults to 10, i.e 1024 iterations).
   *
   * If PBKDF2WithHmacSHA256 crypto algorithm is not available in the current JVM,
   * revert back to {ssha1}.
   */
  def ssha256(pwd: String, salt: Option[String] = None, cost: Int = 10): String = {
    ssha256impl(pwd, salt, cost)
  }

  /*
   * Create an AIX {ssha512} hash string from pwd. Optionally, specify the
   * salt string to use (default to a random 16 chars string), and the "num_cost"
   * (defaults to 10, i.e 1024 iterations).
   *
   * If PBKDF2WithHmacSHA512 crypto algorithm is not available in the current JVM,
   * revert back to {ssha1}.
   */
  def ssha512(pwd: String, salt: Option[String] = None, cost: Int = 10): String = {
    ssha512impl(pwd, salt, cost)
  }

  /////
  ///// Implementation details
  ///// (if you are looking to understand how AIX hashes its
  ///// passwords, that the part of interest)
  /////

  // Looks up the PBKDF2 SecretKeyFactory matching the given SHA variant.
  protected[domain] final def getSecretKeFactory(sha: ShaSpec): Box[SecretKeyFactory] = {
    try {
      Full(SecretKeyFactory.getInstance(s"PBKDF2WithHmac${sha.name}"))
    } catch {
      case ex: NoSuchAlgorithmException =>
        Failure(s"Your current Java installation does not support PBKDF2WithHmac${sha.name} algorithm, " +
                 "which is necessary for {ssha256} hash")
    }
  }

  /////
  ///// Initialize the correct instance of hashing algo
  ///// based on the available secret key factory on the
  ///// jvm. The check is done at most one time by jvm session.
  /////

  /// ssha1 reverts to smd5 - but all post-java6 JVM should be ok
  private[this] final lazy val ssha1impl = getSecretKeFactory(ShaSpec.SHA1) match {
    case Full(skf) => doSsha(ShaSpec.SHA1, skf) _
    case e:EmptyBox =>
      // this should not happen, because PBKDF2WithHmacSHA1 is
      // in standard Java since Java6. But who knows..
      // Fallback to md5 hash.
      logger.error("Your current Java installation does not support PBKDF2WithHmacSHA1 algorithm, " +
                   "which is necessary for {ssha1} hash. Falling back to {smd5} hashing scheme")
      (pwd: String, salt: Option[String], cost: Int) => smd5(pwd, salt)
  }

  /// ssha256 reverts to ssha1 - not so bad
  private[this] final lazy val ssha256impl = getSecretKeFactory(ShaSpec.SHA256) match {
    case Full(skf) => doSsha(ShaSpec.SHA256, skf) _
    case e:EmptyBox =>
      // this may happen on Java 7 and older version, because PBKDF2WithHmacSHA256
      // was introduced in Java 8.
      // Fallback to ssha1 hash.
      logger.error("Your current Java installation does not support PBKDF2WithHmacSHA256 algorithm, " +
                   "which is necessary for {ssha256} hash. Falling back to {ssha1} hashing scheme")
      ssha1impl
  }

  /// ssha512 reverts to ssha1 - not so bad
  private[this] final lazy val ssha512impl = getSecretKeFactory(ShaSpec.SHA512) match {
    case Full(skf) => doSsha(ShaSpec.SHA512, skf) _
    case e:EmptyBox =>
      // this may happen on Java 7 and older version, because PBKDF2WithHmacSHA512
      // was introduced in Java 8.
      // Fallback to ssha1 hash.
      logger.error("Your current Java installation does not support PBKDF2WithHmacSHA512 algorithm, " +
                   "which is necessary for {ssha256} hash. Falling back to {ssha1} hashing scheme")
      ssha1impl
  }

  /*
   * This one is not public - the caller must ensure that sha and SecretKeyFactory are compatible
   */
  protected[domain] final def doSsha(sha: ShaSpec, skf: SecretKeyFactory)(pwd: String, salt: Option[String], cost: Int): String = {
    // AIX's "cost" N means 2^N PBKDF2 iterations
    val rounds = 2 << (cost-1)
    val s = salt.getOrElse(getRandomSalt(16)).getBytes("UTF-8")
    val spec: PBEKeySpec = new PBEKeySpec(pwd.toCharArray, s, rounds, 8*sha.byteNumber)
    val sb = new JStringBuilder()
    sb.append(sha.prefix)
    sb.append("%02d".format(cost))
    sb.append("$").append(new String(s)).append("$")
    sha.scb64Encode(skf.generateSecret(spec).getEncoded, sb)
    sb.toString
  }

  /*
   * Generic trait denoting the specificities of each
   * SHA variant in term of name, number of output
   * bytes, and way to encode them into a string.
   */
  sealed trait ShaSpec {
    // SHA version: SHA1, SHA256, SHA512
    def name : String
    // algo prefix prepended in the final hash string.
    final lazy val prefix = s"{s${name.toLowerCase}}"
    // number of bytes of the hash before encoding
    def byteNumber: Int
    // encode the byte array resulting from the hash
    // into a Sha-Crypt Base64 string, with correct
    // byte switching and padding.
    // the input byte array must have byteNumber elements.
    def scb64Encode(bytes: Array[Byte], buffer: JStringBuilder): JStringBuilder
  }

  final object ShaSpec {
    // specific implementation for SHA1, SHA256 and SHA512

    final case object SHA1 extends ShaSpec {
      val name = "SHA1"
      val byteNumber = 20
      def scb64Encode(bytes: Array[Byte], buffer: JStringBuilder): JStringBuilder = {
        b64from24bit(bytes( 0), bytes( 1), bytes( 2), 4, buffer)
        b64from24bit(bytes( 3), bytes( 4), bytes( 5), 4, buffer)
        b64from24bit(bytes( 6), bytes( 7), bytes( 8), 4, buffer)
        b64from24bit(bytes( 9), bytes(10), bytes(11), 4, buffer)
        b64from24bit(bytes(12), bytes(13), bytes(14), 4, buffer)
        b64from24bit(bytes(15), bytes(16), bytes(17), 4, buffer)
        b64from24bit(bytes(18), bytes(19), 0       , 3, buffer)
        buffer
      }
    }

    final case object SHA256 extends ShaSpec {
      val name = "SHA256"
      val byteNumber = 32
      def scb64Encode(bytes: Array[Byte], buffer: JStringBuilder): JStringBuilder = {
        b64from24bit(bytes( 0), bytes( 1), bytes( 2), 4, buffer)
        b64from24bit(bytes( 3), bytes( 4), bytes( 5), 4, buffer)
        b64from24bit(bytes( 6), bytes( 7), bytes( 8), 4, buffer)
        b64from24bit(bytes( 9), bytes(10), bytes(11), 4, buffer)
        b64from24bit(bytes(12), bytes(13), bytes(14), 4, buffer)
        b64from24bit(bytes(15), bytes(16), bytes(17), 4, buffer)
        b64from24bit(bytes(18), bytes(19), bytes(20), 4, buffer)
        b64from24bit(bytes(21), bytes(22), bytes(23), 4, buffer)
        b64from24bit(bytes(24), bytes(25), bytes(26), 4, buffer)
        b64from24bit(bytes(27), bytes(28), bytes(29), 4, buffer)
        b64from24bit(bytes(30), bytes(31), 0       , 3, buffer)
        buffer
      }
    }

    final case object SHA512 extends ShaSpec {
      val name = "SHA512"
      val byteNumber = 64
      def scb64Encode(bytes: Array[Byte], buffer: JStringBuilder): JStringBuilder = {
        b64from24bit(bytes( 0), bytes( 1), bytes( 2), 4, buffer)
        b64from24bit(bytes( 3), bytes( 4), bytes( 5), 4, buffer)
        b64from24bit(bytes( 6), bytes( 7), bytes( 8), 4, buffer)
        b64from24bit(bytes( 9), bytes(10), bytes(11), 4, buffer)
        b64from24bit(bytes(12), bytes(13), bytes(14), 4, buffer)
        b64from24bit(bytes(15), bytes(16), bytes(17), 4, buffer)
        b64from24bit(bytes(18), bytes(19), bytes(20), 4, buffer)
        b64from24bit(bytes(21), bytes(22), bytes(23), 4, buffer)
        b64from24bit(bytes(24), bytes(25), bytes(26), 4, buffer)
        b64from24bit(bytes(27), bytes(28), bytes(29), 4, buffer)
        b64from24bit(bytes(30), bytes(31), bytes(32), 4, buffer)
        b64from24bit(bytes(33), bytes(34), bytes(35), 4, buffer)
        b64from24bit(bytes(36), bytes(37), bytes(38), 4, buffer)
        b64from24bit(bytes(39), bytes(40), bytes(41), 4, buffer)
        b64from24bit(bytes(42), bytes(43), bytes(44), 4, buffer)
        b64from24bit(bytes(45), bytes(46), bytes(47), 4, buffer)
        b64from24bit(bytes(48), bytes(49), bytes(50), 4, buffer)
        b64from24bit(bytes(51), bytes(52), bytes(53), 4, buffer)
        b64from24bit(bytes(54), bytes(55), bytes(56), 4, buffer)
        b64from24bit(bytes(57), bytes(58), bytes(59), 4, buffer)
        b64from24bit(bytes(60), bytes(61), bytes(62), 4, buffer)
        b64from24bit(bytes(63), 0       , 0       , 2, buffer)
        buffer
      }
    }
  }

  // Single shared CSPRNG instance for salt generation; SecureRandom is thread-safe.
  private[this] val saltRandom = new SecureRandom()

  /**
   * Generate a random string with the size given
   * as a parameter and chars taken from the Sha-Crypt
   * Base 64 table.
   *
   * Uses SecureRandom rather than scala.util.Random: salts protecting
   * password hashes should not come from a predictable PRNG.
   */
  private[this] def getRandomSalt(size: Int): String = {
    val chars = Array.fill(size)(SCB64Table.charAt(saltRandom.nextInt(SCB64Table.length)))
    new String(chars)
  }
}
| armeniaca/rudder | rudder-core/src/main/scala/com/normation/cfclerk/domain/AixPasswordHashAlgo.scala | Scala | gpl-3.0 | 14,570 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.adam.projections
import org.bdgenomics.formats.avro.VariantCallingAnnotations
/**
* Enumeration of VariantCallingAnnotations field names for predicates and projections.
*/
object VariantCallingAnnotationsField extends FieldEnumeration(VariantCallingAnnotations.SCHEMA$) {
  // One SchemaValue per field; each name must match a field of the
  // VariantCallingAnnotations Avro schema passed to FieldEnumeration above.
  val readDepth, downsampled, baseQRankSum, clippingRankSum, haplotypeScore, inbreedingCoefficient, alleleCountMLE, alleleFrequencyMLE, rmsMapQ, mapq0Reads, mqRankSum, usedForNegativeTrainingSet, usedForPositiveTrainingSet, variantQualityByDepth, readPositionRankSum, vqslod, culprit, variantCallErrorProbability, variantIsPassing, variantFilters = SchemaValue
}
| tdanford/adam | adam-core/src/main/scala/org/bdgenomics/adam/projections/VariantCallingAnnotationsField.scala | Scala | apache-2.0 | 1,457 |
package com.lucidchart.open.xtract
import org.specs2.mutable.Specification
import scala.xml._
/**
 * Specs for attribute-based XPath filtering: selecting elements that carry a
 * given attribute, and selecting elements whose attribute has a given value.
 */
class XPathSpec extends Specification {
  "XPath.with_attr" should {
    "Filter elements that contain attribute" in {
      val document = <d><a b="foo" /><a b="d" /><a /><a c="c" /><a b="b" /></d>
      // Select every <a> that has a "b" attribute, whatever its value.
      val selector = (__ \\ "a")("b")
      val expected = NodeSeq.fromSeq(Seq(
        <a b="foo" />,
        <a b="d" />,
        <a b="b" />
      ))
      selector(document) must_== expected
    }
    "Filter elements by attribute name and value" in {
      val document = <d><a t="5" n="f" /><a t="6" /><a y="5" /><a t="5" /><a /></d>
      // Select every <a> whose "t" attribute equals "5" exactly.
      val selector = (__ \\ "a")("t", "5")
      val expected = NodeSeq.fromSeq(Seq(
        <a t="5" n="f" />,
        <a t="5" />
      ))
      selector(document) must_== expected
    }
  }
}
| lucidsoftware/xtract | unit-tests/src/test/scala/com/lucidchart/open/xtract/XPathSpec.scala | Scala | apache-2.0 | 735 |
/* Copyright (C) 2008-2016 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.la
import cc.factorie.util.DoubleSeq
//trait SingletonTensor extends SparseTensor with SparseDoubleSeq with ReadOnlyTensor {
//}
/**
 * A read-only sparse tensor that stores exactly one (index, value) entry.
 * The sparse bookkeeping methods are trivial since the active domain is
 * always the single index.
 */
trait SingletonTensor extends SparseTensor with ReadOnlyTensor {
  // Position of the single stored entry.
  def singleIndex: Int
  // Value stored at singleIndex.
  def singleValue: Double
  val activeDomainSize = 1
  // No-op: capacity never changes for a singleton.
  def sizeHint(size: Int): Unit = { }
  // No-op: there is nothing to sort or compact.
  def _makeReadable(): Unit = { }
  def _unsafeActiveDomainSize: Int = 1
  def _indices: Array[Int] = Array(singleIndex)
}
/**
 * A SparseIndexedTensor with exactly one stored entry; all positions other
 * than singleIndex read as 0.0, which explains the sum/max/min/maxIndex
 * implementations below.
 */
trait SingletonIndexedTensor extends SparseIndexedTensor with SingletonTensor {
  def _values: Array[Double] = Array(singleValue)
  def copyInto(t: SparseIndexedTensor): Unit = t(singleIndex) = singleValue
  //def activeDomain: IntSeq = new SingletonIntSeq(singleIndex) // Can't be here and in Tensor1
  override def apply(i:Int) = if (i == singleIndex) singleValue else 0.0
  override def foreachActiveElement(f:(Int,Double)=>Unit): Unit = f(singleIndex, singleValue)
  override def activeElements: Iterator[(Int,Double)] = Iterator.single((singleIndex, singleValue))
  override def forallActiveElements(f:(Int,Double)=>Boolean): Boolean = f(singleIndex, singleValue)
  // a(offset+singleIndex) += f * this: the only non-zero contribution.
  override def =+(a:Array[Double], offset:Int, f:Double): Unit = a(offset+singleIndex) += f * singleValue
  override def sum: Double = singleValue
  // Max/min are taken over the whole (implicitly zero-filled) domain, hence
  // they saturate at 0.0 when singleValue is negative/positive respectively.
  override def max: Double = if (singleValue > 0.0) singleValue else 0.0
  override def min: Double = if (singleValue < 0.0) singleValue else 0.0
  // Index of the maximum: singleIndex when the value is non-negative;
  // otherwise any zero-valued index, i.e. 0 (or 1 when singleIndex is 0).
  override def maxIndex: Int = if (singleValue >= 0.0) singleIndex else if (singleIndex != 0) 0 else 1
  override def containsNaN: Boolean = false
  //override def dot(v:DoubleSeq): Double = v(singleIndex) * singleValue
  //override def copy: SingletonTensor = this // immutable, but careful in the future we might make a mutable version
  // Dot product specializations; the generic case only needs the value of the
  // other sequence at singleIndex. Case order matters: most specific first.
  override def dot(t:DoubleSeq): Double = t match {
    case t:SingletonBinaryTensor => if (singleIndex == t.singleIndex) singleValue else 0.0
    case t:SingletonTensor => if (singleIndex == t.singleIndex) singleValue * t.singleValue else 0.0
    case t:DoubleSeq => t(singleIndex) * singleValue
  }
}
| melisabok/factorie | src/main/scala/cc/factorie/la/SingletonTensor.scala | Scala | apache-2.0 | 2,811 |
package mesosphere.marathon
package stream
import com.amazonaws.auth.{ AWSCredentials, AWSStaticCredentialsProvider, BasicAWSCredentials }
import java.net.URI
import java.nio.file.Paths
import akka.actor.ActorSystem
import akka.http.scaladsl.model.ContentTypes
import akka.stream.Materializer
import akka.stream.alpakka.s3.S3Settings
import akka.stream.alpakka.s3.acl.CannedAcl
import akka.stream.alpakka.s3.impl.MetaHeaders
import akka.stream.alpakka.s3.scaladsl.S3Client
import akka.stream.scaladsl.{ FileIO, Source, Sink => ScalaSink }
import akka.util.ByteString
import akka.Done
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain
import com.typesafe.scalalogging.StrictLogging
import com.wix.accord.Validator
import com.wix.accord.dsl._
import mesosphere.marathon.api.v2.Validation.{ isTrue, uriIsValid }
import scala.concurrent.{ ExecutionContext, Future }
import scala.util.Try
/**
 * UriIO provides sources from and sinks to an URI.
 * It supports different providers, so multiple schemes are supported.
 *
 * Provided schemes:
 *
 * File
 * Example: {{{file:///path/to/file}}}
 * This will read/write from a local file under a given path
 *
 *
 * S3
 * Example: {{{s3://bucket.name/path/in/bucket?region=us-west-1&access_key=XXX&secret_key=YYY}}}
 * (query parameter names must be `region`, `access_key` and `secret_key`,
 * matching what [[UriIO.s3Client]] reads)
 * This will read/write from an S3 bucket in the specified path
 *
 */
object UriIO extends StrictLogging {

  /**
   * Source that reads from the specified URI.
   * @param uri the uri to read from
   * @return A source for reading the specified uri.
   */
  def reader(uri: URI)(implicit actorSystem: ActorSystem, materializer: Materializer, ec: ExecutionContext): Source[ByteString, Future[Done]] = {
    uri.getScheme match {
      case "file" =>
        FileIO
          .fromPath(Paths.get(uri.getPath))
          // Surface an IO failure through the materialized value instead of swallowing it.
          .mapMaterializedValue(_.map(res => res.status.getOrElse(throw res.getError)))
      case "s3" =>
        s3Client(uri)
          // uri.getPath starts with "/", which is not part of the S3 object key.
          .download(uri.getHost, uri.getPath.substring(1))
          .mapMaterializedValue(_ => Future.successful(Done))
      case unknown => throw new RuntimeException(s"Scheme not supported: $unknown")
    }
  }

  /**
   * Sink that can write to the defined URI.
   * @param uri the URI to write to.
   * @return the sink that can write to the defined URI.
   */
  def writer(uri: URI)(implicit actorSystem: ActorSystem, materializer: Materializer, ec: ExecutionContext): ScalaSink[ByteString, Future[Done]] = {
    uri.getScheme match {
      case "file" =>
        FileIO
          .toPath(Paths.get(uri.getPath))
          .mapMaterializedValue(_.map(res => res.status.getOrElse(throw res.getError)))
      case "s3" =>
        logger.info(s"s3location: bucket:${uri.getHost}, path:${uri.getPath}")
        s3Client(uri)
          // Multipart upload; the leading "/" is stripped from the object key as in reader.
          .multipartUpload(
            bucket = uri.getHost,
            key = uri.getPath.substring(1),
            metaHeaders = MetaHeaders(Map.empty),
            contentType = ContentTypes.`application/octet-stream`,
            cannedAcl = CannedAcl.BucketOwnerRead)
          .mapMaterializedValue(_.map(_ => Done))
      case unknown => throw new RuntimeException(s"Scheme not supported: $unknown")
    }
  }

  /**
   * Indicates, if the given uri is valid.
   * @param uri the uri to validate
   * @return true if this URI is valid, otherwise false.
   */
  def isValid(uri: URI): Boolean = {
    def nonEmpty(nullable: String): Boolean = nullable != null && nullable.nonEmpty
    uri.getScheme match {
      // file URIs need a path longer than just "/"
      case "file" if nonEmpty(uri.getPath) && uri.getPath.length > 1 => true
      // s3 URIs need both a bucket (the host part) and an object path
      case "s3" if nonEmpty(uri.getHost) && nonEmpty(uri.getPath) => true
      case _ => false
    }
  }

  // Accord validator combining generic URI syntax with the scheme check above.
  def valid: Validator[String] = uriIsValid and isTrue[String]{ uri: String => s"Invalid URI or unsupported scheme: $uri" }(uri => isValid(new URI(uri)))

  /**
   * Create S3 client.
   * The credentials use the following chain:
   * - use credentials provided from URI parameters (`access_key` / `secret_key`)
   * - use credentials set via the environment
   * - use credentials set via system properties
   * - use default credentials set via the credentials file
   * - use credentials provided via the Amazon EC2 Container Service
   * - use credential defined via system configuration in akka.stream.alpakka.s3
   * @return The S3Client for the defined URI.
   */
  private[this] def s3Client(uri: URI)(implicit actorSystem: ActorSystem, materializer: Materializer): S3Client = {
    val params = parseParams(uri)
    val region = params.getOrElse("region", "us-east-1")
    val credentials: AWSCredentials = {
      def fromURL: Option[AWSCredentials] = for {
        accessKey <- params.get("access_key")
        accessSecret <- params.get("secret_key")
      } yield new BasicAWSCredentials(accessKey, accessSecret)
      // Wraps the AWS default chain (env vars, system props, credentials file, ECS).
      def fromProviderChain: Option[AWSCredentials] = {
        Try(new DefaultAWSCredentialsProviderChain().getCredentials)
          .toOption
          .map(creds => new BasicAWSCredentials(creds.getAWSAccessKeyId, creds.getAWSSecretKey))
      }
      fromURL.orElse(fromProviderChain).getOrElse {
        // Last resort: alpakka's own configured credentials provider.
        S3Settings().credentialsProvider.getCredentials
      }
    }
    S3Client(new AWSStaticCredentialsProvider(credentials), region)
  }

  // Parses the URI query string into a key -> value map; parameters without
  // exactly one "=" are silently dropped (see QueryParam).
  private[this] def parseParams(uri: URI): Map[String, String] = {
    Option(uri.getQuery).getOrElse("").split("&").collect { case QueryParam(k, v) => k -> v }(collection.breakOut)
  }

  private[this] object QueryParam {
    // NOTE(review): a parameter whose value itself contains "=" splits into 3+ parts
    // and is dropped here — confirm that is the intended behavior.
    def unapply(str: String): Option[(String, String)] = str.split("=") match {
      case Array(key: String, value: String) => Some(key -> value)
      case _ => None
    }
  }
}
| guenter/marathon | src/main/scala/mesosphere/marathon/stream/UriIO.scala | Scala | apache-2.0 | 5,641 |
/*
* Copyright 2011-2014 Chris de Vreeze
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.ebpi.yaidom.perftest
import java.{ util => jutil }
import java.io.File
import java.lang.management.ManagementFactory
import scala.util.Try
import org.scalatest.FunSuite
import nl.ebpi.yaidom.testtag.PerformanceTest
import AbstractMemoryUsageSuite._
import nl.ebpi.yaidom.core._
import nl.ebpi.yaidom.parse
import nl.ebpi.yaidom.queryapi._
/**
* Abstract memory usage suite super-class, for different yaidom element types.
*
* It requires sbt to run with "-Dperftest.rootDir=/path/to/rootdir". All files under that rootDir that are regarded to be XML files
* (due to the extension in the file name, such as ".xml", ".xsd", etc.) are parsed.
*
* In order to get some useful logging output, and in order for the test to check reasonable assertions, make sure to run each
* concrete suite that is a sub-class of this abstract suite in isolation! For example:
* {{{
* test-only nl.ebpi.yaidom.perftest.MemoryUsageSuiteForElem
* }}}
*
* @author Chris de Vreeze
*/
/**
 * Template for the per-element-type memory usage suites. Concrete subclasses plug in
 * the element type [[E]], the parsing strategy, and the allowed memory-to-file-size ratio.
 */
abstract class AbstractMemoryUsageSuite extends FunSuite {

  // The yaidom element implementation under test; must support the Clark query API.
  type E <: ClarkElemLike[E]

  private val logger: jutil.logging.Logger = jutil.logging.Logger.getLogger("nl.ebpi.yaidom.perftest")

  // Directory scanned (recursively) for XML files; supplied via -Dperftest.rootDir.
  private def rootDir: File = {
    Option(System.getProperty("perftest.rootDir")).map(f => new File(f)).getOrElse(
      sys.error(s"Missing system property 'perftest.rootDir'. All XML files somewhere below the rootDir are used by the test."))
  }

  test("On querying, memory usage should be within reasonable bounds", PerformanceTest) {
    // Install caching EName/QName providers so repeatedly-parsed names share instances.
    ENameProvider.globalENameProvider.become(AbstractMemoryUsageSuite.defaultENameProvider)
    QNameProvider.globalQNameProvider.become(AbstractMemoryUsageSuite.defaultQNameProvider)

    require(rootDir.isDirectory, s"Expected directory $rootDir, but this is not an existing directory")
    logger.info(s"Entering test. Test class: ${this.getClass.getName}")

    val memBean = ManagementFactory.getMemoryMXBean
    def getUsedHeapMemoryInMiB(): Long = convertByteCountToMiB(memBean.getHeapMemoryUsage.getUsed)

    val xmlFiles = findFiles(rootDir).filter(f => Set(".xml", ".xsd").exists(ext => f.getName.endsWith(ext)))

    // GC before each heap reading so the numbers reflect (mostly) live objects.
    memBean.gc()
    val heapMemBeforeParsingInMiB = getUsedHeapMemoryInMiB
    logger.info(s"Heap memory usage before parsing XML: ${heapMemBeforeParsingInMiB} MiB")

    val totalXmlFileLength = xmlFiles.map(_.length).sum
    val totalXmlFileLengthInMiB = convertByteCountToMiB(totalXmlFileLength)
    logger.info(s"Total of the XML file lengths (of ${xmlFiles.size} XML files): $totalXmlFileLengthInMiB MiB")

    // Parse all files, silently dropping the ones that fail to parse.
    val docElems = parseXmlFiles(xmlFiles).flatMap(_.toOption)

    memBean.gc()
    val heapMemAfterParsingInMiB = getUsedHeapMemoryInMiB
    logger.info(s"Heap memory usage after parsing ${docElems.size} XML files: ${heapMemAfterParsingInMiB} MiB")

    // Core assertion: in-memory growth must stay within a factor of the on-disk size.
    assertResult(
      true,
      s"Parsed XML should not need more than $maxMemoryToFileLengthRatio times the memory that their combined byte count") {
      (heapMemAfterParsingInMiB - heapMemBeforeParsingInMiB) <= maxMemoryToFileLengthRatio.toLong * totalXmlFileLengthInMiB
    }

    // Run several queries on one big combined tree, logging heap usage after each.
    val allDocElem = createCommonRootParent(docElems)

    memBean.gc()
    logger.info(s"Heap memory usage after creating large combined XML: ${getUsedHeapMemoryInMiB} MiB")

    val allDocElems = allDocElem.findAllElemsOrSelf
    logger.info(s"The combined XML has ${allDocElems.size} elements")

    memBean.gc()
    logger.info(s"Heap memory usage after this query on the large combined XML: ${getUsedHeapMemoryInMiB} MiB")

    val allDocElemsWithNS = allDocElem \\\\ (elem => elem.resolvedName.namespaceUriOption.isDefined)
    logger.info(s"The combined XML has ${allDocElemsWithNS.size} elements with names having a namespace")

    memBean.gc()
    logger.info(s"Heap memory usage after this query on the large combined XML: ${getUsedHeapMemoryInMiB} MiB")

    val elementNameNamespaces = allDocElem.findAllElemsOrSelf.flatMap(_.resolvedName.namespaceUriOption).distinct.sorted
    val attrNamespaces =
      allDocElem.findAllElemsOrSelf.flatMap(e => e.resolvedAttributes).flatMap(_._1.namespaceUriOption).distinct.sorted
    logger.info(s"The combined XML has ${elementNameNamespaces.size} different namespaces in element names")
    logger.info(s"The combined XML has ${attrNamespaces.size} different namespaces in attribute names")

    memBean.gc()
    logger.info(s"Heap memory usage after these queries on the large combined XML: ${getUsedHeapMemoryInMiB} MiB")

    // Restore the default providers so other suites are unaffected.
    ENameProvider.globalENameProvider.reset()
    QNameProvider.globalQNameProvider.reset()

    logger.info(s"Leaving test. Test class: ${this.getClass.getName}")
  }

  /** Recursively collects all regular files below the given directory. */
  private def findFiles(dir: File): Vector[File] = {
    require(dir.isDirectory)
    val files = dir.listFiles.toVector
    files.filter(_.isFile) ++ files.filter(_.isDirectory).flatMap(d => findFiles(d))
  }

  // Converts a byte count to mebibytes (2^20 bytes).
  private def convertByteCountToMiB(byteCount: Long): Long = byteCount >> 20

  /** Parses the given XML files into elements of type [[E]], keeping failures as Try. */
  protected def parseXmlFiles(files: Vector[File]): Vector[Try[E]]

  /** Reflectively instantiates the DocumentParser named by -Dperftest.documentParser. */
  protected def getDocumentParser: parse.DocumentParser = {
    val parserClass =
      Class.forName(System.getProperty("perftest.documentParser", "nl.ebpi.yaidom.parse.DocumentParserUsingSax")).asInstanceOf[Class[parse.DocumentParser]]
    val parserFactoryMethod = parserClass.getDeclaredMethod("newInstance")
    parserFactoryMethod.invoke(null).asInstanceOf[parse.DocumentParser]
  }

  /** Wraps all parsed root elements under one artificial common root element. */
  protected def createCommonRootParent(rootElems: Vector[E]): E

  /** Maximum allowed ratio of heap growth (MiB) to combined file size (MiB). */
  protected def maxMemoryToFileLengthRatio: Int
}
object AbstractMemoryUsageSuite {

  // To show that the global EName and QName providers have stable identifiers, so they can be imported
  import ENameProvider.globalENameProvider._
  import QNameProvider.globalQNameProvider._

  // EName provider pre-populated from the bundled xs/xlink/link name lists on the classpath.
  val defaultENameProvider: ENameProvider = {
    val thisClass = classOf[AbstractMemoryUsageSuite]

    val enameFiles =
      List(
        new File(thisClass.getResource("/nl/ebpi/yaidom/enames-xs.txt").toURI),
        new File(thisClass.getResource("/nl/ebpi/yaidom/enames-xlink.txt").toURI),
        new File(thisClass.getResource("/nl/ebpi/yaidom/enames-link.txt").toURI))

    val enameCache =
      enameFiles flatMap { file => scala.io.Source.fromFile(file).getLines.toVector } map { s => parseEName(s) }

    // One extra XBRL name is added beyond the files' contents.
    new ENameProvider.ENameProviderUsingImmutableCache(enameCache.toSet + EName("{http://www.xbrl.org/2003/instance}periodType"))
  }

  // QName provider pre-populated from the bundled xs/xlink/link name lists on the classpath.
  val defaultQNameProvider: QNameProvider = {
    val thisClass = classOf[AbstractMemoryUsageSuite]

    val qnameFiles =
      List(
        new File(thisClass.getResource("/nl/ebpi/yaidom/qnames-xs.txt").toURI),
        new File(thisClass.getResource("/nl/ebpi/yaidom/qnames-xlink.txt").toURI),
        new File(thisClass.getResource("/nl/ebpi/yaidom/qnames-link.txt").toURI))

    val qnameCache =
      qnameFiles flatMap { file => scala.io.Source.fromFile(file).getLines.toVector } map { s => parseQName(s) }

    new QNameProvider.QNameProviderUsingImmutableCache(qnameCache.toSet + QName("xbrli:periodType"))
  }
}
| EBPI/yaidom | src/perftest/scala/nl/ebpi/yaidom/perftest/AbstractMemoryUsageSuite.scala | Scala | apache-2.0 | 7,615 |
package japgolly.scalajs.react.test
import scala.Console._
import scala.scalajs.js.Dynamic.global
import scala.util.Try
import scala.util.control.NonFatal
object JsEnvUtils {

  /**
   * The JS environment's user agent string, or "Unknown" when unavailable.
   *
   * Sample (real) values are:
   * - Mozilla/5.0 (Unknown; Linux x86_64) AppleWebKit/538.1 (KHTML, like Gecko) PhantomJS/2.1.1 Safari/538.1
   * - Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0
   * - Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.75 Safari/537.36
   */
  val userAgent: String =
    Try(global.navigator.userAgent.asInstanceOf[String]).getOrElse("Unknown")

  // Browser detection based on the user-agent string.
  val isFirefox = userAgent.contains("Firefox")
  val isChrome = userAgent.contains("Chrome")
  val isRealBrowser = isFirefox || isChrome

  // Coloured marker returned in place of a test result when not in a real browser.
  private val skippedMarker =
    YELLOW + "Skipped; need real browser." + RESET

  /** Runs `test` only inside a real browser; otherwise returns a skip marker. */
  def requiresRealBrowser(test: => Any): Any =
    if (!isRealBrowser) skippedMarker
    else
      try test
      catch {
        case NonFatal(t) => throw t
        // Wrap fatal throwables so the test framework can report them.
        case t: Throwable => throw new RuntimeException(t)
      }
}
| japgolly/scalajs-react | tests/src/test/scala/japgolly/scalajs/react/test/JsEnvUtils.scala | Scala | apache-2.0 | 1,098 |
package com.sksamuel.elastic4s.analyzers
import org.scalatest.{Matchers, WordSpec}
class CommonGramsTokenFilterTest extends WordSpec with TokenFilterDsl with Matchers {

  "CommonGramsTokenFilter builder" should {
    "set common words" in {
      // ignore_case and query_mode keep their defaults (false) when only words are set.
      commonGramsTokenFilter("testy")
        .commonWords("the", "and")
        .json
        .string shouldBe """{"type":"common_grams","common_words":["the","and"],"ignore_case":false,"query_mode":false}"""
    }
    "set ignore case" in {
      commonGramsTokenFilter("testy")
        .ignoreCase(true)
        .json
        .string shouldBe """{"type":"common_grams","ignore_case":true,"query_mode":false}"""
    }
    "set query mode" in {
      commonGramsTokenFilter("testy")
        .queryMode(true)
        .json
        .string shouldBe """{"type":"common_grams","ignore_case":false,"query_mode":true}"""
    }
  }
}
| aroundus-inc/elastic4s | elastic4s-tests/src/test/scala/com/sksamuel/elastic4s/analyzers/CommonGramsTokenFilterTest.scala | Scala | apache-2.0 | 872 |
/*
* Licensed to Cloudera, Inc. under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Cloudera, Inc. licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloudera.hue.livy
import java.util.concurrent.ConcurrentHashMap
import scala.collection.JavaConverters._
object LivyConf {
  // Well-known configuration keys.
  val SESSION_FACTORY_KEY = "livy.server.session.factory"
  val SPARK_SUBMIT_KEY = "livy.server.spark-submit"
  val IMPERSONATION_ENABLED_KEY = "livy.impersonation.enabled"

  /** The kind of backend used to run sessions. */
  sealed trait SessionKind
  case class Process() extends SessionKind
  case class Yarn() extends SessionKind
}

/**
 * Configuration for the Livy server, backed by a thread-safe map.
 *
 * @param loadDefaults whether to also load values from the Java system properties
 *                     (only properties whose key starts with "livy." are read)
 */
class LivyConf(loadDefaults: Boolean) {

  import LivyConf._

  /**
   * Create a LivyConf that loads defaults from the system properties and the classpath.
   */
  def this() = this(true)

  // ConcurrentHashMap gives thread-safe reads and writes without external locking.
  private val settings = new ConcurrentHashMap[String, String]

  if (loadDefaults) {
    for ((k, v) <- System.getProperties.asScala if k.startsWith("livy.")) {
      settings.put(k, v)
    }
  }

  /** Set a configuration variable. Neither the key nor the value may be null. */
  def set(key: String, value: String): LivyConf = {
    if (key == null) {
      throw new NullPointerException("null key")
    }
    if (value == null) {
      // Fixed: previously this reported "null key" even though the *value* was null.
      throw new NullPointerException("null value")
    }
    settings.put(key, value)
    this
  }

  /** Set a parameter only if it is not already configured. */
  def setIfMissing(key: String, value: String): LivyConf = {
    // Fixed: the previous code used `settings.contains(key)`, which on a
    // ConcurrentHashMap tests *values* (legacy Hashtable semantics, delegating to
    // containsValue), so already-configured keys were silently overwritten.
    // putIfAbsent performs the intended check-then-set, and does so atomically.
    settings.putIfAbsent(key, value)
    this
  }

  /** Get a configuration variable, failing with NoSuchElementException if absent. */
  def get(key: String): String = getOption(key).getOrElse(throw new NoSuchElementException(key))

  /** Get a configuration variable, falling back to `default` if absent. */
  def get(key: String, default: String): String = getOption(key).getOrElse(default)

  /** Get a parameter as an Option. */
  def getOption(key: String): Option[String] = Option(settings.get(key))

  /** Get a parameter as a Boolean, falling back to `default` if absent. */
  def getBoolean(key: String, default: Boolean): Boolean =
    getOption(key).map(_.toBoolean).getOrElse(default)

  /** Get a parameter as an Int, falling back to `default` if absent. */
  def getInt(key: String, default: Int): Int =
    getOption(key).map(_.toInt).getOrElse(default)

  /** Return true if the configuration includes this setting. */
  def contains(key: String): Boolean = settings.containsKey(key)

  /** The spark-submit executable to use; defaults to "spark-submit" on the PATH. */
  def sparkSubmit(): String = getOption(SPARK_SUBMIT_KEY).getOrElse("spark-submit")

  /** The configured session backend; defaults to locally-forked processes. */
  def sessionKind(): SessionKind = getOption(SESSION_FACTORY_KEY).getOrElse("process") match {
    case "process" => Process()
    case "yarn" => Yarn()
    case kind => throw new IllegalStateException(s"unknown kind $kind")
  }

  /** Return the filesystem root. Defaults to the local filesystem. */
  def filesystemRoot(): String = sessionKind() match {
    case Process() => "file://"
    case Yarn() => "hdfs://"
  }
}
| vmanoria/bluemix-hue-filebrowser | hue-3.8.1-bluemix/apps/spark/java/livy-core/src/main/scala/com/cloudera/hue/livy/LivyConf.scala | Scala | gpl-2.0 | 3,475 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.feature
import org.apache.spark.annotation.Experimental
import org.apache.spark.mllib.linalg._
import scala.collection.mutable
/**
 * Numeric helpers shared by the discretization / feature-selection code.
 */
@Experimental
object DiscretizationUtils {

  private val LOG2 = math.log(2)

  /** @return log base 2 of x */
  val log2 = (x: Double) => math.log(x) / LOG2

  /**
   * Entropy is a measure of disorder. The higher the value, the closer to a purely random distribution.
   * The MDLP algorithm tries to find splits that will minimize entropy.
   * @param frequencies sequence of integer frequencies.
   * @param n the sum of all the frequencies in the list.
   * @return the total entropy
   */
  def entropy(frequencies: Seq[Long], n: Long): Double = {
    // foldLeft replaces the deprecated `aggregate`, whose combine function was dead
    // code for a sequential Seq. Zero frequencies contribute nothing (the limit of
    // q*log q as q -> 0 is 0) and would otherwise produce NaN via log2(0).
    -frequencies.foldLeft(0.0) { (h, q) =>
      if (q == 0) h
      else {
        val qn = q.toDouble / n
        h + qn * log2(qn)
      }
    }
  }
}
| sramirez/spark-MDLP-discretization | src/main/scala/org/apache/spark/mllib/feature/DiscretizationUtils.scala | Scala | apache-2.0 | 1,781 |
package example.baz
import org.specs2.mutable._
class BazSpec extends Specification {
  // Smoke test: Baz.baz() must run without throwing; `ok` marks the example as passed.
  "example1" >> { Baz.baz(); ok }
}
| sugakandrey/scalamu | sbt-plugin/src/sbt-test/sbt-scalamu/test-3-aggregate/baz/src/test/scala/example/baz/BazSpec.scala | Scala | gpl-3.0 | 124 |
package com.basdek.mailchimp_v3.operations.lists.segments
import com.basdek.mailchimp_v3.{Config, MailChimpResultFuture, SimpleAuthenticate}
import com.basdek.mailchimp_v3.dto.MailChimpListSegmentList
import com.basdek.mailchimp_v3.operations.Operation
import com.ning.http.client.Response
import dispatch._, Defaults._
import org.json4s._, native.JsonMethods._
/**
 * Implementation of the /lists/{listId}/segments operation, which fetches the
 * segments defined for a single MailChimp list.
 *
 * @param cfg A Config instance.
 * @param listId The listId for which you want to obtain the segments.
 */
class GetSegmentsOperation(val cfg: Config, listId: String)
  extends Operation with SimpleAuthenticate {

  /** Issues the authenticated GET request and maps the response onto the DTO. */
  def execute: MailChimpResultFuture = {
    val request = addAuth(:/(s"${cfg.apiEndpoint}/lists/$listId/segments").secure)
    httpToResult(request, parseBody)
  }

  // Deserializes the JSON response body into the segment-list DTO.
  private def parseBody(res: Response): MailChimpListSegmentList =
    parse(res.getResponseBody).extract[MailChimpListSegmentList]
}
| basdek/mailchimp_v3 | src/main/scala/com/basdek/mailchimp_v3/operations/lists/segments/GetSegmentsOperation.scala | Scala | mit | 991 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.python
import java.io.{DataInputStream, InputStream, OutputStreamWriter}
import java.net.{InetAddress, ServerSocket, Socket, SocketException}
import scala.collection.JavaConversions._
import org.apache.spark._
import org.apache.spark.util.Utils
/**
 * Creates Python worker processes that execute PySpark tasks.
 *
 * On UNIX-like systems a single long-lived daemon (pyspark/daemon.py) is started once
 * and asked to fork workers cheaply; on Windows workers (pyspark/worker.py) are
 * launched directly, since the daemon relies on UNIX signals for child management.
 *
 * @param pythonExec the python executable used to launch daemon/worker processes
 * @param envVars    extra environment variables for the python processes
 */
private[spark] class PythonWorkerFactory(pythonExec: String, envVars: Map[String, String])
  extends Logging {

  import PythonWorkerFactory._

  // Because forking processes from Java is expensive, we prefer to launch a single Python daemon
  // (pyspark/daemon.py) and tell it to fork new workers for our tasks. This daemon currently
  // only works on UNIX-based systems now because it uses signals for child management, so we can
  // also fall back to launching workers (pyspark/worker.py) directly.
  val useDaemon = !System.getProperty("os.name").startsWith("Windows")

  // Daemon process state; mutated only inside `synchronized` blocks below.
  var daemon: Process = null
  val daemonHost = InetAddress.getByAddress(Array(127, 0, 0, 1))
  var daemonPort: Int = 0

  // PYTHONPATH for the python side: Spark's python dir plus caller- and JVM-provided paths.
  val pythonPath = PythonUtils.mergePythonPaths(
    PythonUtils.sparkPythonPath,
    envVars.getOrElse("PYTHONPATH", ""),
    sys.env.getOrElse("PYTHONPATH", ""))

  /** Returns a socket connected to a fresh Python worker. */
  def create(): Socket = {
    if (useDaemon) {
      createThroughDaemon()
    } else {
      createSimpleWorker()
    }
  }

  /**
   * Connect to a worker launched through pyspark/daemon.py, which forks python processes itself
   * to avoid the high cost of forking from Java. This currently only works on UNIX-based systems.
   */
  private def createThroughDaemon(): Socket = {
    synchronized {
      // Start the daemon if it hasn't been started
      startDaemon()

      // Attempt to connect, restart and retry once if it fails
      try {
        new Socket(daemonHost, daemonPort)
      } catch {
        case exc: SocketException =>
          logWarning("Python daemon unexpectedly quit, attempting to restart")
          stopDaemon()
          startDaemon()
          new Socket(daemonHost, daemonPort)
      }
    }
  }

  /**
   * Launch a worker by executing worker.py directly and telling it to connect to us.
   */
  private def createSimpleWorker(): Socket = {
    var serverSocket: ServerSocket = null
    try {
      // Bind an ephemeral port on loopback for the worker to call back on.
      serverSocket = new ServerSocket(0, 1, InetAddress.getByAddress(Array(127, 0, 0, 1)))

      // Create and start the worker
      val pb = new ProcessBuilder(Seq(pythonExec, "-u", "-m", "pyspark.worker"))
      val workerEnv = pb.environment()
      workerEnv.putAll(envVars)
      workerEnv.put("PYTHONPATH", pythonPath)
      val worker = pb.start()

      // Redirect worker stdout and stderr
      redirectStreamsToStderr(worker.getInputStream, worker.getErrorStream)

      // Tell the worker our port
      val out = new OutputStreamWriter(worker.getOutputStream)
      out.write(serverSocket.getLocalPort + "\\n")
      out.flush()

      // Wait for it to connect to our socket
      serverSocket.setSoTimeout(10000)
      try {
        return serverSocket.accept()
      } catch {
        case e: Exception =>
          throw new SparkException("Python worker did not connect back in time", e)
      }
    } finally {
      if (serverSocket != null) {
        serverSocket.close()
      }
    }
    // Unreachable in practice: the inner try either returns a socket or throws.
    null
  }

  // Starts the python daemon if not already running and records its callback port.
  private def startDaemon() {
    synchronized {
      // Is it already running?
      if (daemon != null) {
        return
      }

      try {
        // Create and start the daemon
        val pb = new ProcessBuilder(Seq(pythonExec, "-u", "-m", "pyspark.daemon"))
        val workerEnv = pb.environment()
        workerEnv.putAll(envVars)
        workerEnv.put("PYTHONPATH", pythonPath)
        daemon = pb.start()

        // The daemon prints the port it listens on as the first bytes of its stdout.
        val in = new DataInputStream(daemon.getInputStream)
        daemonPort = in.readInt()

        // Redirect daemon stdout and stderr
        redirectStreamsToStderr(in, daemon.getErrorStream)
      } catch {
        case e: Exception =>
          // If the daemon exists, wait for it to finish and get its stderr
          val stderr = Option(daemon)
            .flatMap { d => Utils.getStderr(d, PROCESS_WAIT_TIMEOUT_MS) }
            .getOrElse("")

          stopDaemon()

          if (stderr != "") {
            val formattedStderr = stderr.replace("\\n", "\\n  ")
            val errorMessage = s"""
              |Error from python worker:
              |  $formattedStderr
              |PYTHONPATH was:
              |  $pythonPath
              |$e"""

            // Append error message from python daemon, but keep original stack trace
            val wrappedException = new SparkException(errorMessage.stripMargin)
            wrappedException.setStackTrace(e.getStackTrace)
            throw wrappedException
          } else {
            throw e
          }
      }

      // Important: don't close daemon's stdin (daemon.getOutputStream) so it can correctly
      // detect our disappearance.
    }
  }

  /**
   * Redirect the given streams to our stderr in separate threads.
   */
  private def redirectStreamsToStderr(stdout: InputStream, stderr: InputStream) {
    try {
      new RedirectThread(stdout, System.err, "stdout reader for " + pythonExec).start()
      new RedirectThread(stderr, System.err, "stderr reader for " + pythonExec).start()
    } catch {
      case e: Exception =>
        logError("Exception in redirecting streams", e)
    }
  }

  // Sends SIGTERM to the daemon (if any) and resets the connection state.
  private def stopDaemon() {
    synchronized {
      // Request shutdown of existing daemon by sending SIGTERM
      if (daemon != null) {
        daemon.destroy()
      }

      daemon = null
      daemonPort = 0
    }
  }

  def stop() {
    stopDaemon()
  }
}
private object PythonWorkerFactory {
  // How long (ms) to wait for a dying daemon process when collecting its stderr.
  val PROCESS_WAIT_TIMEOUT_MS = 10000
}
| yelshater/hadoop-2.3.0 | spark-core_2.10-1.0.0-cdh5.1.0/src/main/scala/org/apache/spark/api/python/PythonWorkerFactory.scala | Scala | apache-2.0 | 6,446 |
import _root_.io.vertx.scala.build.{VertxModule, VertxProject}
import sbt._
import sbt.Keys._
object TinyValidator4Build extends VertxProject {

  // Module coordinates; cross-built against 2.10.5 and 2.11.6 (default 2.11.6).
  val module = VertxModule(
    "com.campudus",
    "vertx-tiny-validator4",
    "1.0.0",
    "Vert.x module to validate JSON against a json-schema draft v4.",
    Some("2.11.6"),
    Seq("2.10.5", "2.11.6"))

  override lazy val customSettings = Seq(
    // Package the module's JavaScript sources into the artifact as resources.
    unmanagedResourceDirectories in Compile += { baseDirectory.value / "src/main/javascript" },

    // Test-only dependencies: vert.x test tools, hamcrest matchers, JUnit runner for sbt.
    libraryDependencies ++= Seq(
      "io.vertx" % "testtools" % "2.0.3-final" % "test",
      "org.hamcrest" % "hamcrest-library" % "1.3" % "test",
      "com.novocode" % "junit-interface" % "0.11" % "test"
    ),

    // Maven POM metadata required for publishing to a public repository.
    pomExtra :=
      <inceptionYear>2015</inceptionYear>
        <url>https://github.com/campudus/vertx-tiny-validator4</url>
        <licenses>
          <license>
            <name>Apache License, Version 2.0</name>
            <url>http://www.apache.org/licenses/LICENSE-2.0.html</url>
            <distribution>repo</distribution>
          </license>
        </licenses>
        <scm>
          <connection>scm:git:https://github.com/campudus/vertx-tiny-validator4.git</connection>
          <developerConnection>scm:git:ssh://git@github.com:campudus/vertx-tiny-validator4.git</developerConnection>
          <url>https://github.com/campudus/vertx-tiny-validator4</url>
        </scm>
        <developers>
          <developer>
            <id>alexvetter</id>
            <name>Alexander Vetter</name>
          </developer>
        </developers>
  )
} | campudus/vertx-tiny-validator4 | project/TinyValidator4Build.scala | Scala | apache-2.0 | 1,540 |
package io.eels
import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.{Executors, LinkedBlockingQueue}
import com.sksamuel.exts.Logging
import com.sksamuel.exts.concurrent.ExecutorImplicits._
import io.eels.schema.StructType
import scala.util.control.NonFatal
object SourceFrame {
  // Sentinel chunk signalling the consuming iterator that no more rows will arrive.
  private val Poison = List(Row.Sentinel)
  // Shared pool for part-reading tasks; cached threads are reclaimed when idle.
  private val executor = Executors.newCachedThreadPool()
}
/**
 * A Frame backed by a Source: all parts of the source are read concurrently on a
 * shared thread pool and their row chunks are funnelled through a bounded queue to
 * the single consuming iterator. A poison-pill sentinel marks end-of-stream.
 */
class SourceFrame(source: Source, listener: Listener = NoopListener) extends Frame with Logging {

  override lazy val schema: StructType = source.schema

  /**
   * Returns a closeable iterator over all rows of the source.
   * Closing the iterator unblocks the consumer by enqueuing the sentinel.
   */
  def rows(): CloseableIterator[Row] = {
    // Counts finished reader tasks; the last one to finish enqueues the sentinel.
    val completed = new AtomicInteger(0)

    // by using a list of rows we reduce contention on the queue
    val queue = new LinkedBlockingQueue[Seq[Row]](100)

    val parts = source.parts()
    logger.debug(s"Submitting ${parts.size} parts to executor")

    // Bug fix: with zero parts no reader task would ever run, so the sentinel was
    // never enqueued and the consuming iterator blocked forever on queue.take.
    if (parts.isEmpty) {
      logger.debug("Source has no parts; adding sentinel to shutdown queue")
      queue.put(SourceFrame.Poison)
    }

    parts.foreach { part =>
      SourceFrame.executor.submit {
        try {
          part.iterator().foreach { rows =>
            queue.put(rows)
            rows.foreach(listener.onNext)
          }
        } catch {
          case NonFatal(e) =>
            // Best-effort: log and fall through so the completion count still advances.
            logger.error("Error while reading from source", e)
        }
        // once all the reading tasks are complete we need to indicate that we
        // are finished with the queue, so we add a sentinel for the reading thread to pick up
        // by using an atomic int, we know only one thread will get inside the condition
        if (completed.incrementAndGet == parts.size) {
          logger.debug("All parts completed; adding sentinel to shutdown queue")
          queue.put(SourceFrame.Poison)
        }
      }
    }

    new CloseableIterator[Row] {

      override def close(): Unit = {
        super.close()
        // Unblock a consumer waiting on take(); readers may still be running.
        queue.put(SourceFrame.Poison)
      }

      override val iterator: Iterator[Row] =
        Iterator.continually(queue.take).takeWhile(_ != SourceFrame.Poison).flatten
    }
  }
}
| stheppi/eel | eel-core/src/main/scala/io/eels/SourceFrame.scala | Scala | apache-2.0 | 1,932 |
package x
object Main {
  def main(args:Array[String]):Unit =
    // Two source collections consumed by the transformed block below.
    val arr1 = new MyArr[Int]()
    val arr2 = new MyArr[Int]()
    // X.process appears to be a CPS/async macro: `await` suspends on the CBM values
    // inside the withFilter/flatMap/map2 chain — NOTE(review): exact rewriting is
    // defined by the macro in the companion test sources, not visible here.
    val r = X.process{
      arr1.withFilter(x => x == await(CBM.pure(1)))
        .flatMap(x =>
          arr2.withFilter( y => y == await(CBM.pure(2)) ).
            map2( y => x + y )
        )
    }
    println(r)
}
| dotty-staging/dotty | tests/pos-macros/i10211/Test_2.scala | Scala | apache-2.0 | 359 |
/*
* Copyright 2013 http4s.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.http4s
import cats.kernel.laws.discipline._
import org.http4s.laws.discipline.arbitrary._
final class UriHostSuite extends Http4sSuite {
  // Verify that the cats-kernel Order and Hash instances for Uri.Host are lawful,
  // using the generators from org.http4s.laws.discipline.arbitrary.
  checkAll("Order[Uri.Host]", OrderTests[Uri.Host].order)
  checkAll("Hash[Uri.Host]", HashTests[Uri.Host].hash)
}
| http4s/http4s | tests/shared/src/test/scala/org/http4s/UriHostSuite.scala | Scala | apache-2.0 | 862 |
package pl.arapso.scaffoldings.akka.web
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.http.scaladsl.server.{Directives, Route}
import akka.stream.ActorMaterializer
import spray.json._
import scala.util.Random
// Payload returned by the user API; serialized to JSON via JsonSupport.userFormat.
final case class User(firstName: String, lastName: String)
// Mixes spray-json marshalling support into routes and supplies the User format.
trait JsonSupport extends SprayJsonSupport with DefaultJsonProtocol {
  // Derive a JSON format for the two-field User case class.
  implicit val userFormat = jsonFormat2(User)
}
/** Exposes a single `GET /users` route that returns five randomly generated users. */
class UserService extends Directives with JsonSupport {

  val FirstNames = List("Michael", "James", "John", "Robert", "David", "William", "Mary", "Christopher", "Joseph", "Richard")
  val LastNames = List("Smith", "Johnson", "Williams", "Brown", "Jones", "Garcia", "Rodriguez", "Miller", "Martinez", "Davis")

  // Pick one element of `candidates` uniformly at random.
  private def pickRandom(candidates: List[String]): String =
    candidates(Random.nextInt(candidates.size))

  // Assemble one user from randomly chosen first and last names.
  private def randomUser: User =
    User(pickRandom(FirstNames), pickRandom(LastNames))

  // Produce `count` freshly generated users.
  private def randomUsers(count: Int): List[User] =
    List.fill(count)(randomUser)

  val route: Route = path("users") {
    complete(randomUsers(5))
  }
}
/**
 * Application entry point: serves the random-user API over HTTP on localhost:9191.
 */
object BankUsersApi extends App {
  implicit val system = ActorSystem()
  // Fixed misspelled identifier (was `materlializer`); implicit resolution is
  // by type, so behavior is unchanged.
  implicit val materializer = ActorMaterializer()

  val userService = new UserService()

  // The returned Future[ServerBinding] is intentionnot awaited here; the running
  // actor system keeps the JVM alive after `App` finishes its body.
  Http().bindAndHandle(userService.route, "localhost", 9191)
}
| arapso-scaffoldings/scala | scala-tutor/akka/src/main/scala/pl/arapso/scaffoldings/akka/web/BankUsersApi.scala | Scala | apache-2.0 | 1,406 |
package odfi.h2dl.indesign.session
import com.idyria.osi.ooxoo.model.producers
import com.idyria.osi.ooxoo.model.ModelBuilder
import com.idyria.osi.ooxoo.model.producer
import com.idyria.osi.ooxoo.model.out.markdown.MDProducer
import com.idyria.osi.ooxoo.model.out.scala.ScalaProducer
// Two producers consume this model: one generates Scala sources, one Markdown docs.
@producers(Array(
    new producer(value = classOf[ScalaProducer]),
    new producer(value = classOf[MDProducer])))
// OOXOO model: a "Session" element (id + creation time + named values) and a
// "Sessions" container aggregating many of them. The string-DSL calls below
// mutate the builder's element stack as a side effect, so ordering matters.
object SessionModel extends ModelBuilder {

    "Session" is {
        attribute("id")
        attribute("createdOn") ofType ("time")
        // Emit a trait plus a custom implementation class for this element.
        elementsStack.head.makeTraitAndUseCustomImplementation

        // parameters content: named values whose payload is CDATA text.
        "Value" multiple {
            attribute("name")
            ofType("string")
            "Content" multiple "cdata"
        }
    }

    "Sessions" is {
        importElement("Session").setMultiple
        elementsStack.head.makeTraitAndUseCustomImplementation
    }
}
} | richnou/h2dl-indesign | src/main/scala/odfi/h2dl/indesign/session/SessionModel.xmodel.scala | Scala | gpl-2.0 | 856 |
package redis.protocol
import akka.util.ByteString
import scala.annotation.tailrec
import scala.collection.mutable
import scala.util.Try
import redis.MultiBulkConverter
/** Base type for every decoded reply of the Redis wire protocol (RESP). */
sealed trait RedisReply {
  /** Raw payload bytes; MultiBulk replies throw NoSuchElementException (no single payload). */
  def toByteString: ByteString
  /** Payload as an Option; MultiBulk replies throw NoSuchElementException (no single payload). */
  def asOptByteString: Option[ByteString]
}
/** A RESP simple-string reply ("+..."), e.g. the "+OK" status. */
case class Status(status: ByteString) extends RedisReply {
  /** True iff the status is exactly "OK". */
  def toBoolean: Boolean = status == Status.okByteString

  override def toString = status.utf8String

  def toByteString: ByteString = status

  def asOptByteString: Option[ByteString] = Some(status)
}

object Status {
  // Cached "OK" bytes so toBoolean avoids re-encoding on every comparison.
  val okByteString = ByteString("OK")
}
/** A RESP error reply ("-..."), carrying the server's error message bytes. */
case class Error(error: ByteString) extends RedisReply {
  override def toString = error.utf8String

  def toByteString: ByteString = error

  def asOptByteString: Option[ByteString] = Some(error)
}
/** A RESP integer reply (":..."); the digits are kept as raw bytes and parsed on demand. */
case class Integer(i: ByteString) extends RedisReply {
  def toLong: Long = ParseNumber.parseLong(i)

  def toInt: Int = ParseNumber.parseInt(i)

  // Redis encodes booleans as integers; "1" means true.
  def toBoolean = i == Integer.trueByteString

  override def toString = i.utf8String

  def toByteString: ByteString = i

  def asOptByteString: Option[ByteString] = Some(i)
}

object Integer {
  val trueByteString = ByteString("1")
}
/** A RESP bulk-string reply ("$..."); None represents the null bulk string ("$-1"). */
case class Bulk(response: Option[ByteString]) extends RedisReply {
  // looks wrong
  // NOTE(review): throws NoSuchElementException for a null bulk reply (None);
  // the original author already flagged this — verify callers never hit it.
  override def toString = response.map(_.utf8String).get

  /** Throws NoSuchElementException when the reply is the null bulk string. */
  def toByteString: ByteString = response.get

  def toOptString: Option[String] = response.map(_.utf8String)

  def asOptByteString: Option[ByteString] = response
}
/** A RESP array reply ("*..."); None represents the null array ("*-1"). */
case class MultiBulk(responses: Option[Vector[RedisReply]]) extends RedisReply {
  // A multi-bulk reply has no single payload, so both accessors are unsupported.
  def toByteString: ByteString = throw new NoSuchElementException()

  def asOptByteString: Option[ByteString] = throw new NoSuchElementException()

  /** Convert the nested replies to A via the implicit converter, capturing failures. */
  def asTry[A](implicit convert: MultiBulkConverter[A]): Try[A] = convert.to(this)

  def asOpt[A](implicit convert: MultiBulkConverter[A]): Option[A] = asTry(convert).toOption
}
// Presumably an accumulator for a multi-bulk reply still being decoded: `acc`
// holds replies decoded so far. NOTE(review): not referenced elsewhere in this
// file — verify external usage before relying on its semantics.
case class PartialMultiBulk(i: Int, acc: mutable.Buffer[RedisReply]) extends RedisReply {
  override def toByteString: ByteString = throw new NoSuchElementException()

  override def asOptByteString: Option[ByteString] = throw new NoSuchElementException()
}
/**
 * Result of decoding bytes into a value of type A: either complete
 * (FullyDecoded) or suspended waiting for more input (PartiallyDecoded).
 */
sealed trait DecodeResult[+A] {
  /** Leftover bytes (FullyDecoded) or bytes buffered for the continuation (PartiallyDecoded). */
  def rest: ByteString

  def isFullyDecoded: Boolean

  // NOTE(review): the type parameter B is unused here; kept for source compatibility.
  def foreach[B](f: A => Unit): DecodeResult[Unit] = this match {
    // Defer the effect: re-run the stored continuation on buffered + incoming bytes.
    case p: PartiallyDecoded[A] => PartiallyDecoded(ByteString(), bs => p.f(p.rest ++ bs).foreach(f))
    case fd: FullyDecoded[A] => FullyDecoded(f(fd.result), fd.rest)
  }

  def map[B](f: A => B): DecodeResult[B] = this match {
    case p: PartiallyDecoded[A] => PartiallyDecoded(ByteString(), bs => p.f(p.rest ++ bs).map(f))
    case fd: FullyDecoded[A] => FullyDecoded(f(fd.result), fd.rest)
  }

  /** Monadic bind; `f` also receives the leftover bytes so decoding can continue from them. */
  def flatMap[B](f: (A, ByteString) => DecodeResult[B]): DecodeResult[B] = this match {
    case p: PartiallyDecoded[A] => PartiallyDecoded(ByteString(), bs => p.f(p.rest ++ bs).flatMap(f))
    case fd: FullyDecoded[A] => f(fd.result, fd.rest)
  }

  /** Feed more bytes: resume a partial decode, or append to a finished decode's leftovers. */
  def run(next: ByteString): DecodeResult[A] = this match {
    case p: PartiallyDecoded[A] => p.f(p.rest ++ next)
    case fd: FullyDecoded[A] => FullyDecoded(fd.result, fd.rest ++ next)
  }
}
/** A decode that stopped for lack of input; `f` resumes it once more bytes (appended to `rest`) arrive. */
case class PartiallyDecoded[A](rest: ByteString, f: ByteString => DecodeResult[A]) extends DecodeResult[A] {
  override def isFullyDecoded: Boolean = false
}

/** A finished decode: `result` plus the unconsumed bytes in `rest`. */
case class FullyDecoded[A](result: A, rest: ByteString) extends DecodeResult[A] {
  override def isFullyDecoded: Boolean = true
}
object DecodeResult {
  /**
   * A completed decode of the unit value with no leftover bytes; shared constant.
   *
   * Fixed: previously passed the `Unit` *companion object* where a `Unit` value
   * was expected — that only compiled via value discarding (warns on modern
   * Scala 2, errors in Scala 3). `()` is the actual unit value; the resulting
   * runtime value is identical.
   */
  val unit: DecodeResult[Unit] = FullyDecoded((), ByteString.empty)
}
/**
 * Incremental decoder for the Redis wire protocol (RESP). The first byte of a
 * reply identifies its kind; lines are terminated by CRLF. Every decoder below
 * returns a DecodeResult so decoding can suspend and resume as bytes arrive.
 */
object RedisProtocolReply {
  // RESP type markers (first byte of every reply).
  val ERROR = '-'
  val STATUS = '+'
  val INTEGER = ':'
  val BULK = '$'
  val MULTIBULK = '*'

  // CRLF line terminator used by the protocol.
  val LS = "\r\n".getBytes("UTF-8")

  /**
   * Decode one reply from `bs`, dispatching on the type byte; yields a
   * continuation when `bs` is empty, and throws on an unknown type byte.
   */
  def decodeReply(bs: ByteString): DecodeResult[RedisReply] = {
    if (bs.isEmpty) {
      PartiallyDecoded(bs, decodeReply)
    } else {
      // ERROR/STATUS/... are stable uppercase identifiers, so they act as constant patterns.
      bs.head match {
        case ERROR => decodeString(bs.tail).map(Error(_))
        case INTEGER => decodeInteger(bs.tail)
        case STATUS => decodeString(bs.tail).map(Status(_))
        case BULK => decodeBulk(bs.tail)
        case MULTIBULK => decodeMultiBulk(bs.tail)
        case _ => throw new Exception("Redis Protocol error: Got " + bs.head + " as initial reply byte >>"+ bs.tail.utf8String)
      }
    }
  }

  // Partial-function variant of decodeReply.
  // NOTE(review): unlike decodeReply, this one is NOT defined for ERROR replies;
  // callers must compose it with decodeReplyError if errors can occur.
  val decodeReplyPF: PartialFunction[ByteString, DecodeResult[RedisReply]] = {
    case bs if bs.head == INTEGER => decodeInteger(bs.tail)
    case bs if bs.head == STATUS => decodeString(bs.tail).map(Status(_))
    case bs if bs.head == BULK => decodeBulk(bs.tail)
    case bs if bs.head == MULTIBULK => decodeMultiBulk(bs.tail)
  }

  // Single-type partial functions, useful when the expected reply kind is known.
  val decodeReplyStatus: PartialFunction[ByteString, DecodeResult[Status]] = {
    case bs if bs.head == STATUS => decodeString(bs.tail).map(Status(_))
  }

  val decodeReplyInteger: PartialFunction[ByteString, DecodeResult[Integer]] = {
    case bs if bs.head == INTEGER => decodeInteger(bs.tail)
  }

  val decodeReplyBulk: PartialFunction[ByteString, DecodeResult[Bulk]] = {
    case bs if bs.head == BULK => decodeBulk(bs.tail)
  }

  val decodeReplyMultiBulk: PartialFunction[ByteString, DecodeResult[MultiBulk]] = {
    case bs if bs.head == MULTIBULK => decodeMultiBulk(bs.tail)
  }

  val decodeReplyError: PartialFunction[ByteString, DecodeResult[Error]] = {
    case bs if bs.head == ERROR => decodeString(bs.tail).map(Error(_))
  }

  /** Decode one CRLF-terminated line and wrap its digits as an Integer reply. */
  def decodeInteger(bs: ByteString): DecodeResult[Integer] = {
    decodeString(bs).map { (string) => Integer(string) }
  }

  /**
   * Decode one line: everything up to (and excluding) the CRLF terminator.
   * Suspends into a continuation if no '\n' has arrived yet.
   */
  def decodeString(bs: ByteString): DecodeResult[ByteString] = {
    val index = bs.indexOf('\n')
    if (index >= 0 && bs.length >= index + 1) {
      // Drop the trailing CRLF from the payload; leftovers start after the '\n'.
      val reply = bs.take(index + 1 - LS.length)
      val tail = bs.drop(index + 1)
      val r = FullyDecoded(reply, tail)
      r
    } else {
      PartiallyDecoded(bs, decodeString)
    }
  }

  /**
   * Decode a bulk string: an integer length line, then that many payload bytes
   * followed by CRLF. A negative length denotes the null bulk string (None).
   */
  def decodeBulk(bs: ByteString): DecodeResult[Bulk] = {
    def decodeBulkBody(integer: Integer, bsRest: ByteString): DecodeResult[Bulk] = {
      val i = integer.toInt
      if (i < 0) {
        FullyDecoded(Bulk(None), bsRest)
      } else if (bsRest.length < (i + LS.length)) {
        // Payload (plus its CRLF) not fully buffered yet: suspend with the known length.
        PartiallyDecoded(bsRest, decodeBulkBody(integer, _))
      } else {
        val data = bsRest.take(i)
        FullyDecoded(Bulk(Some(data)), bsRest.drop(i).drop(LS.length))
      }
    }
    decodeInteger(bs).flatMap(decodeBulkBody)
  }

  /**
   * Decode a multi-bulk (array) reply: an element-count line followed by that
   * many nested replies. Negative count = null array, zero = empty array.
   */
  def decodeMultiBulk(bs: ByteString): DecodeResult[MultiBulk] = {
    decodeInteger(bs).flatMap { (integer, bsRest) =>
      val i = integer.toInt
      if (i < 0) {
        FullyDecoded(MultiBulk(None), bsRest)
      } else if (i == 0) {
        FullyDecoded(MultiBulk(Some(Vector.empty)), bsRest)
      } else {
        val builder = Vector.newBuilder[RedisReply]
        builder.sizeHint(i)
        bulks(i, builder, bsRest)
      }
    }
  }

  /**
   * Decode `i` remaining nested replies into `builder`, suspending (and later
   * re-entering via flatMap) whenever the input runs dry mid-array.
   */
  def bulks(i: Int, builder: mutable.Builder[RedisReply, Vector[RedisReply]], byteString: ByteString): DecodeResult[MultiBulk] = {
    @tailrec
    def helper(i: Int, bs: ByteString): DecodeResult[Int] = {
      if (i > 0) {
        // Appending to `builder` inside map means already-decoded elements are
        // retained even when the overall decode suspends.
        val reply = decodeReply(bs)
          .map { r =>
            builder += r
            i - 1
          }
        if (reply.isFullyDecoded)
          helper(i - 1, reply.rest)
        else
          reply
      } else {
        FullyDecoded(0, bs)
      }
    }
    helper(i, byteString).flatMap { (i, bs) =>
      if (i > 0) {
        // Suspended mid-array: keep decoding the remaining elements.
        bulks(i, builder, bs)
      } else {
        FullyDecoded[MultiBulk](MultiBulk(Some(builder.result())), bs)
      }
    }
  }
}
| npeters/rediscala | src/main/scala/redis/protocol/RedisProtocolReply.scala | Scala | apache-2.0 | 7,489 |
/*
* Copyright (C) 2005, The Beangle Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.beangle.data.hibernate
import java.net.URL
import java.{util => ju}
import javax.sql.DataSource
import org.beangle.commons.io.ResourcePatternResolver
import org.beangle.commons.logging.Logging
import org.beangle.data.hibernate.cfg.MappingService
import org.beangle.data.jdbc.engine.Engines
import org.beangle.data.jdbc.meta.Database
import org.beangle.data.orm.Mappings
import org.hibernate.boot.MetadataSources
import org.hibernate.boot.registry.StandardServiceRegistryBuilder
import org.hibernate.cfg.{AvailableSettings, Configuration}
object ConfigurationBuilder {
  /**
   * Build a Configuration from classpath conventions:
   * `META-INF/hibernate.cfg.xml` config files and `META-INF/beangle/orm.xml` mappings.
   *
   * NOTE(review): constructed with a null DataSource and `engine` left unset, so
   * `getMappings` inside `build()` would dereference the null DataSource — verify
   * how this entry point is meant to be used.
   */
  def default: Configuration = {
    val resolver = new ResourcePatternResolver
    val sfb = new ConfigurationBuilder(null)
    sfb.configLocations = resolver.getResources("classpath*:META-INF/hibernate.cfg.xml")
    sfb.ormLocations = resolver.getResources("classpath*:META-INF/beangle/orm.xml")
    sfb.build()
  }
}
class ConfigurationBuilder(val dataSource: DataSource) extends Logging {

  // Locations of hibernate.cfg.xml resources applied to the service registry.
  var configLocations: Seq[URL] = _

  // Locations of beangle orm.xml mapping resources.
  var ormLocations: Seq[URL] = _

  var properties = new ju.Properties

  // Optional explicit engine name; when absent, it is probed from a live connection.
  var engine: Option[String] = None

  /**
   * Copy every `hibernate.*` entry from the JVM system properties into
   * `properties`, logging any value that overrides an existing one.
   * (The original doc also claimed this disables JDBC metadata lookup;
   * that actually happens in `customProperties`.)
   */
  protected def importSysProperties(): Unit = {
    val sysProps = System.getProperties
    val keys = sysProps.propertyNames
    while (keys.hasMoreElements) {
      val key = keys.nextElement.asInstanceOf[String]
      if (key.startsWith("hibernate.")) {
        val value = sysProps.getProperty(key)
        val overrided = properties.containsKey(key)
        properties.put(key, value)
        if (overrided) logger.info(s"Override hibernate property $key=$value")
      }
    }
  }

  /** Apply fixed settings: JDBC metadata probing, datasource and misc tuning. */
  protected def customProperties(): Unit = {
    // Skip JDBC metadata probing when a dialect is already configured explicitly.
    val useJdbcMetaName = "hibernate.temp.use_jdbc_metadata_defaults"
    if (properties.containsKey(AvailableSettings.DIALECT) && !properties.containsKey(useJdbcMetaName)) {
      properties.put(useJdbcMetaName, "false")
    } else {
      properties.put(useJdbcMetaName, "true")
    }
    if (dataSource != null) properties.put(AvailableSettings.DATASOURCE, dataSource)
    properties.put("hibernate.connection.handling_mode", "DELAYED_ACQUISITION_AND_HOLD")
    properties.put("hibernate.ejb.metamodel.population", "disabled")
  }

  /**
   * Assemble the Hibernate Configuration. Order matters: the MappingService
   * (built from the orm.xml resources) must be registered on the registry
   * builder before the registry is built and the cfg.xml files are applied.
   */
  def build(): Configuration = {
    importSysProperties()
    customProperties()
    val standardRegistryBuilder = new StandardServiceRegistryBuilder()

    val mappings = getMappings
    standardRegistryBuilder.addService(classOf[MappingService], new MappingService(mappings))

    if (null != configLocations) {
      for (resource <- configLocations)
        standardRegistryBuilder.configure(resource)
    }
    standardRegistryBuilder.applySettings(this.properties)
    val standardRegistry = standardRegistryBuilder.build()
    val metadataSources = new MetadataSources(standardRegistry)
    val configuration = new Configuration(metadataSources)
    configuration.addProperties(this.properties)
    configuration
  }

  /** Bind the orm.xml resources against a Database model for the resolved engine. */
  private def getMappings: Mappings = {
    val eng = engine match {
      case Some(e) => Engines.forName(e)
      case None =>
        // No explicit engine: probe the product name from a short-lived connection.
        val connection = dataSource.getConnection
        val dbProductName = connection.getMetaData.getDatabaseProductName
        connection.close()
        Engines.forName(dbProductName)
    }
    val mappings = new Mappings(new Database(eng), ormLocations.toList)
    mappings.autobind()
    mappings
  }
}
| beangle/data | hibernate/src/main/scala/org/beangle/data/hibernate/ConfigurationBuilder.scala | Scala | lgpl-3.0 | 4,227 |
package be.wegenenverkeer.atomium.play
import be.wegenenverkeer.atomium.api.Codec
import play.api.libs.json.{ Format, Json }
/**
 * Codec that round-trips entries through Play JSON text.
 *
 * @param entryFormat the Play `Format` used for (de)serializing the entry
 * @tparam E the entry type
 */
case class PlayJsonEntryCodec[E]()(implicit val entryFormat: Format[E]) extends Codec[E, String] {

  override def getMimeType: String = "application/json"

  // Serialize via the implicit Format, then render the JSON tree as text.
  override def encode(entry: E): String = {
    val tree = Json.toJson(entry)
    tree.toString
  }

  // Parse the JSON text, then materialize it via the implicit Format.
  override def decode(encoded: String): E = {
    val tree = Json.parse(encoded)
    tree.as[E]
  }
}
| WegenenVerkeer/atomium | modules/play26/src/main/scala/be/wegenenverkeer/atomium/play/PlayJsonEntryCodec.scala | Scala | mit | 567 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.storage
import scala.collection.mutable
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.scheduler._
/**
 * :: DeveloperApi ::
 * A SparkListener that tracks the storage status of every executor's block manager.
 *
 * All reads and mutations are guarded by `synchronized`, so this class is
 * thread-safe (unlike JobProgressListener).
 */
@DeveloperApi
class StorageStatusListener extends SparkListener {
  // Tracks only cached blocks, i.e. blocks whose storage level is not StorageLevel.NONE.
  private[storage] val executorIdToStorageStatus = mutable.Map[String, StorageStatus]()

  def storageStatusList: Seq[StorageStatus] = synchronized {
    executorIdToStorageStatus.values.toSeq
  }

  /** Fold a batch of block status updates into the given executor's storage status. */
  private def applyBlockUpdates(execId: String, updatedBlocks: Seq[(BlockId, BlockStatus)]): Unit = {
    for (storageStatus <- executorIdToStorageStatus.get(execId)) {
      updatedBlocks.foreach { case (blockId, newStatus) =>
        if (newStatus.storageLevel == StorageLevel.NONE) {
          storageStatus.removeBlock(blockId)
        } else {
          storageStatus.updateBlock(blockId, newStatus)
        }
      }
    }
  }

  /** Drop every cached block belonging to an RDD that has been unpersisted. */
  private def dropUnpersistedRdd(unpersistedRDDId: Int): Unit = {
    storageStatusList.foreach { storageStatus =>
      storageStatus.rddBlocksById(unpersistedRDDId).foreach { case (blockId, _) =>
        storageStatus.removeBlock(blockId)
      }
    }
  }

  override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit = synchronized {
    val info = taskEnd.taskInfo
    val metrics = taskEnd.taskMetrics
    if (info != null && metrics != null) {
      val updatedBlocks = metrics.updatedBlocks.getOrElse(Seq.empty[(BlockId, BlockStatus)])
      if (updatedBlocks.nonEmpty) {
        applyBlockUpdates(info.executorId, updatedBlocks)
      }
    }
  }

  override def onUnpersistRDD(unpersistRDD: SparkListenerUnpersistRDD): Unit = synchronized {
    dropUnpersistedRdd(unpersistRDD.rddId)
  }

  override def onBlockManagerAdded(blockManagerAdded: SparkListenerBlockManagerAdded): Unit = {
    synchronized {
      val blockManagerId = blockManagerAdded.blockManagerId
      executorIdToStorageStatus(blockManagerId.executorId) =
        new StorageStatus(blockManagerId, blockManagerAdded.maxMem)
    }
  }

  override def onBlockManagerRemoved(blockManagerRemoved: SparkListenerBlockManagerRemoved): Unit = {
    synchronized {
      executorIdToStorageStatus.remove(blockManagerRemoved.blockManagerId.executorId)
    }
  }
}
| ArvinDevel/onlineAggregationOnSparkV2 | core/src/main/scala/org/apache/spark/storage/StorageStatusListener.scala | Scala | apache-2.0 | 3,497 |
package org.jetbrains.plugins.scala.testingSupport.scalatest.scala2_10.scalatest2_2_1
import org.jetbrains.plugins.scala.SlowTests
import org.jetbrains.plugins.scala.testingSupport.scalatest.staticStringTest._
import org.junit.experimental.categories.Category
/**
 * Aggregates every per-style "static string" test trait (FeatureSpec, FlatSpec,
 * FreeSpec, FunSpec, FunSuite, PropSpec, WordSpec, methods) into a single suite
 * running against the Scala 2.10 / ScalaTest 2.2.1 base.
 *
 * @author Roman.Shein
 * @since 24.06.2015.
 */
@Category(Array(classOf[SlowTests]))
class Scalatest2_10_2_2_1_StaticStringTest extends Scalatest2_10_2_2_1_Base with FeatureSpecStaticStringTest with
  FlatSpecStaticStringTest with FreeSpecStaticStringTest with FunSpecStaticStringTest with FunSuiteStaticStringTest with
  PropSpecStaticStringTest with WordSpecStaticStringTest with MethodsStaticStringTest
| jastice/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/testingSupport/scalatest/scala2_10/scalatest2_2_1/Scalatest2_10_2_2_1_StaticStringTest.scala | Scala | apache-2.0 | 673 |
package models.services
import java.util.UUID
import javax.inject.Inject
import com.mohiva.play.silhouette.api.util.Clock
import models.AuthToken
import models.daos.AuthTokenDAO
import org.joda.time.DateTimeZone
import play.api.libs.concurrent.Execution.Implicits._
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps
/**
 * Handles actions to auth tokens.
 *
 * @param authTokenDAO The auth token DAO implementation.
 * @param clock        The clock instance.
 */
class AuthTokenServiceImpl @Inject() (authTokenDAO: AuthTokenDAO, clock: Clock) extends AuthTokenService {

  /**
   * Creates a new auth token and saves it in the backing store.
   *
   * @param userID The user ID for which the token should be created.
   * @param expiry The duration a token expires.
   * @return The saved auth token.
   */
  def create(userID: UUID, expiry: FiniteDuration = 5 minutes) = {
    // Expiry is computed in UTC so it compares consistently with the cutoff used by `clean`.
    val token = AuthToken(UUID.randomUUID(), userID, clock.now.withZone(DateTimeZone.UTC).plusSeconds(expiry.toSeconds.toInt))
    authTokenDAO.save(token)
  }

  /**
   * Validates a token ID.
   *
   * @param id The token ID to validate.
   * @return The token if it's valid, None otherwise.
   */
  def validate(id: UUID) = authTokenDAO.find(id)

  /**
   * Cleans expired tokens.
   *
   * @return The list of deleted tokens.
   */
  def clean = authTokenDAO.findExpired(clock.now.withZone(DateTimeZone.UTC)).flatMap { tokens =>
    // Future.traverse fuses the original `Future.sequence(tokens.map(...))` into one
    // idiomatic pass; each removal resolves to the token it deleted.
    Future.traverse(tokens) { token =>
      authTokenDAO.remove(token.id).map(_ => token)
    }
  }
}
| serversideapps/silhmojs | server/app/models/services/AuthTokenServiceImpl.scala | Scala | apache-2.0 | 1,560 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.python
import java.io._
import com.google.common.io.Closeables
import org.apache.spark.{SparkEnv, SparkException}
import org.apache.spark.io.NioBufferedFileInputStream
import org.apache.spark.memory.{MemoryConsumer, SparkOutOfMemoryError, TaskMemoryManager}
import org.apache.spark.serializer.SerializerManager
import org.apache.spark.sql.catalyst.expressions.UnsafeRow
import org.apache.spark.unsafe.Platform
import org.apache.spark.unsafe.memory.MemoryBlock
/**
* A RowQueue is an FIFO queue for UnsafeRow.
*
* This RowQueue is ONLY designed and used for Python UDF, which has only one writer and only one
* reader, the reader ALWAYS ran behind the writer. See the doc of class [[BatchEvalPythonExec]]
* on how it works.
*/
private[python] trait RowQueue {
  /**
   * Add a row to the end of it, returns true iff the row has been added to the queue.
   */
  def add(row: UnsafeRow): Boolean

  /**
   * Retrieve and remove the first row, returns null if it's empty.
   *
   * It can only be called after add is called, otherwise it will fail (NPE).
   *
   * Implementations may reuse the returned row object across calls (see the
   * `resultRow` fields in the implementations below), so callers must copy
   * the row if they retain it past the next call.
   */
  def remove(): UnsafeRow

  /**
   * Cleanup all the resources.
   */
  def close(): Unit
}
/**
* A RowQueue that is based on in-memory page. UnsafeRows are appended into it until it's full.
* Another thread could read from it at the same time (behind the writer).
*
* The format of UnsafeRow in page:
* [4 bytes to hold length of record (N)] [N bytes to hold record] [...]
*
* -1 length means end of page.
*/
private[python] abstract class InMemoryRowQueue(val page: MemoryBlock, numFields: Int)
  extends RowQueue {
  private val base: AnyRef = page.getBaseObject
  private val endOfPage: Long = page.getBaseOffset + page.size
  // the first location where a new row would be written
  private var writeOffset = page.getBaseOffset
  // points to the start of the next row to read
  private var readOffset = page.getBaseOffset
  // Reused for every remove() call; callers must copy the row if they retain it.
  private val resultRow = new UnsafeRow(numFields)

  def add(row: UnsafeRow): Boolean = synchronized {
    val size = row.getSizeInBytes
    if (writeOffset + 4 + size > endOfPage) {
      // if there is not enough space in this page to hold the new record
      if (writeOffset + 4 <= endOfPage) {
        // if there's extra space at the end of the page, store a special "end-of-page" length (-1)
        Platform.putInt(base, writeOffset, -1)
      }
      false
    } else {
      // Record layout: [4-byte length][row bytes] (see the class doc above).
      Platform.putInt(base, writeOffset, size)
      Platform.copyMemory(row.getBaseObject, row.getBaseOffset, base, writeOffset + 4, size)
      writeOffset += 4 + size
      true
    }
  }

  def remove(): UnsafeRow = synchronized {
    assert(readOffset <= writeOffset, "reader should not go beyond writer")
    // Either the page is exhausted or the writer stored the -1 end-of-page marker.
    if (readOffset + 4 > endOfPage || Platform.getInt(base, readOffset) < 0) {
      null
    } else {
      val size = Platform.getInt(base, readOffset)
      // Point into the page without copying; only valid until the next remove().
      resultRow.pointTo(base, readOffset + 4, size)
      readOffset += 4 + size
      resultRow
    }
  }
}
/**
* A RowQueue that is backed by a file on disk. This queue will stop accepting new rows once any
* reader has begun reading from the queue.
*/
private[python] case class DiskRowQueue(
    file: File,
    fields: Int,
    serMgr: SerializerManager) extends RowQueue {

  // Write side: rows are appended (possibly encrypted) as [4-byte length][row bytes].
  private var out = new DataOutputStream(serMgr.wrapForEncryption(
    new BufferedOutputStream(new FileOutputStream(file.toString))))
  // Bytes written but not yet consumed by remove().
  private var unreadBytes = 0L

  // Read side; opened lazily by the first remove(), which also closes `out`.
  private var in: DataInputStream = _
  // Reused for every remove() call; callers must copy the row if they retain it.
  private val resultRow = new UnsafeRow(fields)

  def add(row: UnsafeRow): Boolean = synchronized {
    if (out == null) {
      // Another thread is reading, stop writing this one
      return false
    }
    out.writeInt(row.getSizeInBytes)
    out.write(row.getBytes)
    unreadBytes += 4 + row.getSizeInBytes
    true
  }

  def remove(): UnsafeRow = synchronized {
    if (out != null) {
      // First read: flush and close the writer, switching the queue to read-only mode.
      out.close()
      out = null
      in = new DataInputStream(serMgr.wrapForEncryption(
        new NioBufferedFileInputStream(file)))
    }

    if (unreadBytes > 0) {
      val size = in.readInt()
      val bytes = new Array[Byte](size)
      in.readFully(bytes)
      unreadBytes -= 4 + size
      resultRow.pointTo(bytes, size)
      resultRow
    } else {
      null
    }
  }

  def close(): Unit = synchronized {
    // Closeables.close(..., swallowIOException = true): best-effort cleanup.
    Closeables.close(out, true)
    out = null
    Closeables.close(in, true)
    in = null
    if (file.exists()) {
      file.delete()
    }
  }
}
/**
* A RowQueue that has a list of RowQueues, which could be in memory or disk.
*
* HybridRowQueue could be safely appended in one thread, and pulled in another thread in the same
* time.
*/
private[python] case class HybridRowQueue(
    memManager: TaskMemoryManager,
    tempDir: File,
    numFields: Int,
    serMgr: SerializerManager)
  extends MemoryConsumer(memManager) with RowQueue {

  // Each buffer should have at least one row
  private var queues = new java.util.LinkedList[RowQueue]()

  // Queue currently being appended to (logical tail) and currently drained (logical head).
  private var writing: RowQueue = _
  private var reading: RowQueue = _

  // exposed for testing
  private[python] def numQueues(): Int = queues.size()

  /** Memory-pressure callback: convert queued in-memory buffers to disk; returns bytes freed. */
  def spill(size: Long, trigger: MemoryConsumer): Long = {
    if (trigger == this) {
      // When it's triggered by itself, it should write upcoming rows into disk instead of copying
      // the rows already in the queue.
      return 0L
    }
    var released = 0L
    synchronized {
      // poll out all the buffers and add them back in the same order to make sure that the rows
      // are in correct order.
      val newQueues = new java.util.LinkedList[RowQueue]()
      while (!queues.isEmpty) {
        val queue = queues.remove()
        // The last queue in the list is left untouched: it may still be receiving writes.
        val newQueue = if (!queues.isEmpty && queue.isInstanceOf[InMemoryRowQueue]) {
          val diskQueue = createDiskQueue()
          var row = queue.remove()
          while (row != null) {
            diskQueue.add(row)
            row = queue.remove()
          }
          released += queue.asInstanceOf[InMemoryRowQueue].page.size()
          queue.close()
          diskQueue
        } else {
          queue
        }
        newQueues.add(newQueue)
      }
      queues = newQueues
    }
    released
  }

  private def createDiskQueue(): RowQueue = {
    DiskRowQueue(File.createTempFile("buffer", "", tempDir), numFields, serMgr)
  }

  /** Allocate an in-memory queue if a page can be obtained, otherwise fall back to disk. */
  private def createNewQueue(required: Long): RowQueue = {
    val page = try {
      allocatePage(required)
    } catch {
      case _: SparkOutOfMemoryError =>
        // Allocation failed under memory pressure; signal fallback with null.
        null
    }
    val buffer = if (page != null) {
      new InMemoryRowQueue(page, numFields) {
        // Return the page to the task memory manager when this queue is closed.
        override def close(): Unit = {
          freePage(page)
        }
      }
    } else {
      createDiskQueue()
    }

    synchronized {
      queues.add(buffer)
    }
    buffer
  }

  def add(row: UnsafeRow): Boolean = {
    if (writing == null || !writing.add(row)) {
      // Current queue is full (or none exists yet): open a new one sized for this row.
      writing = createNewQueue(4 + row.getSizeInBytes)
      if (!writing.add(row)) {
        throw new SparkException(s"failed to push a row into $writing")
      }
    }
    true
  }

  def remove(): UnsafeRow = {
    var row: UnsafeRow = null
    if (reading != null) {
      row = reading.remove()
    }
    if (row == null) {
      // Current reading queue is drained: release it and advance to the next one.
      if (reading != null) {
        reading.close()
      }
      synchronized {
        reading = queues.remove()
      }
      assert(reading != null, s"queue should not be empty")
      row = reading.remove()
      assert(row != null, s"$reading should have at least one row")
    }
    row
  }

  def close(): Unit = {
    if (reading != null) {
      reading.close()
      reading = null
    }
    synchronized {
      while (!queues.isEmpty) {
        queues.remove().close()
      }
    }
  }
}
private[python] object HybridRowQueue {
  /** Convenience constructor using the current SparkEnv's serializer manager. */
  def apply(taskMemoryMgr: TaskMemoryManager, file: File, fields: Int): HybridRowQueue = {
    HybridRowQueue(taskMemoryMgr, file, fields, SparkEnv.get.serializerManager)
  }
}
| pgandhi999/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/python/RowQueue.scala | Scala | apache-2.0 | 8,698 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.history
import java.io.{File, FileInputStream, FileWriter, InputStream, IOException}
import java.net.{HttpURLConnection, URL}
import java.nio.charset.StandardCharsets
import java.util.zip.ZipInputStream
import javax.servlet._
import javax.servlet.http.{HttpServletRequest, HttpServletRequestWrapper, HttpServletResponse}
import scala.collection.JavaConverters._
import scala.concurrent.duration._
import scala.language.postfixOps
import com.codahale.metrics.Counter
import com.google.common.io.{ByteStreams, Files}
import org.apache.commons.io.{FileUtils, IOUtils}
import org.apache.hadoop.fs.{FileStatus, FileSystem, Path}
import org.eclipse.jetty.proxy.ProxyServlet
import org.eclipse.jetty.servlet.{ServletContextHandler, ServletHolder}
import org.json4s.JsonAST._
import org.json4s.jackson.JsonMethods
import org.json4s.jackson.JsonMethods._
import org.mockito.Mockito._
import org.openqa.selenium.WebDriver
import org.openqa.selenium.htmlunit.HtmlUnitDriver
import org.scalatest.{BeforeAndAfter, Matchers}
import org.scalatest.concurrent.Eventually
import org.scalatest.mockito.MockitoSugar
import org.scalatest.selenium.WebBrowser
import org.apache.spark._
import org.apache.spark.internal.config._
import org.apache.spark.internal.config.History._
import org.apache.spark.internal.config.Tests.IS_TESTING
import org.apache.spark.internal.config.UI._
import org.apache.spark.status.api.v1.ApplicationInfo
import org.apache.spark.status.api.v1.JobData
import org.apache.spark.ui.SparkUI
import org.apache.spark.util.{ResetSystemProperties, ShutdownHookManager, Utils}
/**
* A collection of tests against the historyserver, including comparing responses from the json
* metrics api to a set of known "golden files". If new endpoints / parameters are added,
* cases should be added to this test suite. The expected outcomes can be generated by running
* the HistoryServerSuite.main. Note that this will blindly generate new expectation files matching
* the current behavior -- the developer must verify that behavior is correct.
*
* Similarly, if the behavior is changed, HistoryServerSuite.main can be run to update the
* expectations. However, in general this should be done with extreme caution, as the metrics
* are considered part of Spark's public api.
*/
class HistoryServerSuite extends SparkFunSuite with BeforeAndAfter with Matchers with MockitoSugar
with JsonTestUtils with Eventually with WebBrowser with LocalSparkContext
with ResetSystemProperties {
private val logDir = getTestResourcePath("spark-events")
private val expRoot = getTestResourceFile("HistoryServerExpectations")
private val storeDir = Utils.createTempDir(namePrefix = "history")
private var provider: FsHistoryProvider = null
private var server: HistoryServer = null
private var port: Int = -1
  /**
   * (Re)create the local disk store and start a HistoryServer backed by an
   * FsHistoryProvider reading the test event logs; `extraConf` entries override
   * the defaults. Requests port 18080 but records the actually bound port.
   */
  def init(extraConf: (String, String)*): Unit = {
    Utils.deleteRecursively(storeDir)
    assert(storeDir.mkdir())
    val conf = new SparkConf()
      .set(HISTORY_LOG_DIR, logDir)
      .set(UPDATE_INTERVAL_S.key, "0")
      .set(IS_TESTING, true)
      .set(LOCAL_STORE_DIR, storeDir.getAbsolutePath())
      .set(EVENT_LOG_STAGE_EXECUTOR_METRICS, true)
      .set(EVENT_LOG_PROCESS_TREE_METRICS, true)
    conf.setAll(extraConf)
    provider = new FsHistoryProvider(conf)
    provider.checkForLogs()
    val securityManager = HistoryServer.createSecurityManager(conf)

    server = new HistoryServer(conf, provider, securityManager, 18080)
    server.initialize()
    server.bind()
    // The server may bind to a different port than the requested 18080.
    port = server.boundPort
  }
def stop(): Unit = {
server.stop()
server = null
}
before {
if (server == null) {
init()
}
}
val cases = Seq(
"application list json" -> "applications",
"completed app list json" -> "applications?status=completed",
"running app list json" -> "applications?status=running",
"minDate app list json" -> "applications?minDate=2015-02-10",
"maxDate app list json" -> "applications?maxDate=2015-02-10",
"maxDate2 app list json" -> "applications?maxDate=2015-02-03T16:42:40.000GMT",
"minEndDate app list json" -> "applications?minEndDate=2015-05-06T13:03:00.950GMT",
"maxEndDate app list json" -> "applications?maxEndDate=2015-05-06T13:03:00.950GMT",
"minEndDate and maxEndDate app list json" ->
"applications?minEndDate=2015-03-16&maxEndDate=2015-05-06T13:03:00.950GMT",
"minDate and maxEndDate app list json" ->
"applications?minDate=2015-03-16&maxEndDate=2015-05-06T13:03:00.950GMT",
"limit app list json" -> "applications?limit=3",
"one app json" -> "applications/local-1422981780767",
"one app multi-attempt json" -> "applications/local-1426533911241",
"job list json" -> "applications/local-1422981780767/jobs",
"job list from multi-attempt app json(1)" -> "applications/local-1426533911241/1/jobs",
"job list from multi-attempt app json(2)" -> "applications/local-1426533911241/2/jobs",
"one job json" -> "applications/local-1422981780767/jobs/0",
"succeeded job list json" -> "applications/local-1422981780767/jobs?status=succeeded",
"succeeded&failed job list json" ->
"applications/local-1422981780767/jobs?status=succeeded&status=failed",
"executor list json" -> "applications/local-1422981780767/executors",
"executor list with executor metrics json" ->
"applications/application_1506645932520_24630151/executors",
"executor list with executor process tree metrics json" ->
"applications/application_1538416563558_0014/executors",
"executor list with executor garbage collection metrics json" ->
"applications/application_1536831636016_59384/1/executors",
"stage list json" -> "applications/local-1422981780767/stages",
"complete stage list json" -> "applications/local-1422981780767/stages?status=complete",
"failed stage list json" -> "applications/local-1422981780767/stages?status=failed",
"one stage json" -> "applications/local-1422981780767/stages/1",
"one stage attempt json" -> "applications/local-1422981780767/stages/1/0",
"stage task summary w shuffle write"
-> "applications/local-1430917381534/stages/0/0/taskSummary",
"stage task summary w shuffle read"
-> "applications/local-1430917381534/stages/1/0/taskSummary",
"stage task summary w/ custom quantiles" ->
"applications/local-1430917381534/stages/0/0/taskSummary?quantiles=0.01,0.5,0.99",
"stage task list" -> "applications/local-1430917381534/stages/0/0/taskList",
"stage task list w/ offset & length" ->
"applications/local-1430917381534/stages/0/0/taskList?offset=10&length=50",
"stage task list w/ sortBy" ->
"applications/local-1430917381534/stages/0/0/taskList?sortBy=DECREASING_RUNTIME",
"stage task list w/ sortBy short names: -runtime" ->
"applications/local-1430917381534/stages/0/0/taskList?sortBy=-runtime",
"stage task list w/ sortBy short names: runtime" ->
"applications/local-1430917381534/stages/0/0/taskList?sortBy=runtime",
"stage list with accumulable json" -> "applications/local-1426533911241/1/stages",
"stage with accumulable json" -> "applications/local-1426533911241/1/stages/0/0",
"stage task list from multi-attempt app json(1)" ->
"applications/local-1426533911241/1/stages/0/0/taskList",
"stage task list from multi-attempt app json(2)" ->
"applications/local-1426533911241/2/stages/0/0/taskList",
"blacklisting for stage" -> "applications/app-20180109111548-0000/stages/0/0",
"blacklisting node for stage" -> "applications/application_1516285256255_0012/stages/0/0",
"rdd list storage json" -> "applications/local-1422981780767/storage/rdd",
"executor node blacklisting" -> "applications/app-20161116163331-0000/executors",
"executor node blacklisting unblacklisting" -> "applications/app-20161115172038-0000/executors",
"executor memory usage" -> "applications/app-20161116163331-0000/executors",
"app environment" -> "applications/app-20161116163331-0000/environment"
// Todo: enable this test when logging the even of onBlockUpdated. See: SPARK-13845
// "one rdd storage json" -> "applications/local-1422981780767/storage/rdd/0"
)
// run a bunch of characterization tests -- just verify the behavior is the same as what is saved
// in the test resource folder
cases.foreach { case (name, path) =>
test(name) {
val (code, jsonOpt, errOpt) = getContentAndCode(path)
code should be (HttpServletResponse.SC_OK)
jsonOpt should be ('defined)
errOpt should be (None)
val exp = IOUtils.toString(new FileInputStream(
new File(expRoot, HistoryServerSuite.sanitizePath(name) + "_expectation.json")))
// compare the ASTs so formatting differences don't cause failures
import org.json4s._
import org.json4s.jackson.JsonMethods._
val jsonAst = parse(clearLastUpdated(jsonOpt.get))
val expAst = parse(exp)
assertValidDataInJson(jsonAst, expAst)
}
}
// SPARK-10873 added the lastUpdated field for each application's attempt,
// the REST API returns the last modified time of EVENT LOG file for this field.
// It is not applicable to hard-code this dynamic field in a static expected file,
// so here we skip checking the lastUpdated field's value (setting it as "").
private def clearLastUpdated(json: String): String = {
if (json.indexOf("lastUpdated") >= 0) {
val subStrings = json.split(",")
for (i <- subStrings.indices) {
if (subStrings(i).indexOf("lastUpdatedEpoch") >= 0) {
subStrings(i) = subStrings(i).replaceAll("(\\\\d+)", "0")
} else if (subStrings(i).indexOf("lastUpdated") >= 0) {
val regex = "\\"lastUpdated\\"\\\\s*:\\\\s*\\".*\\"".r
subStrings(i) = regex.replaceAllIn(subStrings(i), "\\"lastUpdated\\" : \\"\\"")
}
}
subStrings.mkString(",")
} else {
json
}
}
test("download all logs for app with multiple attempts") {
doDownloadTest("local-1430917381535", None)
}
test("download one log for app with multiple attempts") {
(1 to 2).foreach { attemptId => doDownloadTest("local-1430917381535", Some(attemptId)) }
}
// Test that the files are downloaded correctly, and validate them.
def doDownloadTest(appId: String, attemptId: Option[Int]): Unit = {
val url = attemptId match {
case Some(id) =>
new URL(s"${generateURL(s"applications/$appId")}/$id/logs")
case None =>
new URL(s"${generateURL(s"applications/$appId")}/logs")
}
val (code, inputStream, error) = HistoryServerSuite.connectAndGetInputStream(url)
code should be (HttpServletResponse.SC_OK)
inputStream should not be None
error should be (None)
val zipStream = new ZipInputStream(inputStream.get)
var entry = zipStream.getNextEntry
entry should not be null
val totalFiles = {
attemptId.map { x => 1 }.getOrElse(2)
}
var filesCompared = 0
while (entry != null) {
if (!entry.isDirectory) {
val expectedFile = {
new File(logDir, entry.getName)
}
val expected = Files.toString(expectedFile, StandardCharsets.UTF_8)
val actual = new String(ByteStreams.toByteArray(zipStream), StandardCharsets.UTF_8)
actual should be (expected)
filesCompared += 1
}
entry = zipStream.getNextEntry
}
filesCompared should be (totalFiles)
}
test("response codes on bad paths") {
val badAppId = getContentAndCode("applications/foobar")
badAppId._1 should be (HttpServletResponse.SC_NOT_FOUND)
badAppId._3 should be (Some("unknown app: foobar"))
val badStageId = getContentAndCode("applications/local-1422981780767/stages/12345")
badStageId._1 should be (HttpServletResponse.SC_NOT_FOUND)
badStageId._3 should be (Some("unknown stage: 12345"))
val badStageAttemptId = getContentAndCode("applications/local-1422981780767/stages/1/1")
badStageAttemptId._1 should be (HttpServletResponse.SC_NOT_FOUND)
badStageAttemptId._3 should be (Some("unknown attempt for stage 1. Found attempts: [0]"))
val badStageId2 = getContentAndCode("applications/local-1422981780767/stages/flimflam")
badStageId2._1 should be (HttpServletResponse.SC_NOT_FOUND)
// will take some mucking w/ jersey to get a better error msg in this case
val badQuantiles = getContentAndCode(
"applications/local-1430917381534/stages/0/0/taskSummary?quantiles=foo,0.1")
badQuantiles._1 should be (HttpServletResponse.SC_BAD_REQUEST)
badQuantiles._3 should be (Some("Bad value for parameter \\"quantiles\\". Expected a double, " +
"got \\"foo\\""))
getContentAndCode("foobar")._1 should be (HttpServletResponse.SC_NOT_FOUND)
}
test("automatically retrieve uiRoot from request through Knox") {
assert(sys.props.get("spark.ui.proxyBase").isEmpty,
"spark.ui.proxyBase is defined but it should not for this UT")
assert(sys.env.get("APPLICATION_WEB_PROXY_BASE").isEmpty,
"APPLICATION_WEB_PROXY_BASE is defined but it should not for this UT")
val page = new HistoryPage(server)
val requestThroughKnox = mock[HttpServletRequest]
val knoxBaseUrl = "/gateway/default/sparkhistoryui"
when(requestThroughKnox.getHeader("X-Forwarded-Context")).thenReturn(knoxBaseUrl)
val responseThroughKnox = page.render(requestThroughKnox)
val urlsThroughKnox = responseThroughKnox \\\\ "@href" map (_.toString)
val siteRelativeLinksThroughKnox = urlsThroughKnox filter (_.startsWith("/"))
all (siteRelativeLinksThroughKnox) should startWith (knoxBaseUrl)
val directRequest = mock[HttpServletRequest]
val directResponse = page.render(directRequest)
val directUrls = directResponse \\\\ "@href" map (_.toString)
val directSiteRelativeLinks = directUrls filter (_.startsWith("/"))
all (directSiteRelativeLinks) should not startWith (knoxBaseUrl)
}
test("static relative links are prefixed with uiRoot (spark.ui.proxyBase)") {
val uiRoot = Option(System.getenv("APPLICATION_WEB_PROXY_BASE")).getOrElse("/testwebproxybase")
val page = new HistoryPage(server)
val request = mock[HttpServletRequest]
// when
System.setProperty("spark.ui.proxyBase", uiRoot)
val response = page.render(request)
// then
val urls = response \\\\ "@href" map (_.toString)
val siteRelativeLinks = urls filter (_.startsWith("/"))
all (siteRelativeLinks) should startWith (uiRoot)
}
test("/version api endpoint") {
val response = getUrl("version")
assert(response.contains(SPARK_VERSION))
}
test("ajax rendered relative links are prefixed with uiRoot (spark.ui.proxyBase)") {
val uiRoot = "/testwebproxybase"
System.setProperty("spark.ui.proxyBase", uiRoot)
stop()
init()
val port = server.boundPort
val servlet = new ProxyServlet {
override def rewriteTarget(request: HttpServletRequest): String = {
// servlet acts like a proxy that redirects calls made on
// spark.ui.proxyBase context path to the normal servlet handlers operating off "/"
val sb = request.getRequestURL()
if (request.getQueryString() != null) {
sb.append(s"?${request.getQueryString()}")
}
val proxyidx = sb.indexOf(uiRoot)
sb.delete(proxyidx, proxyidx + uiRoot.length).toString
}
}
val contextHandler = new ServletContextHandler
val holder = new ServletHolder(servlet)
contextHandler.setContextPath(uiRoot)
contextHandler.addServlet(holder, "/")
server.attachHandler(contextHandler)
implicit val webDriver: WebDriver = new HtmlUnitDriver(true) {
getWebClient.getOptions.setThrowExceptionOnScriptError(false)
}
try {
val url = s"http://localhost:$port"
go to s"$url$uiRoot"
// expect the ajax call to finish in 5 seconds
implicitlyWait(org.scalatest.time.Span(5, org.scalatest.time.Seconds))
// once this findAll call returns, we know the ajax load of the table completed
findAll(ClassNameQuery("odd"))
val links = findAll(TagNameQuery("a"))
.map(_.attribute("href"))
.filter(_.isDefined)
.map(_.get)
.filter(_.startsWith(url)).toList
// there are at least some URL links that were generated via javascript,
// and they all contain the spark.ui.proxyBase (uiRoot)
links.length should be > 4
all(links) should startWith(url + uiRoot)
} finally {
contextHandler.stop()
quit()
}
}
/**
* Verify that the security manager needed for the history server can be instantiated
* when `spark.authenticate` is `true`, rather than raise an `IllegalArgumentException`.
*/
test("security manager starts with spark.authenticate set") {
val conf = new SparkConf()
.set(IS_TESTING, true)
.set(SecurityManager.SPARK_AUTH_CONF, "true")
HistoryServer.createSecurityManager(conf)
}
test("incomplete apps get refreshed") {
implicit val webDriver: WebDriver = new HtmlUnitDriver
implicit val formats = org.json4s.DefaultFormats
// this test dir is explicitly deleted on successful runs; retained for diagnostics when
// not
val logDir = Utils.createDirectory(System.getProperty("java.io.tmpdir", "logs"))
// a new conf is used with the background thread set and running at its fastest
// allowed refresh rate (1Hz)
stop()
val myConf = new SparkConf()
.set(HISTORY_LOG_DIR, logDir.getAbsolutePath)
.set(EVENT_LOG_DIR, logDir.getAbsolutePath)
.set(UPDATE_INTERVAL_S.key, "1s")
.set(EVENT_LOG_ENABLED, true)
.set(LOCAL_STORE_DIR, storeDir.getAbsolutePath())
.remove(IS_TESTING)
val provider = new FsHistoryProvider(myConf)
val securityManager = HistoryServer.createSecurityManager(myConf)
sc = new SparkContext("local", "test", myConf)
val logDirUri = logDir.toURI
val logDirPath = new Path(logDirUri)
val fs = FileSystem.get(logDirUri, sc.hadoopConfiguration)
def listDir(dir: Path): Seq[FileStatus] = {
val statuses = fs.listStatus(dir)
statuses.flatMap(
stat => if (stat.isDirectory) listDir(stat.getPath) else Seq(stat))
}
def dumpLogDir(msg: String = ""): Unit = {
if (log.isDebugEnabled) {
logDebug(msg)
listDir(logDirPath).foreach { status =>
val s = status.toString
logDebug(s)
}
}
}
server = new HistoryServer(myConf, provider, securityManager, 0)
server.initialize()
server.bind()
val port = server.boundPort
val metrics = server.cacheMetrics
// assert that a metric has a value; if not dump the whole metrics instance
def assertMetric(name: String, counter: Counter, expected: Long): Unit = {
val actual = counter.getCount
if (actual != expected) {
// this is here because Scalatest loses stack depth
fail(s"Wrong $name value - expected $expected but got $actual" +
s" in metrics\\n$metrics")
}
}
// build a URL for an app or app/attempt plus a page underneath
def buildURL(appId: String, suffix: String): URL = {
new URL(s"http://localhost:$port/history/$appId$suffix")
}
// build a rest URL for the application and suffix.
def applications(appId: String, suffix: String): URL = {
new URL(s"http://localhost:$port/api/v1/applications/$appId$suffix")
}
val historyServerRoot = new URL(s"http://localhost:$port/")
// start initial job
val d = sc.parallelize(1 to 10)
d.count()
val stdInterval = interval(100 milliseconds)
val appId = eventually(timeout(20 seconds), stdInterval) {
val json = getContentAndCode("applications", port)._2.get
val apps = parse(json).asInstanceOf[JArray].arr
apps should have size 1
(apps.head \\ "id").extract[String]
}
val appIdRoot = buildURL(appId, "")
val rootAppPage = HistoryServerSuite.getUrl(appIdRoot)
logDebug(s"$appIdRoot ->[${rootAppPage.length}] \\n$rootAppPage")
// sanity check to make sure filter is chaining calls
rootAppPage should not be empty
def getAppUI: SparkUI = {
server.withSparkUI(appId, None) { ui => ui }
}
// selenium isn't that useful on failures...add our own reporting
def getNumJobs(suffix: String): Int = {
val target = buildURL(appId, suffix)
val targetBody = HistoryServerSuite.getUrl(target)
try {
go to target.toExternalForm
findAll(cssSelector("tbody tr")).toIndexedSeq.size
} catch {
case ex: Exception =>
throw new Exception(s"Against $target\\n$targetBody", ex)
}
}
// use REST API to get #of jobs
def getNumJobsRestful(): Int = {
val json = HistoryServerSuite.getUrl(applications(appId, "/jobs"))
val jsonAst = parse(json)
val jobList = jsonAst.asInstanceOf[JArray]
jobList.values.size
}
// get a list of app Ids of all apps in a given state. REST API
def listApplications(completed: Boolean): Seq[String] = {
val json = parse(HistoryServerSuite.getUrl(applications("", "")))
logDebug(s"${JsonMethods.pretty(json)}")
json match {
case JNothing => Seq()
case apps: JArray =>
apps.children.filter(app => {
(app \\ "attempts") match {
case attempts: JArray =>
val state = (attempts.children.head \\ "completed").asInstanceOf[JBool]
state.value == completed
case _ => false
}
}).map(app => (app \\ "id").asInstanceOf[JString].values)
case _ => Seq()
}
}
def completedJobs(): Seq[JobData] = {
getAppUI.store.jobsList(List(JobExecutionStatus.SUCCEEDED).asJava)
}
def activeJobs(): Seq[JobData] = {
getAppUI.store.jobsList(List(JobExecutionStatus.RUNNING).asJava)
}
def isApplicationCompleted(appInfo: ApplicationInfo): Boolean = {
appInfo.attempts.nonEmpty && appInfo.attempts.head.completed
}
activeJobs() should have size 0
completedJobs() should have size 1
getNumJobs("") should be (1)
getNumJobs("/jobs") should be (1)
getNumJobsRestful() should be (1)
assert(metrics.lookupCount.getCount > 0, s"lookup count too low in $metrics")
// dump state before the next bit of test, which is where update
// checking really gets stressed
dumpLogDir("filesystem before executing second job")
logDebug(s"History Server: $server")
val d2 = sc.parallelize(1 to 10)
d2.count()
dumpLogDir("After second job")
val stdTimeout = timeout(10 seconds)
logDebug("waiting for UI to update")
eventually(stdTimeout, stdInterval) {
assert(2 === getNumJobs(""),
s"jobs not updated, server=$server\\n dir = ${listDir(logDirPath)}")
assert(2 === getNumJobs("/jobs"),
s"job count under /jobs not updated, server=$server\\n dir = ${listDir(logDirPath)}")
getNumJobsRestful() should be(2)
}
d.count()
d.count()
eventually(stdTimeout, stdInterval) {
assert(4 === getNumJobsRestful(), s"two jobs back-to-back not updated, server=$server\\n")
}
val jobcount = getNumJobs("/jobs")
assert(!isApplicationCompleted(provider.getListing().next))
listApplications(false) should contain(appId)
// stop the spark context
resetSparkContext()
// check the app is now found as completed
eventually(stdTimeout, stdInterval) {
assert(isApplicationCompleted(provider.getListing().next),
s"application never completed, server=$server\\n")
}
// app becomes observably complete
eventually(stdTimeout, stdInterval) {
listApplications(true) should contain (appId)
}
// app is no longer incomplete
listApplications(false) should not contain(appId)
assert(jobcount === getNumJobs("/jobs"))
// no need to retain the test dir now the tests complete
ShutdownHookManager.registerShutdownDeleteDir(logDir)
}
test("ui and api authorization checks") {
val appId = "local-1430917381535"
val owner = "irashid"
val admin = "root"
val other = "alice"
stop()
init(
UI_FILTERS.key -> classOf[FakeAuthFilter].getName(),
HISTORY_SERVER_UI_ACLS_ENABLE.key -> "true",
HISTORY_SERVER_UI_ADMIN_ACLS.key -> admin)
val tests = Seq(
(owner, HttpServletResponse.SC_OK),
(admin, HttpServletResponse.SC_OK),
(other, HttpServletResponse.SC_FORBIDDEN),
// When the remote user is null, the code behaves as if auth were disabled.
(null, HttpServletResponse.SC_OK))
val port = server.boundPort
val testUrls = Seq(
s"http://localhost:$port/api/v1/applications/$appId/1/jobs",
s"http://localhost:$port/history/$appId/1/jobs/",
s"http://localhost:$port/api/v1/applications/$appId/logs",
s"http://localhost:$port/api/v1/applications/$appId/1/logs",
s"http://localhost:$port/api/v1/applications/$appId/2/logs")
tests.foreach { case (user, expectedCode) =>
testUrls.foreach { url =>
val headers = if (user != null) Seq(FakeAuthFilter.FAKE_HTTP_USER -> user) else Nil
val sc = TestUtils.httpResponseCode(new URL(url), headers = headers)
assert(sc === expectedCode, s"Unexpected status code $sc for $url (user = $user)")
}
}
}
def getContentAndCode(path: String, port: Int = port): (Int, Option[String], Option[String]) = {
HistoryServerSuite.getContentAndCode(new URL(s"http://localhost:$port/api/v1/$path"))
}
def getUrl(path: String): String = {
HistoryServerSuite.getUrl(generateURL(path))
}
def generateURL(path: String): URL = {
new URL(s"http://localhost:$port/api/v1/$path")
}
def generateExpectation(name: String, path: String): Unit = {
val json = getUrl(path)
val file = new File(expRoot, HistoryServerSuite.sanitizePath(name) + "_expectation.json")
val out = new FileWriter(file)
out.write(clearLastUpdated(json))
out.write('\\n')
out.close()
}
}
object HistoryServerSuite {

  /** Regenerate every golden "_expectation.json" file from the server's current output. */
  def main(args: Array[String]): Unit = {
    // generate the "expected" results for the characterization tests. Just blindly assume the
    // current behavior is correct, and write out the returned json to the test/resource files
    val suite = new HistoryServerSuite
    FileUtils.deleteDirectory(suite.expRoot)
    suite.expRoot.mkdirs()
    try {
      suite.init()
      for ((name, path) <- suite.cases) {
        suite.generateExpectation(name, path)
      }
    } finally {
      suite.stop()
    }
  }

  /** GET `url`, returning (status code, body text if readable, error body if present). */
  def getContentAndCode(url: URL): (Int, Option[String], Option[String]) = {
    val (code, stream, errString) = connectAndGetInputStream(url)
    val bodyString = stream.map(IOUtils.toString)
    (code, bodyString, errString)
  }

  /** Issue a GET and return the status plus the optional input stream and error text. */
  def connectAndGetInputStream(url: URL): (Int, Option[InputStream], Option[String]) = {
    val connection = url.openConnection().asInstanceOf[HttpURLConnection]
    connection.setRequestMethod("GET")
    connection.connect()
    val code = connection.getResponseCode()
    // Either stream may be absent (e.g. on error responses); treat IO failures as absence.
    val inStream =
      try Option(connection.getInputStream())
      catch { case _: IOException => None }
    val errString =
      try Option(connection.getErrorStream()).map(IOUtils.toString)
      catch { case _: IOException => None }
    (code, inStream, errString)
  }

  /** Map a test name to a filesystem-safe token. */
  def sanitizePath(path: String): String = {
    // this doesn't need to be perfect, just good enough to avoid collisions
    path.replaceAll("\\\\W", "_")
  }

  /** GET `path` and return the body, throwing on any non-200 response. */
  def getUrl(path: URL): String = {
    val (code, resultOpt, error) = getContentAndCode(path)
    if (code != 200) {
      throw new RuntimeException(
        "got code: " + code + " when getting " + path + " w/ error: " + error)
    }
    resultOpt.get
  }
}
/**
* A filter used for auth tests; sets the request's user to the value of the "HTTP_USER" header.
*/
class FakeAuthFilter extends Filter {

  override def destroy(): Unit = {}

  override def init(config: FilterConfig): Unit = {}

  override def doFilter(req: ServletRequest, res: ServletResponse, chain: FilterChain): Unit = {
    val httpReq = req.asInstanceOf[HttpServletRequest]
    // Report the header value as the authenticated remote user for downstream handlers.
    val withUser = new HttpServletRequestWrapper(httpReq) {
      override def getRemoteUser(): String = httpReq.getHeader(FakeAuthFilter.FAKE_HTTP_USER)
    }
    chain.doFilter(withUser, res)
  }
}
object FakeAuthFilter {
  // Name of the request header carrying the user identity the filter should report.
  val FAKE_HTTP_USER = "HTTP_USER"
}
| yanboliang/spark | core/src/test/scala/org/apache/spark/deploy/history/HistoryServerSuite.scala | Scala | apache-2.0 | 29,354 |
/* NSC -- new Scala compiler
* Copyright 2005-2013 LAMP/EPFL
* @author Paul Phillips
*/
package scala.reflect
package io
import java.net.URL
import java.io.{ IOException, InputStream, ByteArrayInputStream, FilterInputStream }
import java.io.{ File => JFile }
import java.util.zip.{ ZipEntry, ZipFile, ZipInputStream }
import scala.collection.{ immutable, mutable }
import scala.annotation.tailrec
/** An abstraction for zip files and streams. Everything is written the way
* it is for performance: we come through here a lot on every run. Be careful
* about changing it.
*
* @author Philippe Altherr (original version)
* @author Paul Phillips (this one)
* @version 2.0,
*
* ''Note: This library is considered experimental and should not be used unless you know what you are doing.''
*/
object ZipArchive {
  def fromPath(path: String): FileZipArchive = fromFile(new JFile(path))
  def fromPath(path: Path): FileZipArchive = fromFile(path.toFile)

  /**
   * @param file a File
   * @return A ZipArchive if `file` is a readable zip file, otherwise null.
   */
  def fromFile(file: File): FileZipArchive = fromFile(file.jfile)
  def fromFile(file: JFile): FileZipArchive =
    try new FileZipArchive(file)
    catch { case _: IOException => null }

  /**
   * @param url the url of a zip file
   * @return A ZipArchive backed by the given url.
   */
  def fromURL(url: URL): URLZipArchive = new URLZipArchive(url)
  def fromURL(url: String): URLZipArchive = fromURL(new URL(url))

  // Directory portion of a zip-entry path, including the trailing '/'.
  private def dirName(path: String) = splitPath(path, front = true)
  // Final name component of a zip-entry path.
  private def baseName(path: String) = splitPath(path, front = false)

  // Split `path0` around its last '/'; `front` selects the directory part.
  private def splitPath(path0: String, front: Boolean): String = {
    val isDir = path0.charAt(path0.length - 1) == '/'
    val path = if (isDir) path0.dropRight(1) else path0
    path.lastIndexOf('/') match {
      case -1 =>
        if (front) "/" else path
      case idx =>
        if (front) path.substring(0, idx + 1) else path.substring(idx + 1)
    }
  }
}
import ZipArchive._
/** ''Note: This library is considered experimental and should not be used unless you know what you are doing.'' */
abstract class ZipArchive(override val file: JFile) extends AbstractFile with Equals {
  self =>

  override def underlyingSource = Some(this)
  def isDirectory = true
  // The archive is a read-only container: mutation and direct lookup on the archive
  // object itself are unsupported; navigation happens through Entry/DirEntry below.
  def lookupName(name: String, directory: Boolean) = unsupported
  def lookupNameUnchecked(name: String, directory: Boolean) = unsupported
  def create() = unsupported
  def delete() = unsupported
  def output = unsupported
  def container = unsupported
  def absolute = unsupported

  // Depth-first expansion: directories are recursed into, files are yielded as-is.
  private def walkIterator(its: Iterator[AbstractFile]): Iterator[AbstractFile] = {
    its flatMap { f =>
      if (f.isDirectory) walkIterator(f.iterator)
      else Iterator(f)
    }
  }
  /** Iterator over every non-directory entry in the archive, recursively. */
  def deepIterator = walkIterator(iterator)

  /** ''Note: This library is considered experimental and should not be used unless you know what you are doing.'' */
  sealed abstract class Entry(path: String) extends VirtualFile(baseName(path), path) {
    // have to keep this name for compat with sbt's compiler-interface
    def getArchive: ZipFile = null
    override def underlyingSource = Some(self)
    override def toString = self.path + "(" + path + ")"
  }

  /** ''Note: This library is considered experimental and should not be used unless you know what you are doing.'' */
  class DirEntry(path: String) extends Entry(path) {
    // Children keyed by name; directory children are stored under "name/".
    val entries = mutable.HashMap[String, Entry]()

    override def isDirectory = true
    override def iterator: Iterator[Entry] = entries.valuesIterator
    override def lookupName(name: String, directory: Boolean): Entry = {
      if (directory) entries(name + "/")
      else entries(name)
    }
  }

  // Look up (or lazily create, together with any missing parents) the DirEntry for `path`.
  private def ensureDir(dirs: mutable.Map[String, DirEntry], path: String, zipEntry: ZipEntry): DirEntry =
    //OPT inlined from getOrElseUpdate; saves ~50K closures on test run.
    // was:
    // dirs.getOrElseUpdate(path, {
    //   val parent = ensureDir(dirs, dirName(path), null)
    //   val dir = new DirEntry(path)
    //   parent.entries(baseName(path)) = dir
    //   dir
    // })
    dirs get path match {
      case Some(v) => v
      case None =>
        // Recursively materialize missing parents, then register the new dir in both
        // its parent's entry map and the flat path->DirEntry map.
        val parent = ensureDir(dirs, dirName(path), null)
        val dir = new DirEntry(path)
        parent.entries(baseName(path)) = dir
        dirs(path) = dir
        dir
    }

  // Resolve the DirEntry a zip entry belongs in: the entry itself if it is a directory,
  // otherwise its parent directory (created on demand).
  protected def getDir(dirs: mutable.Map[String, DirEntry], entry: ZipEntry): DirEntry = {
    if (entry.isDirectory) ensureDir(dirs, entry.getName, entry)
    else ensureDir(dirs, dirName(entry.getName), null)
  }
}
/** ''Note: This library is considered experimental and should not be used unless you know what you are doing.'' */
final class FileZipArchive(file: JFile) extends ZipArchive(file) {
  // Index the whole archive once, on first access: `root` is the top-level DirEntry,
  // `allDirs` maps every directory path to its DirEntry.
  lazy val (root, allDirs) = {
    val root = new DirEntry("/")
    val dirs = mutable.HashMap[String, DirEntry]("/" -> root)
    def openZipFile(): ZipFile = try {
      new ZipFile(file)
    } catch {
      // Re-wrap so the failing path is part of the message.
      case ioe: IOException => throw new IOException("Error accessing " + file.getPath, ioe)
    }

    val zipFile = openZipFile()
    val enum = zipFile.entries()

    try {
      while (enum.hasMoreElements) {
        val zipEntry = enum.nextElement
        val dir = getDir(dirs, zipEntry)
        if (zipEntry.isDirectory) dir
        else {
          class FileEntry() extends Entry(zipEntry.getName) {
            // NOTE: each call opens a fresh ZipFile; `input` below closes it along with
            // the entry's stream, so no ZipFile handle outlives its reader.
            override def getArchive = openZipFile
            override def lastModified = zipEntry.getTime()
            override def input = {
              val zipFile = getArchive
              val delegate = zipFile getInputStream zipEntry
              new FilterInputStream(delegate) {
                // Closing the entry stream also closes its backing ZipFile.
                override def close(): Unit = {
                  delegate.close()
                  zipFile.close()
                }
              }
            }
            override def sizeOption = Some(zipEntry.getSize().toInt)
          }
          val f = new FileEntry()
          dir.entries(f.name) = f
        }
      }
    } finally zipFile.close()

    (root, dirs)
  }

  def iterator: Iterator[Entry] = root.iterator

  def name = file.getName
  def path = file.getPath
  def input = File(file).inputStream()
  def lastModified = file.lastModified

  override def sizeOption = Some(file.length.toInt)
  override def canEqual(other: Any) = other.isInstanceOf[FileZipArchive]
  override def hashCode() = file.hashCode
  // Equality is by absolute file identity, consistent with hashCode on the file.
  override def equals(that: Any) = that match {
    case x: FileZipArchive => file.getAbsoluteFile == x.file.getAbsoluteFile
    case _                 => false
  }
}
/** ''Note: This library is considered experimental and should not be used unless you know what you are doing.'' */
final class URLZipArchive(val url: URL) extends ZipArchive(null) {
  // Reads the entire zip from the URL into memory and rebuilds the entry tree on
  // every call; entry bytes are captured eagerly while the stream is consumed.
  def iterator: Iterator[Entry] = {
    val root     = new DirEntry("/")
    val dirs     = mutable.HashMap[String, DirEntry]("/" -> root)
    val in       = new ZipInputStream(new ByteArrayInputStream(Streamable.bytes(input)))

    @tailrec def loop() {
      val zipEntry = in.getNextEntry()
      class EmptyFileEntry() extends Entry(zipEntry.getName) {
        override def toByteArray: Array[Byte] = null
        override def sizeOption = Some(0)
      }
      class FileEntry() extends Entry(zipEntry.getName) {
        // Bytes are read from the stream at construction time, not lazily.
        override val toByteArray: Array[Byte] = {
          val len    = zipEntry.getSize().toInt
          val arr    = if (len == 0) Array.emptyByteArray else new Array[Byte](len)
          var offset = 0

          // Keep reading until `len` bytes arrive or the stream ends early.
          def loop() {
            if (offset < len) {
              val read = in.read(arr, offset, len - offset)
              if (read >= 0) {
                offset += read
                loop()
              }
            }
          }
          loop()

          if (offset == arr.length) arr
          else throw new IOException("Input stream truncated: read %d of %d bytes".format(offset, len))
        }
        override def sizeOption = Some(zipEntry.getSize().toInt)
      }

      if (zipEntry != null) {
        val dir = getDir(dirs, zipEntry)
        if (zipEntry.isDirectory)
          dir
        else {
          // Zero-length entries skip allocating a backing array entirely.
          val f = if (zipEntry.getSize() == 0) new EmptyFileEntry() else new FileEntry()
          dir.entries(f.name) = f
        }
        in.closeEntry()
        loop()
      }
    }
    loop()

    // The flat directory map is only needed while building the tree.
    try root.iterator
    finally dirs.clear()
  }

  def name  = url.getFile()
  def path  = url.getPath()
  def input = url.openStream()
  def lastModified =
    try url.openConnection().getLastModified()
    catch { case _: IOException => 0 }

  override def canEqual(other: Any) = other.isInstanceOf[URLZipArchive]
  override def hashCode() = url.hashCode
  override def equals(that: Any) = that match {
    case x: URLZipArchive => url == x.url
    case _                => false
  }
}
| d1egoaz/ensime-sbt | src/sbt-test/sbt-ensime/ensime-server/monkeys/src/main/scala-2.10/scala/reflect/io/ZipArchive.scala | Scala | apache-2.0 | 8,804 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.cluster.k8s
import java.util.concurrent.{ScheduledExecutorService, TimeUnit}
import scala.concurrent.Future
import io.fabric8.kubernetes.client.KubernetesClient
import org.apache.spark.SparkContext
import org.apache.spark.deploy.k8s.Config._
import org.apache.spark.deploy.k8s.Constants._
import org.apache.spark.deploy.security.HadoopDelegationTokenManager
import org.apache.spark.internal.config.SCHEDULER_MIN_REGISTERED_RESOURCES_RATIO
import org.apache.spark.resource.ResourceProfile
import org.apache.spark.rpc.RpcAddress
import org.apache.spark.scheduler.{ExecutorKilled, ExecutorLossReason, TaskSchedulerImpl}
import org.apache.spark.scheduler.cluster.{CoarseGrainedSchedulerBackend, SchedulerBackendUtils}
import org.apache.spark.util.{ThreadUtils, Utils}
/**
 * Scheduler backend that runs Spark executors as pods on a Kubernetes cluster.
 *
 * Pod allocation, pod lifecycle tracking and pod-state snapshotting are delegated to the
 * injected collaborators; this class wires them together and bridges them to Spark's
 * coarse-grained scheduling RPC layer.
 */
private[spark] class KubernetesClusterSchedulerBackend(
    scheduler: TaskSchedulerImpl,
    sc: SparkContext,
    kubernetesClient: KubernetesClient,
    executorService: ScheduledExecutorService,
    snapshotsStore: ExecutorPodsSnapshotsStore,
    podAllocator: ExecutorPodsAllocator,
    lifecycleEventHandler: ExecutorPodsLifecycleManager,
    watchEvents: ExecutorPodsWatchSnapshotSource,
    pollEvents: ExecutorPodsPollingSnapshotSource)
  extends CoarseGrainedSchedulerBackend(scheduler, sc.env.rpcEnv) {
  // Default to requiring 80% of executors registered before scheduling, unless the user
  // explicitly configured a ratio (in which case defer to the superclass value).
  protected override val minRegisteredRatio =
    if (conf.get(SCHEDULER_MIN_REGISTERED_RESOURCES_RATIO).isEmpty) {
      0.8
    } else {
      super.minRegisteredRatio
    }
  private val initialExecutors = SchedulerBackendUtils.getInitialTargetExecutorNumber(conf)
  // Whether executor pods should be deleted from the cluster when this backend stops.
  private val shouldDeleteExecutors = conf.get(KUBERNETES_DELETE_EXECUTORS)
  private val defaultProfile = scheduler.sc.resourceProfileManager.defaultResourceProfile
  // Allow removeExecutor to be accessible by ExecutorPodsLifecycleEventHandler
  private[k8s] def doRemoveExecutor(executorId: String, reason: ExecutorLossReason): Unit = {
    if (isExecutorActive(executorId)) {
      removeExecutor(executorId, reason)
    }
  }
  /**
   * Get an application ID associated with the job.
   * This returns the string value of spark.app.id if set, otherwise
   * the locally-generated ID from the superclass.
   *
   * @return The application ID
   */
  override def applicationId(): String = {
    conf.getOption("spark.app.id").map(_.toString).getOrElse(super.applicationId)
  }
  // Starts the lifecycle handler, pod allocator and both snapshot sources (watch + poll)
  // after the superclass has set up its RPC endpoint.
  override def start(): Unit = {
    super.start()
    podAllocator.setTotalExpectedExecutors(initialExecutors)
    lifecycleEventHandler.start(this)
    podAllocator.start(applicationId())
    watchEvents.start(applicationId())
    pollEvents.start(applicationId())
  }
  // Best-effort shutdown: every step is wrapped in tryLogNonFatalError so a failure in one
  // collaborator does not prevent the remaining ones from being stopped.
  override def stop(): Unit = {
    super.stop()
    Utils.tryLogNonFatalError {
      snapshotsStore.stop()
    }
    Utils.tryLogNonFatalError {
      watchEvents.stop()
    }
    Utils.tryLogNonFatalError {
      pollEvents.stop()
    }
    // Only remove this application's executor pods (selected by app-id + executor-role
    // labels) and only when configured to do so.
    if (shouldDeleteExecutors) {
      Utils.tryLogNonFatalError {
        kubernetesClient
          .pods()
          .withLabel(SPARK_APP_ID_LABEL, applicationId())
          .withLabel(SPARK_ROLE_LABEL, SPARK_POD_EXECUTOR_ROLE)
          .delete()
      }
    }
    Utils.tryLogNonFatalError {
      ThreadUtils.shutdown(executorService)
    }
    Utils.tryLogNonFatalError {
      kubernetesClient.close()
    }
  }
  // Forwards the requested executor total (for the default resource profile) to the pod
  // allocator. Always reports success; the allocator satisfies the target asynchronously.
  override def doRequestTotalExecutors(
      resourceProfileToTotalExecs: Map[ResourceProfile, Int]): Future[Boolean] = {
    podAllocator.setTotalExpectedExecutors(resourceProfileToTotalExecs(defaultProfile))
    Future.successful(true)
  }
  // Considers resources sufficient once the configured fraction of the initially requested
  // executors has registered.
  override def sufficientResourcesRegistered(): Boolean = {
    totalRegisteredExecutors.get() >= initialExecutors * minRegisteredRatio
  }
  override def getExecutorIds(): Seq[String] = synchronized {
    super.getExecutorIds()
  }
  // Asks the given executors to shut down gracefully, then schedules a delayed forced
  // deletion of any of their pods that are still running after the grace period.
  override def doKillExecutors(executorIds: Seq[String]): Future[Boolean] = {
    executorIds.foreach { id =>
      removeExecutor(id, ExecutorKilled)
    }
    // Give some time for the executors to shut themselves down, then forcefully kill any
    // remaining ones. This intentionally ignores the configuration about whether pods
    // should be deleted; only executors that shut down gracefully (and are then collected
    // by the ExecutorPodsLifecycleManager) will respect that configuration.
    val killTask = new Runnable() {
      override def run(): Unit = Utils.tryLogNonFatalError {
        val running = kubernetesClient
          .pods()
          .withField("status.phase", "Running")
          .withLabel(SPARK_APP_ID_LABEL, applicationId())
          .withLabel(SPARK_ROLE_LABEL, SPARK_POD_EXECUTOR_ROLE)
          .withLabelIn(SPARK_EXECUTOR_ID_LABEL, executorIds: _*)
        if (!running.list().getItems().isEmpty()) {
          logInfo(s"Forcefully deleting ${running.list().getItems().size()} pods " +
            s"(out of ${executorIds.size}) that are still running after graceful shutdown period.")
          running.delete()
        }
      }
    }
    executorService.schedule(killTask, conf.get(KUBERNETES_DYN_ALLOC_KILL_GRACE_PERIOD),
      TimeUnit.MILLISECONDS)
    // Return an immediate success, since we can't confirm or deny that executors have been
    // actually shut down without waiting too long and blocking the allocation thread, which
    // waits on this future to complete, blocking further allocations / deallocations.
    //
    // This relies a lot on the guarantees of Spark's RPC system, that a message will be
    // delivered to the destination unless there's an issue with the connection, in which
    // case the executor will shut itself down (and the driver, separately, will just declare
    // it as "lost"). Coupled with the allocation manager keeping track of which executors are
    // pending release, returning "true" here means that eventually all the requested executors
    // will be removed.
    //
    // The cleanup timer above is just an optimization to make sure that stuck executors don't
    // stick around in the k8s server. Normally it should never delete any pods at all.
    Future.successful(true)
  }
  override def createDriverEndpoint(): DriverEndpoint = {
    new KubernetesDriverEndpoint()
  }
  override protected def createTokenManager(): Option[HadoopDelegationTokenManager] = {
    Some(new HadoopDelegationTokenManager(conf, sc.hadoopConfiguration, driverEndpoint))
  }
  // An executor is treated as blacklisted if its pod has been marked deleted by the allocator.
  override protected def isBlacklisted(executorId: String, hostname: String): Boolean = {
    podAllocator.isDeleted(executorId)
  }
  private class KubernetesDriverEndpoint extends DriverEndpoint {
    override def onDisconnected(rpcAddress: RpcAddress): Unit = {
      // Don't do anything besides disabling the executor - allow the Kubernetes API events to
      // drive the rest of the lifecycle decisions
      // TODO what if we disconnect from a networking issue? Probably want to mark the executor
      // to be deleted eventually.
      addressToExecutorId.get(rpcAddress).foreach(disableExecutor)
    }
  }
}
| kevinyu98/spark | resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/KubernetesClusterSchedulerBackend.scala | Scala | apache-2.0 | 7,790 |
package com.softwaremill.bootzooka.api
import java.util.Date
import javax.servlet.http.HttpServletResponse
import com.softwaremill.bootzooka.api.serializers.{RequestLogger, DateTimeSerializer, UuidSerializer}
import com.typesafe.scalalogging.LazyLogging
import org.json4s.{DefaultFormats, Formats}
import org.scalatra._
import org.scalatra.json.{JValueResult, NativeJsonSupport}
import org.scalatra.swagger.SwaggerSupport
/**
 * Mixin for servlets mounted under the common `/api/` URL prefix.
 * Implementors supply the servlet-specific [[mappingPath]]; the absolute
 * mount point is derived from it.
 */
trait Mappable {
  /** Common URL prefix shared by every API servlet. */
  val Prefix = "/api/"
  /** Servlet-specific path segment, relative to [[Prefix]]. */
  def mappingPath: String
  /** Absolute mapping path: the prefix immediately followed by [[mappingPath]]. */
  def fullMappingPath = s"$Prefix$mappingPath"
}
/**
 * Complements [[Mappable]] for servlets that also expose Swagger docs:
 * the Swagger resource name is the prefix without its leading character,
 * followed by the mapping path (e.g. "api/users" for mapping path "users").
 */
trait SwaggerMappable {
  self: Mappable with SwaggerSupport =>
  /** Swagger resource name derived from the mapping path. */
  def name = s"${Prefix.tail}$mappingPath"
}
/**
 * Base servlet for JSON API endpoints: serializes responses as JSON, logs requests,
 * marks every response as non-cacheable, and converts uncaught exceptions into a
 * generic 500 response (details are logged server-side only).
 */
abstract class JsonServlet extends ScalatraServlet with RequestLogger with NativeJsonSupport with JValueResult with LazyLogging with Halting with Mappable {
  protected implicit val jsonFormats: Formats = DefaultFormats + new DateTimeSerializer() + new UuidSerializer()
  // Fixed timestamp (servlet construction time) used to mark responses as already expired.
  val Expire = new Date().toString
  before() {
    contentType = formats("json")
    applyNoCache(response)
  }
  /** Adds headers instructing clients and proxies never to cache the response. */
  def applyNoCache(response: HttpServletResponse): Unit = {
    response.addHeader("Expires", Expire)
    response.addHeader("Last-Modified", Expire)
    response.addHeader("Cache-Control", "no-store, no-cache, must-revalidate, max-age=0")
    response.addHeader("Pragma", "no-cache")
  }
  errorHandler = {
    case t: Exception =>
      // Log the full stack trace, then surface only a generic message to the client.
      logger.error("Exception during client request processing", t)
      halt(500, "Internal server exception")
  }
}
//package io.ddf.example
import java.util.ArrayList
import io.ddf
import io.ddf.DDFManager
/**
 * End-to-end demo of the DDF API on Spark: loads the `mtcars` dataset into a table,
 * runs projections, group-by aggregation, summaries, NA-dropping and KMeans.
 * Requires a working Spark/DDF environment and the local `resources/test/mtcars` file.
 */
object DDFExample {
  def run() = {
    val manager = DDFManager.get("spark")
    // (Re)create and populate the mtcars table from the bundled space-delimited file.
    manager.sql("drop TABLE if exists mtcars", "SparkSQL")
    manager.sql("CREATE TABLE mtcars ("
      + "mpg double,cyl int, disp double, hp int, drat double, wt double, qsec double, vs int, am int, gear int, carb int"
      + ") ROW FORMAT DELIMITED FIELDS TERMINATED BY ' '", "SparkSQL")
    manager.sql("LOAD DATA LOCAL INPATH 'resources/test/mtcars' INTO TABLE mtcars", "SparkSQL")
    val ddf = manager.sql2ddf("select * from mtcars", "SparkSQL")
    // Basic metadata accessors (results are not printed here).
    ddf.getNumRows
    ddf.getNumColumns
    ddf.getColumnNames
    // Project a subset of columns.
    val ddf2 = ddf.VIEWS.project("mpg", "disp", "hp", "drat", "qsec")
    // TODO: figure out how to express a filter through the VIEWS API, e.g.:
    // val ddf3 = ddf2.Views.subset(x$1, x$2)("origin=SFO")
    val ddf3 = ddf2
    ddf3.VIEWS.head(10)
    // Group by "mpg" computing an average; column lists are passed as Java ArrayLists.
    val col = new ArrayList[String]();
    col.add("mpg")
    val grcol = new ArrayList[String]();
    grcol.add("ampg = avg(mpg)")
    val ddf4 = ddf2.groupBy(col, grcol)
    ddf4.getColumnNames
    ddf4.VIEWS.top(10, "ampg", "asc")
    // Summary statistics before and after dropping NA rows (requires a mutable DDF).
    ddf2.getSummary
    ddf2.getFiveNumSummary
    ddf2.setMutable(true)
    ddf2.dropNA()
    ddf2.getSummary()
    //##########
    //# ML
    //##########
    // KMeans(numCentroids = 3, maxIters = 5, runs = 1) -- TODO confirm parameter meaning
    val kmeans = ddf2.ML.KMeans(3, 5, 1)
    kmeans.predict(Array(24, 22, 1, 3, 5))
  }
}
| ubolonton/DDF | examples/src/main/scala/io/ddf/example/DDFExample.scala | Scala | apache-2.0 | 1,319 |
/*
* Copyright 2014 http4s.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.http4s.blaze.client
import org.http4s.client.RequestKey
import scala.collection.immutable
/**
 * Read-only diagnostic view of a blaze client's internal connection-pool state,
 * with every query expressed in the effect type `F`.
 */
trait BlazeClientState[F[_]] {
  /** Whether the client has been shut down. */
  def isClosed: F[Boolean]
  /** Count of currently allocated connections, keyed by request key. */
  def allocated: F[immutable.Map[RequestKey, Int]]
  /** Depth of the idle-connection queue, keyed by request key. */
  def idleQueueDepth: F[immutable.Map[RequestKey, Int]]
  /** Number of callers currently waiting for a connection. */
  def waitQueueDepth: F[Int]
}
| http4s/http4s | blaze-client/src/main/scala/org/http4s/blaze/client/BlazeClientState.scala | Scala | apache-2.0 | 896 |
package taczombie.model.util
import scala.collection.mutable.ListBuffer
/**
 * Mixin providing a simple in-memory message log. Lines are accumulated in
 * insertion order; each mutating call can optionally echo to stdout, and the
 * log can be configured (via `init`) to print itself whenever it is read.
 */
trait Logger {
  object logger {
    // When true, every call to `get` first dumps the whole log to stdout.
    private var echoOnRead: Boolean = false
    // Accumulated log lines, oldest first.
    private val entries: ListBuffer[String] = ListBuffer[String]()
    /** Discards all accumulated lines. */
    def clear = entries.clear()
    /** Appends a line, optionally echoing it to stdout immediately. */
    def +=(s : String, print : Boolean = false) = {
      entries.append(s)
      if (print) println(s)
    }
    /** Resets the log, stores `s` as its first line and records the echo settings. */
    def init(s : String, print : Boolean = false, printOnGet : Boolean = false) = {
      clear
      echoOnRead = printOnGet
      this.+=(s, print)
    }
    /** Returns an immutable snapshot of the log, printing it first if so configured. */
    def get : List[String] = {
      if (echoOnRead) print
      entries.toList
    }
    /** Appends every line of `l`'s log to this one and returns `l`. */
    def merge(l : Logger) = {
      entries ++= l.logger.get
      l
    }
    /** Dumps the log to stdout: first line flush left, subsequent lines tab-indented. */
    def print = {
      entries.toList match {
        case Nil => println("Empty logger")
        case first :: rest =>
          println(first)
          rest.foreach(line => println("\t" + line))
      }
    }
  }
}
package com.lucidchart.piezo.admin.controllers
import com.lucidchart.piezo.{TriggerMonitoringModel, TriggerMonitoringPriority}
import com.lucidchart.piezo.admin.models.TriggerType
import scala.jdk.CollectionConverters._
import org.quartz.TriggerKey
import org.quartz.{CronTrigger, SimpleTrigger}
import org.quartz.impl.matchers.GroupMatcher
import org.quartz.Scheduler
import org.quartz.Trigger
import play.api.libs.json._
import scala.collection.mutable
/**
 * Helpers for listing Quartz triggers and serializing them (plus their monitoring
 * configuration) to JSON for the admin UI.
 */
object TriggerHelper {
  /**
   * Lists all trigger keys grouped by trigger group, with groups sorted by name and
   * the triggers within each group sorted by trigger name.
   */
  def getTriggersByGroup(scheduler: Scheduler): mutable.Buffer[(String, List[TriggerKey])] = {
    val triggersByGroup =
      for (groupName <- scheduler.getTriggerGroupNames.asScala) yield {
        val triggers: List[TriggerKey] = scheduler.getTriggerKeys(GroupMatcher.triggerGroupEquals(groupName)).asScala.toList
        val sortedTriggers: List[TriggerKey] = triggers.sortBy(triggerKey => triggerKey.getName)
        (groupName, sortedTriggers)
      }
    triggersByGroup.sortBy(groupList => groupList._1)
  }
  /**
   * JSON writer for a Quartz [[Trigger]]. Emits identity fields, job data, schedule
   * details (cron expression or simple interval/count), and the trigger's monitoring
   * priority and max error time, defaulting to Medium/300s when no monitoring record
   * exists.
   */
  def writesTrigger(monitoringModel: TriggerMonitoringModel): Writes[Trigger] = Writes { trigger =>
    val triggerKey = trigger.getKey
    val triggerType = TriggerType(trigger)
    // Schedule details depend on the concrete trigger subtype; unknown types get no
    // schedule object at all.
    val schedule = triggerType match {
      case TriggerType.Cron => {
        val cronTrigger = trigger.asInstanceOf[CronTrigger]
        Json.obj("cron" ->
          Json.obj(
            "cronExpression" -> cronTrigger.getCronExpression
          )
        )
      }
      case TriggerType.Simple => {
        val simpleTrigger = trigger.asInstanceOf[SimpleTrigger]
        Json.obj(
          "simple" -> Json.obj(
            "repeatInterval" -> simpleTrigger.getRepeatInterval,
            "repeatCount" -> simpleTrigger.getRepeatCount
          )
        )
      }
      case _ => Json.obj()
    }
    // Fall back to Medium priority / 300 seconds when no monitoring record is stored.
    val (monitoringPriority, maxSecondsInError) = monitoringModel.getTriggerMonitoringRecord(
      trigger
    ).map { monitoringRecord =>
      (monitoringRecord.priority, monitoringRecord.maxSecondsInError)
    }.getOrElse((TriggerMonitoringPriority.Medium, 300))
    val jobDataMap = trigger.getJobDataMap
    val job = trigger.getJobKey
    Json.obj(
      "triggerType" -> triggerType.toString,
      "jobGroup" -> job.getGroup,
      "jobName" -> job.getName,
      "group" -> triggerKey.getGroup,
      "name" -> triggerKey.getName,
      "description" -> trigger.getDescription,
      "job-data-map" -> JsObject(jobDataMap.getKeys.toSeq.map(key => key -> JsString(jobDataMap.getString(key)))),
      "triggerMonitoringPriority" -> monitoringPriority.toString,
      "triggerMaxErrorTime" -> maxSecondsInError
    ) ++ schedule
  }
  /** JSON writer for a sequence of triggers, reusing [[writesTrigger]] per element. */
  def writesTriggerSeq(monitoringModel: TriggerMonitoringModel) = Writes.seq(writesTrigger(monitoringModel))
}
| lucidsoftware/piezo | admin/app/com/lucidchart/piezo/admin/controllers/TriggerHelper.scala | Scala | apache-2.0 | 2,740 |
package org.allenai.pnp.examples
import scala.collection.JavaConverters._
import scala.collection.mutable.ListBuffer
import org.allenai.pnp._
import org.allenai.pnp.Pnp._
import com.google.common.base.Preconditions
import com.jayantkrish.jklol.util.IndexedList
import edu.cmu.dynet._
import com.jayantkrish.jklol.training.NullLogFunction
// Empty placeholder class; all functionality lives in the companion object below.
class MultilayerPerceptron {
  import MultilayerPerceptron._
}
/**
 * Example of probabilistic neural programs (Pnp) backed by DyNet: a two-layer
 * perceptron classifier, a pairwise label-compatibility score, a simple sequence
 * tagger built from both, and a `main` that runs beam search over each model.
 */
object MultilayerPerceptron {
  // Network dimensions: input features, hidden layer width, label embedding size.
  val FEATURE_VECTOR_DIM = 3
  val HIDDEN_DIM = 50
  val LABEL_DIM = 10
  /**
   * Two-layer perceptron over a feature vector: scores = W2 * tanh(W1 * x + b1),
   * then chooses a boolean label according to the scores.
   */
  def mlp(x: FloatVector): Pnp[Boolean] = {
    for {
      weights1 <- param("layer1Weights")
      bias1 <- param("layer1Bias")
      weights2 <- param("layer2Weights")
      inputExpression = Expression.input(Dim(FEATURE_VECTOR_DIM), x)
      scores = weights2 * Expression.tanh((weights1 * inputExpression) + bias1)
      y <- choose(Array(true, false), scores)
    } yield {
      y
    }
  }
  /**
   * Compatibility score of two adjacent boolean labels: the dot product of their
   * looked-up embeddings (index 0 for true, 1 for false).
   */
  def labelNn(left: Boolean, right: Boolean, cg: CompGraph): Expression = {
    val leftParam = cg.getLookupParameter("left")
    val rightParam = cg.getLookupParameter("right")
    val leftVec = Expression.lookup(leftParam, if (left) { 0 } else { 1 })
    val rightVec = Expression.lookup(rightParam, if (right) { 0 } else { 1 })
    Expression.dotProduct(leftVec, rightVec)
  }
  /**
   * Tags each feature vector with a boolean label, adding a pairwise score between
   * each label and the following one. Labels come out in input order (the fold
   * builds the list back-to-front by prepending).
   */
  def sequenceTag(xs: Seq[FloatVector]): Pnp[List[Boolean]] = {
    xs.foldLeft(Pnp.value(List[Boolean]()))((x, y) => for {
      cur <- mlp(y)
      rest <- x
      cg <- Pnp.computationGraph()
      _ <- if (rest.length > 0) {
        score(labelNn(cur, rest.head, cg))
      } else {
        value(())
      }
    } yield {
      cur :: rest
    })
  }
  /** Demo entry point: builds the models, runs beam search, and prints executions. */
  def main(args: Array[String]): Unit = {
    // Initialize dynet
    Initialize.initialize()
    val model = PnpModel.init(true)
    model.addParameter("layer1Weights", Dim(HIDDEN_DIM, FEATURE_VECTOR_DIM))
    model.addParameter("layer1Bias", Dim(HIDDEN_DIM))
    model.addParameter("layer2Weights", Dim(2, HIDDEN_DIM))
    val featureVector = new FloatVector(Seq(1.0f, 2f, 3f))
    val dist = mlp(featureVector)
    val marginals = dist.beamSearch(2, model)
    for (x <- marginals.executions) {
      println(x)
    }
    // Sequence tagging uses globally-normalized scores and the label embeddings.
    val featureVectors = Seq(featureVector, featureVector, featureVector)
    model.locallyNormalized = false
    model.addLookupParameter("left", 2, Dim(LABEL_DIM))
    model.addLookupParameter("right", 2, Dim(LABEL_DIM))
    val dist2 = sequenceTag(featureVectors)
    val marginals2 = dist2.beamSearch(5, model)
    for (x <- marginals2.executions) {
      println(x)
    }
    // A purely probabilistic program (no neural parameters): two coin flips ANDed.
    val flip: Pnp[Boolean] = choose(Array(true, false), Array(0.5, 0.5))
    val twoFlips: Pnp[Boolean] = for {
      x <- flip
      y <- flip
    } yield {
      x && y
    }
    val marginals3 = twoFlips.beamSearch(5)
    println(marginals3.marginals().getProbabilityMap)
  }
}
} | jayantk/pnp | src/main/scala/org/allenai/pnp/examples/MultilayerPerceptron.scala | Scala | apache-2.0 | 2,867 |
package io.flow.event.v2
import io.flow.event.Record
import io.flow.log.RollbarLogger
import io.flow.play.metrics.MetricsInstrumentation
import nl.grons.metrics4.scala.{Counter, MetricName, PushGauge}
import java.util.concurrent.{ConcurrentHashMap, Executors}
import scala.annotation.tailrec
import scala.util.control.NonFatal
import scala.util.{Failure, Success, Try}
import scala.jdk.CollectionConverters._
/**
 * Base class for Kinesis-style stream consumers: provides a Rollbar logger tagged
 * with the stream name and worker id, plus a single-threaded executor for the
 * consumer loop. Subclasses implement start/shutdown.
 */
abstract class KinesisStyleConsumer(
  config: StreamConfig,
  rollbarLogger: RollbarLogger,
) {
  // Logger pre-tagged so every message identifies the stream and worker.
  protected val logger =
    rollbarLogger
      .fingerprint(getClass.getName)
      .withKeyValue("stream", config.streamName)
      .withKeyValue("worker_id", config.workerId)
  // Dedicated single-thread executor for the consumer's background work.
  protected val executor = Executors.newSingleThreadExecutor()
  /** Begins consuming from the stream. */
  def start(): Unit
  /** Stops consuming and releases resources. */
  def shutdown(): Unit
}
/**
 * Constants and shared (JVM-wide) metric caches for [[KinesisStyleRecordProcessor]].
 */
object KinesisStyleRecordProcessor {
  // Yes, it is arbitrary
  val MaxRetries = 8
  // Delay between processing retries, in milliseconds.
  val BackoffTimeInMillis = 3000L
  // Concurrent caches of per-shard metrics, keyed by the shard tag string, so the
  // same gauge/counter instance is reused across invocations and processor instances.
  private val streamLagMetrics = new ConcurrentHashMap[String, PushGauge[Long]]().asScala
  private val numRecordsMetrics = new ConcurrentHashMap[String, Counter]().asScala
}
/**
 * Mixin for record processors consuming Kinesis-style streams: exposes per-shard
 * lag/record-count metrics and a bounded-retry wrapper for the user's record
 * handler.
 */
trait KinesisStyleRecordProcessor extends MetricsInstrumentation {
  def config: StreamConfig
  def rollbarLogger: RollbarLogger
  // Used to namespace metric names, e.g. "lib_event.<streamType>.consumer".
  def streamType: String
  import KinesisStyleRecordProcessor._
  override lazy val metricBaseName: MetricName = MetricName(s"lib_event.$streamType.consumer")
  // Records the consumer's lag behind the stream head, tagged by shard when known.
  protected def streamLagMetric(shardId: Option[String], lagMillis: Long) = {
    val shardTag = shardId.fold("")(shardId => s",shard_id:$shardId")
    streamLagMetrics.getOrElseUpdate(
      shardTag,
      metrics.pushGauge(s"lagMillis[stream_name:${config.streamName},app_name:${config.appName}$shardTag]", lagMillis)
    ).push(lagMillis)
  }
  // Counter of records processed, tagged by shard when known; the counter instance is
  // cached in the companion so repeated calls reuse it.
  protected def numRecordsMetric(shardId: Option[String]) = {
    val shardTag = shardId.fold("")(shardId => s",shard_id:$shardId")
    numRecordsMetrics.getOrElseUpdate(
      shardTag,
      metrics.counter(s"numRecords[stream_name:${config.streamName},app_name:${config.appName}$shardTag]")
    )
  }
  protected val logger = rollbarLogger
    .fingerprint(this.getClass.getName)
    .withKeyValue("stream", config.streamName)
    .withKeyValue("worker_id", config.workerId)
  /**
   * Runs `f` over `records`, retrying non-fatal failures up to [[MaxRetries]] times
   * with a fixed [[BackoffTimeInMillis]] sleep between attempts. After the final
   * failed attempt the batch is logged (with its sequence numbers) and SKIPPED, not
   * rethrown. Fatal errors propagate immediately.
   */
  @tailrec
  protected final def executeRetry(f: Seq[Record] => Unit, records: Seq[Record], sequenceNumbers: Seq[String], retries: Int = 0): Unit = {
    Try(f(records)) match {
      case Success(_) =>
      case Failure(NonFatal(e)) =>
        if (retries >= MaxRetries) {
          val size = records.size
          logger
            .withKeyValue("retries", retries)
            .error(s"[FlowKinesisError] Error while processing records after $MaxRetries attempts. " +
              s"$size records are skipped. Sequence numbers: ${sequenceNumbers.mkString(", ")}", e)
        } else {
          logger
            .withKeyValue("retries", retries)
            .warn(s"[FlowKinesisWarn] Error while processing records (retry $retries/$MaxRetries). Retrying...", e)
          // Blocking backoff; this runs on the consumer's dedicated thread.
          Thread.sleep(BackoffTimeInMillis)
          executeRetry(f, records, sequenceNumbers, retries + 1)
        }
      case Failure(e) =>
        throw e
    }
  }
}
| flowcommerce/lib-event | app/io/flow/event/v2/KinesisStyle.scala | Scala | mit | 3,158 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.api
import java.util.Properties
import kafka.server.KafkaConfig
import kafka.utils.{ShutdownableThread, TestUtils}
import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig}
import org.apache.kafka.clients.producer.internals.ErrorLoggingCallback
import org.apache.kafka.common.TopicPartition
import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.Test
import scala.jdk.CollectionConverters._
import scala.collection.mutable
/**
 * Integration test exercising Kafka transactions while brokers are continuously
 * bounced: records are copied transactionally from an input topic to an output
 * topic (aborting every third batch), then the output is verified to contain
 * exactly the input set, in order per partition, with no duplicates.
 */
class TransactionsBounceTest extends IntegrationTestHarness {
  private val consumeRecordTimeout = 30000
  private val producerBufferSize = 65536
  private val serverMessageMaxBytes = producerBufferSize/2
  private val numPartitions = 3
  private val outputTopic = "output-topic"
  private val inputTopic = "input-topic"
  val overridingProps = new Properties()
  overridingProps.put(KafkaConfig.AutoCreateTopicsEnableProp, false.toString)
  overridingProps.put(KafkaConfig.MessageMaxBytesProp, serverMessageMaxBytes.toString)
  // Set a smaller value for the number of partitions for the offset commit topic (__consumer_offset topic)
  // so that the creation of that topic/partition(s) and subsequent leader assignment doesn't take relatively long
  overridingProps.put(KafkaConfig.ControlledShutdownEnableProp, true.toString)
  overridingProps.put(KafkaConfig.UncleanLeaderElectionEnableProp, false.toString)
  overridingProps.put(KafkaConfig.AutoLeaderRebalanceEnableProp, false.toString)
  overridingProps.put(KafkaConfig.OffsetsTopicPartitionsProp, 1.toString)
  overridingProps.put(KafkaConfig.OffsetsTopicReplicationFactorProp, 3.toString)
  overridingProps.put(KafkaConfig.MinInSyncReplicasProp, 2.toString)
  overridingProps.put(KafkaConfig.TransactionsTopicPartitionsProp, 1.toString)
  overridingProps.put(KafkaConfig.TransactionsTopicReplicationFactorProp, 3.toString)
  overridingProps.put(KafkaConfig.GroupMinSessionTimeoutMsProp, "10") // set small enough session timeout
  overridingProps.put(KafkaConfig.GroupInitialRebalanceDelayMsProp, "0")
  // This is the one of the few tests we currently allow to preallocate ports, despite the fact that this can result in transient
  // failures due to ports getting reused. We can't use random ports because of bad behavior that can result from bouncing
  // brokers too quickly when they get new, random ports. If we're not careful, the client can end up in a situation
  // where metadata is not refreshed quickly enough, and by the time it's actually trying to, all the servers have
  // been bounced and have new addresses. None of the bootstrap nodes or current metadata can get them connected to a
  // running server.
  //
  // Since such quick rotation of servers is incredibly unrealistic, we allow this one test to preallocate ports, leaving
  // a small risk of hitting errors due to port conflicts. Hopefully this is infrequent enough to not cause problems.
  override def generateConfigs = {
    FixedPortTestUtils.createBrokerConfigs(brokerCount, zkConnect, enableControlledShutdown = true)
      .map(KafkaConfig.fromProps(_, overridingProps))
  }
  override protected def brokerCount: Int = 4
  // Variant that commits consumed offsets by consumer group id.
  @Test
  def testWithGroupId(): Unit = {
    testBrokerFailure((producer, groupId, consumer) =>
      producer.sendOffsetsToTransaction(TestUtils.consumerPositions(consumer).asJava, groupId))
  }
  // Variant that commits consumed offsets using the consumer's full group metadata.
  @Test
  def testWithGroupMetadata(): Unit = {
    testBrokerFailure((producer, _, consumer) =>
      producer.sendOffsetsToTransaction(TestUtils.consumerPositions(consumer).asJava, consumer.groupMetadata()))
  }
  /**
   * Shared test body: seeds the input topic, transactionally copies it to the output
   * topic (aborting every third transaction and rewinding the consumer) while a
   * background scheduler rolls every broker, then verifies the committed output.
   * `commit` abstracts how consumed offsets are attached to the transaction.
   */
  private def testBrokerFailure(commit: (KafkaProducer[Array[Byte], Array[Byte]],
    String, KafkaConsumer[Array[Byte], Array[Byte]]) => Unit): Unit = {
    // basic idea is to seed a topic with 10000 records, and copy it transactionally while bouncing brokers
    // constantly through the period.
    val consumerGroup = "myGroup"
    val numInputRecords = 10000
    createTopics()
    TestUtils.seedTopicWithNumberedRecords(inputTopic, numInputRecords, servers)
    val consumer = createConsumerAndSubscribe(consumerGroup, List(inputTopic))
    val producer = createTransactionalProducer("test-txn")
    producer.initTransactions()
    val scheduler = new BounceScheduler
    scheduler.start()
    try {
      var numMessagesProcessed = 0
      var iteration = 0
      while (numMessagesProcessed < numInputRecords) {
        val toRead = Math.min(200, numInputRecords - numMessagesProcessed)
        trace(s"$iteration: About to read $toRead messages, processed $numMessagesProcessed so far..")
        val records = TestUtils.pollUntilAtLeastNumRecords(consumer, toRead, waitTimeMs = consumeRecordTimeout)
        trace(s"Received ${records.size} messages, sending them transactionally to $outputTopic")
        producer.beginTransaction()
        // Abort every third transaction; aborted batches are re-read after rewinding.
        val shouldAbort = iteration % 3 == 0
        records.foreach { record =>
          producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(outputTopic, null, record.key, record.value, !shouldAbort), new ErrorLoggingCallback(outputTopic, record.key, record.value, true))
        }
        trace(s"Sent ${records.size} messages. Committing offsets.")
        commit(producer, consumerGroup, consumer)
        if (shouldAbort) {
          trace(s"Committed offsets. Aborting transaction of ${records.size} messages.")
          producer.abortTransaction()
          // Rewind so the aborted batch is consumed (and produced) again.
          TestUtils.resetToCommittedPositions(consumer)
        } else {
          trace(s"Committed offsets. committing transaction of ${records.size} messages.")
          producer.commitTransaction()
          numMessagesProcessed += records.size
        }
        iteration += 1
      }
    } finally {
      scheduler.shutdown()
    }
    // Verify with a read_committed consumer: per-partition ordering, no duplicates,
    // and exactly the original value set.
    val verifyingConsumer = createConsumerAndSubscribe("randomGroup", List(outputTopic), readCommitted = true)
    val recordsByPartition = new mutable.HashMap[TopicPartition, mutable.ListBuffer[Int]]()
    TestUtils.pollUntilAtLeastNumRecords(verifyingConsumer, numInputRecords, waitTimeMs = consumeRecordTimeout).foreach { record =>
      val value = TestUtils.assertCommittedAndGetValue(record).toInt
      val topicPartition = new TopicPartition(record.topic(), record.partition())
      recordsByPartition.getOrElseUpdate(topicPartition, new mutable.ListBuffer[Int])
        .append(value)
    }
    val outputRecords = new mutable.ListBuffer[Int]()
    recordsByPartition.values.foreach { partitionValues =>
      assertEquals(partitionValues, partitionValues.sorted, "Out of order messages detected")
      outputRecords.appendAll(partitionValues)
    }
    val recordSet = outputRecords.toSet
    assertEquals(numInputRecords, recordSet.size)
    val expectedValues = (0 until numInputRecords).toSet
    assertEquals(expectedValues, recordSet, s"Missing messages: ${expectedValues -- recordSet}")
  }
  // Producer with a transactional id, idempotence, acks=all and a small batch size.
  private def createTransactionalProducer(transactionalId: String) = {
    val props = new Properties()
    props.put(ProducerConfig.ACKS_CONFIG, "all")
    props.put(ProducerConfig.BATCH_SIZE_CONFIG, "512")
    props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, transactionalId)
    props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true")
    createProducer(configOverrides = props)
  }
  // Consumer with manual offset commits; isolation level depends on `readCommitted`.
  private def createConsumerAndSubscribe(groupId: String,
                                         topics: List[String],
                                         readCommitted: Boolean = false) = {
    val consumerProps = new Properties
    consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId)
    consumerProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")
    consumerProps.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG,
      if (readCommitted) "read_committed" else "read_uncommitted")
    val consumer = createConsumer(configOverrides = consumerProps)
    consumer.subscribe(topics.asJava)
    consumer
  }
  // Creates the input/output topics with replication factor 3 and min ISR 2.
  private def createTopics() = {
    val topicConfig = new Properties()
    topicConfig.put(KafkaConfig.MinInSyncReplicasProp, 2.toString)
    createTopic(inputTopic, numPartitions, 3, topicConfig)
    createTopic(outputTopic, numPartitions, 3, topicConfig)
  }
  // Background thread that performs a rolling restart of every broker, then waits for
  // leaders to be elected for each output partition, repeating until shut down.
  private class BounceScheduler extends ShutdownableThread("daemon-broker-bouncer", false) {
    override def doWork(): Unit = {
      for (server <- servers) {
        trace("Shutting down server : %s".format(server.config.brokerId))
        server.shutdown()
        server.awaitShutdown()
        Thread.sleep(500)
        trace("Server %s shut down. Starting it up again.".format(server.config.brokerId))
        server.startup()
        trace("Restarted server: %s".format(server.config.brokerId))
        Thread.sleep(500)
      }
      (0 until numPartitions).foreach(partition => TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, outputTopic, partition))
    }
    override def shutdown(): Unit = {
      super.shutdown()
    }
  }
}
| Chasego/kafka | core/src/test/scala/integration/kafka/api/TransactionsBounceTest.scala | Scala | apache-2.0 | 9,759 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.accumulo.index
import org.geotools.data.{Query, Transaction}
import org.geotools.filter.text.ecql.ECQL
import org.junit.runner.RunWith
import org.locationtech.geomesa.accumulo.TestWithFeatureType
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.index.index.z2.XZ2Index
import org.locationtech.geomesa.index.index.z3.XZ3Index
import org.locationtech.geomesa.utils.collection.SelfClosingIterator
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
/**
 * Verifies that the XZ2/XZ3 indices honor a user-configured precision and that the
 * expected index is chosen and returns correct results for spatial and
 * spatio-temporal queries over polygon features.
 */
@RunWith(classOf[JUnitRunner])
class XZConfigurationTest extends Specification with TestWithFeatureType {
  // Schema enables only the xz2/xz3 indices and sets XZ precision to 10.
  val spec = "name:String,dtg:Date,*geom:Polygon:srid=4326;geomesa.xz.precision='10',geomesa.indexes.enabled='xz2,xz3'"
  // Ten polygons, one per hour of 2010-05-07, shifting north with the index i.
  val features = (0 until 10).map { i =>
    val sf = new ScalaSimpleFeature(sft, s"$i")
    sf.setAttributes(Array[AnyRef](s"name$i", f"2010-05-07T$i%02d:00:00.000Z",
      s"POLYGON((40 3$i, 42 3$i, 42 2$i, 40 2$i, 40 3$i))"))
    sf
  }
  step {
    addFeatures(features)
  }
  "XZIndices" should {
    "support configurable precision" >> {
      import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType
      sft.getXZPrecision mustEqual 10
    }
    "query XZ2Index at configurable precision" >> {
      // Spatial-only filter should route to the XZ2 (2D) index.
      val filter = "bbox(geom,39,19,41,23)"
      val query = new Query(sftName, ECQL.toFilter(filter))
      forall(ds.getQueryPlan(query))(_.filter.index.name mustEqual XZ2Index.name)
      val features = SelfClosingIterator(ds.getFeatureReader(query, Transaction.AUTO_COMMIT)).toList
      features must haveSize(4)
      features.map(_.getID.toInt) must containTheSameElementsAs(0 until 4)
    }
    "query XZ3Index at configurable precision" >> {
      // Spatio-temporal filter should route to the XZ3 (3D) index.
      val filter = "bbox(geom,39,19,41,23) AND dtg DURING 2010-05-07T01:30:00.000Z/2010-05-07T05:30:00.000Z"
      val query = new Query(sftName, ECQL.toFilter(filter))
      forall(ds.getQueryPlan(query))(_.filter.index.name mustEqual XZ3Index.name)
      val features = SelfClosingIterator(ds.getFeatureReader(query, Transaction.AUTO_COMMIT)).toList
      features must haveSize(2)
      features.map(_.getID.toInt) must containTheSameElementsAs(2 until 4)
    }
  }
}
| locationtech/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/test/scala/org/locationtech/geomesa/accumulo/index/XZConfigurationTest.scala | Scala | apache-2.0 | 2,706 |
package com.asto.dop.core.entity
import com.asto.dop.core.helper.{DBHelper, Page}
import com.ecfront.common.Resp
import com.typesafe.scalalogging.slf4j.LazyLogging
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
/**
 * User operation log entity: records, per operation category, the timestamp of the
 * most recently processed UserOptEntity record.
 */
case class UserLogEntity(
                          // | Y | primary key: the operation/action this row tracks, enum: `bind/apply/self_examine_pass/bank_examine_pass`
                          var action: String,
                          // | Y | last update time, format `yyyyMMddHHmmss`, e.g. 20151012100000
                          var last_update_time: String
                        )
/**
 * Async CRUD operations for the `user_log` table, backed by DBHelper. `init()` runs
 * once at object construction: it creates the table if missing and seeds one row per
 * tracked action flag.
 */
object UserLogEntity extends LazyLogging {
  object db {
    private val TABLE_NAME = "user_log"
    // Creates the table (idempotent) and, on first successful creation, seeds the
    // four action rows with an initial timestamp of 2015-10-01 00:00:00.
    def init(): Unit = {
      DBHelper.update {
        s"""CREATE TABLE IF NOT EXISTS $TABLE_NAME
            |(
            |    action varchar(200) NOT NULL COMMENT '记录主键,是操作行为,用于记录此访问的业务含义,枚举:`register/apply/self_examine_pass/bank_examine_pass`' ,
            |    last_update_time varchar(14) NOT NULL COMMENT '最后一次更新时间,格式`yyyyMMddHHmmss`,如 20151012100000' ,
            |    PRIMARY KEY(action) ,
            |    INDEX i_last_update_time(last_update_time)
            |)ENGINE=innodb DEFAULT CHARSET=utf8
         """.stripMargin
      }.onSuccess {
        case initResp =>
          if (initResp) {
            save(UserLogEntity(UserOptEntity.FLAG_APPLY, "20151001000000"))
            save(UserLogEntity(UserOptEntity.FLAG_BIND, "20151001000000"))
            save(UserLogEntity(UserOptEntity.FLAG_SELF_EXAMINE_PASS, "20151001000000"))
            save(UserLogEntity(UserOptEntity.FLAG_BANK_EXAMINE_PASS, "20151001000000"))
          }
      }
    }
    // Inserts the row only if no row with the same action already exists.
    def save(obj: UserLogEntity): Future[Resp[Void]] = {
      DBHelper.update(
        s"""
           |INSERT INTO $TABLE_NAME ( action , last_update_time)
           |  SELECT ? , ? FROM DUAL WHERE NOT EXISTS( SELECT 1 FROM $TABLE_NAME WHERE action = ? )
         """.stripMargin,
        List(
          obj.action, obj.last_update_time,obj.action
        )
      )
    }
    // Raw UPDATE: caller supplies the SET clause, WHERE clause and bound parameters.
    def update(newValues: String, condition: String, parameters: List[Any]): Future[Resp[Void]] = {
      DBHelper.update(
        s"UPDATE $TABLE_NAME Set $newValues WHERE $condition",
        parameters
      )
    }
    // Deletes the row for a single action (primary key).
    def delete(action: String): Future[Resp[Void]] = {
      DBHelper.update(
        s"DELETE FROM $TABLE_NAME WHERE action = ? ",
        List(action)
      )
    }
    // Deletes all rows matching an arbitrary condition.
    def delete(condition: String, parameters: List[Any]): Future[Resp[Void]] = {
      DBHelper.update(
        s"DELETE FROM $TABLE_NAME WHERE $condition ",
        parameters
      )
    }
    // Fetches the row for a single action (primary key).
    def get(action: String): Future[Resp[UserLogEntity]] = {
      DBHelper.get(
        s"SELECT * FROM $TABLE_NAME WHERE action = ? ",
        List(action),
        classOf[UserLogEntity]
      )
    }
    // Fetches a single row matching an arbitrary condition.
    def get(condition: String, parameters: List[Any]): Future[Resp[UserLogEntity]] = {
      DBHelper.get(
        s"SELECT * FROM $TABLE_NAME WHERE $condition ",
        parameters,
        classOf[UserLogEntity]
      )
    }
    // Whether at least one row matches the condition.
    def exist(condition: String, parameters: List[Any]): Future[Resp[Boolean]] = {
      DBHelper.exist(
        s"SELECT 1 FROM $TABLE_NAME WHERE $condition ",
        parameters
      )
    }
    // Lists all rows matching the condition (defaults to all rows).
    def find(condition: String = " 1=1 ", parameters: List[Any] = List()): Future[Resp[List[UserLogEntity]]] = {
      DBHelper.find(
        s"SELECT * FROM $TABLE_NAME WHERE $condition ",
        parameters,
        classOf[UserLogEntity]
      )
    }
    // Paged listing of rows matching the condition.
    def page(condition: String = " 1=1 ", parameters: List[Any] = List(), pageNumber: Long = 1, pageSize: Int = 10): Future[Resp[Page[UserLogEntity]]] = {
      DBHelper.page(
        s"SELECT * FROM $TABLE_NAME WHERE $condition ",
        parameters,
        pageNumber, pageSize,
        classOf[UserLogEntity]
      )
    }
    // Number of rows matching the condition.
    def count(condition: String = " 1=1 ", parameters: List[Any] = List()): Future[Resp[Long]] = {
      DBHelper.count(
        s"SELECT count(1) FROM $TABLE_NAME WHERE $condition ",
        parameters
      )
    }
    init()
  }
}
| zj-lingxin/dop-core | src/main/scala/com/asto/dop/core/entity/UserLogEntity.scala | Scala | mit | 4,328 |
package lectures.part5ts
/**
 * Lecture examples on Scala inheritance: compound types, the diamond problem,
 * and trait linearization. Declaration order is semantically significant
 * throughout -- do not reorder the traits/classes below.
 */
object RockingInheritance extends App {
  /**
   * Convenience
   */
  trait Writer[T] {
    def write(value: T): Unit
  }
  trait Closeable {
    def close(status: Int): Unit
  }
  trait GenericStream[T] {
    def foreach(f: T => Unit): Unit
  }
  // We can use all the traits we need, defining a specific type.
  // The compound type `GenericStream[T] with Writer[T] with Closeable` accepts
  // any value mixing in all three traits.
  def processStream[T](stream: GenericStream[T] with Writer[T] with Closeable): Unit = {
    stream.foreach(println)
    stream.close(0)
  }
  /**
   * Diamond problem
   */
  trait Animal {
    def name: String
  }
  trait Lion extends Animal {
    override def name: String = "Lion"
  }
  trait Tiger extends Animal {
    override def name: String = "Tiger"
  }
  // The last override (tiger) is the one that gets picked.
  class Liger extends Lion with Tiger
  val liger = new Liger
  println(liger.name) // prints "Tiger": the rightmost mixin wins
  /**
   * The `super` problem (type linearization).
   */
  trait Cold {
    def print: Unit = println("Cold")
  }
  trait Green extends Cold {
    override def print: Unit = {
      println("Green")
      super.print
    }
  }
  trait Blue extends Cold {
    override def print: Unit = {
      println("Blue")
      super.print
    }
  }
  class Red {
    def print: Unit = println("Red")
  }
  /**
   * Type linearization:
   *
   * White = Red with Green with Blue with <White>
   *       = AnyRef with <Red>
   *         with (AnyRef with <Cold> with <Green>)
   *         with (AnyRef with <Cold> with <Blue>)
   *         with <White>
   *       = AnyRef with <Red> with <Cold> with <Green> with <Blue> with <White>
   *
   * Calling `super` will call the types from right to left.
   */
  class White extends Red with Green with Blue {
    override def print: Unit = {
      println("White")
      super.print
    }
  }
  val color = new White
  // Per the linearization above, this prints: White, Blue, Green, Cold
  // (Red.print is never reached -- Cold.print does not call super).
  color.print
}
| guhemama/moocs | RockTheJVMScalaAdvanced/src/main/scala/lectures/part5ts/RockingInheritance.scala | Scala | bsd-3-clause | 1,823 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.parquet
import java.util.{Locale, Map => JMap, TimeZone}
import scala.collection.JavaConverters._
import org.apache.hadoop.conf.Configuration
import org.apache.parquet.hadoop.api.{InitContext, ReadSupport}
import org.apache.parquet.hadoop.api.ReadSupport.ReadContext
import org.apache.parquet.io.api.RecordMaterializer
import org.apache.parquet.schema._
import org.apache.parquet.schema.Type.Repetition
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.expressions.UnsafeRow
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
/**
* A Parquet [[ReadSupport]] implementation for reading Parquet records as Catalyst
* [[UnsafeRow]]s.
*
* The API interface of [[ReadSupport]] is a little bit over complicated because of historical
* reasons. In older versions of parquet-mr (say 1.6.0rc3 and prior), [[ReadSupport]] need to be
* instantiated and initialized twice on both driver side and executor side. The [[init()]] method
* is for driver side initialization, while [[prepareForRead()]] is for executor side. However,
* starting from parquet-mr 1.6.0, it's no longer the case, and [[ReadSupport]] is only instantiated
* and initialized on executor side. So, theoretically, now it's totally fine to combine these two
* methods into a single initialization method. The only reason (I could think of) to still have
* them here is for parquet-mr API backwards-compatibility.
*
* Due to this reason, we no longer rely on [[ReadContext]] to pass requested schema from [[init()]]
* to [[prepareForRead()]], but use a private `var` for simplicity.
*/
private[parquet] class ParquetReadSupport(val convertTz: Option[TimeZone],
    enableVectorizedReader: Boolean)
  extends ReadSupport[UnsafeRow] with Logging {
  // Set by init() on the executor and later consumed by prepareForRead();
  // see the class Scaladoc for why this is a private var rather than being
  // passed through ReadContext.
  private var catalystRequestedSchema: StructType = _

  def this() {
    // We need a zero-arg constructor for SpecificParquetRecordReaderBase. But that is only
    // used in the vectorized reader, where we get the convertTz value directly, and the value here
    // is ignored.
    this(None, enableVectorizedReader = true)
  }

  /**
   * Called on executor side before [[prepareForRead()]] and instantiating actual Parquet record
   * readers. Responsible for figuring out Parquet requested schema used for column pruning.
   *
   * Reads the Catalyst requested schema from the Hadoop configuration (must have
   * been set under [[ParquetReadSupport.SPARK_ROW_REQUESTED_SCHEMA]]), clips the
   * Parquet file schema against it, and returns the resulting read context.
   */
  override def init(context: InitContext): ReadContext = {
    val conf = context.getConfiguration
    catalystRequestedSchema = {
      val schemaString = conf.get(ParquetReadSupport.SPARK_ROW_REQUESTED_SCHEMA)
      assert(schemaString != null, "Parquet requested schema not set.")
      StructType.fromString(schemaString)
    }

    val caseSensitive = conf.getBoolean(SQLConf.CASE_SENSITIVE.key,
      SQLConf.CASE_SENSITIVE.defaultValue.get)
    val schemaPruningEnabled = conf.getBoolean(SQLConf.NESTED_SCHEMA_PRUNING_ENABLED.key,
      SQLConf.NESTED_SCHEMA_PRUNING_ENABLED.defaultValue.get)
    val parquetFileSchema = context.getFileSchema
    val parquetClippedSchema = ParquetReadSupport.clipParquetSchema(parquetFileSchema,
      catalystRequestedSchema, caseSensitive)

    // We pass two schema to ParquetRecordMaterializer:
    // - parquetRequestedSchema: the schema of the file data we want to read
    // - catalystRequestedSchema: the schema of the rows we want to return
    // The reader is responsible for reconciling the differences between the two.
    val parquetRequestedSchema = if (schemaPruningEnabled && !enableVectorizedReader) {
      // Parquet-MR reader requires that parquetRequestedSchema include only those fields present
      // in the underlying parquetFileSchema. Therefore, we intersect the parquetClippedSchema
      // with the parquetFileSchema
      ParquetReadSupport.intersectParquetGroups(parquetClippedSchema, parquetFileSchema)
        .map(groupType => new MessageType(groupType.getName, groupType.getFields))
        .getOrElse(ParquetSchemaConverter.EMPTY_MESSAGE)
    } else {
      // Spark's vectorized reader only support atomic types currently. It also skip fields
      // in parquetRequestedSchema which are not present in the file.
      parquetClippedSchema
    }

    logDebug(
      s"""Going to read the following fields from the Parquet file with the following schema:
         |Parquet file schema:
         |$parquetFileSchema
         |Parquet clipped schema:
         |$parquetClippedSchema
         |Parquet requested schema:
         |$parquetRequestedSchema
         |Catalyst requested schema:
         |${catalystRequestedSchema.treeString}
       """.stripMargin)
    new ReadContext(parquetRequestedSchema, Map.empty[String, String].asJava)
  }

  /**
   * Called on executor side after [[init()]], before instantiating actual Parquet record readers.
   * Responsible for instantiating [[RecordMaterializer]], which is used for converting Parquet
   * records to Catalyst [[UnsafeRow]]s.
   *
   * Relies on `catalystRequestedSchema` having been populated by [[init()]];
   * UDTs in that schema are expanded to their underlying SQL types first.
   */
  override def prepareForRead(
      conf: Configuration,
      keyValueMetaData: JMap[String, String],
      fileSchema: MessageType,
      readContext: ReadContext): RecordMaterializer[UnsafeRow] = {
    val parquetRequestedSchema = readContext.getRequestedSchema
    new ParquetRecordMaterializer(
      parquetRequestedSchema,
      ParquetReadSupport.expandUDT(catalystRequestedSchema),
      new ParquetToSparkSchemaConverter(conf),
      convertTz)
  }
}
private[parquet] object ParquetReadSupport {
  // Hadoop-conf key under which the driver serializes the Catalyst requested schema.
  val SPARK_ROW_REQUESTED_SCHEMA = "org.apache.spark.sql.parquet.row.requested_schema"
  val SPARK_METADATA_KEY = "org.apache.spark.sql.parquet.row.metadata"

  /**
   * Tailors `parquetSchema` according to `catalystSchema` by removing column paths don't exist
   * in `catalystSchema`, and adding those only exist in `catalystSchema`.
   */
  def clipParquetSchema(
      parquetSchema: MessageType,
      catalystSchema: StructType,
      caseSensitive: Boolean = true): MessageType = {
    val clippedParquetFields = clipParquetGroupFields(
      parquetSchema.asGroupType(), catalystSchema, caseSensitive)
    if (clippedParquetFields.isEmpty) {
      // Parquet forbids empty group types except for the empty message.
      ParquetSchemaConverter.EMPTY_MESSAGE
    } else {
      Types
        .buildMessage()
        .addFields(clippedParquetFields: _*)
        .named(ParquetSchemaConverter.SPARK_PARQUET_SCHEMA_NAME)
    }
  }

  /**
   * Recursively clips a single Parquet type against the corresponding Catalyst
   * type. Only nested shapes (arrays/maps with nested elements, structs) are
   * clipped; primitives and UDT-backed types are returned unchanged.
   */
  private def clipParquetType(
      parquetType: Type, catalystType: DataType, caseSensitive: Boolean): Type = {
    catalystType match {
      case t: ArrayType if !isPrimitiveCatalystType(t.elementType) =>
        // Only clips array types with nested type as element type.
        clipParquetListType(parquetType.asGroupType(), t.elementType, caseSensitive)

      case t: MapType
        if !isPrimitiveCatalystType(t.keyType) ||
           !isPrimitiveCatalystType(t.valueType) =>
        // Only clips map types with nested key type or value type
        clipParquetMapType(parquetType.asGroupType(), t.keyType, t.valueType, caseSensitive)

      case t: StructType =>
        clipParquetGroup(parquetType.asGroupType(), t, caseSensitive)

      case _ =>
        // UDTs and primitive types are not clipped. For UDTs, a clipped version might not be able
        // to be mapped to desired user-space types. So UDTs shouldn't participate schema merging.
        parquetType
    }
  }

  /**
   * Whether a Catalyst [[DataType]] is primitive. Primitive [[DataType]] is not equivalent to
   * [[AtomicType]]. For example, [[CalendarIntervalType]] is primitive, but it's not an
   * [[AtomicType]].
   */
  private def isPrimitiveCatalystType(dataType: DataType): Boolean = {
    dataType match {
      case _: ArrayType | _: MapType | _: StructType => false
      case _ => true
    }
  }

  /**
   * Clips a Parquet [[GroupType]] which corresponds to a Catalyst [[ArrayType]]. The element type
   * of the [[ArrayType]] should also be a nested type, namely an [[ArrayType]], a [[MapType]], or a
   * [[StructType]].
   */
  private def clipParquetListType(
      parquetList: GroupType, elementType: DataType, caseSensitive: Boolean): Type = {
    // Precondition of this method, should only be called for lists with nested element types.
    assert(!isPrimitiveCatalystType(elementType))

    // Unannotated repeated group should be interpreted as required list of required element, so
    // list element type is just the group itself.  Clip it.
    if (parquetList.getOriginalType == null && parquetList.isRepetition(Repetition.REPEATED)) {
      clipParquetType(parquetList, elementType, caseSensitive)
    } else {
      assert(
        parquetList.getOriginalType == OriginalType.LIST,
        "Invalid Parquet schema. " +
          "Original type of annotated Parquet lists must be LIST: " +
          parquetList.toString)

      assert(
        parquetList.getFieldCount == 1 && parquetList.getType(0).isRepetition(Repetition.REPEATED),
        "Invalid Parquet schema. " +
          "LIST-annotated group should only have exactly one repeated field: " +
          parquetList)

      // Precondition of this method, should only be called for lists with nested element types.
      assert(!parquetList.getType(0).isPrimitive)

      val repeatedGroup = parquetList.getType(0).asGroupType()

      // If the repeated field is a group with multiple fields, or the repeated field is a group
      // with one field and is named either "array" or uses the LIST-annotated group's name with
      // "_tuple" appended then the repeated type is the element type and elements are required.
      // Build a new LIST-annotated group with clipped `repeatedGroup` as element type and the
      // only field.
      if (
        repeatedGroup.getFieldCount > 1 ||
          repeatedGroup.getName == "array" ||
          repeatedGroup.getName == parquetList.getName + "_tuple"
      ) {
        Types
          .buildGroup(parquetList.getRepetition)
          .as(OriginalType.LIST)
          .addField(clipParquetType(repeatedGroup, elementType, caseSensitive))
          .named(parquetList.getName)
      } else {
        // Otherwise, the repeated field's type is the element type with the repeated field's
        // repetition.
        Types
          .buildGroup(parquetList.getRepetition)
          .as(OriginalType.LIST)
          .addField(
            Types
              .repeatedGroup()
              .addField(clipParquetType(repeatedGroup.getType(0), elementType, caseSensitive))
              .named(repeatedGroup.getName))
          .named(parquetList.getName)
      }
    }
  }

  /**
   * Clips a Parquet [[GroupType]] which corresponds to a Catalyst [[MapType]]. Either key type or
   * value type of the [[MapType]] must be a nested type, namely an [[ArrayType]], a [[MapType]], or
   * a [[StructType]].
   *
   * NOTE(review): assumes the standard map layout -- a single repeated group whose
   * first field is the key and second the value; malformed map schemas would fail
   * on the getType(0)/getType(1) accesses below rather than with a clear assertion.
   */
  private def clipParquetMapType(
      parquetMap: GroupType,
      keyType: DataType,
      valueType: DataType,
      caseSensitive: Boolean): GroupType = {
    // Precondition of this method, only handles maps with nested key types or value types.
    assert(!isPrimitiveCatalystType(keyType) || !isPrimitiveCatalystType(valueType))

    val repeatedGroup = parquetMap.getType(0).asGroupType()
    val parquetKeyType = repeatedGroup.getType(0)
    val parquetValueType = repeatedGroup.getType(1)

    val clippedRepeatedGroup =
      Types
        .repeatedGroup()
        .as(repeatedGroup.getOriginalType)
        .addField(clipParquetType(parquetKeyType, keyType, caseSensitive))
        .addField(clipParquetType(parquetValueType, valueType, caseSensitive))
        .named(repeatedGroup.getName)

    Types
      .buildGroup(parquetMap.getRepetition)
      .as(parquetMap.getOriginalType)
      .addField(clippedRepeatedGroup)
      .named(parquetMap.getName)
  }

  /**
   * Clips a Parquet [[GroupType]] which corresponds to a Catalyst [[StructType]].
   *
   * @return A clipped [[GroupType]], which has at least one field.
   * @note Parquet doesn't allow creating empty [[GroupType]] instances except for empty
   *       [[MessageType]].  Because it's legal to construct an empty requested schema for column
   *       pruning.
   */
  private def clipParquetGroup(
      parquetRecord: GroupType, structType: StructType, caseSensitive: Boolean): GroupType = {
    val clippedParquetFields = clipParquetGroupFields(parquetRecord, structType, caseSensitive)
    Types
      .buildGroup(parquetRecord.getRepetition)
      .as(parquetRecord.getOriginalType)
      .addFields(clippedParquetFields: _*)
      .named(parquetRecord.getName)
  }

  /**
   * Clips a Parquet [[GroupType]] which corresponds to a Catalyst [[StructType]].
   *
   * Fields present only in the Catalyst schema are synthesized via the
   * Spark-to-Parquet converter; in case-insensitive mode, duplicate matches
   * are an error.
   *
   * @return A list of clipped [[GroupType]] fields, which can be empty.
   */
  private def clipParquetGroupFields(
      parquetRecord: GroupType, structType: StructType, caseSensitive: Boolean): Seq[Type] = {
    val toParquet = new SparkToParquetSchemaConverter(writeLegacyParquetFormat = false)
    if (caseSensitive) {
      val caseSensitiveParquetFieldMap =
        parquetRecord.getFields.asScala.map(f => f.getName -> f).toMap
      structType.map { f =>
        caseSensitiveParquetFieldMap
          .get(f.name)
          .map(clipParquetType(_, f.dataType, caseSensitive))
          .getOrElse(toParquet.convertField(f))
      }
    } else {
      // Do case-insensitive resolution only if in case-insensitive mode
      val caseInsensitiveParquetFieldMap =
        parquetRecord.getFields.asScala.groupBy(_.getName.toLowerCase(Locale.ROOT))
      structType.map { f =>
        caseInsensitiveParquetFieldMap
          .get(f.name.toLowerCase(Locale.ROOT))
          .map { parquetTypes =>
            if (parquetTypes.size > 1) {
              // Need to fail if there is ambiguity, i.e. more than one field is matched
              val parquetTypesString = parquetTypes.map(_.getName).mkString("[", ", ", "]")
              throw new RuntimeException(s"""Found duplicate field(s) "${f.name}": """ +
                s"$parquetTypesString in case-insensitive mode")
            } else {
              clipParquetType(parquetTypes.head, f.dataType, caseSensitive)
            }
          }.getOrElse(toParquet.convertField(f))
      }
    }
  }

  /**
   * Computes the structural intersection between two Parquet group types.
   * This is used to create a requestedSchema for ReadContext of Parquet-MR reader.
   * Parquet-MR reader does not support the nested field access to non-existent field
   * while parquet library does support to read the non-existent field by regular field access.
   *
   * Returns None when the intersection is empty (no common fields).
   */
  private def intersectParquetGroups(
      groupType1: GroupType, groupType2: GroupType): Option[GroupType] = {
    val fields =
      groupType1.getFields.asScala
        .filter(field => groupType2.containsField(field.getName))
        .flatMap {
          case field1: GroupType =>
            val field2 = groupType2.getType(field1.getName)
            if (field2.isPrimitive) {
              // Group vs. primitive under the same name: no structural overlap.
              None
            } else {
              intersectParquetGroups(field1, field2.asGroupType)
            }
          case field1 => Some(field1)
        }

    if (fields.nonEmpty) {
      Some(groupType1.withNewFields(fields.asJava))
    } else {
      None
    }
  }

  /**
   * Recursively replaces every [[UserDefinedType]] in `schema` with its
   * underlying `sqlType`, so the Parquet reader only ever sees SQL types.
   */
  def expandUDT(schema: StructType): StructType = {
    def expand(dataType: DataType): DataType = {
      dataType match {
        case t: ArrayType =>
          t.copy(elementType = expand(t.elementType))

        case t: MapType =>
          t.copy(
            keyType = expand(t.keyType),
            valueType = expand(t.valueType))

        case t: StructType =>
          val expandedFields = t.fields.map(f => f.copy(dataType = expand(f.dataType)))
          t.copy(fields = expandedFields)

        case t: UserDefinedType[_] =>
          t.sqlType

        case t =>
          t
      }
    }

    expand(schema).asInstanceOf[StructType]
  }
}
| icexelloss/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetReadSupport.scala | Scala | apache-2.0 | 16,632 |
package reftree.render
import org.scalajs.dom.raw.DOMParser
import reftree.diagram.{Animation, Diagram}
import reftree.dot.Graph
import reftree.graph.Graphs
import org.scalajs.dom
import reftree.svg.{OptimizedGraphAnimation, DomSvgApi}
import java.util.concurrent.TimeUnit
import scala.concurrent.duration.FiniteDuration
import scala.scalajs.js
import scala.scalajs.js.annotation.JSGlobalScope
/**
 * An interface to https://github.com/mdaines/viz.js/
 *
 * `Viz` takes Graphviz dot source and returns a string; callers in this file
 * parse the result as `image/svg+xml`, so the output is expected to be SVG.
 */
@js.native
@JSGlobalScope
object VizFacade extends js.Object {
  def Viz(source: String): String = js.native
}
/**
* This class provides functionality for rendering diagrams and animations
*
* It can be used in two ways:
* - conventionally, via the `render` methods;
* - with special syntax sugar, allowing `render` calls on the diagrams/animations themselves.
*
* Usage examples:
* {{{
* import reftree.diagram.Diagram
* import org.scalajs.dom
*
* val renderer = Renderer(
* renderingOptions = RenderingOptions(density = 75)
* )
*
* // Conventional usage
* renderer
* .tweakRendering(_.withVerticalSpacing(2))
* .render(dom.document.body, Diagram(List(1)))
*
* // Sweet sugar, recommended
* import renderer._
* Diagram(List(1))
* .render(dom.document.body, _.withVerticalSpacing(2))
* }}}
*/
case class Renderer(
  renderingOptions: RenderingOptions = RenderingOptions(),
  animationOptions: AnimationOptions = AnimationOptions()
) { self ⇒
  /** Tweak the rendering options with the provided function */
  def tweakRendering(tweak: RenderingOptions ⇒ RenderingOptions) =
    copy(renderingOptions = tweak(renderingOptions))

  /** Tweak the animation options with the provided function */
  def tweakAnimation(tweak: AnimationOptions ⇒ AnimationOptions) =
    copy(animationOptions = tweak(animationOptions))

  // Render the dot graph to SVG text via Viz.js and parse it into a detached
  // SVG DOM element.
  private def renderSvg(graph: Graph) = (new DOMParser)
    .parseFromString(VizFacade.Viz(graph.encode), "image/svg+xml")
    .documentElement

  // Replace `target` in the document with a shallow clone that contains
  // `content` (stretched to 100% width/height). Returns the new node, which
  // the caller must use as the target for any subsequent frame.
  private def renderTo(target: dom.Node, content: dom.Node) = {
    content.asInstanceOf[dom.Element].setAttribute("width", "100%")
    content.asInstanceOf[dom.Element].setAttribute("height", "100%")
    val newTarget = target.cloneNode(deep = false)
    newTarget.appendChild(content)
    target.parentNode.replaceChild(newTarget, target)
    newTarget
  }

  // Frame interpolator shared by all animation renders.
  private val animation = OptimizedGraphAnimation(DomSvgApi)

  /** Render a diagram to a given DOM node */
  def render(target: dom.Node, diagram: Diagram): Unit = {
    scribe.trace(s"Rendering diagram to $target")
    val graph = Graphs.graph(renderingOptions)(diagram)
    scribe.trace("Processing graphs with Viz.js...")
    val svg = renderSvg(graph)
    scribe.trace("Rendering...")
    renderTo(target, svg)
  }

  /** Render an animation to a given DOM node */
  def render(target: dom.Node, animation: Animation): Unit = {
    scribe.trace(s"Rendering animation to $target")
    val graphs = Graphs.graphs(renderingOptions, animationOptions.onionSkinLayers)(animation)
    scribe.trace("Processing graphs with Viz.js...")
    val svgs = graphs.map(renderSvg)
    scribe.trace("Preprocessing frames...")
    val frames = this.animation.animate(
      animationOptions.keyFrames, animationOptions.interpolationFrames
    )(svgs)
    scribe.trace("Starting the animation...")
    var i = 0
    var currentTarget = target
    // Self-rescheduling frame loop driven by js.timers; termination (or looping)
    // is handled in the IndexOutOfBoundsException branch below.
    def iteration(): Unit = {
      try {
        // we catch the IOOB exception rather than checking the bounds to avoid forcing the stream
        val currentFrame = frames(i)
        i += 1
        js.timers.setTimeout(FiniteDuration(animationOptions.delay.multipliedBy(currentFrame.repeat).toNanos, TimeUnit.NANOSECONDS))(iteration())
        currentTarget = renderTo(currentTarget, currentFrame.frame)
        if (currentFrame.repeat > 1) {
          // preprocess a few frames if we are going to be waiting
          js.timers.setTimeout(FiniteDuration(animationOptions.delay.toNanos, TimeUnit.NANOSECONDS)) {
            frames.take(i + currentFrame.repeat + 1).force
          }
        }
      } catch {
        case _: IndexOutOfBoundsException ⇒
          // Past the last frame: either restart from frame 0 or stop.
          if (animationOptions.loop) {
            i = 0
            js.timers.setTimeout(FiniteDuration(animationOptions.delay.toNanos, TimeUnit.NANOSECONDS))(iteration())
          }
      }
    }
    iteration()
  }

  /** Syntactic sugar for diagrams */
  implicit class DiagramRenderSyntax(diagram: Diagram) {
    def render(
      target: dom.Node,
      tweak: RenderingOptions ⇒ RenderingOptions = identity
    ): Unit = self
      .tweakRendering(tweak)
      .render(target, diagram)
  }

  /** Syntactic sugar for animations */
  implicit class AnimationRenderSyntax(animation: Animation) {
    def render(
      target: dom.Node,
      tweakRendering: RenderingOptions ⇒ RenderingOptions = identity,
      tweakAnimation: AnimationOptions ⇒ AnimationOptions = identity
    ): Unit = self
      .tweakRendering(tweakRendering)
      .tweakAnimation(tweakAnimation)
      .render(target, animation)
  }
}
package com.wixpress.common.petri
import java.util
import java.util.UUID
import javax.annotation.Resource
import javax.servlet.http.{Cookie, HttpServletRequest, HttpServletResponse}
import com.fasterxml.jackson.annotation.JsonProperty
import com.wixpress.common.petri.PageWithButtonController.labUserIdField
import com.wixpress.petri.amplitude.AmplitudeAdapter
import com.wixpress.petri.google_analytics.GoogleAnalyticsAdapter
import com.wixpress.petri.laboratory.converters.StringConverter
import com.wixpress.petri.laboratory.http.LaboratoryFilter.PETRI_USER_INFO_STORAGE
import com.wixpress.petri.laboratory.{BaseBiEvent, Laboratory, RegisteredUserInfoType, UserInfo, UserInfoStorage}
import org.springframework.stereotype.Controller
import org.springframework.web.bind.annotation.{RequestMapping, RequestMethod, ResponseBody}
@Controller
class PageWithButtonController {

  @Resource
  var amplitudeAdapter: AmplitudeAdapter = _

  @Resource
  var googleAnalyticsAdapter: GoogleAnalyticsAdapter = _

  @Resource
  var laboratory: Laboratory = _

  // Serves the test page wired to the Amplitude BI backend.
  // SECURITY NOTE(review): real-looking credentials are hardcoded in source
  // (here and in testGoogleAnalytics) and rendered into the page -- they should
  // be rotated and moved to configuration/secrets management.
  @RequestMapping(value = Array("/testAmplitude"), method = Array(RequestMethod.GET), produces = Array("text/html"))
  @ResponseBody
  def testAmplitude(request: HttpServletRequest, response: HttpServletResponse) = {
    val amplitudeUrl = "https://amplitude.com/app/151746/funnels?fid=20206&sset=%7B%22byProp%22:%22a%22,%22segmentIndex%22:0%7D&sset=%7B%22byProp%22:%22b%22,%22segmentIndex%22:0%7D&cg=User&range=Last%207%20Days&i=1&dets=0"
    val amplitudeUser = "nimrodl@wix.com"
    val amplitudePassword = "GNK5OdwkZzh5Qw7f9qPB"
    test(request, response, amplitudeUrl, amplitudeUser, amplitudePassword, "amplitude")
  }

  // Serves the test page wired to the Google Analytics BI backend.
  @RequestMapping(value = Array("/testGoogleAnalytics"), method = Array(RequestMethod.GET), produces = Array("text/html"))
  @ResponseBody
  def testGoogleAnalytics(request: HttpServletRequest, response: HttpServletResponse) = {
    val googleAnalyticsUrl = "https://analytics.google.com/analytics/web/?authuser=0#realtime/rt-event/a89204848w132385051p136346283/"
    val googleAnalyticsUser = "wix.petri.os@gmail.com"
    val googleAnalyticsPassword = "pRDm1E18Mr"
    test(request, response, googleAnalyticsUrl, googleAnalyticsUser, googleAnalyticsPassword, "google")
  }

  // Common flow: assign (or reuse) a user id, rewrite the session's Petri user
  // info to a registered user with that id, then render the test page.
  private def test(request: HttpServletRequest, response: HttpServletResponse, url: String, user: String, password: String, biType: String): String = {
    val theUserId = getOrPutUserId(request, response)
    val userInfo = request.getSession.getAttribute(PETRI_USER_INFO_STORAGE).asInstanceOf[UserInfoStorage]
    userInfo.write(copyUserWithNewId(UUID.fromString(theUserId), userInfo.read()))
    renderedPageForRegisteredUser(theUserId, url, user, password, biType)
  }

  // Rebuilds a UserInfo with `userId` as both the user id and the (registered)
  // user-info type; all other fields are copied from `u` except the
  // per-user experiment map, which is reset to empty.
  def copyUserWithNewId(userId: UUID, u: UserInfo) =
    new UserInfo(u.experimentsLog, userId, u.clientId, u.ip, u.url, u.userAgent, RegisteredUserInfoType(userId),
      u.language, u.country, u.dateCreated, u.companyEmployee, u.anonymousExperimentsLog, u.isRecurringUser,
      u.experimentOverrides, u.isRobot, u.host, new util.HashMap[UUID, String](),
      u.potentialOtherUserExperimentsLogFromCookies, u.registeredUserExists, u.globalSessionId)

  // Renders the self-contained HTML test page. The button POSTs to
  // /{biType}ButtonClicked and then links to the BI dashboard with the
  // (hardcoded -- see note above) login details.
  private def renderedPageForRegisteredUser(userId: String, url: String, user: String, password: String, biType: String) = {
    val color = colorFromExperiment()
    s"""
       |<html>
       |<head>
       |<script src="https://code.jquery.com/jquery-1.9.1.js"></script>
       |</head>
       |<body>
       |<div>
       |Welcome user $userId!
       |</div>
       |<div>
       |  <input type="button" name="buttonId" id="buttonId" value="${if (color == "red") "Don't " else ""}click here!" style="color: $color"/>
       |</div>
       |<div id="resultText">
       |</div>
       |<div id="userDetails">
       |</div>
       |</body>
       |<script>
       |$$(document).ready(function() {
       | $$("#buttonId").click(function() {
       |   $$("#buttonId").hide();
       |   $$("#resultText").html("working on it... wait!");
       |   $$.post( "/${biType}ButtonClicked", function(res) {
       |     $$("#resultText").html('Finished! checkout results <a href="$url">here!</a>');
       |     $$("#userDetails").html('to login: User: $user , Password: $password');
       |   }).fail(function(error) {
       |     $$("#resultText").html('Sorry! timeout contacting amplitude service. checkout results <a href="$url">here!</a>');
       |     $$("#userDetails").html('to login: User: $user , Password: $password');
       |   })
       | });
       |});
       |</script>
       |</html>
     """.stripMargin
  }

  // Conducts the BUTTON_COLOR_SPEC experiment; falls back to "yellow".
  def colorFromExperiment() =
    laboratory.conductExperiment("BUTTON_COLOR_SPEC", "yellow", new StringConverter)

  // Reports the button click to Amplitude for the current user, if a user id
  // cookie/parameter is present (silently does nothing otherwise).
  @RequestMapping(value = Array("/amplitudeButtonClicked"), method = Array(RequestMethod.POST))
  @ResponseBody
  def amplitudeButtonClicked(request: HttpServletRequest, response: HttpServletResponse): Unit = {
    getUserId(request).foreach { userId =>
      amplitudeAdapter.sendEvent(AmplitudeBiEvent(ButtonClickedEvent.eventType, "1.1.1.1", "en", "us", userId))
    }
  }

  // Reports the button click to Google Analytics for the current user, if any.
  @RequestMapping(value = Array("/googleButtonClicked"), method = Array(RequestMethod.POST))
  @ResponseBody
  def googleButtonClicked(request: HttpServletRequest, response: HttpServletResponse): Unit = {
    getUserId(request).foreach { userId =>
      googleAnalyticsAdapter.sendEvent(GoogleAnalyticsBiEvent(ButtonClickedEvent.eventType, "1.1.1.1", "en", "us", userId, "ButtonClicked"))
    }
  }

  // Returns the existing user id (request param or cookie) or generates a fresh
  // UUID, always (re)setting the cookie on the response.
  private def getOrPutUserId(request: HttpServletRequest, response: HttpServletResponse): String = {
    val userId = getUserId(request).getOrElse(UUID.randomUUID().toString)
    response.addCookie(new Cookie(labUserIdField, userId))
    userId
  }

  // Looks up the user id: request parameter takes precedence over the cookie.
  // NOTE(review): `cookies.toSet.find` discards cookie order -- if duplicate
  // cookies with this name exist, the one picked is arbitrary; `cookies.find`
  // on the array would be deterministic.
  private def getUserId(request: HttpServletRequest) = {
    val cookies: Array[Cookie] = request.getCookies
    val cookie = if (cookies == null) None else cookies.toSet.find(_.getName == labUserIdField).map(_.getValue)
    Option(request.getParameter(labUserIdField)).orElse(cookie)
  }
}
/** BI event-type constant reported by both the Amplitude and GA click endpoints. */
object ButtonClickedEvent {
  val eventType = "ButtonClickedEvent"
}
object PageWithButtonController {
  // Name of both the request parameter and the cookie carrying the user id.
  val labUserIdField = "laboratory_user_id"
}
/**
 * BI event payload sent to Amplitude; Jackson renames `eventType`/`userId`
 * to Amplitude's snake_case wire fields.
 */
case class AmplitudeBiEvent(@JsonProperty("event_type") eventType: String,
                            ip: String, language: String, country: String,
                            @JsonProperty("user_id") userId: String) extends BaseBiEvent
// Note - BE CAREFUL WHEN RENAMING! (GoogleAnalyticsAdapter relies on some of these names)
// `ec` is the Google Analytics "event category" field.
case class GoogleAnalyticsBiEvent(eventType: String,
                                  ip: String, language: String, country: String,
                                  userId: String,
                                  ec: String) extends BaseBiEvent
| wix/petri | petri-bi-integration-testapp/src/main/scala/com/wixpress/common/petri/PageWithButtonController.scala | Scala | bsd-3-clause | 6,785 |
/* Title: Pure/General/file.scala
Author: Makarius
File-system operations.
*/
package isabelle
import java.io.{BufferedWriter, OutputStreamWriter, FileOutputStream, BufferedOutputStream,
OutputStream, InputStream, FileInputStream, BufferedInputStream, BufferedReader,
InputStreamReader, File => JFile, IOException}
import java.nio.file.{StandardOpenOption, StandardCopyOption, Path => JPath,
Files, SimpleFileVisitor, FileVisitResult}
import java.nio.file.attribute.BasicFileAttributes
import java.net.{URL, MalformedURLException}
import java.util.zip.{GZIPInputStream, GZIPOutputStream}
import java.util.regex.Pattern
import org.tukaani.xz.{XZInputStream, XZOutputStream}
import scala.collection.mutable
import scala.util.matching.Regex
object File
{
  /* standard path (Cygwin or Posix) */

  /** Fully expanded Isabelle path in standard (Posix-style) notation. */
  def standard_path(path: Path): String = path.expand.implode

  // Convert a platform path to standard notation: on Windows, the Cygwin root
  // maps to "/", drive letters to "/cygdrive/<letter>", and backslashes to
  // forward slashes; on Posix the path is already standard.
  def standard_path(platform_path: String): String =
    if (Platform.is_windows) {
      val Platform_Root = new Regex("(?i)" +
        Pattern.quote(Isabelle_System.cygwin_root()) + """(?:\\+|\z)(.*)""")
      val Drive = new Regex("""([a-zA-Z]):\\*(.*)""")

      platform_path.replace('/', '\\') match {
        case Platform_Root(rest) => "/" + rest.replace('\\', '/')
        case Drive(letter, rest) =>
          "/cygdrive/" + Word.lowercase(letter) +
            (if (rest == "") "" else "/" + rest.replace('\\', '/'))
        case path => path.replace('\\', '/')
      }
    }
    else platform_path

  def standard_path(file: JFile): String = standard_path(file.getPath)

  // Normalize "file:" URLs to standard paths; anything that is not a
  // well-formed URL is treated as a plain platform path.
  def standard_url(name: String): String =
    try {
      val url = new URL(name)
      if (url.getProtocol == "file" && Url.is_wellformed_file(name))
        standard_path(Url.parse_file(name))
      else name
    }
    catch { case _: MalformedURLException => standard_path(name) }
  /* platform path (Windows or Posix) */

  // Standard-notation prefixes recognized on Windows.
  private val Cygdrive = new Regex("/cygdrive/([a-zA-Z])($|/.*)")
  private val Named_Root = new Regex("//+([^/]*)(.*)")

  // Inverse of standard_path: on Windows, "/cygdrive/<c>" becomes "C:\",
  // "//server" becomes a UNC root "\\server", and a leading "/" is resolved
  // against the Cygwin root; remaining segments are joined with the platform
  // separator (empty segments dropped). On Posix this is the identity.
  def platform_path(standard_path: String): String =
    if (Platform.is_windows) {
      val result_path = new StringBuilder
      val rest =
        standard_path match {
          case Cygdrive(drive, rest) =>
            result_path ++= (Word.uppercase(drive) + ":" + JFile.separator)
            rest
          case Named_Root(root, rest) =>
            result_path ++= JFile.separator
            result_path ++= JFile.separator
            result_path ++= root
            rest
          case path if path.startsWith("/") =>
            result_path ++= Isabelle_System.cygwin_root()
            path
          case path => path
        }
      for (p <- space_explode('/', rest) if p != "") {
        val len = result_path.length
        // Avoid doubled separators between the prefix and appended segments.
        if (len > 0 && result_path(len - 1) != JFile.separatorChar)
          result_path += JFile.separatorChar
        result_path ++= p
      }
      result_path.toString
    }
    else standard_path

  def platform_path(path: Path): String = platform_path(standard_path(path))
  def platform_file(path: Path): JFile = new JFile(platform_path(path))

  /* platform files */

  /** Absolute, lexically normalized file (no symlink resolution). */
  def absolute(file: JFile): JFile = file.toPath.toAbsolutePath.normalize.toFile
  def absolute_name(file: JFile): String = absolute(file).getPath

  /** Canonical file per java.io.File semantics (resolves symlinks; may do I/O). */
  def canonical(file: JFile): JFile = file.getCanonicalFile
  def canonical_name(file: JFile): String = canonical(file).getPath

  def path(file: JFile): Path = Path.explode(standard_path(file))
  def pwd(): Path = path(Path.current.absolute_file)
/* bash path */
def bash_path(path: Path): String = Bash.string(standard_path(path))
def bash_path(file: JFile): String = Bash.string(standard_path(file))
  /* directory entries */

  // assert that the given path denotes an existing directory / file, returning it unchanged
  def check_dir(path: Path): Path =
    if (path.is_dir) path else error("No such directory: " + path)

  def check_file(path: Path): Path =
    if (path.is_file) path else error("No such file: " + path)

  /* directory content */

  // plain names of all directory entries (unsorted); empty on listFiles I/O failure
  def read_dir(dir: Path): List[String] =
  {
    if (!dir.is_dir) error("Bad directory: " + dir.toString)
    val files = dir.file.listFiles
    // listFiles returns null on I/O error, even after the is_dir check above
    if (files == null) Nil
    else files.toList.map(_.getName)
  }
  /* Collect all files below start (or start itself, if it is a plain file)
     satisfying pred; directories are only included if include_dirs is set.
     Traversal order is that of Files.walkFileTree. */
  def find_files(
    start: JFile,
    pred: JFile => Boolean = _ => true,
    include_dirs: Boolean = false): List[JFile] =
  {
    val result = new mutable.ListBuffer[JFile]
    // accumulate files passing the predicate
    def check(file: JFile) { if (pred(file)) result += file }
    if (start.isFile) check(start)
    else if (start.isDirectory) {
      Files.walkFileTree(start.toPath,
        new SimpleFileVisitor[JPath] {
          override def preVisitDirectory(path: JPath, attrs: BasicFileAttributes): FileVisitResult =
          {
            if (include_dirs) check(path.toFile)
            FileVisitResult.CONTINUE
          }
          override def visitFile(path: JPath, attrs: BasicFileAttributes): FileVisitResult =
          {
            check(path.toFile)
            FileVisitResult.CONTINUE
          }
        }
      )
    }
    result.toList
  }
  /* read */

  // whole file content as text (decoded via Bytes.text)
  def read(file: JFile): String = Bytes.read(file).text
  def read(path: Path): String = read(path.file)
def read_stream(reader: BufferedReader): String =
{
val output = new StringBuilder(100)
var c = -1
while ({ c = reader.read; c != -1 }) output += c.toChar
reader.close
output.toString
}
  // decode a raw byte stream as text using UTF8.charset
  def read_stream(stream: InputStream): String =
    read_stream(new BufferedReader(new InputStreamReader(stream, UTF8.charset)))

  // read gzip-compressed text file
  def read_gzip(file: JFile): String =
    read_stream(new GZIPInputStream(new BufferedInputStream(new FileInputStream(file))))
  def read_gzip(path: Path): String = read_gzip(path.file)

  // read xz-compressed text file
  def read_xz(file: JFile): String =
    read_stream(new XZInputStream(new BufferedInputStream(new FileInputStream(file))))
  def read_xz(path: Path): String = read_xz(path.file)
  /* read lines */

  /* Read all lines, invoking progress on each line as it arrives; the
     reader is closed at the end.  An IOException terminates the loop
     silently, like regular end-of-input. */
  def read_lines(reader: BufferedReader, progress: String => Unit): List[String] =
  {
    val result = new mutable.ListBuffer[String]
    var line: String = null
    // readLine yields null at end-of-input; IOException is treated the same way
    while ({ line = try { reader.readLine} catch { case _: IOException => null }; line != null }) {
      progress(line)
      result += line
    }
    reader.close
    result.toList
  }
  /* write */

  /* Write text to file through a caller-supplied stream transformer (e.g.
     a compressing wrapper); output is encoded via UTF8.charset and the
     writer is always closed. */
  def write_file(file: JFile, text: CharSequence, make_stream: OutputStream => OutputStream)
  {
    val stream = make_stream(new FileOutputStream(file))
    val writer = new BufferedWriter(new OutputStreamWriter(stream, UTF8.charset))
    try { writer.append(text) } finally { writer.close }
  }

  // plain, gzip-compressed and xz-compressed variants
  def write(file: JFile, text: CharSequence): Unit = write_file(file, text, s => s)
  def write(path: Path, text: CharSequence): Unit = write(path.file, text)

  def write_gzip(file: JFile, text: CharSequence): Unit =
    write_file(file, text, (s: OutputStream) => new GZIPOutputStream(new BufferedOutputStream(s)))
  def write_gzip(path: Path, text: CharSequence): Unit = write_gzip(path.file, text)

  def write_xz(file: JFile, text: CharSequence, options: XZ.Options): Unit =
    File.write_file(file, text, s => new XZOutputStream(new BufferedOutputStream(s), options))
  def write_xz(file: JFile, text: CharSequence): Unit = write_xz(file, text, XZ.options())
  def write_xz(path: Path, text: CharSequence, options: XZ.Options): Unit =
    write_xz(path.file, text, options)
  def write_xz(path: Path, text: CharSequence): Unit = write_xz(path, text, XZ.options())
  // write text, first moving an existing file to its backup name
  def write_backup(path: Path, text: CharSequence)
  {
    if (path.is_file) move(path, path.backup)
    write(path, text)
  }

  // like write_backup, but using the secondary backup name
  def write_backup2(path: Path, text: CharSequence)
  {
    if (path.is_file) move(path, path.backup2)
    write(path, text)
  }

  /* append */

  // append UTF-8 encoded text, creating the file if necessary
  def append(file: JFile, text: CharSequence): Unit =
    Files.write(file.toPath, UTF8.bytes(text.toString),
      StandardOpenOption.APPEND, StandardOpenOption.CREATE)
  def append(path: Path, text: CharSequence): Unit = append(path.file, text)
  /* eq */

  // true if both names denote the same underlying file-system object; errors count as false
  def eq(file1: JFile, file2: JFile): Boolean =
    try { java.nio.file.Files.isSameFile(file1.toPath, file2.toPath) }
    catch { case ERROR(_) => false }
  def eq(path1: Path, path2: Path): Boolean = eq(path1.file, path2.file)

  /* eq_content */

  // true if the files are identical or have bytewise-equal content
  def eq_content(file1: JFile, file2: JFile): Boolean =
    if (eq(file1, file2)) true
    else if (file1.length != file2.length) false
    else Bytes.read(file1) == Bytes.read(file2)
  def eq_content(path1: Path, path2: Path): Boolean = eq_content(path1.file, path2.file)
  /* copy */

  /* Copy src into dst (into the directory, if dst is one), preserving file
     attributes and replacing an existing target; no-op if src and the
     target are the same file. */
  def copy(src: JFile, dst: JFile)
  {
    val target = if (dst.isDirectory) new JFile(dst, src.getName) else dst
    if (!eq(src, target))
      Files.copy(src.toPath, target.toPath,
        StandardCopyOption.COPY_ATTRIBUTES,
        StandardCopyOption.REPLACE_EXISTING)
  }
  def copy(path1: Path, path2: Path): Unit = copy(path1.file, path2.file)

  /* move */

  /* Move src to dst (into the directory, if dst is one), replacing an
     existing target; no-op if src and the target are the same file. */
  def move(src: JFile, dst: JFile)
  {
    val target = if (dst.isDirectory) new JFile(dst, src.getName) else dst
    if (!eq(src, target))
      Files.move(src.toPath, target.toPath, StandardCopyOption.REPLACE_EXISTING)
  }
  def move(path1: Path, path2: Path): Unit = move(path1.file, path2.file)
}
| larsrh/libisabelle | modules/pide/2017/src/main/scala/General/file.scala | Scala | apache-2.0 | 9,139 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package org.apache.toree.utils
import java.io.FileNotFoundException
import java.net.URL
import org.scalatest.{BeforeAndAfter, Matchers, FunSpec}
import scala.io.Source
import scala.tools.nsc.io.File
/**
 * Tests for the DownloadSupport trait: downloading a local "file:" URL to a
 * destination URL, plus the failure cases of a missing source file and a
 * non-existent destination directory.
 */
class DownloadSupportSpec extends FunSpec with Matchers with BeforeAndAfter {
  val downloadDestinationUrl = new URL("file:///tmp/testfile2.ext")

  val testFileContent = "This is a test"
  val testFileName = "/tmp/testfile.txt"

  //  Create a test file for downloading
  before {
    File(testFileName).writeAll(testFileContent)
  }

  //  Cleanup what we made
  after {
    File(testFileName).deleteIfExists()
    File(downloadDestinationUrl.getPath).deleteIfExists()
  }

  describe("DownloadSupport"){
    describe("#downloadFile( String, String )"){
      it("should download a file to the download directory"){
        val testFileUrl = "file:///tmp/testfile.txt"

        //  Create our utility and download the file
        val downloader = new Object with DownloadSupport
        downloader.downloadFile(
          testFileUrl,
          downloadDestinationUrl.getProtocol + "://" +
            downloadDestinationUrl.getPath)

        //  Verify the file contents are what was in the original file
        val downloadedFileContent: String =
          Source.fromFile(downloadDestinationUrl.getPath).mkString

        downloadedFileContent should be (testFileContent)
      }
    }

    describe("#downloadFile( URL, URL )"){
      it("should download a file to the download directory"){
        val testFileUrl = new URL("file:///tmp/testfile.txt")

        val downloader = new Object with DownloadSupport
        downloader.downloadFile(testFileUrl, downloadDestinationUrl)

        //  Verify the file contents are what was in the original file
        val downloadedFileContent: String =
          Source.fromFile(downloadDestinationUrl.getPath).mkString

        downloadedFileContent should be (testFileContent)
      }

      it("should throw FileNotFoundException if the download URL is bad"){
        //  FIX: previously the URL string (including the "file://" prefix) was
        //  passed to File(...), so the delete targeted the wrong path and the
        //  actual source file was never guaranteed to be absent.
        val badFilename = "/tmp/testbadfile.txt"
        File(badFilename).deleteIfExists()
        val badFileUrl = new URL("file://" + badFilename)

        val downloader = new Object with DownloadSupport
        intercept[FileNotFoundException] {
          downloader.downloadFile(badFileUrl, downloadDestinationUrl)
        }
      }

      //  FIX: the original description was truncated mid-sentence
      it("should throw FileNotFoundException if the download destination is invalid") {
        val testFileUrl = new URL("file:///tmp/testfile.txt")
        val badDestinationUrl =
          new URL("file:///tmp/badloc/that/doesnt/exist.txt")

        val downloader = new Object with DownloadSupport
        intercept[FileNotFoundException] {
          downloader.downloadFile(testFileUrl, badDestinationUrl)
        }
      }
    }
  }
}
| kapil-malik/incubator-toree | kernel-api/src/test/scala/org/apache/toree/utils/DownloadSupportSpec.scala | Scala | apache-2.0 | 3,586 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.zk
import java.nio.charset.StandardCharsets.UTF_8
import java.util.Properties
import com.fasterxml.jackson.annotation.JsonProperty
import com.fasterxml.jackson.core.JsonProcessingException
import kafka.api.{ApiVersion, KAFKA_0_10_0_IV1, LeaderAndIsr}
import kafka.cluster.{Broker, EndPoint}
import kafka.common.{NotificationHandler, ZkNodeChangeNotificationListener}
import kafka.controller.{IsrChangeNotificationHandler, LeaderIsrAndControllerEpoch}
import kafka.security.auth.Resource.Separator
import kafka.security.auth.SimpleAclAuthorizer.VersionedAcls
import kafka.security.auth.{Acl, Resource, ResourceType}
import kafka.server.{ConfigType, DelegationTokenManager}
import kafka.utils.Json
import org.apache.kafka.common.{KafkaException, TopicPartition}
import org.apache.kafka.common.errors.UnsupportedVersionException
import org.apache.kafka.common.network.ListenerName
import org.apache.kafka.common.resource.PatternType
import org.apache.kafka.common.security.auth.SecurityProtocol
import org.apache.kafka.common.security.token.delegation.{DelegationToken, TokenInformation}
import org.apache.kafka.common.utils.Time
import org.apache.zookeeper.ZooDefs
import org.apache.zookeeper.data.{ACL, Stat}
import scala.beans.BeanProperty
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import scala.collection.{Seq, breakOut}
import scala.util.{Failure, Success, Try}
// This file contains objects for encoding/decoding data stored in ZooKeeper nodes (znodes).

/** "/controller": JSON payload naming the current controller broker. */
object ControllerZNode {
  def path = "/controller"
  def encode(brokerId: Int, timestamp: Long): Array[Byte] = {
    Json.encodeAsBytes(Map("version" -> 1, "brokerid" -> brokerId, "timestamp" -> timestamp.toString).asJava)
  }
  // extract only the broker id; None if the payload is not valid JSON
  def decode(bytes: Array[Byte]): Option[Int] = Json.parseBytes(bytes).map { js =>
    js.asJsonObject("brokerid").to[Int]
  }
}

/** "/controller_epoch": plain integer stored as a UTF-8 string (no JSON). */
object ControllerEpochZNode {
  def path = "/controller_epoch"
  def encode(epoch: Int): Array[Byte] = epoch.toString.getBytes(UTF_8)
  def decode(bytes: Array[Byte]): Int = new String(bytes, UTF_8).toInt
}

/** "/config": root of the dynamic entity-config subtree. */
object ConfigZNode {
  def path = "/config"
}

/** "/brokers": root of the broker/topic registration subtree. */
object BrokersZNode {
  def path = "/brokers"
}

/** "/brokers/ids": parent of per-broker registration znodes; carries no payload. */
object BrokerIdsZNode {
  def path = s"${BrokersZNode.path}/ids"
  def encode: Array[Byte] = null
}
object BrokerInfo {

  /**
   * Create a broker info with v4 json format (which includes multiple endpoints and rack) if
   * the apiVersion is 0.10.0.X or above. Register the broker with v2 json format otherwise.
   *
   * Due to KAFKA-3100, 0.9.0.0 broker and old clients will break if JSON version is above 2.
   *
   * We include v2 to make it possible for the broker to migrate from 0.9.0.0 to 0.10.0.X or above without having to
   * upgrade to 0.9.0.1 first (clients have to be upgraded to 0.9.0.1 in any case).
   */
  def apply(broker: Broker, apiVersion: ApiVersion, jmxPort: Int): BrokerInfo = {
    // see method documentation for the reason why we do this
    val version = if (apiVersion >= KAFKA_0_10_0_IV1) 4 else 2
    BrokerInfo(broker, version, jmxPort)
  }

}

/** Broker registration payload together with the znode path it is written to. */
case class BrokerInfo(broker: Broker, version: Int, jmxPort: Int) {
  // registration znode for this broker id, e.g. "/brokers/ids/1"
  val path: String = BrokerIdZNode.path(broker.id)
  def toJsonBytes: Array[Byte] = BrokerIdZNode.encode(this)
}
/** Per-broker registration znode "/brokers/ids/&lt;id&gt;"; payload is versioned JSON (schemas documented on decode). */
object BrokerIdZNode {
  private val HostKey = "host"
  private val PortKey = "port"
  private val VersionKey = "version"
  private val EndpointsKey = "endpoints"
  private val RackKey = "rack"
  private val JmxPortKey = "jmx_port"
  private val ListenerSecurityProtocolMapKey = "listener_security_protocol_map"
  private val TimestampKey = "timestamp"

  def path(id: Int) = s"${BrokerIdsZNode.path}/$id"

  /**
   * Encode to JSON bytes.
   *
   * The JSON format includes a top level host and port for compatibility with older clients.
   */
  def encode(version: Int, host: String, port: Int, advertisedEndpoints: Seq[EndPoint], jmxPort: Int,
             rack: Option[String]): Array[Byte] = {
    val jsonMap = collection.mutable.Map(VersionKey -> version,
      HostKey -> host,
      PortKey -> port,
      EndpointsKey -> advertisedEndpoints.map(_.connectionString).toBuffer.asJava,
      JmxPortKey -> jmxPort,
      TimestampKey -> Time.SYSTEM.milliseconds().toString
    )
    // rack (v3+) and listener->security-protocol map (v4+) are version-gated fields
    rack.foreach(rack => if (version >= 3) jsonMap += (RackKey -> rack))

    if (version >= 4) {
      jsonMap += (ListenerSecurityProtocolMapKey -> advertisedEndpoints.map { endPoint =>
        endPoint.listenerName.value -> endPoint.securityProtocol.name
      }.toMap.asJava)
    }
    Json.encodeAsBytes(jsonMap.asJava)
  }

  def encode(brokerInfo: BrokerInfo): Array[Byte] = {
    val broker = brokerInfo.broker
    // the default host and port are here for compatibility with older clients that only support PLAINTEXT
    // we choose the first plaintext port, if there is one
    // or we register an empty endpoint, which means that older clients will not be able to connect
    val plaintextEndpoint = broker.endPoints.find(_.securityProtocol == SecurityProtocol.PLAINTEXT).getOrElse(
      new EndPoint(null, -1, null, null))
    encode(brokerInfo.version, plaintextEndpoint.host, plaintextEndpoint.port, broker.endPoints, brokerInfo.jmxPort,
      broker.rack)
  }

  /**
   * Create a BrokerInfo object from id and JSON bytes.
   *
   * @param id
   * @param jsonBytes
   *
   * Version 1 JSON schema for a broker is:
   * {
   *   "version":1,
   *   "host":"localhost",
   *   "port":9092
   *   "jmx_port":9999,
   *   "timestamp":"2233345666"
   * }
   *
   * Version 2 JSON schema for a broker is:
   * {
   *   "version":2,
   *   "host":"localhost",
   *   "port":9092,
   *   "jmx_port":9999,
   *   "timestamp":"2233345666",
   *   "endpoints":["PLAINTEXT://host1:9092", "SSL://host1:9093"]
   * }
   *
   * Version 3 JSON schema for a broker is:
   * {
   *   "version":3,
   *   "host":"localhost",
   *   "port":9092,
   *   "jmx_port":9999,
   *   "timestamp":"2233345666",
   *   "endpoints":["PLAINTEXT://host1:9092", "SSL://host1:9093"],
   *   "rack":"dc1"
   * }
   *
   * Version 4 (current) JSON schema for a broker is:
   * {
   *   "version":4,
   *   "host":"localhost",
   *   "port":9092,
   *   "jmx_port":9999,
   *   "timestamp":"2233345666",
   *   "endpoints":["CLIENT://host1:9092", "REPLICATION://host1:9093"],
   *   "listener_security_protocol_map":{"CLIENT":"SSL", "REPLICATION":"PLAINTEXT"},
   *   "rack":"dc1"
   * }
   */
  def decode(id: Int, jsonBytes: Array[Byte]): BrokerInfo = {
    Json.tryParseBytes(jsonBytes) match {
      case Right(js) =>
        val brokerInfo = js.asJsonObject
        val version = brokerInfo(VersionKey).to[Int]
        val jmxPort = brokerInfo(JmxPortKey).to[Int]

        val endpoints =
          if (version < 1)
            throw new KafkaException("Unsupported version of broker registration: " +
              s"${new String(jsonBytes, UTF_8)}")
          else if (version == 1) {
            // v1 has a single implicit PLAINTEXT endpoint built from host/port
            val host = brokerInfo(HostKey).to[String]
            val port = brokerInfo(PortKey).to[Int]
            val securityProtocol = SecurityProtocol.PLAINTEXT
            val endPoint = new EndPoint(host, port, ListenerName.forSecurityProtocol(securityProtocol), securityProtocol)
            Seq(endPoint)
          }
          else {
            // v2+: explicit endpoint list; v4 additionally maps listener names to protocols
            val securityProtocolMap = brokerInfo.get(ListenerSecurityProtocolMapKey).map(
              _.to[Map[String, String]].map { case (listenerName, securityProtocol) =>
                new ListenerName(listenerName) -> SecurityProtocol.forName(securityProtocol)
              })
            val listeners = brokerInfo(EndpointsKey).to[Seq[String]]
            listeners.map(EndPoint.createEndPoint(_, securityProtocolMap))
          }

        val rack = brokerInfo.get(RackKey).flatMap(_.to[Option[String]])
        BrokerInfo(Broker(id, endpoints, rack), version, jmxPort)
      case Left(e) =>
        throw new KafkaException(s"Failed to parse ZooKeeper registration for broker $id: " +
          s"${new String(jsonBytes, UTF_8)}", e)
    }
  }
}
object TopicsZNode {
  def path = s"${BrokersZNode.path}/topics"
}

/** "/brokers/topics/&lt;topic&gt;": replica assignment for each partition of the topic. */
object TopicZNode {
  def path(topic: String) = s"${TopicsZNode.path}/$topic"
  def encode(assignment: collection.Map[TopicPartition, Seq[Int]]): Array[Byte] = {
    // JSON keys are partition ids (as strings), values are replica broker-id lists
    val assignmentJson = assignment.map { case (partition, replicas) =>
      partition.partition.toString -> replicas.asJava
    }
    Json.encodeAsBytes(Map("version" -> 1, "partitions" -> assignmentJson.asJava).asJava)
  }
  // empty map on malformed JSON or missing "partitions" field
  def decode(topic: String, bytes: Array[Byte]): Map[TopicPartition, Seq[Int]] = {
    Json.parseBytes(bytes).flatMap { js =>
      val assignmentJson = js.asJsonObject
      val partitionsJsonOpt = assignmentJson.get("partitions").map(_.asJsonObject)
      partitionsJsonOpt.map { partitionsJson =>
        partitionsJson.iterator.map { case (partition, replicas) =>
          new TopicPartition(topic, partition.toInt) -> replicas.to[Seq[Int]]
        }
      }
    }.map(_.toMap).getOrElse(Map.empty)
  }
}

object TopicPartitionsZNode {
  def path(topic: String) = s"${TopicZNode.path(topic)}/partitions"
}

object TopicPartitionZNode {
  def path(partition: TopicPartition) = s"${TopicPartitionsZNode.path(partition.topic)}/${partition.partition}"
}
/** ".../partitions/&lt;p&gt;/state": leader, leader epoch, ISR and controller epoch of one partition. */
object TopicPartitionStateZNode {
  def path(partition: TopicPartition) = s"${TopicPartitionZNode.path(partition)}/state"
  def encode(leaderIsrAndControllerEpoch: LeaderIsrAndControllerEpoch): Array[Byte] = {
    val leaderAndIsr = leaderIsrAndControllerEpoch.leaderAndIsr
    val controllerEpoch = leaderIsrAndControllerEpoch.controllerEpoch
    Json.encodeAsBytes(Map("version" -> 1, "leader" -> leaderAndIsr.leader, "leader_epoch" -> leaderAndIsr.leaderEpoch,
      "controller_epoch" -> controllerEpoch, "isr" -> leaderAndIsr.isr.asJava).asJava)
  }
  // None if the payload is not valid JSON
  def decode(bytes: Array[Byte], stat: Stat): Option[LeaderIsrAndControllerEpoch] = {
    Json.parseBytes(bytes).map { js =>
      val leaderIsrAndEpochInfo = js.asJsonObject
      val leader = leaderIsrAndEpochInfo("leader").to[Int]
      val epoch = leaderIsrAndEpochInfo("leader_epoch").to[Int]
      val isr = leaderIsrAndEpochInfo("isr").to[List[Int]]
      val controllerEpoch = leaderIsrAndEpochInfo("controller_epoch").to[Int]
      // the znode's own version is reused as the zkVersion of the leader/ISR state
      val zkPathVersion = stat.getVersion
      LeaderIsrAndControllerEpoch(LeaderAndIsr(leader, epoch, isr, zkPathVersion), controllerEpoch)
    }
  }
}
object ConfigEntityTypeZNode {
  def path(entityType: String) = s"${ConfigZNode.path}/$entityType"
}

/** "/config/&lt;type&gt;/&lt;name&gt;": per-entity config overrides as {"version": 1, "config": {...}}. */
object ConfigEntityZNode {
  def path(entityType: String, entityName: String) = s"${ConfigEntityTypeZNode.path(entityType)}/$entityName"
  def encode(config: Properties): Array[Byte] = {
    Json.encodeAsBytes(Map("version" -> 1, "config" -> config).asJava)
  }
  // tolerant decode: null bytes or a malformed/missing "config" object yields empty Properties
  def decode(bytes: Array[Byte]): Properties = {
    val props = new Properties()
    if (bytes != null) {
      Json.parseBytes(bytes).foreach { js =>
        val configOpt = js.asJsonObjectOption.flatMap(_.get("config").flatMap(_.asJsonObjectOption))
        configOpt.foreach(config => config.iterator.foreach { case (k, v) => props.setProperty(k, v.to[String]) })
      }
    }
    props
  }
}

object ConfigEntityChangeNotificationZNode {
  def path = s"${ConfigZNode.path}/changes"
}

/** Sequential znodes "/config/changes/config_change_&lt;seq&gt;" announcing config updates. */
object ConfigEntityChangeNotificationSequenceZNode {
  val SequenceNumberPrefix = "config_change_"
  def createPath = s"${ConfigEntityChangeNotificationZNode.path}/$SequenceNumberPrefix"
  def encode(sanitizedEntityPath: String): Array[Byte] = Json.encodeAsBytes(
    Map("version" -> 2, "entity_path" -> sanitizedEntityPath).asJava)
}
object IsrChangeNotificationZNode {
  def path = "/isr_change_notification"
}

/** Sequential znodes listing topic-partitions whose ISR changed. */
object IsrChangeNotificationSequenceZNode {
  val SequenceNumberPrefix = "isr_change_"
  def path(sequenceNumber: String = "") = s"${IsrChangeNotificationZNode.path}/$SequenceNumberPrefix$sequenceNumber"
  def encode(partitions: collection.Set[TopicPartition]): Array[Byte] = {
    val partitionsJson = partitions.map(partition => Map("topic" -> partition.topic, "partition" -> partition.partition).asJava)
    Json.encodeAsBytes(Map("version" -> IsrChangeNotificationHandler.Version, "partitions" -> partitionsJson.asJava).asJava)
  }

  // empty set if the payload cannot be parsed; note the block-expression style:
  // the braces after "=" are a plain block whose Option result is then mapped
  def decode(bytes: Array[Byte]): Set[TopicPartition] = {
    Json.parseBytes(bytes).map { js =>
      val partitionsJson = js.asJsonObject("partitions").asJsonArray
      partitionsJson.iterator.map { partitionsJson =>
        val partitionJson = partitionsJson.asJsonObject
        val topic = partitionJson("topic").to[String]
        val partition = partitionJson("partition").to[Int]
        new TopicPartition(topic, partition)
      }
    }
  }.map(_.toSet).getOrElse(Set.empty)
  def sequenceNumber(path: String) = path.substring(path.lastIndexOf(SequenceNumberPrefix) + SequenceNumberPrefix.length)
}
object LogDirEventNotificationZNode {
  def path = "/log_dir_event_notification"
}

/** Sequential znodes "/log_dir_event_notification/log_dir_event_&lt;seq&gt;" carrying a broker id and event code. */
object LogDirEventNotificationSequenceZNode {
  val SequenceNumberPrefix = "log_dir_event_"
  // the only event code currently written
  val LogDirFailureEvent = 1
  def path(sequenceNumber: String) = s"${LogDirEventNotificationZNode.path}/$SequenceNumberPrefix$sequenceNumber"
  def encode(brokerId: Int) = {
    Json.encodeAsBytes(Map("version" -> 1, "broker" -> brokerId, "event" -> LogDirFailureEvent).asJava)
  }
  // extract the broker id; None if the payload is not valid JSON
  def decode(bytes: Array[Byte]): Option[Int] = Json.parseBytes(bytes).map { js =>
    js.asJsonObject("broker").to[Int]
  }
  def sequenceNumber(path: String) = path.substring(path.lastIndexOf(SequenceNumberPrefix) + SequenceNumberPrefix.length)
}

object AdminZNode {
  def path = "/admin"
}

object DeleteTopicsZNode {
  def path = s"${AdminZNode.path}/delete_topics"
}

/** "/admin/delete_topics/&lt;topic&gt;": one znode per topic queued for deletion. */
object DeleteTopicsTopicZNode {
  def path(topic: String) = s"${DeleteTopicsZNode.path}/$topic"
}
/** "/admin/reassign_partitions": pending partition reassignment, Jackson-mapped via the nested case classes. */
object ReassignPartitionsZNode {

  /**
   * The assignment of brokers for a `TopicPartition`.
   *
   * A replica assignment consists of a `topic`, `partition` and a list of `replicas`, which
   * represent the broker ids that the `TopicPartition` is assigned to.
   */
  case class ReplicaAssignment(@BeanProperty @JsonProperty("topic") topic: String,
                               @BeanProperty @JsonProperty("partition") partition: Int,
                               @BeanProperty @JsonProperty("replicas") replicas: java.util.List[Int])

  /**
   * An assignment consists of a `version` and a list of `partitions`, which represent the
   * assignment of topic-partitions to brokers.
   */
  case class PartitionAssignment(@BeanProperty @JsonProperty("version") version: Int,
                                 @BeanProperty @JsonProperty("partitions") partitions: java.util.List[ReplicaAssignment])

  def path = s"${AdminZNode.path}/reassign_partitions"

  def encode(reassignmentMap: collection.Map[TopicPartition, Seq[Int]]): Array[Byte] = {
    val reassignment = PartitionAssignment(1,
      reassignmentMap.toSeq.map { case (tp, replicas) =>
        ReplicaAssignment(tp.topic, tp.partition, replicas.asJava)
      }.asJava
    )
    Json.encodeAsBytes(reassignment)
  }

  // Left on malformed JSON; breakOut builds the target map without an intermediate collection
  def decode(bytes: Array[Byte]): Either[JsonProcessingException, collection.Map[TopicPartition, Seq[Int]]] =
    Json.parseBytesAs[PartitionAssignment](bytes).right.map { partitionAssignment =>
      partitionAssignment.partitions.asScala.map { replicaAssignment =>
        new TopicPartition(replicaAssignment.topic, replicaAssignment.partition) -> replicaAssignment.replicas.asScala
      }(breakOut)
    }
}
/** "/admin/preferred_replica_election": set of partitions requesting preferred-leader election. */
object PreferredReplicaElectionZNode {
  def path = s"${AdminZNode.path}/preferred_replica_election"
  def encode(partitions: Set[TopicPartition]): Array[Byte] = {
    val jsonMap = Map("version" -> 1,
      "partitions" -> partitions.map(tp => Map("topic" -> tp.topic, "partition" -> tp.partition).asJava).asJava)
    Json.encodeAsBytes(jsonMap.asJava)
  }
  // empty set if the payload cannot be parsed
  def decode(bytes: Array[Byte]): Set[TopicPartition] = Json.parseBytes(bytes).map { js =>
    val partitionsJson = js.asJsonObject("partitions").asJsonArray
    partitionsJson.iterator.map { partitionsJson =>
      val partitionJson = partitionsJson.asJsonObject
      val topic = partitionJson("topic").to[String]
      val partition = partitionJson("partition").to[Int]
      new TopicPartition(topic, partition)
    }
  }.map(_.toSet).getOrElse(Set.empty)
}
/** Legacy ZooKeeper-based consumer offset znode: "/consumers/&lt;group&gt;/offsets/&lt;topic&gt;/&lt;partition&gt;". */
object ConsumerOffset {
  def path(group: String, topic: String, partition: Integer) =
    s"/consumers/$group/offsets/$topic/$partition"
  // offsets are stored as decimal strings in UTF-8
  def encode(offset: Long): Array[Byte] = offset.toString.getBytes(UTF_8)
  // None for a missing (null) payload
  def decode(bytes: Array[Byte]): Option[Long] =
    Option(bytes).map(raw => new String(raw, UTF_8).toLong)
}
object ZkVersion {
  // sentinel znode version; -1 is the ZooKeeper convention for "ignore version check"
  val NoVersion = -1
}

object ZkStat {
  // placeholder Stat for callers that do not care about znode metadata
  val NoStat = new Stat()
}

/** Names used to register ZooKeeper state-change handlers. */
object StateChangeHandlers {
  val ControllerHandler = "controller-state-change-handler"
  def zkNodeChangeListenerHandler(seqNodeRoot: String) = s"change-notification-$seqNodeRoot"
}
/**
* Acls for resources are stored in ZK under two root paths:
* <ul>
* <li>[[org.apache.kafka.common.resource.PatternType#LITERAL Literal]] patterns are stored under '/kafka-acl'.
* The format is JSON. See [[kafka.zk.ResourceZNode]] for details.</li>
* <li>All other patterns are stored under '/kafka-acl-extended/<i>pattern-type</i>'.
* The format is JSON. See [[kafka.zk.ResourceZNode]] for details.</li>
* </ul>
*
* Under each root node there will be one child node per resource type (Topic, Cluster, Group, etc).
* Under each resourceType there will be a unique child for each resource pattern and the data for that child will contain
* list of its acls as a json object. Following gives an example:
*
* <pre>
* // Literal patterns:
* /kafka-acl/Topic/topic-1 => {"version": 1, "acls": [ { "host":"host1", "permissionType": "Allow","operation": "Read","principal": "User:alice"}]}
* /kafka-acl/Cluster/kafka-cluster => {"version": 1, "acls": [ { "host":"host1", "permissionType": "Allow","operation": "Read","principal": "User:alice"}]}
*
* // Prefixed patterns:
* /kafka-acl-extended/PREFIXED/Group/group-1 => {"version": 1, "acls": [ { "host":"host1", "permissionType": "Allow","operation": "Read","principal": "User:alice"}]}
* </pre>
*
* Acl change events are also stored under two paths:
* <ul>
* <li>[[org.apache.kafka.common.resource.PatternType#LITERAL Literal]] patterns are stored under '/kafka-acl-changes'.
* The format is a UTF8 string in the form: <resource-type>:<resource-name></li>
* <li>All other patterns are stored under '/kafka-acl-extended-changes'
* The format is JSON, as defined by [[kafka.zk.ExtendedAclChangeEvent]]</li>
* </ul>
*/
/** Root location of ACLs for one resource pattern type; see the layout comment above. */
sealed trait ZkAclStore {
  // pattern type this store is responsible for
  val patternType: PatternType
  // root znode under which ACLs of this pattern type are kept
  val aclPath: String

  def path(resourceType: ResourceType): String = s"$aclPath/$resourceType"

  def path(resourceType: ResourceType, resourceName: String): String = s"$aclPath/$resourceType/$resourceName"

  // companion store used to publish change notifications for this pattern family
  def changeStore: ZkAclChangeStore
}
object ZkAclStore {
  // one store per persistable pattern type; MATCH/ANY/UNKNOWN are query-only and never stored
  private val storesByType: Map[PatternType, ZkAclStore] =
    PatternType.values
      .filterNot(pt => pt == PatternType.MATCH || pt == PatternType.ANY || pt == PatternType.UNKNOWN)
      .map(pt => pt -> create(pt))
      .toMap

  val stores: Iterable[ZkAclStore] = storesByType.values

  // all root paths that must be protected with secure ACLs
  val securePaths: Iterable[String] =
    stores.flatMap(store => Set(store.aclPath, store.changeStore.aclChangePath))

  def apply(patternType: PatternType): ZkAclStore =
    storesByType.getOrElse(patternType,
      throw new KafkaException(s"Invalid pattern type: $patternType"))

  private def create(patternType: PatternType): ZkAclStore =
    if (patternType == PatternType.LITERAL) LiteralAclStore
    else new ExtendedAclStore(patternType)
}
/** Store for literal resource patterns, rooted at the legacy "/kafka-acl" path. */
object LiteralAclStore extends ZkAclStore {
  val patternType: PatternType = PatternType.LITERAL
  val aclPath: String = "/kafka-acl"

  def changeStore: ZkAclChangeStore = LiteralAclChangeStore
}

/** Store for all non-literal pattern types, rooted at "/kafka-acl-extended/&lt;type&gt;". */
class ExtendedAclStore(val patternType: PatternType) extends ZkAclStore {
  // literal patterns live in LiteralAclStore; mixing them here would corrupt the layout
  if (patternType == PatternType.LITERAL)
    throw new IllegalArgumentException("Literal pattern types are not supported")

  val aclPath: String = s"/kafka-acl-extended/${patternType.name.toLowerCase}"

  def changeStore: ZkAclChangeStore = ExtendedAclChangeStore
}
/** Callback invoked for each ACL change notification. */
trait AclChangeNotificationHandler {
  def processNotification(resource: Resource): Unit
}

/** Handle used to unsubscribe from ACL change notifications. */
trait AclChangeSubscription extends AutoCloseable {
  def close(): Unit
}

// a change-notification znode to be created: path plus serialized payload
case class AclChangeNode(path: String, bytes: Array[Byte])

/** Encodes/decodes ACL change events and wires up change listeners for one pattern family. */
sealed trait ZkAclChangeStore {
  val aclChangePath: String
  def createPath: String = s"$aclChangePath/${ZkAclChangeStore.SequenceNumberPrefix}"

  def decode(bytes: Array[Byte]): Resource

  protected def encode(resource: Resource): Array[Byte]

  def createChangeNode(resource: Resource): AclChangeNode = AclChangeNode(createPath, encode(resource))

  // subscribe handler to change notifications; close() the returned subscription to stop listening
  def createListener(handler: AclChangeNotificationHandler, zkClient: KafkaZkClient): AclChangeSubscription = {
    // adapt the typed handler to the raw byte-level NotificationHandler
    val rawHandler: NotificationHandler = new NotificationHandler {
      def processNotification(bytes: Array[Byte]): Unit =
        handler.processNotification(decode(bytes))
    }

    val aclChangeListener = new ZkNodeChangeNotificationListener(
      zkClient, aclChangePath, ZkAclChangeStore.SequenceNumberPrefix, rawHandler)

    aclChangeListener.init()

    new AclChangeSubscription {
      def close(): Unit = aclChangeListener.close()
    }
  }
}

object ZkAclChangeStore {
  val stores: Iterable[ZkAclChangeStore] = List(LiteralAclChangeStore, ExtendedAclChangeStore)

  def SequenceNumberPrefix = "acl_changes_"
}
/** Change events for literal patterns: legacy payload "&lt;ResourceType&gt;:&lt;name&gt;" in UTF-8. */
case object LiteralAclChangeStore extends ZkAclChangeStore {
  val name = "LiteralAclChangeStore"
  val aclChangePath: String = "/kafka-acl-changes"

  def encode(resource: Resource): Array[Byte] = {
    if (resource.patternType != PatternType.LITERAL)
      throw new IllegalArgumentException("Only literal resource patterns can be encoded")

    val legacyName = resource.resourceType + Resource.Separator + resource.name
    legacyName.getBytes(UTF_8)
  }

  def decode(bytes: Array[Byte]): Resource = {
    val string = new String(bytes, UTF_8)
    // limit 2: split only at the first separator occurrence
    string.split(Separator, 2) match {
      case Array(resourceType, resourceName, _*) => new Resource(ResourceType.fromString(resourceType), resourceName, PatternType.LITERAL)
      case _ => throw new IllegalArgumentException("expected a string in format ResourceType:ResourceName but got " + string)
    }
  }
}
/** Change events for non-literal patterns: payload is a JSON ExtendedAclChangeEvent. */
case object ExtendedAclChangeStore extends ZkAclChangeStore {
  val name = "ExtendedAclChangeStore"
  val aclChangePath: String = "/kafka-acl-extended-changes"

  def encode(resource: Resource): Array[Byte] = {
    // literal patterns use the legacy LiteralAclChangeStore format instead
    if (resource.patternType == PatternType.LITERAL)
      throw new IllegalArgumentException("Literal pattern types are not supported")

    Json.encodeAsBytes(ExtendedAclChangeEvent(
      ExtendedAclChangeEvent.currentVersion,
      resource.resourceType.name,
      resource.name,
      resource.patternType.name))
  }

  def decode(bytes: Array[Byte]): Resource = {
    val changeEvent = Json.parseBytesAs[ExtendedAclChangeEvent](bytes) match {
      case Right(event) => event
      case Left(e) => throw new IllegalArgumentException("Failed to parse ACL change event", e)
    }

    changeEvent.toResource match {
      case Success(r) => r
      case Failure(e) => throw new IllegalArgumentException("Failed to convert ACL change event to resource", e)
    }
  }
}
/** Znode holding the versioned ACL set for a single resource pattern. */
object ResourceZNode {
  def path(resource: Resource): String = ZkAclStore(resource.patternType).path(resource.resourceType, resource.name)

  def encode(acls: Set[Acl]): Array[Byte] = Json.encodeAsBytes(Acl.toJsonCompatibleMap(acls).asJava)
  // the znode version is carried along so subsequent updates can be conditional
  def decode(bytes: Array[Byte], stat: Stat): VersionedAcls = VersionedAcls(Acl.fromBytes(bytes), stat.getVersion)
}

object ExtendedAclChangeEvent {
  val currentVersion: Int = 1
}

/** Jackson-mapped ACL change event for non-literal resource patterns. */
case class ExtendedAclChangeEvent(@BeanProperty @JsonProperty("version") version: Int,
                                  @BeanProperty @JsonProperty("resourceType") resourceType: String,
                                  @BeanProperty @JsonProperty("name") name: String,
                                  @BeanProperty @JsonProperty("patternType") patternType: String) {
  // reject events produced by a newer, unknown format version
  if (version > ExtendedAclChangeEvent.currentVersion)
    throw new UnsupportedVersionException(s"Acl change event received for unsupported version: $version")

  // Failure if resourceType/patternType strings cannot be parsed
  def toResource: Try[Resource] = {
    for {
      resType <- Try(ResourceType.fromString(resourceType))
      patType <- Try(PatternType.fromString(patternType))
      resource = Resource(resType, name, patType)
    } yield resource
  }
}
object ClusterZNode {
  def path = "/cluster"
}

/** "/cluster/id": JSON of the form {"version": "1", "id": "&lt;clusterId&gt;"}. */
object ClusterIdZNode {
  def path = s"${ClusterZNode.path}/id"

  def toJson(id: String): Array[Byte] = {
    Json.encodeAsBytes(Map("version" -> "1", "id" -> id).asJava)
  }

  /**
   * Extract the cluster id from the znode payload.
   * Throws KafkaException (with the offending JSON text) if the payload cannot be parsed.
   */
  def fromJson(clusterIdJson: Array[Byte]): String = {
    Json.parseBytes(clusterIdJson).map(_.asJsonObject("id").to[String]).getOrElse {
      // FIX: interpolating the Array[Byte] directly printed a useless JVM array
      // reference like "[B@1a2b3c"; decode the bytes so the message shows the JSON
      throw new KafkaException(s"Failed to parse the cluster id json ${new String(clusterIdJson, UTF_8)}")
    }
  }
}
// Znode under /brokers used for sequence-id generation.
object BrokerSequenceIdZNode {
  def path = s"${BrokersZNode.path}/seqid"
}

// Znode holding the latest producer-id block (name-derived; payload format
// is defined elsewhere — confirm against the producer-id manager).
object ProducerIdBlockZNode {
  def path = "/latest_producer_id_block"
}

// Root znode for all delegation-token state.
object DelegationTokenAuthZNode {
  def path = "/delegation_token"
}

// Parent of the sequential token-change notification znodes.
object DelegationTokenChangeNotificationZNode {
  def path = s"${DelegationTokenAuthZNode.path}/token_changes"
}
/**
 * Sequential change-notification znodes for delegation tokens. Each node's
 * payload is just the token id, stored as UTF-8 bytes.
 */
object DelegationTokenChangeNotificationSequenceZNode {
  val SequenceNumberPrefix = "token_change_"

  // Prefix path handed to ZooKeeper when creating a new sequential node.
  def createPath: String = s"${DelegationTokenChangeNotificationZNode.path}/$SequenceNumberPrefix"

  // Full path of an existing notification node, for deletion.
  def deletePath(sequenceNode: String): String = s"${DelegationTokenChangeNotificationZNode.path}/$sequenceNode"

  def encode(tokenId: String): Array[Byte] = tokenId.getBytes(UTF_8)
  def decode(bytes: Array[Byte]): String = new String(bytes, UTF_8)
}
// Parent znode of all stored delegation tokens.
object DelegationTokensZNode {
  def path = s"${DelegationTokenAuthZNode.path}/tokens"
}

// Per-token znode; token information serialized as JSON.
object DelegationTokenInfoZNode {
  def path(tokenId: String) = s"${DelegationTokensZNode.path}/$tokenId"
  def encode(token: DelegationToken): Array[Byte] = Json.encodeAsBytes(DelegationTokenManager.toJsonCompatibleMap(token).asJava)
  // Decoding is delegated to DelegationTokenManager; presumably None on a
  // malformed payload — confirm against DelegationTokenManager.fromBytes.
  def decode(bytes: Array[Byte]): Option[TokenInformation] = DelegationTokenManager.fromBytes(bytes)
}
/** Registries of top-level ZooKeeper paths used for securing and bootstrapping. */
object ZkData {

  /** Top-level paths that receive restrictive ACLs when ZK security is enabled.
    * Important: every new top-level ZooKeeper path must be added to this Seq. */
  val SecureRootPaths = Seq(
    AdminZNode.path,
    BrokersZNode.path,
    ClusterZNode.path,
    ConfigZNode.path,
    ControllerZNode.path,
    ControllerEpochZNode.path,
    IsrChangeNotificationZNode.path,
    ProducerIdBlockZNode.path,
    LogDirEventNotificationZNode.path,
    DelegationTokenAuthZNode.path) ++ ZkAclStore.securePaths

  /** Persistent ZK paths that should exist on kafka broker startup. */
  val PersistentZkPaths = Seq(
    "/consumers", // old consumer path
    BrokerIdsZNode.path,
    TopicsZNode.path,
    ConfigEntityChangeNotificationZNode.path,
    DeleteTopicsZNode.path,
    BrokerSequenceIdZNode.path,
    IsrChangeNotificationZNode.path,
    ProducerIdBlockZNode.path,
    LogDirEventNotificationZNode.path
  ) ++ ConfigType.all.map(ConfigEntityTypeZNode.path)

  /** Roots whose contents must never be world-readable. */
  val SensitiveRootPaths = Seq(
    ConfigEntityTypeZNode.path(ConfigType.User),
    ConfigEntityTypeZNode.path(ConfigType.Broker),
    DelegationTokensZNode.path
  )

  /** True when `path` lives under one of the sensitive roots. */
  def sensitivePath(path: String): Boolean =
    path != null && SensitiveRootPaths.exists(path.startsWith)

  /** ACLs to apply at `path`: fully open when security is off; otherwise
    * creator-all, with world-read added only for non-sensitive paths. */
  def defaultAcls(isSecure: Boolean, path: String): Seq[ACL] =
    if (!isSecure) {
      ZooDefs.Ids.OPEN_ACL_UNSAFE.asScala
    } else {
      val acls = new ArrayBuffer[ACL]
      acls ++= ZooDefs.Ids.CREATOR_ALL_ACL.asScala
      if (!sensitivePath(path))
        acls ++= ZooDefs.Ids.READ_ACL_UNSAFE.asScala
      acls
    }
}
| richhaase/kafka | core/src/main/scala/kafka/zk/ZkData.scala | Scala | apache-2.0 | 28,773 |
/**
*
* Copyright (C) 2017 University of Bamberg, Software Technologies Research Group
* <https://www.uni-bamberg.de/>, <http://www.swt-bamberg.de/>
*
* This file is part of the Data Structure Investigator (DSI) project, which received financial support by the
* German Research Foundation (DFG) under grant no. LU 1748/4-1, see
* <http://www.swt-bamberg.de/dsi/>.
*
* DSI is licensed under the GNU GENERAL PUBLIC LICENSE (Version 3), see
* the LICENSE file at the project's top-level directory for details or consult <http://www.gnu.org/licenses/>.
*
* DSI is free software: you can redistribute it and/or modify it under the
* terms of the GNU General Public License as published by the Free Software
* Foundation, either version 3 of the License, or any later version.
*
* DSI is a RESEARCH PROTOTYPE and distributed WITHOUT ANY
* WARRANTY, without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* The following people contributed to the conception and realization of the present DSI distribution (in
* alphabetic order by surname):
*
* - Jan H. Boockmann
* - Gerald Lüttgen
* - Thomas Rupprecht
* - David H. White
*
*/
/**
* @author DSI
*
* IDsOliEPTWriter.scala created on Jan 28, 2015
*
* Description: Interface for DSI's entry pointer tag writer component
*/
package entrypoint
/**
* @author DSI
*
*/
/**
 * Interface for DSI's entry pointer tag (EPT) writer component.
 * Implementations produce the EPT trace as a String; whether the returned
 * String is the serialized trace itself or a status/path is not visible from
 * this chunk — confirm against the concrete implementations.
 */
trait IDsOliEPTWriter {

  def writeEPTTrace() : String
} | uniba-swt/DSIsrc | src/entrypoint/IDsOliEPTWriter.scala | Scala | gpl-3.0 | 1,502 |
package au.com.dius.pact.model
import org.specs2.mutable.Specification
import au.com.dius.pact.model.Pact.MergeSuccess
import au.com.dius.pact.model.Pact.MergeConflict
import org.junit.runner.RunWith
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class PactSpec extends Specification {

  "Pact" should {

    // interactionFor must locate an interaction by (description, providerState).
    "Locate Interactions" in {
      val description = "descriptiondata"
      val state = Some("stateData")

      val interaction = Interaction(
        description,
        state,
        Request(HttpMethod.Get,"", None, None, None, None),
        Response(200, None, None, None)
      )

      val pact = Pact(
        Provider("foo"),
        Consumer("bar"),
        Seq(interaction)
      )

      pact.interactionFor(description, state) must beSome(interaction)
    }

    // Merge semantics: interactions that differ in description or state are
    // concatenated; identical ones are deduplicated; same (description, state)
    // with a different request or response is a conflict.
    "merge" should {
      import Fixtures._

      "allow different descriptions" in {
        val newInteractions = Seq(interaction.copy(description = "different"))
        val result = pact merge pact.copy(interactions = newInteractions)
        result must beEqualTo(MergeSuccess(Pact(provider, consumer, interactions ++ newInteractions )))
      }

      "allow different states" in {
        val newInteractions = Seq(interaction.copy(providerState = Some("different")))
        val result = pact merge pact.copy(interactions = newInteractions)
        result must beEqualTo(MergeSuccess(Pact(provider, consumer, interactions ++ newInteractions )))
      }

      "allow identical interactions without duplication" in {
        val result = pact merge pact.copy()
        result must beEqualTo(MergeSuccess(pact))
      }

      "refuse different requests for identical description and states" in {
        val newInteractions = Seq(interaction.copy(request = request.copy(path = "different")))
        val result = pact merge pact.copy(interactions = newInteractions)
        result must beEqualTo(MergeConflict(Seq((interaction, newInteractions.head))))
      }

      "refuse different responses for identical description and states" in {
        val newInteractions = Seq(interaction.copy(response = response.copy(status = 503)))
        val result = pact merge pact.copy(interactions = newInteractions)
        result must beEqualTo(MergeConflict(Seq((interaction, newInteractions.head))))
      }
    }

    // mimeType resolution order (as exercised below): Content-Type header first
    // (charset suffix stripped), then regex-based sniffing of the body, with
    // text/plain as the final fallback.
    "mimeType" should {

      "default to text" in {
        val request = Request(HttpMethod.Get,"", None, None, None, None)
        request.mimeType must beEqualTo("text/plain")
      }

      "get the mime type from the headers" in {
        val request = Request(HttpMethod.Get,"", None, Some(Map("Content-Type" -> "text/html")), None, None)
        request.mimeType must beEqualTo("text/html")
      }

      "handle charsets in the content type" in {
        val request = Request(HttpMethod.Get,"", None, Some(Map("Content-Type" -> "application/json; charset=UTF-8")), None, None)
        request.mimeType must beEqualTo("application/json")
      }

      "use regexp detection when not supplied" should {

        // Objects, arrays and bare JSON strings all count as application/json.
        "for json" in {
          var request = Request(HttpMethod.Get,"", None, None, Some("{\\"json\\": true}"), None)
          request.mimeType must beEqualTo("application/json")
          request = Request(HttpMethod.Get,"", None, None, Some("{}"), None)
          request.mimeType must beEqualTo("application/json")
          request = Request(HttpMethod.Get,"", None, None, Some("[]"), None)
          request.mimeType must beEqualTo("application/json")
          request = Request(HttpMethod.Get,"", None, None, Some("[1,2,3]"), None)
          request.mimeType must beEqualTo("application/json")
          request = Request(HttpMethod.Get,"", None, None, Some(" \\"string\\" "), None)
          request.mimeType must beEqualTo("application/json")
        }

        // Both with and without an XML declaration.
        "for xml" in {
          var request = Request(HttpMethod.Get,"", None, None, Some("<?xml version=\\"1.0\\" encoding=\\"UTF-8\\"?>\\n<json>false</json>"), None)
          request.mimeType must beEqualTo("application/xml")
          request = Request(HttpMethod.Get,"", None, None, Some("<json>false</json>"), None)
          request.mimeType must beEqualTo("application/xml")
        }

        "for text" in {
          val request = Request(HttpMethod.Get,"", None, None, Some("this is not json"), None)
          request.mimeType must beEqualTo("text/plain")
        }

        // <html> without an XML prologue is sniffed as text/html, not XML.
        "for html" in {
          val request = Request(HttpMethod.Get,"", None, None, Some("<html><body>this is also not json</body></html>"), None)
          request.mimeType must beEqualTo("text/html")
        }
      }
    }

    "request" should {

      // Lookup is case-insensitive but the returned key keeps its original casing.
      "provide an way to find a header case insensitive" in {
        val request = Request(HttpMethod.Get, "", None, Some(Map("Cookie" -> "cookie-value")), None, None)
        request.findHeaderByCaseInsensitiveKey("cookie") must beSome("Cookie" -> "cookie-value")
      }
    }
  }
}
| caoquendo/pact-jvm | pact-jvm-model/src/test/scala/au/com/dius/pact/model/PactSpec.scala | Scala | apache-2.0 | 5,008 |
package com.arcusys.learn.liferay.services
import com.liferay.portlet.social.model.SocialActivityCounter
import scala.collection.JavaConverters._
import com.liferay.portal.kernel.dao.orm.RestrictionsFactoryUtil
import com.liferay.portlet.social.service.SocialActivityCounterLocalServiceUtil
/**
* Created by mminin on 29.08.14.
*/
object SocialActivityCounterLocalServiceHelper {

  /**
   * Sums the `totalValue` of every social-activity counter with the given name
   * belonging to `userId` (the user id is stored in the `classPK` column).
   *
   * @return None when no matching counters exist, otherwise Some(sum).
   *
   * Fixes over the original: the `isInstanceOf`/`asInstanceOf` filter+map pair
   * is replaced by a single `collect`, and the `case values: Seq[Int]` match is
   * removed — that pattern was unchecked due to type erasure (the `Int` element
   * type was never actually verified at runtime).
   */
  def getUserValue(userId: Long, counterNames: String): Option[Int] = {
    val totals = SocialActivityCounterLocalServiceUtil.dynamicQuery(
      SocialActivityCounterLocalServiceUtil.dynamicQuery
        .add(RestrictionsFactoryUtil.eq("classPK", userId)) // user Id in classpk column
        .add(RestrictionsFactoryUtil.eq("name", counterNames)))
      .asScala
      .collect { case counter: SocialActivityCounter => counter.getTotalValue }

    if (totals.isEmpty) None else Some(totals.sum)
  }
}
| ViLPy/Valamis | learn-liferay620-services/src/main/scala/com/arcusys/learn/liferay/services/SocialActivityCounterLocalServiceHelper.scala | Scala | lgpl-3.0 | 966 |
package com.datastax.spark.connector.rdd
import com.datastax.driver.core.{ResultSet, Session}
import com.datastax.spark.connector._
import com.datastax.spark.connector.cql._
import com.datastax.spark.connector.rdd.reader._
import com.datastax.spark.connector.writer._
import com.google.common.util.concurrent.{FutureCallback, Futures, SettableFuture}
import org.apache.spark.rdd.RDD
import scala.reflect.ClassTag
/**
* An [[org.apache.spark.rdd.RDD RDD]] that will do a selecting join between `left` RDD and the specified
* Cassandra Table This will perform individual selects to retrieve the rows from Cassandra and will take
* advantage of RDDs that have been partitioned with the
* [[com.datastax.spark.connector.rdd.partitioner.ReplicaPartitioner]]
*
* @tparam L item type on the left side of the join (any RDD)
* @tparam R item type on the right side of the join (fetched from Cassandra)
*/
class CassandraJoinRDD[L, R] private[connector](
    override val left: RDD[L],
    val keyspaceName: String,
    val tableName: String,
    val connector: CassandraConnector,
    val columnNames: ColumnSelector = AllColumns,
    val joinColumns: ColumnSelector = PartitionKeyColumns,
    val where: CqlWhereClause = CqlWhereClause.empty,
    val limit: Option[Long] = None,
    val clusteringOrder: Option[ClusteringOrder] = None,
    val readConf: ReadConf = ReadConf(),
    manualRowReader: Option[RowReader[R]] = None,
    override val manualRowWriter: Option[RowWriter[L]] = None)(
  implicit
  val leftClassTag: ClassTag[L],
  val rightClassTag: ClassTag[R],
  @transient val rowWriterFactory: RowWriterFactory[L],
  @transient val rowReaderFactory: RowReaderFactory[R])
  extends CassandraRDD[(L, R)](left.sparkContext, left.dependencies)
  with CassandraTableRowReaderProvider[R]
  with AbstractCassandraJoin[L, R] {

  override type Self = CassandraJoinRDD[L, R]

  override protected val classTag = rightClassTag

  // A manually supplied reader (used by applyToRDD for serialized templates)
  // takes precedence over one derived from the table definition.
  override lazy val rowReader: RowReader[R] = manualRowReader match {
    case Some(rr) => rr
    case None => rowReaderFactory.rowReader(tableDef, columnNames.selectFrom(tableDef))
  }

  /** Copy constructor used by CassandraRDD combinators (select/where/limit/...).
    * Note: the `joinColumns` of this instance are carried over unchanged. */
  override protected def copy(
    columnNames: ColumnSelector = columnNames,
    where: CqlWhereClause = where,
    limit: Option[Long] = limit,
    clusteringOrder: Option[ClusteringOrder] = None,
    readConf: ReadConf = readConf,
    connector: CassandraConnector = connector
  ): Self = {
    new CassandraJoinRDD[L, R](
      left = left,
      keyspaceName = keyspaceName,
      tableName = tableName,
      connector = connector,
      columnNames = columnNames,
      joinColumns = joinColumns,
      where = where,
      limit = limit,
      clusteringOrder = clusteringOrder,
      readConf = readConf
    )
  }

  /** Counts joined rows server-side by re-running the join with a COUNT
    * projection (RowCountRef) and summing the per-key counts. */
  override def cassandraCount(): Long = {
    columnNames match {
      case SomeColumns(_) =>
        logWarning("You are about to count rows but an explicit projection has been specified.")
      case _ =>
    }
    val counts =
      new CassandraJoinRDD[L, Long](
        left = left,
        connector = connector,
        keyspaceName = keyspaceName,
        tableName = tableName,
        columnNames = SomeColumns(RowCountRef),
        joinColumns = joinColumns,
        where = where,
        limit = limit,
        clusteringOrder = clusteringOrder,
        readConf = readConf
      )
    counts.map(_._2).reduce(_ + _)
  }

  /** Returns a new RDD joining on the given columns instead of the default. */
  def on(joinColumns: ColumnSelector): CassandraJoinRDD[L, R] = {
    new CassandraJoinRDD[L, R](
      left = left,
      connector = connector,
      keyspaceName = keyspaceName,
      tableName = tableName,
      columnNames = columnNames,
      joinColumns = joinColumns,
      where = where,
      limit = limit,
      clusteringOrder = clusteringOrder,
      readConf = readConf
    )
  }

  /**
   * Issues one async SELECT per left-side element and pairs each left element
   * with every matching right-side row.
   *
   * Note the eager `.toList`: all queries for the partition are submitted up
   * front (throttled by the rate limiter) before any result is consumed; only
   * then are the futures drained in order.
   */
  private[rdd] def fetchIterator(
    session: Session,
    bsb: BoundStatementBuilder[L],
    leftIterator: Iterator[L]
  ): Iterator[(L, R)] = {
    val columnNames = selectedColumnRefs.map(_.selectedAs).toIndexedSeq

    val rateLimiter = new RateLimiter(
      readConf.throughputJoinQueryPerSec, readConf.throughputJoinQueryPerSec
    )

    // Binds and executes the query for one left element; the resulting future
    // completes with the (left, right) pairs once the result set callback fires.
    def pairWithRight(left: L): SettableFuture[Iterator[(L, R)]] = {
      val resultFuture = SettableFuture.create[Iterator[(L, R)]]
      val leftSide = Iterator.continually(left)

      val queryFuture = session.executeAsync(bsb.bind(left))
      Futures.addCallback(queryFuture, new FutureCallback[ResultSet] {
        def onSuccess(rs: ResultSet) {
          val resultSet = new PrefetchingResultSetIterator(rs, fetchSize)
          val columnMetaData = CassandraRowMetadata.fromResultSet(columnNames, rs);
          val rightSide = resultSet.map(rowReader.read(_, columnMetaData))
          resultFuture.set(leftSide.zip(rightSide))
        }
        def onFailure(throwable: Throwable) {
          resultFuture.setException(throwable)
        }
      })
      resultFuture
    }

    val queryFutures = leftIterator.map(left => {
      rateLimiter.maybeSleep(1)
      pairWithRight(left)
    }).toList
    // Future.get blocks until each query completes; failures propagate here.
    queryFutures.iterator.flatMap(_.get)
  }

  /**
   * Turns this CassandraJoinRDD into a factory for converting other RDD's after being serialized
   * This method is for streaming operations as it allows us to Serialize a template JoinRDD
   * and the use that serializable template in the DStream closure. This gives us a fully serializable
   * joinWithCassandra operation
   */
  private[connector] def applyToRDD(left: RDD[L]): CassandraJoinRDD[L, R] = {
    new CassandraJoinRDD[L, R](
      left,
      keyspaceName,
      tableName,
      connector,
      columnNames,
      joinColumns,
      where,
      limit,
      clusteringOrder,
      readConf,
      Some(rowReader),
      Some(rowWriter)
    )
  }
}
| ponkin/spark-cassandra-connector | spark-cassandra-connector/src/main/scala/com/datastax/spark/connector/rdd/CassandraJoinRDD.scala | Scala | apache-2.0 | 5,756 |
package is.hail.types.physical
import is.hail.annotations._
import is.hail.check.Gen
import is.hail.expr.ir.EmitMethodBuilder
import is.hail.types.virtual.TSet
/**
 * Physical set type; its virtual counterpart is [[TSet]] and its fundamental
 * (storage-level) representation is an array (see the cast in
 * `arrayFundamentalType`).
 */
abstract class PSet extends PContainer {
  lazy val virtualType: TSet = TSet(elementType.virtualType)

  def arrayFundamentalType: PArray = fundamentalType.asInstanceOf[PArray]

  // Ordering is delegated to the set ordering; both sides must be of the same
  // physical type (enforced by the assert).
  def codeOrdering(mb: EmitMethodBuilder[_], other: PType): CodeOrdering = {
    assert(other isOfType this)
    CodeOrdering.setOrdering(this, other.asInstanceOf[PSet], mb)
  }

  // Property-test generator: builds Set values from the element generator.
  override def genNonmissingValue: Gen[Annotation] = Gen.buildableOf[Set](elementType.genValue)
}
| danking/hail | hail/src/main/scala/is/hail/types/physical/PSet.scala | Scala | mit | 617 |
package com.xhachi.gae4s.datastore
/** Sample Scala Enumeration with two members (ids 0 and 1, names from the val names). */
object ScalaEnum extends Enumeration {
  val ScalaEnum1: Value = Value
  val ScalaEnum2: Value = Value
}
| thachi/gae4s | core/src/test/scala/com/xhachi/gae4s/datastore/ScalaEnum.scala | Scala | apache-2.0 | 117 |
package be.objectify.deadbolt.scala.test.controllers
import play.api.test.WithServer
/**
 * Integration spec for Deadbolt's `restrict` constraint. As exercised by the
 * cases below, the test users map to roles as: greet -> {foo, bar},
 * trippel -> {foo}, steve -> {bar}, lotte -> {} (inferred from the case
 * descriptions — confirm against the test subject DAO).
 * The user is selected via the "x-deadbolt-test-user" request header.
 */
abstract class AbstractRestrictSpec extends AbstractControllerSpec {

  "The application" should {

    // restrict(foo AND bar): subject needs both roles.
    "when the foo AND bar roles are required" >> {
      "deny access if no subject is present" in new WithServer(app = testApp, port = 3333) {
        await(ws(implicitApp).url(s"http://localhost:3333/$pathSegment/restrict/fooAndBar").get()).status must equalTo(UNAUTHORIZED)
      }
      "allow access if the subject has the foo and bar roles" in new WithServer(app = testApp, port = 3333) {
        await(ws(implicitApp).url(s"http://localhost:3333/$pathSegment/restrict/fooAndBar").addHttpHeaders(("x-deadbolt-test-user", "greet")).get()).status must equalTo(OK)
      }
      "deny access if the subject has the foo but not the bar roles" in new WithServer(app = testApp, port = 3333) {
        await(ws(implicitApp).url(s"http://localhost:3333/$pathSegment/restrict/fooAndBar").addHttpHeaders(("x-deadbolt-test-user", "trippel")).get()).status must equalTo(UNAUTHORIZED)
      }
      "deny access if the subject has the bar but not the foo roles" in new WithServer(app = testApp, port = 3333) {
        await(ws(implicitApp).url(s"http://localhost:3333/$pathSegment/restrict/fooAndBar").addHttpHeaders(("x-deadbolt-test-user", "steve")).get()).status must equalTo(UNAUTHORIZED)
      }
    }

    // restrict(foo OR bar): either role suffices.
    "when the foo OR bar roles are required" >> {
      "deny access if no subject is present" in new WithServer(app = testApp, port = 3333) {
        await(ws(implicitApp).url(s"http://localhost:3333/$pathSegment/restrict/fooOrBar").get()).status must equalTo(UNAUTHORIZED)
      }
      "allow access if the subject has the foo and bar roles" in new WithServer(app = testApp, port = 3333) {
        await(ws(implicitApp).url(s"http://localhost:3333/$pathSegment/restrict/fooOrBar").addHttpHeaders(("x-deadbolt-test-user", "greet")).get()).status must equalTo(OK)
      }
      "allow access if the subject has the foo but not the bar roles" in new WithServer(app = testApp, port = 3333) {
        await(ws(implicitApp).url(s"http://localhost:3333/$pathSegment/restrict/fooOrBar").addHttpHeaders(("x-deadbolt-test-user", "trippel")).get()).status must equalTo(OK)
      }
      "allow access if the subject has the bar but not the foo roles" in new WithServer(app = testApp, port = 3333) {
        await(ws(implicitApp).url(s"http://localhost:3333/$pathSegment/restrict/fooOrBar").addHttpHeaders(("x-deadbolt-test-user", "steve")).get()).status must equalTo(OK)
      }
      "deny access if the subject has neither the bar or foo roles" in new WithServer(app = testApp, port = 3333) {
        await(ws(implicitApp).url(s"http://localhost:3333/$pathSegment/restrict/fooOrBar").addHttpHeaders(("x-deadbolt-test-user", "lotte")).get()).status must equalTo(UNAUTHORIZED)
      }
    }

    // restrict(foo AND !bar): foo required, bar must be absent.
    "when the foo AND NOT the bar roles are required" >> {
      "deny access if no subject is present" in new WithServer(app = testApp, port = 3333) {
        await(ws(implicitApp).url(s"http://localhost:3333/$pathSegment/restrict/fooAndNotBar").get()).status must equalTo(UNAUTHORIZED)
      }
      "deny access if the subject has the foo and bar roles" in new WithServer(app = testApp, port = 3333) {
        await(ws(implicitApp).url(s"http://localhost:3333/$pathSegment/restrict/fooAndNotBar").addHttpHeaders(("x-deadbolt-test-user", "greet")).get()).status must equalTo(UNAUTHORIZED)
      }
      "allow access if the subject has the foo but not the bar roles" in new WithServer(app = testApp, port = 3333) {
        await(ws(implicitApp).url(s"http://localhost:3333/$pathSegment/restrict/fooAndNotBar").addHttpHeaders(("x-deadbolt-test-user", "trippel")).get()).status must equalTo(OK)
      }
      "deny access if the subject has the bar but not the foo roles" in new WithServer(app = testApp, port = 3333) {
        await(ws(implicitApp).url(s"http://localhost:3333/$pathSegment/restrict/fooAndNotBar").addHttpHeaders(("x-deadbolt-test-user", "steve")).get()).status must equalTo(UNAUTHORIZED)
      }
      "deny access if the subject has neither the bar or foo roles" in new WithServer(app = testApp, port = 3333) {
        await(ws(implicitApp).url(s"http://localhost:3333/$pathSegment/restrict/fooAndNotBar").addHttpHeaders(("x-deadbolt-test-user", "lotte")).get()).status must equalTo(UNAUTHORIZED)
      }
    }

    // restrict(foo OR !bar): note a subject with ONLY bar is denied, and a
    // subject with no roles at all is allowed (the negation matches).
    "when the foo OR NOT the bar roles are required" >> {
      "deny access if no subject is present" in new WithServer(app = testApp, port = 3333) {
        await(ws(implicitApp).url(s"http://localhost:3333/$pathSegment/restrict/fooOrNotBar").get()).status must equalTo(UNAUTHORIZED)
      }
      "allow access if the subject has the foo and bar roles" in new WithServer(app = testApp, port = 3333) {
        await(ws(implicitApp).url(s"http://localhost:3333/$pathSegment/restrict/fooOrNotBar").addHttpHeaders(("x-deadbolt-test-user", "greet")).get()).status must equalTo(OK)
      }
      "allow access if the subject has the foo but not the bar roles" in new WithServer(app = testApp, port = 3333) {
        await(ws(implicitApp).url(s"http://localhost:3333/$pathSegment/restrict/fooOrNotBar").addHttpHeaders(("x-deadbolt-test-user", "trippel")).get()).status must equalTo(OK)
      }
      "deny access if the subject only has the bar role" in new WithServer(app = testApp, port = 3333) {
        await(ws(implicitApp).url(s"http://localhost:3333/$pathSegment/restrict/fooOrNotBar").addHttpHeaders(("x-deadbolt-test-user", "steve")).get()).status must equalTo(UNAUTHORIZED)
      }
      "allow access if the subject has neither the bar or foo roles" in new WithServer(app = testApp, port = 3333) {
        await(ws(implicitApp).url(s"http://localhost:3333/$pathSegment/restrict/fooOrNotBar").addHttpHeaders(("x-deadbolt-test-user", "lotte")).get()).status must equalTo(OK)
      }
    }
  }
}
| schaloner/deadbolt-2-scala | test-app/test/be/objectify/deadbolt/scala/test/controllers/AbstractRestrictSpec.scala | Scala | apache-2.0 | 5,917 |
package me.stojan.reunion.structure
/**
 * Generic algebraic element: a typed wrapper exposing the underlying value `V`.
 */
trait Element[V] {
  def value: V
}
/**
 * Describes the structure to which an element belongs and acts as a factory
 * lifting raw values of `V` into structured elements `E`.
 */
trait Descriptor[V, E <: Element[V]] {

  /**
   * Obtain a structured element from the internal type.
   */
  def obtain(value: V): E
}
/**
 * Describes a group: a factory for group elements plus the distinguished
 * identity element.
 */
trait GroupDescriptor[V] extends Descriptor[V, Group[V]] {

  /**
   * Identity element of the group.
   */
  def identity: Group[V]
}
/**
 * Describes a ring: a factory for ring elements plus the two distinguished
 * identities (additive zero and multiplicative one).
 */
trait RingDescriptor[V] extends Descriptor[V, Ring[V]] {

  /**
   * Additive identity element.
   */
  def zero: Ring[V]

  /**
   * Multiplicative identity element.
   */
  def one: Ring[V]
}
/**
 * Describes a field: a factory for field elements plus the two distinguished
 * identities (additive zero and multiplicative one).
 */
trait FieldDescriptor[V] extends Descriptor[V, Field[V]] {

  /**
   * Additive identity element.
   */
  def zero: Field[V]

  /**
   * Multiplicative identity element.
   */
  def one: Field[V]
}
/**
 * A generic Abelian Group element. Provides the group operation (written
 * additively), the inverse, and a partial ordering. Default implementations
 * are supplied for `>=`, `<=` and `-` in terms of the abstract members.
 */
trait Group[V] extends Element[V] {

  /**
   * This group's descriptor.
   */
  def descriptor: GroupDescriptor[V]

  /**
   * Whether this is the identity element.
   */
  def isIdentity: Boolean
  def nonIdentity: Boolean = !isIdentity

  /**
   * Check whether this group element is greater than (partially ordered above)
   * the group element `g`.
   */
  def >(g: Group[V]): Boolean

  /**
   * Check whether this group element is greater than or equal
   * (partially ordered above) to the group element `g`.
   */
  def >=(g: Group[V]): Boolean = (this > g) || (this == g)

  /**
   * Check whether this group element is less than (partially ordered below) the
   * group element `g`.
   */
  def <(g: Group[V]): Boolean

  /**
   * Check whether this group element is less than or equal (partially ordered
   * below) the group element `g`.
   */
  def <=(g: Group[V]): Boolean = (this < g) || (this == g)

  /**
   * Inverse element. (Additive inverse.)
   */
  def unary_-(): Group[V]

  /**
   * Commutative operation of the group. (Addition.)
   */
  def +(g: Group[V]): Group[V]

  /**
   * Non-commutative inverse addition, defined as `this + (-g)`.
   */
  def -(g: Group[V]): Group[V] = this + (-g)
}
/**
 * A generic Ring element: an additive Abelian group combined with an
 * associative multiplication that distributes over addition. Default
 * implementations are supplied for `>=`, `<=` and `-`.
 */
trait Ring[V] extends Element[V] {

  /**
   * This ring's descriptor.
   */
  def descriptor: RingDescriptor[V]

  /**
   * Whether this is the additive identity element.
   */
  def isZero: Boolean
  def nonZero: Boolean = !isZero

  /**
   * Whether this is the multiplicative identity element.
   */
  def isOne: Boolean
  def nonOne: Boolean = !isOne

  /**
   * Check whether this group element is greater than (partially ordered above)
   * the group element `r`.
   */
  def >(r: Ring[V]): Boolean

  /**
   * Check whether this ring element is greater than or equal
   * (partially ordered above) to the ring element `r`.
   */
  def >=(r: Ring[V]): Boolean = (this > r) || (this == r)

  /**
   * Check whether this ring element is less than (partially ordered below) the
   * ring element `r`.
   */
  def <(r: Ring[V]): Boolean

  /**
   * Check whether this ring element is less than or equal (partially ordered
   * below) the ring element `r`.
   */
  def <=(r: Ring[V]): Boolean = (this < r) || (this == r)

  /**
   * Additive inverse element.
   */
  def unary_-(): Ring[V]

  /**
   * Commutative operation on all elements in the set. (Addition.)
   */
  def +(r: Ring[V]): Ring[V]

  /**
   * Non-commutative operation on all elements in the set. (Inverse addition,
   * defined as `this + (-r)`.)
   */
  def -(r: Ring[V]): Ring[V] = this + (-r)

  /**
   * Associative multiplication operation on all elements in the set. (Need
   * not be commutative.) It also must be distributive over addition.
   */
  def *(r: Ring[V]): Ring[V]
}
/**
 * A generic field element: a ring whose non-zero elements also have
 * multiplicative inverses (`unary_~`, used by the default `/`).
 *
 * Elements are assumed to have partial ordering.
 */
trait Field[V] extends Element[V] {

  /**
   * This field's descriptor.
   */
  def descriptor: FieldDescriptor[V]

  /**
   * Whether this is the additive identity element.
   */
  def isZero: Boolean
  def nonZero: Boolean = !isZero

  /**
   * Whether this is the multiplicative identity element.
   */
  def isOne: Boolean
  def nonOne: Boolean = !isOne

  /**
   * Check whether this field element is greater than (partially ordered above)
   * the field element `f`.
   */
  def >(f: Field[V]): Boolean

  /**
   * Check whether this field element is greater than or equal
   * (partially ordered above) to the field element `f`.
   */
  def >=(f: Field[V]): Boolean = (this > f) || (this == f)

  /**
   * Check whether this field element is less than (partially ordered below) the
   * field element `f`.
   */
  def <(f: Field[V]): Boolean

  /**
   * Check whether this field element is less than or equal (partially ordered
   * below) the field element `f`.
   */
  def <=(f: Field[V]): Boolean = (this < f) || (this == f)

  /**
   * Additive inverse.
   */
  def unary_-(): Field[V]

  /**
   * Multiplicative inverse. (Not defined for `(this - this)`, i.e. zero.)
   */
  def unary_~(): Field[V]

  /**
   * Commutative addition.
   */
  def +(f: Field[V]): Field[V]

  /**
   * Non-commutative inverse addition, defined as `this + (-f)`.
   */
  def -(f: Field[V]): Field[V] = this + (-f)

  /**
   * Commutative multiplication.
   */
  def *(f: Field[V]): Field[V]

  /**
   * Non-commutative inverse multiplication, defined as `this * (~f)`.
   * (Not defined if `f == f - f`, i.e. `f` is zero.)
   */
  def /(f: Field[V]): Field[V] = this * (~f)
}
/**
 * Factories exposing the additive Abelian group of a Ring or a Field via
 * private adapter case classes that delegate every operation to the wrapped
 * element.
 */
object Group {

  // Descriptor for the additive group of a ring; identity is the ring's zero.
  private case class RAdditiveGroupDescriptor[V](ringDescriptor: RingDescriptor[V]) extends GroupDescriptor[Ring[V]] {
    override lazy val identity: Group[Ring[V]] = RAdditiveGroup(ringDescriptor.zero)

    override def obtain(value: Ring[V]): Group[Ring[V]] = RAdditiveGroup(value)
  }

  // Adapter: views a Ring element as an element of its additive group.
  private case class RAdditiveGroup[V](value: Ring[V]) extends Group[Ring[V]] {
    override val descriptor: GroupDescriptor[Ring[V]] = RAdditiveGroupDescriptor(value.descriptor)

    override def isIdentity: Boolean = value.isZero

    override def +(g: Group[Ring[V]]): Group[Ring[V]] = value + g.value
    override def unary_-(): Group[Ring[V]] = -value
    override def -(g: Group[Ring[V]]): Group[Ring[V]] = value - g.value

    override def >(g: Group[Ring[V]]): Boolean = value > g.value
    override def <(g: Group[Ring[V]]): Boolean = value < g.value
    override def <=(g: Group[Ring[V]]): Boolean = value <= g.value
    override def >=(g: Group[Ring[V]]): Boolean = value >= g.value

    // Wraps the Ring results of the delegated operations back into the adapter.
    private implicit def ringToGroup(r: Ring[V]): Group[Ring[V]] = RAdditiveGroup(r)
  }

  // Descriptor for the additive group of a field; identity is the field's zero.
  private case class FAdditiveGroupDescriptor[V](fieldDescriptor: FieldDescriptor[V]) extends GroupDescriptor[Field[V]] {
    override lazy val identity: Group[Field[V]] = obtain(fieldDescriptor.zero)

    override def obtain(value: Field[V]): Group[Field[V]] = FAdditiveGroup(value)
  }

  // Adapter: views a Field element as an element of its additive group.
  private case class FAdditiveGroup[V](value: Field[V]) extends Group[Field[V]] {
    override val descriptor: GroupDescriptor[Field[V]] = FAdditiveGroupDescriptor(value.descriptor)

    override def isIdentity: Boolean = value.isZero

    override def +(f: Group[Field[V]]): Group[Field[V]] = value + f.value
    override def unary_-(): Group[Field[V]] = -value
    override def -(f: Group[Field[V]]): Group[Field[V]] = value - f.value

    override def >(f: Group[Field[V]]): Boolean = value > f.value
    override def <(f: Group[Field[V]]): Boolean = value < f.value
    override def <=(f: Group[Field[V]]): Boolean = value <= f.value
    override def >=(f: Group[Field[V]]): Boolean = value >= f.value

    // Wraps the Field results of the delegated operations back into the adapter.
    private implicit def fieldToGroup(f: Field[V]): Group[Field[V]] = FAdditiveGroup(f)
  }

  def additive[V](ring: Ring[V]): Group[Ring[V]] = RAdditiveGroup(ring)

  def additive[V](field: Field[V]): Group[Field[V]] = FAdditiveGroup(field)
}
/**
 * Factory exposing the ring structure of a Field (every field is a ring)
 * via a private adapter case class that delegates to the wrapped element.
 */
object Ring {

  // Descriptor for the ring view of a field; zero/one come from the field.
  private case class FRingDescriptor[V](fieldDescriptor: FieldDescriptor[V]) extends RingDescriptor[Field[V]] {
    override lazy val zero: Ring[Field[V]] = obtain(fieldDescriptor.zero)
    override lazy val one: Ring[Field[V]] = obtain(fieldDescriptor.one)

    override def obtain(value: Field[V]): Ring[Field[V]] = FRing(value)
  }

  // Adapter: views a Field element as a Ring element.
  private case class FRing[V](value: Field[V]) extends Ring[Field[V]] {
    override val descriptor: RingDescriptor[Field[V]] = FRingDescriptor(value.descriptor)

    override def isZero: Boolean = value.isZero
    override def isOne: Boolean = value.isOne

    override def +(r: Ring[Field[V]]): Ring[Field[V]] = value + r.value
    override def -(r: Ring[Field[V]]): Ring[Field[V]] = value - r.value
    override def unary_-(): Ring[Field[V]] = -value
    override def *(r: Ring[Field[V]]): Ring[Field[V]] = value * r.value

    override def >(r: Ring[Field[V]]): Boolean = value > r.value
    override def <(r: Ring[Field[V]]): Boolean = value < r.value
    override def <=(r: Ring[Field[V]]): Boolean = value <= r.value
    override def >=(r: Ring[Field[V]]): Boolean = value >= r.value

    // Wraps the Field results of the delegated operations back into the adapter.
    private implicit def fieldToRing(f: Field[V]): Ring[Field[V]] = FRing(f)
  }

  def apply[V](field: Field[V]): Ring[Field[V]] = FRing(field)
}
/**
 * Holds implicits for converting `Field`s into `Ring`s and `Group`s.
 */
object FieldConversions {

  /**
   * Exposes the field's additive abelian group.
   */
  implicit def fieldToGroup[V](f: Field[V]): Group[Field[V]] = Group.additive(f)

  // Exposes the field's ring structure.
  implicit def fieldToRing[V](f: Field[V]): Ring[Field[V]] = Ring.apply(f)
}
/**
 * Holds implicits for converting `Ring`s to `Group`s.
 */
object RingConversions {

  /**
   * Exposes the ring's additive abelian group.
   */
  implicit def ringToGroup[V](r: Ring[V]): Group[Ring[V]] = Group.additive(r)
}
| hf/reunion | src/main/scala/me/stojan/reunion/structure/Structures.scala | Scala | mit | 9,567 |
package scala.pickling.inheritance3
import org.scalatest.FunSuite
import scala.pickling._, scala.pickling.Defaults._, json._
// Trait supertype used to exercise pickling via an abstract static type.
trait Person {
  val name: String
  val age: Int
}

// Abstract-class supertype; Firefighter mixes both hierarchies.
abstract class Employee {
  val salary: Int
}

// Concrete pickled type: combines the abstract class and the trait.
case class Firefighter(val name: String, val age: Int, val salary: Int, val since: Int) extends Employee with Person
class Inheritance3Test extends FunSuite {
  test("main") {
    val f = new Firefighter(
      name = "Joey",
      age = 32,
      salary = 30000,
      since = 1999
    )

    // Pickling via the concrete static type embeds the runtime class in "tpe".
    val pickleF = (f: Firefighter).pickle
    assert(pickleF.value === """
      |{
      |  "tpe": "scala.pickling.inheritance3.Firefighter",
      |  "name": "Joey",
      |  "age": 32,
      |  "salary": 30000,
      |  "since": 1999
      |}
    """.trim.stripMargin)
    assert(pickleF.unpickle[Firefighter] === f)

    // Pickling via the abstract-class static type must produce the same
    // payload (runtime type wins over the static Employee view).
    val pickleE = (f: Employee).pickle
    assert(pickleE.value === """
      |{
      |  "tpe": "scala.pickling.inheritance3.Firefighter",
      |  "name": "Joey",
      |  "age": 32,
      |  "salary": 30000,
      |  "since": 1999
      |}
    """.trim.stripMargin)
    assert(pickleE.unpickle[Employee] === f)

    // Same when pickling via the trait static type.
    val pickleP = (f: Person).pickle
    assert(pickleP.value === """
      |{
      |  "tpe": "scala.pickling.inheritance3.Firefighter",
      |  "name": "Joey",
      |  "age": 32,
      |  "salary": 30000,
      |  "since": 1999
      |}
    """.trim.stripMargin)
    assert(pickleP.unpickle[Person] === f)
  }
}
| eed3si9n/pickling-historical | core/src/test/scala/pickling/run/inheritance3.scala | Scala | bsd-3-clause | 1,469 |
/*
* Copyright 2015 Heiko Seeberger
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.heikoseeberger.reactiveflows
import akka.actor.ActorRef
import akka.cluster.Cluster
import akka.cluster.ddata.Replicator.{ Changed, Subscribe }
import akka.cluster.ddata.{ DistributedData, LWWMapKey }
import akka.cluster.pubsub.DistributedPubSubMediator.Publish
import akka.testkit.TestActor.KeepRunning
import akka.testkit.TestProbe
import java.time.Instant
import org.scalatest.{ Matchers, WordSpec }
/** Behavioural tests for `FlowFacade`: flow CRUD commands, forwarding of post
  * commands to the sharded flow actors, and replication of the flow registry
  * via Akka Distributed Data.
  */
final class FlowFacadeSpec extends WordSpec with Matchers with AkkaSpec {
  import FlowFacade._

  "FlowFacade" should {
    "correctly handle GetFlows, AddFlow and RemoveFlow commands" in {
      val sender = TestProbe()
      implicit val senderRef = sender.ref
      val mediator = TestProbe()
      val replicator = TestProbe()
      val flowFacade = system.actorOf(FlowFacade(mediator.ref, replicator.ref, system.deadLetters))
      // On startup the facade must subscribe to the replicated flows map.
      replicator.expectMsg(Subscribe(LWWMapKey[String, FlowDesc]("flows"), flowFacade))
      flowFacade ! GetFlows
      sender.expectMsg(Flows(Set.empty))
      // An empty label is rejected before any flow is created.
      flowFacade ! AddFlow("")
      sender.expectMsg(InvalidCommand("label empty"))
      flowFacade ! AddFlow("Akka")
      sender.expectMsg(FlowAdded(FlowDesc("akka", "Akka")))
      // Successful additions are published as events via the mediator.
      mediator.expectMsg(Publish(className[Event], FlowAdded(FlowDesc("akka", "Akka"))))
      // Adding the same flow twice reports the existing descriptor.
      flowFacade ! AddFlow("Akka")
      sender.expectMsg(FlowExists(FlowDesc("akka", "Akka")))
      flowFacade ! GetFlows
      sender.expectMsg(Flows(Set(FlowDesc("akka", "Akka"))))
      flowFacade ! RemoveFlow("")
      sender.expectMsg(InvalidCommand("name empty"))
      flowFacade ! RemoveFlow("akka")
      sender.expectMsg(FlowRemoved("akka"))
      mediator.expectMsg(Publish(className[Event], FlowRemoved("akka")))
      flowFacade ! GetFlows
      sender.expectMsg(Flows(Set.empty))
      // Removing an unknown flow is reported back, not silently ignored.
      flowFacade ! RemoveFlow("akka")
      sender.expectMsg(FlowUnknown("akka"))
    }
    "correctly handle GetPosts and AddPost commands" in {
      val sender = TestProbe()
      implicit val senderRef = sender.ref
      val time = Instant.now()
      val flowShardRegion = TestProbe()
      // The auto-pilot stands in for the flow shard region and answers the
      // two command envelopes the facade is expected to forward.
      flowShardRegion.setAutoPilot(
        (sender: ActorRef, msg: Any) =>
          msg match {
            case Flow.CommandEnvelope("akka", Flow.GetPosts(Long.MaxValue, Int.MaxValue)) =>
              sender ! Flow.Posts(Vector(Flow.Post(0, "Akka rocks!", time)))
              KeepRunning
            case Flow.CommandEnvelope("akka", Flow.AddPost(text)) =>
              sender ! Flow.PostAdded("akka", Flow.Post(1, text, time))
              KeepRunning
          }
      )
      val flowFacade =
        system.actorOf(FlowFacade(system.deadLetters, system.deadLetters, flowShardRegion.ref))
      flowFacade ! GetPosts("", Long.MaxValue, Int.MaxValue)
      sender.expectMsg(InvalidCommand("name empty"))
      // Posts can only be read from flows that exist.
      flowFacade ! GetPosts("akka", Long.MaxValue, Int.MaxValue)
      sender.expectMsg(FlowUnknown("akka"))
      flowFacade ! AddFlow("Akka")
      sender.expectMsg(FlowAdded(FlowDesc("akka", "Akka")))
      flowFacade ! GetPosts("akka", Long.MaxValue, Int.MaxValue)
      sender.expectMsg(Flow.Posts(Vector(Flow.Post(0, "Akka rocks!", time))))
      flowFacade ! AddPost("", "Scala rocks!")
      sender.expectMsg(InvalidCommand("name empty"))
      // Posts can only be added to flows that exist.
      flowFacade ! AddPost("scala", "Scala rocks!")
      sender.expectMsg(FlowUnknown("scala"))
      flowFacade ! AddPost("akka", "Scala rocks!")
      sender.expectMsg(Flow.PostAdded("akka", Flow.Post(1, "Scala rocks!", time)))
    }
    "correctly update DistributedData" in {
      // Uses the real replicator to observe the LWWMap being updated.
      val replicator = DistributedData(system).replicator
      val subscriber = TestProbe()
      val flowFacade =
        system.actorOf(FlowFacade(system.deadLetters, replicator, system.deadLetters))
      replicator ! Subscribe(flows, subscriber.ref)
      flowFacade ! AddFlow("Akka")
      subscriber.expectMsgPF(hint = """expected `Changed(`flows`) with Set("akka")`""") {
        case c @ Changed(`flows`) if c.get(flows).entries.keySet == Set("akka") => ()
      }
      flowFacade ! RemoveFlow("akka")
      subscriber.expectMsgPF(hint = """expected `Changed(`flows`) with entries.isEmpty`""") {
        case c @ Changed(`flows`) if c.get(flows).entries.isEmpty => ()
      }
    }
  }
}
| hseeberger/reactive-flows | src/test/scala/de/heikoseeberger/reactiveflows/FlowFacadeSpec.scala | Scala | apache-2.0 | 4,848 |
package scala
package collection
import scala.annotation.tailrec
import scala.language.higherKinds
import scala.math.Ordering
import Searching.{SearchResult, Found, InsertionPoint}
/** Base trait for indexed sequences that have efficient (effectively O(1)) `apply` and `length` */
trait IndexedSeq[+A] extends Seq[A] with IndexedSeqOps[A, IndexedSeq, IndexedSeq[A]]
object IndexedSeq extends SeqFactory.Delegate[IndexedSeq](immutable.IndexedSeq)
/** Base trait for indexed Seq operations */
trait IndexedSeqOps[+A, +CC[_], +C] extends Any with SeqOps[A, CC, C] { self =>

  def iterator(): Iterator[A] = view.iterator()

  /** Iterates from the last element down to the first without materializing
    * an intermediate collection; relies on efficient `apply` and `length`.
    */
  override def reverseIterator(): Iterator[A] = new AbstractIterator[A] {
    private var i = self.length
    def hasNext: Boolean = 0 < i
    def next(): A =
      if (0 < i) {
        i -= 1
        self(i)
      } else Iterator.empty.next()
  }

  override def view: IndexedView[A] = new IndexedView.Id[A](this)

  override protected def reversed: Iterable[A] = new IndexedView.Reverse(this)

  // Override transformation operations to use more efficient views than the default ones
  override def prepended[B >: A](elem: B): CC[B] = iterableFactory.from(new IndexedView.Prepended(elem, this))

  override def take(n: Int): C = fromSpecificIterable(new IndexedView.Take(this, n))

  override def takeRight(n: Int): C = fromSpecificIterable(new IndexedView.TakeRight(this, n))

  override def drop(n: Int): C = fromSpecificIterable(new IndexedView.Drop(this, n))

  override def dropRight(n: Int): C = fromSpecificIterable(new IndexedView.DropRight(this, n))

  override def map[B](f: A => B): CC[B] = iterableFactory.from(new IndexedView.Map(this, f))

  override def reverse: C = fromSpecificIterable(new IndexedView.Reverse(this))

  override def slice(from: Int, until: Int): C = fromSpecificIterable(new IndexedView.Slice(this, from, until))

  // Use Integer.compare instead of `length - len`: the subtraction can
  // overflow (e.g. a large length compared against Int.MinValue) and then
  // report the wrong sign. Only the sign of the result is part of the
  // contract, so this is caller-compatible.
  override def lengthCompare(len: Int): Int = Integer.compare(length, len)

  final override def knownSize: Int = length

  override def search[B >: A](elem: B)(implicit ord: Ordering[B]): SearchResult =
    binarySearch(elem, 0, length)(ord)

  override def search[B >: A](elem: B, from: Int, to: Int)(implicit ord: Ordering[B]): SearchResult =
    binarySearch(elem, from, to)(ord)

  /** Binary search on the half-open index range `[from, to)` of this sorted
    * sequence: yields `Found(i)` for a matching index, otherwise the
    * `InsertionPoint` that would keep the sequence sorted.
    */
  @tailrec
  private[this] def binarySearch[B >: A](elem: B, from: Int, to: Int)
                                        (implicit ord: Ordering[B]): SearchResult = {
    if (to == from) InsertionPoint(from) else {
      val idx = from+(to-from-1)/2
      math.signum(ord.compare(elem, apply(idx))) match {
        case -1 => binarySearch(elem, from, idx)(ord)
        case 1 => binarySearch(elem, idx + 1, to)(ord)
        case _ => Found(idx)
      }
    }
  }
}
| rorygraves/perf_tester | corpus/scala-library/src/main/scala/collection/IndexedSeq.scala | Scala | apache-2.0 | 2,692 |
/* Copyright 2009-2016 EPFL, Lausanne */
package leon
package verification
import purescala.Definitions.Program
import solvers._
/** A `LeonContext` specialised for the verification phase: carries the program
  * under analysis and the solver factory used to discharge verification
  * conditions, while forwarding all generic context state from the wrapped
  * `context`.
  */
class VerificationContext(
  context: LeonContext,
  val program: Program,
  val solverFactory: SolverFactory[Solver]
) extends LeonContext(
  context.reporter,
  context.interruptManager,
  context.options,
  context.files,
  context.classDir,
  context.timers
) {
  // Read lazily from the options; controls whether VCs are checked in parallel.
  lazy val checkInParallel: Boolean = context.findOptionOrDefault(VerificationPhase.optParallelVCs)
}
| epfl-lara/leon | src/main/scala/leon/verification/VerificationContext.scala | Scala | gpl-3.0 | 500 |
package org.jetbrains.plugins.scala.structureView
import com.intellij.lang.Language
import com.intellij.testFramework.UsefulTestCase.assertThrows
import org.jetbrains.plugins.scala.ScalaLanguage
import org.jetbrains.plugins.scala.icons.Icons.{ABSTRACT_CLASS, CASE_CLASS, CLASS, ENUM, EXTENSION, FUNCTION, OBJECT, TRAIT, TYPE_ALIAS, VAL, VAR}
import org.jetbrains.plugins.scala.structureView.ScalaStructureViewTestBase.Node
/** Structure-view tests run at the Scala 2 language level. */
class Scala2StructureViewTest extends ScalaStructureViewCommonTests {

  override protected def scalaLanguage: Language = ScalaLanguage.INSTANCE

  // NOTE: in Scala 2 top level functions/value/etc... are not supported,
  // but it still will not hurt to show them in the structure view even in a non-compilable code
  private val topLevelDefinitionsCode =
    """class MyClass()
      |
      |case class MyCaseClass()
      |
      |abstract class MyAbstractClass()
      |
      |trait MyTrait
      |
      |object MyObject
      |
      |type MyTypeAlias[T] = (String, T)
      |
      |val myValue = 1
      |
      |var myVariable = 2
      |
      |def myFunction: String = ???
      |
      |""".stripMargin

  // Expected structure-view nodes, one per top-level definition above.
  private val expectedTopLevelNodes: Seq[Node] = Seq(
    CLASS -> "MyClass()",
    CASE_CLASS -> "MyCaseClass()",
    ABSTRACT_CLASS -> "MyAbstractClass()",
    TRAIT -> "MyTrait",
    OBJECT -> "MyObject",
    TYPE_ALIAS -> "MyTypeAlias",
    VAL -> "myValue",
    VAR -> "myVariable",
    FUNCTION -> "myFunction: String"
  ).map { case (icon, text) => Node(icon, text) }

  def testTopLevelDefinitions_InRootPackage(): Unit =
    check(topLevelDefinitionsCode, expectedTopLevelNodes: _*)

  // Sanity check: `check` must fail when the expected icon is wrong.
  def testThatCheckMethodCorrectlyFailsOnWrongIcons(): Unit =
    assertThrows(
      classOf[org.junit.ComparisonFailure],
      null,
      () => check("""class A""", Node(OBJECT, "A"))
    )

  // Sanity check: `check` must fail when the expected name is wrong.
  def testThatCheckMethodCorrectlyFailsOnWrongNames(): Unit =
    assertThrows(
      classOf[org.junit.ComparisonFailure],
      null,
      () => check("""class A""", Node(CLASS, "B"))
    )

  // TODO:
  //  We could parse top level definitions in Scala, even though it's not compilable
  //  (we already do so in root package, for "script" files) and show the error later in annotator.
  //  def testTopLevelDefinitions_InPackage(): Unit =
  //    check("package aaa.bbb.ccc\n" + topLevelDefinitionsCode, expectedTopLevelNodes: _*)
}
| JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/structureView/Scala2StructureViewTest.scala | Scala | apache-2.0 | 2,515 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.util
import org.apache.spark.SparkFunSuite
import org.apache.spark.ml._
import org.apache.spark.ml.evaluation.Evaluator
import org.apache.spark.ml.feature.{Instance, LabeledPoint}
import org.apache.spark.ml.linalg.{Vector, Vectors}
import org.apache.spark.ml.param.ParamMap
import org.apache.spark.ml.param.shared.{HasFeaturesCol, HasLabelCol, HasWeightCol}
import org.apache.spark.ml.recommendation.{ALS, ALSModel}
import org.apache.spark.ml.tree.impl.TreeTests
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
/** Shared helpers for ML test suites: numeric-type conformance checks for
  * estimators and evaluators, DataFrame generators with numeric label
  * columns, and utilities for validating sample-weight semantics.
  */
object MLTestingUtils extends SparkFunSuite {
  /** Asserts that `model` shares its uid with the parent `estimator`, and
    * that copying the model preserves the parent reference and its uid.
    */
  def checkCopyAndUids[T <: Estimator[_]](estimator: T, model: Model[_]): Unit = {
    assert(estimator.uid === model.uid, "Model uid does not match parent estimator")
    // copied model must have the same parent
    val copied = model.copy(ParamMap.empty)
      .asInstanceOf[Model[_]]
    assert(copied.parent == model.parent)
    assert(copied.parent.uid == model.parent.uid)
  }

  /** Fits `estimator` on equivalent DataFrames whose label (and, when
    * supported, weight) columns use each numeric type, and runs `check`
    * against the model fit with DoubleType columns. Also verifies that
    * string-typed label/weight columns are rejected with a clear error.
    */
  def checkNumericTypes[M <: Model[M], T <: Estimator[M]](
      estimator: T,
      spark: SparkSession,
      isClassification: Boolean = true)(check: (M, M) => Unit): Unit = {
    val dfs = if (isClassification) {
      genClassifDFWithNumericLabelCol(spark)
    } else {
      genRegressionDFWithNumericLabelCol(spark)
    }
    // If the estimator supports weights, exercise the weight column as well.
    val finalEstimator = estimator match {
      case weighted: Estimator[M] with HasWeightCol =>
        weighted.set(weighted.weightCol, "weight")
        weighted
      case _ => estimator
    }
    val expected = finalEstimator.fit(dfs(DoubleType))
    val actuals = dfs.keys.filter(_ != DoubleType).map { t =>
      finalEstimator.fit(dfs(t))
    }
    actuals.foreach(actual => check(expected, actual))
    // A string label column must be rejected.
    val dfWithStringLabels = spark.createDataFrame(Seq(
      ("0", 1, Vectors.dense(0, 2, 3), 0.0)
    )).toDF("label", "weight", "features", "censor")
    val thrown = intercept[IllegalArgumentException] {
      estimator.fit(dfWithStringLabels)
    }
    assert(thrown.getMessage.contains(
      "Column label must be of type NumericType but was actually of type StringType"))
    // A string weight column must likewise be rejected.
    estimator match {
      case weighted: Estimator[M] with HasWeightCol =>
        val dfWithStringWeights = spark.createDataFrame(Seq(
          (0, "1", Vectors.dense(0, 2, 3), 0.0)
        )).toDF("label", "weight", "features", "censor")
        weighted.set(weighted.weightCol, "weight")
        val thrown = intercept[IllegalArgumentException] {
          weighted.fit(dfWithStringWeights)
        }
        assert(thrown.getMessage.contains(
          "Column weight must be of type NumericType but was actually of type StringType"))
      case _ =>
    }
  }

  /** Checks that `evaluator` yields the same metric for every numeric label
    * type, and rejects a string-typed label column.
    */
  def checkNumericTypes[T <: Evaluator](evaluator: T, spark: SparkSession): Unit = {
    val dfs = genEvaluatorDFWithNumericLabelCol(spark, "label", "prediction")
    val expected = evaluator.evaluate(dfs(DoubleType))
    val actuals = dfs.keys.filter(_ != DoubleType).map(t => evaluator.evaluate(dfs(t)))
    actuals.foreach(actual => assert(expected === actual))
    val dfWithStringLabels = spark.createDataFrame(Seq(
      ("0", 0d)
    )).toDF("label", "prediction")
    val thrown = intercept[IllegalArgumentException] {
      evaluator.evaluate(dfWithStringLabels)
    }
    assert(thrown.getMessage.contains(
      "Column label must be of type NumericType but was actually of type StringType"))
  }

  /** Generates one equivalent binary-classification DataFrame per numeric
    * type, with the label (and a random 0/1 weight column) cast to that type.
    */
  def genClassifDFWithNumericLabelCol(
      spark: SparkSession,
      labelColName: String = "label",
      featuresColName: String = "features",
      weightColName: String = "weight"): Map[NumericType, DataFrame] = {
    val df = spark.createDataFrame(Seq(
      (0, Vectors.dense(0, 2, 3)),
      (1, Vectors.dense(0, 3, 1)),
      (0, Vectors.dense(0, 2, 2)),
      (1, Vectors.dense(0, 3, 9)),
      (0, Vectors.dense(0, 2, 6))
    )).toDF(labelColName, featuresColName)
    val types =
      Seq(ShortType, LongType, IntegerType, FloatType, ByteType, DoubleType, DecimalType(10, 0))
    types.map { t =>
      val castDF = df.select(col(labelColName).cast(t), col(featuresColName))
      t -> TreeTests.setMetadata(castDF, 2, labelColName, featuresColName)
        .withColumn(weightColName, round(rand(seed = 42)).cast(t))
    }.toMap
  }

  /** Generates one equivalent regression DataFrame per numeric type, with the
    * label cast to that type plus constant censor and random weight columns.
    */
  def genRegressionDFWithNumericLabelCol(
      spark: SparkSession,
      labelColName: String = "label",
      weightColName: String = "weight",
      featuresColName: String = "features",
      censorColName: String = "censor"): Map[NumericType, DataFrame] = {
    val df = spark.createDataFrame(Seq(
      (1, Vectors.dense(1)),
      (2, Vectors.dense(2)),
      (3, Vectors.dense(3)),
      (4, Vectors.dense(4))
    )).toDF(labelColName, featuresColName)
    val types =
      Seq(ShortType, LongType, IntegerType, FloatType, ByteType, DoubleType, DecimalType(10, 0))
    types.map { t =>
      val castDF = df.select(col(labelColName).cast(t), col(featuresColName))
      t -> TreeTests.setMetadata(castDF, 0, labelColName, featuresColName)
        .withColumn(censorColName, lit(0.0))
        .withColumn(weightColName, round(rand(seed = 42)).cast(t))
    }.toMap
  }

  /** Generates one equivalent (label, prediction) DataFrame per numeric type
    * for exercising evaluators.
    */
  def genEvaluatorDFWithNumericLabelCol(
      spark: SparkSession,
      labelColName: String = "label",
      predictionColName: String = "prediction"): Map[NumericType, DataFrame] = {
    val df = spark.createDataFrame(Seq(
      (0, 0d),
      (1, 1d),
      (2, 2d),
      (3, 3d),
      (4, 4d)
    )).toDF(labelColName, predictionColName)
    val types =
      Seq(ShortType, LongType, IntegerType, FloatType, ByteType, DoubleType, DecimalType(10, 0))
    types
      .map(t => t -> df.select(col(labelColName).cast(t), col(predictionColName)))
      .toMap
  }

  /**
   * Given a DataFrame, generate two output DataFrames: one having the original rows oversampled
   * an integer number of times, and one having the original rows but with a column of weights
   * proportional to the number of oversampled instances in the oversampled DataFrames.
   */
  def genEquivalentOversampledAndWeightedInstances(
      data: Dataset[LabeledPoint],
      seed: Long): (Dataset[Instance], Dataset[Instance]) = {
    import data.sparkSession.implicits._
    val rng = new scala.util.Random(seed)
    val sample: () => Int = () => rng.nextInt(10) + 1
    val sampleUDF = udf(sample)
    val rawData = data.select("label", "features").withColumn("samples", sampleUDF())
    val overSampledData = rawData.rdd.flatMap { case Row(label: Double, features: Vector, n: Int) =>
      Iterator.fill(n)(Instance(label, 1.0, features))
    }.toDS()
    // Re-seed so the weighted pass sees the same per-row sample counts.
    rng.setSeed(seed)
    val weightedData = rawData.rdd.map { case Row(label: Double, features: Vector, n: Int) =>
      Instance(label, n.toDouble, features)
    }.toDS()
    (overSampledData, weightedData)
  }

  /**
   * Helper function for testing sample weights. Tests that oversampling each point is equivalent
   * to assigning a sample weight proportional to the number of samples for each point.
   */
  def testOversamplingVsWeighting[M <: Model[M], E <: Estimator[M]](
      data: Dataset[LabeledPoint],
      estimator: E with HasWeightCol,
      modelEquals: (M, M) => Unit,
      seed: Long): Unit = {
    val (overSampledData, weightedData) = genEquivalentOversampledAndWeightedInstances(
      data, seed)
    val weightedModel = estimator.set(estimator.weightCol, "weight").fit(weightedData)
    val overSampledModel = estimator.set(estimator.weightCol, "").fit(overSampledData)
    modelEquals(weightedModel, overSampledModel)
  }

  /**
   * Helper function for testing sample weights. Tests that injecting a large number of outliers
   * with very small sample weights does not affect fitting. The predictor should learn the true
   * model despite the outliers.
   */
  def testOutliersWithSmallWeights[M <: Model[M], E <: Estimator[M]](
      data: Dataset[LabeledPoint],
      estimator: E with HasWeightCol,
      numClasses: Int,
      modelEquals: (M, M) => Unit,
      outlierRatio: Int): Unit = {
    import data.sqlContext.implicits._
    // For each point, inject `outlierRatio` mislabeled copies with tiny weight.
    val outlierDS = data.withColumn("weight", lit(1.0)).as[Instance].flatMap {
      case Instance(l, w, f) =>
        val outlierLabel = if (numClasses == 0) -l else numClasses - l - 1
        List.fill(outlierRatio)(Instance(outlierLabel, 0.0001, f)) ++ List(Instance(l, w, f))
    }
    val trueModel = estimator.set(estimator.weightCol, "").fit(data)
    val outlierModel = estimator.set(estimator.weightCol, "weight").fit(outlierDS)
    modelEquals(trueModel, outlierModel)
  }

  /**
   * Helper function for testing sample weights. Tests that giving constant weights to each data
   * point yields the same model, regardless of the magnitude of the weight.
   */
  def testArbitrarilyScaledWeights[M <: Model[M], E <: Estimator[M]](
      data: Dataset[LabeledPoint],
      estimator: E with HasWeightCol,
      modelEquals: (M, M) => Unit): Unit = {
    estimator.set(estimator.weightCol, "weight")
    val models = Seq(0.001, 1.0, 1000.0).map { w =>
      val df = data.withColumn("weight", lit(w))
      estimator.fit(df)
    }
    models.sliding(2).foreach { case Seq(m1, m2) => modelEquals(m1, m2)}
  }
}
| brad-kaiser/spark | mllib/src/test/scala/org/apache/spark/ml/util/MLTestingUtils.scala | Scala | apache-2.0 | 10,030 |
/**
* Copyright 2013, 2018 Gianluca Amato
*
* This file is part of JANDOM: JVM-based Analyzer for Numerical DOMains
* JANDOM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* JANDOM is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of a
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with JANDOM. If not, see <http://www.gnu.org/licenses/>.
*/
package it.unich.jandom.targets.slil
import it.unich.jandom.domains.numerical.NumericalProperty
import it.unich.jandom.targets.{Annotation, Environment, lts}
import it.unich.jandom.ui.output.{OutputBuilder, TextOutputBuilder}
/**
 * The abstract class for program statements. Each object in SLILStmt represents a statement
 * of a simple imperative language.
 */
abstract class SLILStmt extends SLILTarget {
  import AnalysisPhase._

  /**
   * A method for sending the statement with related annotations to an output builder.
   *
   * @param ann the annotation to print together with the program
   * @param ob the output builder to use for printing the result
   * @param env an optional environment used to produce correct variable names
   */
  def outputAnnotation[T <: NumericalProperty[_]](ann: Annotation[ProgramPoint, T], ob: OutputBuilder,
                                                  env: Environment = Environment()): Unit

  /**
   * A method for printing the statement the with related annotations.
   *
   * @param ann the annotation to print together with the program
   * @param env an optional environment used to produce correct variable names
   * @return the string representation of the program
   */
  def mkString[T <: NumericalProperty[_]](ann: Annotation[ProgramPoint, T], env: Environment = Environment()): String = {
    // Render into a fresh text builder and return the accumulated text.
    val ob = TextOutputBuilder()
    outputAnnotation(ann, ob)
    ob.toString
  }

  /**
   * The analyzer for a SLIL statement. This methods is different from the one declared in Target since it takes
   * an annotations as a parameter, and update it with the result of the analysis. Moreover, it returns a numerical
   * property as a result instead of an annotation.
   *
   * @param input the property at the program point before the statement
   * @param params the parameter which control the analysis
   * @param phase the current analysis phase
   * @param ann an annotation where to put informations on the inner program points
   * @return the property at the end of the statement
   */
  def analyzeStmt(params: Parameters)(input: params.Property, phase: AnalysisPhase,
                                      ann: Annotation[ProgramPoint, params.Property]): params.Property

  /**
   * @inheritdoc
   * A statement is analyzed under the assumption that initially variables
   * may assume all possible values.
   */
  def analyze(params: Parameters): Annotation[ProgramPoint, params.Property] = {
    val ann = getAnnotation[params.Property]
    // Start from the top element: every variable may take any value.
    val input = params.domain.top(numvars)
    analyzeStmt(params)(input, AscendingRestart, ann)
    ann
  }

  /**
   * Returns true if `that` is syntactically equal to `this`.
   */
  def syntacticallyEquals(that: SLILStmt): Boolean

  /**
   * The number of variables in the statement.
   */
  val numvars: Int

  /**
   * This method builds a set of transitions and locations corresponding to the program statement. The initial and
   * final locations are passed as parameters.
   *
   * @param prev the incoming program point
   * @param next the outgoing program point
   * @return a pair `(m,s)` where `m` is map from program points in this statement to locations and `s` is a sequence
   *         of transitions from `prev` to `next`
   */
  def toLTS(prev: lts.Location, next: lts.Location): (Map[ProgramPoint, lts.Location], Seq[lts.Transition])

  /** The last program point of the statement, if any; `None` by default. */
  val lastPP: Option[ProgramPoint] = None
}
| amato-gianluca/Jandom | core/src/main/scala/it/unich/jandom/targets/slil/SLILStmt.scala | Scala | lgpl-3.0 | 4,282 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and retrieves a sample of code snippets that match specific criteria, giving a quick overview of the dataset's contents without deeper analysis.