| code (stringlengths 5–1M) | repo_name (stringlengths 5–109) | path (stringlengths 6–208) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 5–1M) |
|---|---|---|---|---|---|
package io.udash.web.commons.views
import com.avsystem.commons._
import com.avsystem.commons.misc.AbstractCase
import io.udash._
import io.udash.bootstrap._
import io.udash.bootstrap.alert.UdashAlert
import io.udash.web.guide.markdown.{MarkdownPage, MarkdownPageRPC}
import io.udash.web.guide.styles.MarkdownStyles
import org.scalajs.dom
import scala.util.{Failure, Success}
trait MarkdownPageState extends State {
def page: MarkdownPage
}
final case class MarkdownModel(
content: String = "",
error: String = ""
)
object MarkdownModel extends HasModelPropertyCreator[MarkdownModel] {
implicit val blank: Blank[MarkdownModel] = Blank.Simple(apply())
}
final case class MarkdownPageViewFactory[T <: MarkdownPageState]()(
rpc: MarkdownPageRPC
) extends AbstractCase with ViewFactory[T] {
override def create(): (MarkdownView, MarkdownPresenter[T]) = {
val model: ModelProperty[MarkdownModel] = ModelProperty.blank
(new MarkdownView(model), new MarkdownPresenter[T](model, rpc))
}
}
final class MarkdownPresenter[T <: MarkdownPageState](
model: ModelProperty[MarkdownModel],
rpc: MarkdownPageRPC
) extends Presenter[T] {
override def handleState(state: T): Unit = {
model.set(MarkdownModel.blank.value)
rpc.loadContent(state.page).onCompleteNow {
case Success(rawHtml) =>
model.subProp(_.content).set(rawHtml)
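// Once the markdown content is set, scroll to the element referenced by the URL hash,
// polling every 100 ms until the element exists and the window has reached its offset.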
def scroll(): Unit =
dom.window.location.hash.opt.collect { case id if id.nonEmpty => id.tail }.foreach { id =>
dom.document.getElementById(id).opt match {
case Opt(element) =>
def loop(): Unit = {
val offsetTop = element.asInstanceOf[scalajs.js.Dynamic].offsetTop.asInstanceOf[Int]
if (dom.window.asInstanceOf[scalajs.js.Dynamic].scrollY.asInstanceOf[Int] != offsetTop) {
dom.window.scrollTo(0, offsetTop)
scalajs.js.timers.setTimeout(100)(loop())
}
}
loop()
case Opt.Empty =>
scalajs.js.timers.setTimeout(100)(scroll())
}
}
scroll()
case Failure(exception) => model.subProp(_.error).set(exception.toString)
}
}
}
final class MarkdownView(model: ReadableModelProperty[MarkdownModel]) extends View {
import io.udash.css.CssView._
import scalatags.JsDom.all._
override val getTemplate: Modifier = ISeq(
produce(model.roSubProp(_.error)) { error =>
error.opt.filter(_.nonEmpty).map(e =>
div(cls := "bootstrap")(
h1("Oops! Something went wrong :("),
p("An error occurred during rendering your page:"),
UdashAlert(alertStyle = BootstrapStyles.Color.Danger.toProperty)(e).render
).render
).toList
},
produce(model.roSubProp(_.content)) { content =>
content.opt.filter(_.nonEmpty).map(c =>
div(MarkdownStyles.markdownPage)(raw(c)).render
).toList
}
)
}
| UdashFramework/udash-core | guide/commons/.js/src/main/scala/io/udash/web/commons/views/MarkdownView.scala | Scala | apache-2.0 | 2,972 |
package org.msgpack.core
import org.scalatest.exceptions.TestFailedException
import org.msgpack.core.MessagePack.Code
import scala.util.Random
import org.msgpack.value.ValueType
/**
* Created on 2014/05/07.
*/
class MessageFormatTest extends MessagePackSpec {
"MessageFormat" should {
"cover all byte codes" in {
def checkV(b:Byte, tpe:ValueType) {
try
MessageFormat.valueOf(b).getValueType shouldBe tpe
catch {
case e:TestFailedException =>
error(f"Failure when looking at byte ${b}%02x")
throw e
}
}
def checkF(b:Byte, f:MessageFormat) {
MessageFormat.valueOf(b) shouldBe f
}
def check(b:Byte, tpe:ValueType, f:MessageFormat) {
checkV(b, tpe)
checkF(b, f)
}
for(i <- 0 until 0x7f)
check(i.toByte, ValueType.INTEGER, MessageFormat.POSFIXINT)
for(i <- 0x80 until 0x8f)
check(i.toByte, ValueType.MAP, MessageFormat.FIXMAP)
for(i <- 0x90 until 0x9f)
check(i.toByte, ValueType.ARRAY, MessageFormat.FIXARRAY)
check(Code.NIL, ValueType.NIL, MessageFormat.NIL)
MessageFormat.valueOf(Code.NEVER_USED) shouldBe MessageFormat.NEVER_USED
for(i <- Seq(Code.TRUE, Code.FALSE))
check(i, ValueType.BOOLEAN, MessageFormat.BOOLEAN)
check(Code.BIN8, ValueType.BINARY, MessageFormat.BIN8)
check(Code.BIN16, ValueType.BINARY, MessageFormat.BIN16)
check(Code.BIN32, ValueType.BINARY, MessageFormat.BIN32)
check(Code.FIXEXT1, ValueType.EXTENDED, MessageFormat.FIXEXT1)
check(Code.FIXEXT2, ValueType.EXTENDED, MessageFormat.FIXEXT2)
check(Code.FIXEXT4, ValueType.EXTENDED, MessageFormat.FIXEXT4)
check(Code.FIXEXT8, ValueType.EXTENDED, MessageFormat.FIXEXT8)
check(Code.FIXEXT16, ValueType.EXTENDED, MessageFormat.FIXEXT16)
check(Code.EXT8, ValueType.EXTENDED, MessageFormat.EXT8)
check(Code.EXT16, ValueType.EXTENDED, MessageFormat.EXT16)
check(Code.EXT32, ValueType.EXTENDED, MessageFormat.EXT32)
check(Code.INT8, ValueType.INTEGER, MessageFormat.INT8)
check(Code.INT16, ValueType.INTEGER, MessageFormat.INT16)
check(Code.INT32, ValueType.INTEGER, MessageFormat.INT32)
check(Code.INT64, ValueType.INTEGER, MessageFormat.INT64)
check(Code.UINT8, ValueType.INTEGER, MessageFormat.UINT8)
check(Code.UINT16, ValueType.INTEGER, MessageFormat.UINT16)
check(Code.UINT32, ValueType.INTEGER, MessageFormat.UINT32)
check(Code.UINT64, ValueType.INTEGER, MessageFormat.UINT64)
check(Code.STR8, ValueType.STRING, MessageFormat.STR8)
check(Code.STR16, ValueType.STRING, MessageFormat.STR16)
check(Code.STR32, ValueType.STRING, MessageFormat.STR32)
check(Code.FLOAT32, ValueType.FLOAT, MessageFormat.FLOAT32)
check(Code.FLOAT64, ValueType.FLOAT, MessageFormat.FLOAT64)
check(Code.ARRAY16, ValueType.ARRAY, MessageFormat.ARRAY16)
check(Code.ARRAY32, ValueType.ARRAY, MessageFormat.ARRAY32)
for(i <- 0xe0 to 0xff)
check(i.toByte, ValueType.INTEGER, MessageFormat.NEGFIXINT)
}
"improve the valueOf performance" in {
val N = 1000000
val idx = (0 until N).map(x => Random.nextInt(256).toByte).toArray[Byte]
// Initialize
MessageFormat.valueOf(0.toByte)
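// Compare the switch-based lookup (toMessageFormat) with the table-based lookup (valueOf) over the same random bytes.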
time("lookup", repeat = 10) {
block("switch") {
var i = 0
while(i < N) {
MessageFormat.toMessageFormat(idx(i))
i += 1
}
}
block("table") {
var i = 0
while(i < N) {
MessageFormat.valueOf(idx(i))
i += 1
}
}
}
}
}
}
| xerial/msgpack-java | msgpack-core/src/test/scala/org/msgpack/core/MessageFormatTest.scala | Scala | apache-2.0 | 3,723 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml
import scala.collection.JavaConverters._
import org.apache.hadoop.fs.Path
import org.mockito.ArgumentMatchers.{any, eq => meq}
import org.mockito.Mockito.when
import org.scalatest.mockito.MockitoSugar.mock
import org.apache.spark.SparkFunSuite
import org.apache.spark.ml.Pipeline.SharedReadWrite
import org.apache.spark.ml.feature.{HashingTF, MinMaxScaler}
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.ml.param.{IntParam, ParamMap}
import org.apache.spark.ml.util._
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.sql.{DataFrame, Dataset}
import org.apache.spark.sql.types.StructType
class PipelineSuite extends SparkFunSuite with MLlibTestSparkContext with DefaultReadWriteTest {
import testImplicits._
abstract class MyModel extends Model[MyModel]
test("pipeline") {
val estimator0 = mock[Estimator[MyModel]]
val model0 = mock[MyModel]
val transformer1 = mock[Transformer]
val estimator2 = mock[Estimator[MyModel]]
val model2 = mock[MyModel]
val transformer3 = mock[Transformer]
val dataset0 = mock[DataFrame]
val dataset1 = mock[DataFrame]
val dataset2 = mock[DataFrame]
val dataset3 = mock[DataFrame]
val dataset4 = mock[DataFrame]
when(dataset0.toDF).thenReturn(dataset0)
when(dataset1.toDF).thenReturn(dataset1)
when(dataset2.toDF).thenReturn(dataset2)
when(dataset3.toDF).thenReturn(dataset3)
when(dataset4.toDF).thenReturn(dataset4)
when(estimator0.copy(any[ParamMap])).thenReturn(estimator0)
when(model0.copy(any[ParamMap])).thenReturn(model0)
when(transformer1.copy(any[ParamMap])).thenReturn(transformer1)
when(estimator2.copy(any[ParamMap])).thenReturn(estimator2)
when(model2.copy(any[ParamMap])).thenReturn(model2)
when(transformer3.copy(any[ParamMap])).thenReturn(transformer3)
when(estimator0.fit(meq(dataset0))).thenReturn(model0)
when(model0.transform(meq(dataset0))).thenReturn(dataset1)
when(model0.parent).thenReturn(estimator0)
when(transformer1.transform(meq(dataset1))).thenReturn(dataset2)
when(estimator2.fit(meq(dataset2))).thenReturn(model2)
when(model2.transform(meq(dataset2))).thenReturn(dataset3)
when(model2.parent).thenReturn(estimator2)
when(transformer3.transform(meq(dataset3))).thenReturn(dataset4)
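// Stubbed flow: estimator0.fit(dataset0) yields model0, model0.transform(dataset0) yields dataset1,
// transformer1 maps it to dataset2, estimator2.fit/model2.transform yield dataset3, and transformer3 yields dataset4.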
val pipeline = new Pipeline()
.setStages(Array(estimator0, transformer1, estimator2, transformer3))
val pipelineModel = pipeline.fit(dataset0)
MLTestingUtils.checkCopyAndUids(pipeline, pipelineModel)
assert(pipelineModel.stages.length === 4)
assert(pipelineModel.stages(0).eq(model0))
assert(pipelineModel.stages(1).eq(transformer1))
assert(pipelineModel.stages(2).eq(model2))
assert(pipelineModel.stages(3).eq(transformer3))
val output = pipelineModel.transform(dataset0)
assert(output.eq(dataset4))
}
test("pipeline with duplicate stages") {
val estimator = mock[Estimator[MyModel]]
val pipeline = new Pipeline()
.setStages(Array(estimator, estimator))
val dataset = mock[DataFrame]
intercept[IllegalArgumentException] {
pipeline.fit(dataset)
}
}
test("Pipeline.copy") {
val hashingTF = new HashingTF()
.setNumFeatures(100)
val pipeline = new Pipeline("pipeline").setStages(Array[Transformer](hashingTF))
val copied = pipeline.copy(ParamMap(hashingTF.numFeatures -> 10))
assert(copied.uid === pipeline.uid,
"copy should create an instance with the same UID")
assert(copied.getStages(0).asInstanceOf[HashingTF].getNumFeatures === 10,
"copy should handle extra stage params")
}
test("PipelineModel.copy") {
val hashingTF = new HashingTF()
.setNumFeatures(100)
val model = new PipelineModel("pipelineModel", Array[Transformer](hashingTF))
.setParent(new Pipeline())
val copied = model.copy(ParamMap(hashingTF.numFeatures -> 10))
assert(copied.uid === model.uid,
"copy should create an instance with the same UID")
assert(copied.stages(0).asInstanceOf[HashingTF].getNumFeatures === 10,
"copy should handle extra stage params")
assert(copied.parent === model.parent,
"copy should create an instance with the same parent")
}
test("pipeline model constructors") {
val transform0 = mock[Transformer]
val model1 = mock[MyModel]
val stages = Array(transform0, model1)
val pipelineModel0 = new PipelineModel("pipeline0", stages)
assert(pipelineModel0.uid === "pipeline0")
assert(pipelineModel0.stages === stages)
val stagesAsList = stages.toList.asJava
val pipelineModel1 = new PipelineModel("pipeline1", stagesAsList)
assert(pipelineModel1.uid === "pipeline1")
assert(pipelineModel1.stages === stages)
}
test("Pipeline read/write") {
val writableStage = new WritableStage("writableStage").setIntParam(56)
val pipeline = new Pipeline().setStages(Array(writableStage))
val pipeline2 = testDefaultReadWrite(pipeline, testParams = false)
assert(pipeline2.getStages.length === 1)
assert(pipeline2.getStages(0).isInstanceOf[WritableStage])
val writableStage2 = pipeline2.getStages(0).asInstanceOf[WritableStage]
assert(writableStage.getIntParam === writableStage2.getIntParam)
}
test("Pipeline read/write with non-Writable stage") {
val unWritableStage = new UnWritableStage("unwritableStage")
val unWritablePipeline = new Pipeline().setStages(Array(unWritableStage))
withClue("Pipeline.write should fail when Pipeline contains non-Writable stage") {
intercept[UnsupportedOperationException] {
unWritablePipeline.write
}
}
}
test("PipelineModel read/write") {
val writableStage = new WritableStage("writableStage").setIntParam(56)
val pipeline =
new PipelineModel("pipeline_89329327", Array(writableStage.asInstanceOf[Transformer]))
val pipeline2 = testDefaultReadWrite(pipeline, testParams = false)
assert(pipeline2.stages.length === 1)
assert(pipeline2.stages(0).isInstanceOf[WritableStage])
val writableStage2 = pipeline2.stages(0).asInstanceOf[WritableStage]
assert(writableStage.getIntParam === writableStage2.getIntParam)
}
test("PipelineModel read/write: getStagePath") {
val stageUid = "myStage"
val stagesDir = new Path("pipeline", "stages").toString
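// Stage directory names are zero-padded to the width of the stage count so they sort lexicographically
// (e.g. stage 1 of 10 becomes "01").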
def testStage(stageIdx: Int, numStages: Int, expectedPrefix: String): Unit = {
val path = SharedReadWrite.getStagePath(stageUid, stageIdx, numStages, stagesDir)
val expected = new Path(stagesDir, expectedPrefix + "_" + stageUid).toString
assert(path === expected)
}
testStage(0, 1, "0")
testStage(0, 9, "0")
testStage(0, 10, "00")
testStage(1, 10, "01")
testStage(12, 999, "012")
}
test("PipelineModel read/write with non-Writable stage") {
val unWritableStage = new UnWritableStage("unwritableStage")
val unWritablePipeline =
new PipelineModel("pipeline_328957", Array(unWritableStage.asInstanceOf[Transformer]))
withClue("PipelineModel.write should fail when PipelineModel contains non-Writable stage") {
intercept[UnsupportedOperationException] {
unWritablePipeline.write
}
}
}
test("pipeline validateParams") {
val df = Seq(
(1, Vectors.dense(0.0, 1.0, 4.0), 1.0),
(2, Vectors.dense(1.0, 0.0, 4.0), 2.0),
(3, Vectors.dense(1.0, 0.0, 5.0), 3.0),
(4, Vectors.dense(0.0, 0.0, 5.0), 4.0)
).toDF("id", "features", "label")
intercept[IllegalArgumentException] {
val scaler = new MinMaxScaler()
.setInputCol("features")
.setOutputCol("features_scaled")
.setMin(10)
.setMax(0)
val pipeline = new Pipeline().setStages(Array(scaler))
pipeline.fit(df)
}
}
test("Pipeline.setStages should handle Java Arrays being non-covariant") {
val stages0 = Array(new UnWritableStage("b"))
val stages1 = Array(new WritableStage("a"))
val steps = stages0 ++ stages1
val p = new Pipeline().setStages(steps)
}
}
/**
* Used to test [[Pipeline]] with `MLWritable` stages
*/
class WritableStage(override val uid: String) extends Transformer with MLWritable {
final val intParam: IntParam = new IntParam(this, "intParam", "doc")
def getIntParam: Int = $(intParam)
def setIntParam(value: Int): this.type = set(intParam, value)
setDefault(intParam -> 0)
override def copy(extra: ParamMap): WritableStage = defaultCopy(extra)
override def write: MLWriter = new DefaultParamsWriter(this)
override def transform(dataset: Dataset[_]): DataFrame = dataset.toDF
override def transformSchema(schema: StructType): StructType = schema
}
object WritableStage extends MLReadable[WritableStage] {
override def read: MLReader[WritableStage] = new DefaultParamsReader[WritableStage]
override def load(path: String): WritableStage = super.load(path)
}
/**
* Used to test [[Pipeline]] with non-`MLWritable` stages
*/
class UnWritableStage(override val uid: String) extends Transformer {
final val intParam: IntParam = new IntParam(this, "intParam", "doc")
setDefault(intParam -> 0)
override def copy(extra: ParamMap): UnWritableStage = defaultCopy(extra)
override def transform(dataset: Dataset[_]): DataFrame = dataset.toDF
override def transformSchema(schema: StructType): StructType = schema
}
| pgandhi999/spark | mllib/src/test/scala/org/apache/spark/ml/PipelineSuite.scala | Scala | apache-2.0 | 10,193 |
package com.arcusys.learn.liferay.update.version260.certificate
import com.arcusys.learn.liferay.update.version240.certificate.CertificateTableComponent
import com.arcusys.valamis.model.PeriodTypes
import com.arcusys.valamis.persistence.common.DbNameUtils._
import com.arcusys.valamis.persistence.common.SlickProfile
trait ActivityGoalTableComponent extends CertificateTableComponent { self: SlickProfile =>
import driver.simple._
type ActivityGoal = (Long, String, Int, Int, PeriodTypes.Value)
class ActivityGoalTable(tag: Tag) extends Table[ActivityGoal](tag, tblName("CERT_ACTIVITY_GOAL")) {
implicit val ValidPeriodTypeMapper = MappedColumnType.base[PeriodTypes.PeriodType, String](
s => s.toString,
s => PeriodTypes.withName(s)
)
def certificateId = column[Long]("CERTIFICATE_ID")
def activityName = column[String]("ACTIVITY_NAME")
def count = column[Int]("COUNT")
def periodValue = column[Int]("PERIOD_VALUE")
def periodType = column[PeriodTypes.PeriodType]("PERIOD_TYPE")
def * = (certificateId, activityName, count, periodValue, periodType)
def PK = primaryKey(pkName("CERT_ACTIVITY_GOAL"), (certificateId, activityName))
def certificateFK = foreignKey(fkName("CERT_ACTIVITY_TO_CERT"), certificateId, TableQuery[CertificateTable])(x => x.id, onDelete = ForeignKeyAction.Cascade)
}
val activityGoals = TableQuery[ActivityGoalTable]
}
| igor-borisov/valamis | learn-portlet/src/main/scala/com/arcusys/learn/liferay/update/version260/certificate/ActivityGoalTableComponent.scala | Scala | gpl-3.0 | 1,409 |
package com.avidmouse.spark.mongodb.examples.streaming
import org.apache.spark.SparkConf
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.StreamingContext._
import com.avidmouse.spark.mongodb.Mongo
import com.avidmouse.spark.streaming.mongodb.dstream._
import play.api.libs.json._
/**
* @author avidmouse
* @version 0.1, 14-6-25
*/
object NetworkWordCount {
def main(args: Array[String]) {
if (args.length < 2) {
System.err.println("Usage: NetworkWordCount <hostname> <port> <mongoURI> <countCollection>")
System.exit(1)
}
// Create the context with a 1 second batch size
val sparkConf = new SparkConf().setAppName("NetworkWordCount")
val ssc = new StreamingContext(sparkConf, Seconds(1))
val mongo = Mongo(args(2))
// Create a socket stream on target ip:port and count the
// words in input stream of \n delimited text (e.g. generated by 'nc')
// Note that no duplication in storage level only for running locally.
// Replication necessary in distributed scenario for fault tolerance.
val lines = ssc.socketTextStream(args(0), args(1).toInt, StorageLevel.MEMORY_AND_DISK_SER)
val words = lines.flatMap(_.split(" "))
val wordCounts = words.map(x => (x, 1)).reduceByKey(_ + _)
wordCounts.map {
case (word, count) => Json.obj("word" -> word, "count" -> count)
}.saveAsMongoDocument(mongo.collection(args(3)))
ssc.start()
ssc.awaitTermination()
}
}
| avidmouse/mongo-spark | examples/src/main/scala/com/avidmouse/spark/mongodb/examples/streaming/NetworkWordCount.scala | Scala | apache-2.0 | 1,539 |
package org.http4s
package blazecore
package util
import cats.effect._
import cats.implicits._
import fs2._
import org.http4s.syntax.async._
import scala.concurrent._
private[http4s] trait EntityBodyWriter[F[_]] {
implicit protected def F: Effect[F]
/** The `ExecutionContext` on which to run computations, assumed to be stack safe. */
implicit protected def ec: ExecutionContext
/** Write a ByteVector to the wire.
* If a request is cancelled, or the stream is closed this method should
* return a failed Future with Cancelled as the exception
*
* @param chunk BodyChunk to write to wire
* @return a future letting you know when its safe to continue
*/
protected def writeBodyChunk(chunk: Chunk[Byte], flush: Boolean): Future[Unit]
/** Write the ending chunk and, in chunked encoding, a trailer to the
* wire. If a request is cancelled, or the stream is closed this
* method should return a failed Future with Cancelled as the
* exception, or a Future with a Boolean to indicate whether the
* connection is to be closed or not.
*
* @param chunk BodyChunk to write to wire
* @return a future letting you know when its safe to continue (if `false`) or
* to close the connection (if `true`)
*/
protected def writeEnd(chunk: Chunk[Byte]): Future[Boolean]
/** Called in the event of an Await failure to alert the pipeline to cleanup */
protected def exceptionFlush(): Future[Unit] = FutureUnit
/** Creates an effect that writes the contents of the EntityBody to the output.
* Cancelled exceptions fall through to the effect cb
* The writeBodyEnd triggers if there are no exceptions, and the result will
* be the result of the writeEnd call.
*
* @param p EntityBody to write out
* @return the Task which when run will unwind the Process
*/
def writeEntityBody(p: EntityBody[F]): F[Boolean] = {
val writeBody: F[Unit] = p.to(writeSink).compile.drain
val writeBodyEnd: F[Boolean] = F.fromFuture(writeEnd(Chunk.empty))
writeBody *> writeBodyEnd
}
/** Writes each of the body chunks, if the write fails it returns
* the failed future which throws an error.
* If it errors the error stream becomes the stream, which performs an
* exception flush and then the stream fails.
*/
private def writeSink: Sink[F, Byte] = { s =>
val writeStream: Stream[F, Unit] =
s.chunks.evalMap(chunk => F.fromFuture(writeBodyChunk(chunk, flush = false)))
val errorStream: Throwable => Stream[F, Unit] = e =>
Stream.eval(F.fromFuture(exceptionFlush())).flatMap(_ => Stream.raiseError(e))
writeStream.handleErrorWith(errorStream)
}
}
| reactormonk/http4s | blaze-core/src/main/scala/org/http4s/blazecore/util/EntityBodyWriter.scala | Scala | apache-2.0 | 2,682 |
import sbt._
import Keys._
object FPInScalaBuild extends Build {
val opts = Project.defaultSettings ++ Seq(
scalaVersion := "2.11.5",
resolvers += "Typesafe Repository" at "http://repo.typesafe.com/typesafe/releases/"
)
val scalatest = "org.scalatest" %% "scalatest" % "2.2.4" % "test"
val junit = "junit" % "junit" % "4.10" % "test"
lazy val root =
Project(id = "fpinscala",
base = file("."),
settings = opts ++ Seq(
onLoadMessage ~= (_ + nio2check())
)) aggregate (chapterCode, exercises, answers)
lazy val chapterCode =
Project(id = "chapter-code",
base = file("chaptercode"),
settings = opts)
lazy val exercises =
Project(id = "exercises",
base = file("exercises"),
settings = opts ++ Seq(
libraryDependencies ++= Seq(
scalatest,
junit
)
))
lazy val answers =
Project(id = "answers",
base = file("answers"),
settings = opts)
def nio2check(): String = {
val cls = "java.nio.channels.AsynchronousFileChannel"
try {Class.forName(cls); ""}
catch {case _: ClassNotFoundException =>
("\\nWARNING: JSR-203 \\"NIO.2\\" (" + cls + ") not found.\\n" +
"You are probably running Java < 1.7; answers will not compile.\\n" +
"You seem to be running " + System.getProperty("java.version") + ".\\n" +
"Try `project exercises' before compile, or upgrading your JDK.")
}
}
}
| nathanvick/fpinscala | project/Build.scala | Scala | mit | 1,543 |
/*****************************************
Emitting Generated Code
*******************************************/
class Snippet extends ((Array[Int])=>(Array[Int])) {
def apply(x0:Array[Int]): Array[Int] = {
val x1 = x0.length
val x2 = 100 - x1
val x3 = x2 > 10
val x6 = if (x3) {
val x4 = println("hello")
x4
} else {
()
}
x0
}
}
/*****************************************
End of Generated Code
*******************************************/
| RomanTsegelskyi/lms-truffle | src/out/shonan-hmm1b_dyn.check.scala | Scala | gpl-2.0 | 489 |
import scala.quoted._, scala.deriving.*
inline def mcr: Any = ${mcrImpl}
def mcrImpl(using ctx: Quotes): Expr[Any] = {
val tpl: (Expr[1], Expr[2], Expr[3]) = ('{1}, '{2}, '{3})
'{val res: (1, 2, 3) = ${Expr.ofTuple(tpl)}; res}
}
| dotty-staging/dotty | tests/pos-macros/toexproftuple.scala | Scala | apache-2.0 | 234 |
/*
* Copyright (c) 2012, 2013 Roberto Tyley
*
* This file is part of 'BFG Repo-Cleaner' - a tool for removing large
* or troublesome blobs from Git repositories.
*
* BFG Repo-Cleaner is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* BFG Repo-Cleaner is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see http://www.gnu.org/licenses/ .
*/
package com.madgag.git.bfg.cleaner
import com.madgag.collection.concurrent.ConcurrentMultiMap
import com.madgag.git._
import com.madgag.git.bfg.GitUtil._
import com.madgag.git.bfg.cleaner.protection.{ProtectedObjectCensus, ProtectedObjectDirtReport}
import com.madgag.git.bfg.model.{Tree, TreeSubtrees, _}
import com.madgag.git.bfg.{CleaningMapper, Memo, MemoFunc, MemoUtil}
import org.eclipse.jgit.lib.Constants._
import org.eclipse.jgit.lib._
import org.eclipse.jgit.revwalk.{RevCommit, RevTag, RevWalk}
object ObjectIdCleaner {
case class Config(protectedObjectCensus: ProtectedObjectCensus,
objectIdSubstitutor: ObjectIdSubstitutor = ObjectIdSubstitutor.OldIdsPublic,
commitNodeCleaners: Seq[CommitNodeCleaner] = Seq.empty,
treeEntryListCleaners: Seq[Cleaner[Seq[Tree.Entry]]] = Seq.empty,
treeBlobsCleaners: Seq[Cleaner[TreeBlobs]] = Seq.empty,
treeSubtreesCleaners: Seq[Cleaner[TreeSubtrees]] = Seq.empty,
// messageCleaners? - covers both Tag and Commits
objectChecker: Option[ObjectChecker] = None) {
lazy val commitNodeCleaner = CommitNodeCleaner.chain(commitNodeCleaners)
lazy val treeEntryListCleaner = Function.chain(treeEntryListCleaners)
lazy val treeBlobsCleaner = Function.chain(treeBlobsCleaners)
lazy val treeSubtreesCleaner:Cleaner[TreeSubtrees] = Function.chain(treeSubtreesCleaners)
}
}
/*
* Knows how to clean an object, and what objects are protected...
*/
class ObjectIdCleaner(config: ObjectIdCleaner.Config, objectDB: ObjectDatabase, implicit val revWalk: RevWalk) extends CleaningMapper[ObjectId] {
import config._
val threadLocalResources = objectDB.threadLocalResources
val changesByFilename = new ConcurrentMultiMap[FileName, (ObjectId, ObjectId)]
val deletionsByFilename = new ConcurrentMultiMap[FileName, ObjectId]
// want to enforce that once any value is returned, it is 'good' and therefore an identity-mapped key as well
val memo: Memo[ObjectId, ObjectId] = MemoUtil.concurrentCleanerMemo(protectedObjectCensus.fixedObjectIds)
val commitMemo: Memo[ObjectId, ObjectId] = MemoUtil.concurrentCleanerMemo()
val tagMemo: Memo[ObjectId, ObjectId] = MemoUtil.concurrentCleanerMemo()
val treeMemo: Memo[ObjectId, ObjectId] = MemoUtil.concurrentCleanerMemo(protectedObjectCensus.treeIds.toSet[ObjectId])
def apply(objectId: ObjectId): ObjectId = memoClean(objectId)
val memoClean = memo {
uncachedClean
}
def cleanedObjectMap(): Map[ObjectId, ObjectId] =
Seq(memoClean, cleanCommit, cleanTag, cleanTree).map(_.asMap()).reduce(_ ++ _)
def uncachedClean: (ObjectId) => ObjectId = {
objectId =>
threadLocalResources.reader().open(objectId).getType match {
case OBJ_COMMIT => cleanCommit(objectId)
case OBJ_TREE => cleanTree(objectId)
case OBJ_TAG => cleanTag(objectId)
case _ => objectId // we don't currently clean isolated blobs... only clean within a tree context
}
}
def getCommit(commitId: AnyObjectId): RevCommit = revWalk synchronized (commitId asRevCommit)
def getTag(tagId: AnyObjectId): RevTag = revWalk synchronized (tagId asRevTag)
val cleanCommit: MemoFunc[ObjectId, ObjectId] = commitMemo { commitId =>
val originalRevCommit = getCommit(commitId)
val originalCommit = Commit(originalRevCommit)
val cleanedArcs = originalCommit.arcs cleanWith this
val kit = new CommitNodeCleaner.Kit(threadLocalResources, originalRevCommit, originalCommit, cleanedArcs, apply)
val updatedCommitNode = commitNodeCleaner.fixer(kit)(originalCommit.node)
val updatedCommit = Commit(updatedCommitNode, cleanedArcs)
if (updatedCommit != originalCommit) {
val commitBytes = updatedCommit.toBytes
objectChecker.foreach(_.checkCommit(commitBytes))
threadLocalResources.inserter().insert(OBJ_COMMIT, commitBytes)
} else {
originalRevCommit
}
}
val cleanBlob: Cleaner[ObjectId] = identity // Currently a NO-OP, we only clean at treeblob level
val cleanTree: MemoFunc[ObjectId, ObjectId] = treeMemo { originalObjectId =>
val entries = Tree.entriesFor(originalObjectId)(threadLocalResources.reader())
val cleanedTreeEntries = treeEntryListCleaner(entries)
val tree = Tree(cleanedTreeEntries)
val fixedTreeBlobs = treeBlobsCleaner(tree.blobs)
val cleanedSubtrees = TreeSubtrees(treeSubtreesCleaner(tree.subtrees).entryMap.map {
case (name, treeId) => (name, cleanTree(treeId))
}).withoutEmptyTrees
val treeBlobsChanged = fixedTreeBlobs != tree.blobs
if (entries != cleanedTreeEntries || treeBlobsChanged || cleanedSubtrees != tree.subtrees) {
val updatedTree = tree copyWith(cleanedSubtrees, fixedTreeBlobs)
val treeFormatter = updatedTree.formatter
objectChecker.foreach(_.checkTree(treeFormatter.toByteArray))
val updatedTreeId = treeFormatter.insertTo(threadLocalResources.inserter())
if (treeBlobsChanged) {
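// Record, per filename, whether the blob was rewritten (old id paired with new id) or deleted from the tree.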
val changedFiles: Set[TreeBlobEntry] = tree.blobs.entries.toSet -- fixedTreeBlobs.entries.toSet
for (TreeBlobEntry(filename, _ , oldId) <- changedFiles) {
fixedTreeBlobs.entryMap.get(filename).map(_._2) match {
case Some(newId) => changesByFilename.addBinding(filename, (oldId, newId))
case None => deletionsByFilename.addBinding(filename, oldId)
}
}
}
updatedTreeId
} else {
originalObjectId
}
}
case class TreeBlobChange(oldId: ObjectId, newIdOpt: Option[ObjectId], filename: FileName)
val cleanTag: MemoFunc[ObjectId, ObjectId] = tagMemo { id =>
val originalTag = getTag(id)
replacement(originalTag.getObject).map {
cleanedObj =>
val tb = new TagBuilder
tb.setTag(originalTag.getTagName)
tb.setObjectId(cleanedObj, originalTag.getObject.getType)
tb.setTagger(originalTag.getTaggerIdent)
tb.setMessage(objectIdSubstitutor.replaceOldIds(originalTag.getFullMessage, threadLocalResources.reader(), apply))
val cleanedTag: ObjectId = threadLocalResources.inserter().insert(tb)
objectChecker.foreach(_.checkTag(tb.toByteArray))
cleanedTag
}.getOrElse(originalTag)
}
lazy val protectedDirt: Seq[ProtectedObjectDirtReport] = {
protectedObjectCensus.protectorRevsByObject.map {
case (protectedRevObj, refNames) =>
val originalContentObject = treeOrBlobPointedToBy(protectedRevObj).merge
val replacementTreeOrBlob = uncachedClean.replacement(originalContentObject)
ProtectedObjectDirtReport(protectedRevObj, originalContentObject, replacementTreeOrBlob)
}.toList
}
def stats() = Map("apply"->memoClean.stats(), "tree" -> cleanTree.stats(), "commit" -> cleanCommit.stats(), "tag" -> cleanTag.stats())
}
| NeilBryant/bfg-repo-cleaner | bfg-library/src/main/scala/com/madgag/git/bfg/cleaner/ObjectIdCleaner.scala | Scala | gpl-3.0 | 7,696 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.storage
import java.io.{File, IOException}
import java.util.UUID
import org.apache.spark.SparkConf
import org.apache.spark.executor.ExecutorExitCode
import org.apache.spark.internal.{config, Logging}
import org.apache.spark.util.{ShutdownHookManager, Utils}
/**
* Creates and maintains the logical mapping between logical blocks and physical on-disk
* locations. One block is mapped to one file with a name given by its BlockId.
*
* Block files are hashed among the directories listed in spark.local.dir (or in
* SPARK_LOCAL_DIRS, if it's set).
*/
private[spark] class DiskBlockManager(conf: SparkConf, deleteFilesOnStop: Boolean) extends Logging {
private[spark] val subDirsPerLocalDir = conf.get(config.DISKSTORE_SUB_DIRECTORIES)
/* Create one local directory for each path mentioned in spark.local.dir; then, inside this
* directory, create multiple subdirectories that we will hash files into, in order to avoid
* having really large inodes at the top level. */
private[spark] val localDirs: Array[File] = createLocalDirs(conf)
if (localDirs.isEmpty) {
logError("Failed to create any local dir.")
System.exit(ExecutorExitCode.DISK_STORE_FAILED_TO_CREATE_DIR)
}
private[spark] val localDirsString: Array[String] = localDirs.map(_.toString)
// The content of subDirs is immutable but the content of subDirs(i) is mutable. And the content
// of subDirs(i) is protected by the lock of subDirs(i)
private val subDirs = Array.fill(localDirs.length)(new Array[File](subDirsPerLocalDir))
private val shutdownHook = addShutdownHook()
/** Looks up a file by hashing it into one of our local subdirectories. */
// This method should be kept in sync with
// org.apache.spark.network.shuffle.ExecutorDiskUtils#getFile().
def getFile(filename: String): File = {
// Figure out which local directory it hashes to, and which subdirectory in that
val hash = Utils.nonNegativeHash(filename)
val dirId = hash % localDirs.length
val subDirId = (hash / localDirs.length) % subDirsPerLocalDir
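// Example with hypothetical values: with 3 local dirs and 64 sub-dirs per dir, a filename whose hash is 200
// maps to localDirs(200 % 3 = 2) and sub-directory (200 / 3) % 64 = 66 % 64 = 2.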
// Create the subdirectory if it doesn't already exist
val subDir = subDirs(dirId).synchronized {
val old = subDirs(dirId)(subDirId)
if (old != null) {
old
} else {
val newDir = new File(localDirs(dirId), "%02x".format(subDirId))
if (!newDir.exists() && !newDir.mkdir()) {
throw new IOException(s"Failed to create local dir in $newDir.")
}
subDirs(dirId)(subDirId) = newDir
newDir
}
}
new File(subDir, filename)
}
def getFile(blockId: BlockId): File = getFile(blockId.name)
/** Check if disk block manager has a block. */
def containsBlock(blockId: BlockId): Boolean = {
getFile(blockId.name).exists()
}
/** List all the files currently stored on disk by the disk manager. */
def getAllFiles(): Seq[File] = {
// Get all the files inside the array of array of directories
subDirs.flatMap { dir =>
dir.synchronized {
// Copy the content of dir because it may be modified in other threads
dir.clone()
}
}.filter(_ != null).flatMap { dir =>
val files = dir.listFiles()
if (files != null) files else Seq.empty
}
}
/** List all the blocks currently stored on disk by the disk manager. */
def getAllBlocks(): Seq[BlockId] = {
getAllFiles().flatMap { f =>
try {
Some(BlockId(f.getName))
} catch {
case _: UnrecognizedBlockId =>
// Skip files which do not correspond to blocks, for example temporary
// files created by [[SortShuffleWriter]].
None
}
}
}
/** Produces a unique block id and File suitable for storing local intermediate results. */
def createTempLocalBlock(): (TempLocalBlockId, File) = {
var blockId = new TempLocalBlockId(UUID.randomUUID())
while (getFile(blockId).exists()) {
blockId = new TempLocalBlockId(UUID.randomUUID())
}
(blockId, getFile(blockId))
}
/** Produces a unique block id and File suitable for storing shuffled intermediate results. */
def createTempShuffleBlock(): (TempShuffleBlockId, File) = {
var blockId = new TempShuffleBlockId(UUID.randomUUID())
while (getFile(blockId).exists()) {
blockId = new TempShuffleBlockId(UUID.randomUUID())
}
(blockId, getFile(blockId))
}
/**
* Create local directories for storing block data. These directories are
* located inside configured local directories and won't
* be deleted on JVM exit when using the external shuffle service.
*/
private def createLocalDirs(conf: SparkConf): Array[File] = {
Utils.getConfiguredLocalDirs(conf).flatMap { rootDir =>
try {
val localDir = Utils.createDirectory(rootDir, "blockmgr")
logInfo(s"Created local directory at $localDir")
Some(localDir)
} catch {
case e: IOException =>
logError(s"Failed to create local dir in $rootDir. Ignoring this directory.", e)
None
}
}
}
private def addShutdownHook(): AnyRef = {
logDebug("Adding shutdown hook") // force eager creation of logger
ShutdownHookManager.addShutdownHook(ShutdownHookManager.TEMP_DIR_SHUTDOWN_PRIORITY + 1) { () =>
logInfo("Shutdown hook called")
DiskBlockManager.this.doStop()
}
}
/** Cleanup local dirs and stop shuffle sender. */
private[spark] def stop() {
// Remove the shutdown hook. It causes memory leaks if we leave it around.
try {
ShutdownHookManager.removeShutdownHook(shutdownHook)
} catch {
case e: Exception =>
logError(s"Exception while removing shutdown hook.", e)
}
doStop()
}
private def doStop(): Unit = {
if (deleteFilesOnStop) {
localDirs.foreach { localDir =>
if (localDir.isDirectory() && localDir.exists()) {
try {
if (!ShutdownHookManager.hasRootAsShutdownDeleteDir(localDir)) {
Utils.deleteRecursively(localDir)
}
} catch {
case e: Exception =>
logError(s"Exception while deleting local spark dir: $localDir", e)
}
}
}
}
}
}
| pgandhi999/spark | core/src/main/scala/org/apache/spark/storage/DiskBlockManager.scala | Scala | apache-2.0 | 7,012 |
/**
* Copyright 2011-2016 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.core.body
import io.gatling.commons.util.Io._
import io.gatling.commons.validation._
import io.gatling.core.config.GatlingConfiguration
import io.gatling.core.session._
import io.gatling.core.session.el.{ El, ElCompiler }
import io.gatling.core.util.Resource
import io.gatling.core.util.cache.Cache
import com.github.benmanes.caffeine.cache.LoadingCache
class ElFileBodies(implicit configuration: GatlingConfiguration) {
val charset = configuration.core.charset
private val elFileBodyStringCache: LoadingCache[String, Validation[Expression[String]]] = {
def compileFile(path: String): Validation[Expression[String]] =
Resource.body(path).map { resource =>
withCloseable(resource.inputStream) {
_.toString(charset)
}
}.map(_.el[String])
Cache.newConcurrentLoadingCache(configuration.core.elFileBodiesCacheMaxCapacity, compileFile)
}
private val elFileBodyBytesCache: LoadingCache[String, Validation[Expression[Seq[Array[Byte]]]]] = {
def resource2BytesSeq(path: String): Validation[Expression[Seq[Array[Byte]]]] =
Resource.body(path).map { resource =>
ElCompiler.compile2BytesSeq(resource.string(charset), charset)
}
Cache.newConcurrentLoadingCache(configuration.core.elFileBodiesCacheMaxCapacity, resource2BytesSeq)
}
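// Resolve the file path from the session, fetch (or lazily compile and cache) the EL template,
// then evaluate the compiled expression against the current session.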
def asString(filePath: Expression[String]): Expression[String] =
session =>
for {
path <- filePath(session)
expression <- elFileBodyStringCache.get(path)
body <- expression(session)
} yield body
def asBytesSeq(filePath: Expression[String]): Expression[Seq[Array[Byte]]] =
session =>
for {
path <- filePath(session)
expression <- elFileBodyBytesCache.get(path)
body <- expression(session)
} yield body
}
| ryez/gatling | gatling-core/src/main/scala/io/gatling/core/body/ElFileBodies.scala | Scala | apache-2.0 | 2,455 |
package forimpatient.chapter09
import scala.io.Source
/**
* Created by Iryna Kharaborkina on 8/6/16.
*
* Solution to the Chapter 09 Exercise 01 'Scala for the Impatient' by Horstmann C.S.
*
* Write a Scala code snippet that reverses the lines in a file (making the last line the first one, and so on).
*/
object Exercise01 extends App {
println("Chapter 09 Exercise 01")
val path = "LICENSE"
Source.fromFile(path).getLines().toArray.reverse.foreach(println(_))
}
| Kiryna/Scala-for-the-Impatient | src/forimpatient/chapter09/Exercise01.scala | Scala | apache-2.0 | 485 |
package debug
object This extends App {
trait ThisTrait {
val traitParam = 1
def traitMethod() = 1
}
class ThisClass extends ThisTrait {
val classParam = 2
def classMethod() = 2
}
object ThisObject extends ThisClass {
val objectParam = 3
def objectMethod() = 3
def testMethod(): Unit = {
def innerTestMethodWithLambda(): Unit = {
Some(1).map { e =>
val debug = 1
}
}
innerTestMethodWithLambda()
}
}
ThisObject.testMethod()
}
| Kwestor/scala-ide | org.scala-ide.sdt.debug.expression.tests/test-workspace/expr-eval-this/src/debug/This.scala | Scala | bsd-3-clause | 524 |
package ark
object Maven2Sbt extends App {
object scala {
val version = "SCALA_VERSION$"
}
val xml = <dependencies>
<dependency>
<groupId>aaa</groupId>
<artifactId>bbb</artifactId>
<version>ccc</version>
<scope>compile</scope>
<type>pom</type>
</dependency>
<dependency>
<groupId>org.scalanlp</groupId>
<artifactId>scalala_${ scala.version }</artifactId>
<version>0.3.1</version>
</dependency>
<!-- Apache Commons -->
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
<version>1.4</version>
</dependency>
<dependency>
<groupId>commons-lang</groupId>
<artifactId>commons-lang</artifactId>
<version>2.4</version>
</dependency>
<dependency>
<groupId>joda-time</groupId>
<artifactId>joda-time</artifactId>
<version>1.6</version>
</dependency>
<dependency>
<groupId>joda-time</groupId>
<artifactId>joda-time-hibernate</artifactId>
<version>1.1</version>
<exclusions>
<exclusion>
<groupId>cglib</groupId>
<artifactId>cglib-full</artifactId>
</exclusion>
<exclusion>
<groupId>ehcache</groupId>
<artifactId>ehcache</artifactId>
</exclusion>
<exclusion>
<groupId>org.hibernate</groupId>
<artifactId>hibernate</artifactId>
</exclusion>
<exclusion>
<groupId>antlr</groupId>
<artifactId>antlr</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.scala-lang</groupId>
<artifactId>scala-library</artifactId>
<version>${ scala.version }</version>
</dependency>
<dependency>
<groupId>org.scala-lang</groupId>
<artifactId>scala-compiler</artifactId>
<version>${ scala.version }</version>
</dependency>
<dependency>
<groupId>com.googlecode.scalaz</groupId>
<artifactId>scalaz-core_${ scala.version }</artifactId>
<version>5.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.scala-tools.testing</groupId>
<artifactId>specs_${ scala.version }</artifactId>
<version>1.6.5-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.7</version>
<optional>true</optional>
</dependency>
<dependency>
<groupId>org.scala-tools.testing</groupId>
<artifactId>test-interface</artifactId>
<version>0.5</version>
<optional>true</optional>
</dependency>
<dependency>
<groupId>org.scalatest</groupId>
<artifactId>scalatest</artifactId>
<version>1.0.1-for-scala-${ scala.version }-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.easymock</groupId>
<artifactId>easymock</artifactId>
<version>2.5.1</version>
</dependency>
<dependency>
<groupId>org.easymock</groupId>
<artifactId>easymockclassextension</artifactId>
<version>2.4</version>
</dependency>
<dependency>
<groupId>org.scala-tools.testing</groupId>
<artifactId>scalacheck_${ scala.version }</artifactId>
<version>1.7</version>
</dependency>
<dependency>
<groupId>org.jmock</groupId>
<artifactId>jmock</artifactId>
<version>2.5.1</version>
</dependency>
<dependency>
<groupId>org.jmock</groupId>
<artifactId>jmock-legacy</artifactId>
<version>2.5.1</version>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-all</artifactId>
<version>1.8.4</version>
</dependency>
<dependency>
<groupId>cglib</groupId>
<artifactId>cglib</artifactId>
<version>2.1_3</version>
</dependency>
<dependency>
<groupId>org.objenesis</groupId>
<artifactId>objenesis</artifactId>
<version>1.0</version>
</dependency>
<dependency>
<groupId>net.objectlab.kit.datecalc</groupId>
<artifactId>datecalc-joda</artifactId>
<version>1.1.0</version>
</dependency>
<dependency>
<groupId>net.objectlab.kit.datecalc</groupId>
<artifactId>datecalc-common</artifactId>
<version>1.1.0</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-math</artifactId>
<version>2.0</version>
</dependency>
</dependencies>
val data: Seq[(String, String, String)] = (xml \ "dependency") map { d =>
val groupId = d \ "groupId" text
val artifactId = d \ "artifactId" text
val versionNum = d \ "version" text
(groupId, artifactId, versionNum)
}
val CrossBuildArtifact = """([\w-]+)_\$SCALA_VERSION\$""".r
def dep(a: String, g: String, v: String, cross: Boolean) = {
val sep = if (cross) "%%" else "%"
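// sbt's "%%" appends the Scala binary version to the artifact id, so cross-built artifacts use the
// base name captured by CrossBuildArtifact rather than the literal _$SCALA_VERSION$ suffix.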
val ident = a.split("-").map(_.capitalize).mkString
"""val %s = "%s" %s "%s" %% "%s" """ format (ident, g, sep, a, v)
}
val m = data map {
case (g, CrossBuildArtifact(a), v) => dep(a, g, v, true)
case (g, a, v) => dep(a, g, v, false)
} mkString ("\n")
println(m)
}
| lucaster/ark-calc | src/main/scala/ark/Maven2Sbt.scala | Scala | mit | 6,668 |
package spire
package math
import org.scalatest.FunSuite
import spire.implicits.{eqOps => _, _}
import java.math.MathContext
class ComplexTest extends FunSuite {
test("create Complex[Double]") {
val (real, imag) = (3.0, 44.0)
val c = Complex(real, imag)
assert(c.real === real)
assert(c.imag === imag)
assert(c === c)
}
test("create Complex[BigDecimal]") {
implicit val mc = MathContext.DECIMAL128
val (real, imag) = (BigDecimal(222.0), BigDecimal(3483.0))
val c = Complex(real, imag)
assert(c.real === real)
assert(c.imag === imag)
assert(c === c)
}
test("some basic equality stuff") {
val one = Complex.one[Double]
val i = Complex.i[Double]
assert(one === 1)
assert(one === 1.0)
assert(one === Complex.one[Double])
assert(1 === one)
assert(1.0 === one)
assert(Complex.one[Double] === one)
assert(1 != i)
assert(1.0 != i)
assert(one != i)
assert(i != 1)
assert(i != 1.0)
assert(i != one)
}
test("complex arithmetic") {
val i = Complex.i[Double]
val a = 4.0 + 3.0*i
val b = 1.0 + 2.0*i
val c = 2.0 + 0.0*i
assert(a + b === 5.0+5.0*i)
assert(b + c === Complex(3.0, 2.0))
assert(b + c === Complex(3.0, 2.0))
assert(a - b === Complex(3.0, 1.0))
assert(b - c === Complex(-1.0, 2.0))
assert(a - c === Complex(2.0, 3.0))
assert(a * b === Complex(-2.0, 11.0))
assert(b * c === Complex(2.0, 4.0))
assert(a * c === Complex(8.0, 6.0))
assert(a / b === Complex(2.0, -1.0))
assert(b / c === Complex(0.5, 1.0))
assert(a / c === Complex(2.0, 1.5))
}
test("test e^(i * pi) with Double") {
val e = Complex(scala.math.E, 0.0)
val pi = Complex(scala.math.Pi, 0.0)
val i = Complex.i[Double]
val one = Complex.one[Double]
val z = e.pow(i * pi) + one
assert (z.real === 0.0)
assert (z.imag < 0.000000000000001) // sigh...
assert (z.imag > -0.000000000000001)
}
test("test roots of unity") {
val one = Complex.one[Double]
val i = Complex.i[Double]
assert(Complex.rootsOfUnity[Double](2) === Array(one, -one))
assert(Complex.rootsOfUnity[Double](4) === Array(one, i, -one, -i))
val theta = 2.0 * scala.math.Pi / 3.0
val c1 = math.cos(theta) + math.sin(theta) * i
val c2 = -one - c1
assert(Complex.rootsOfUnity[Double](3) === Array(one, c1, c2))
}
test("try using FastComplex") {
val fc = FastComplex
val a = fc(3.0, -2.0)
val b = fc(2.0, 1.0)
assert(fc.add(a, b) === fc(5.0, -1.0))
assert(fc.subtract(a, b) === fc(1.0, -3.0))
assert(fc.multiply(a, b) === fc(8.0, -1.0))
val e = fc(scala.math.E, 0.0)
val pi = fc(scala.math.Pi, 0.0)
val ipi = fc.multiply(fc.i, pi)
val e_ipi = fc.pow(e, ipi)
val z = fc.add(e_ipi, fc.one)
assert(fc.real(z) == 0.0F)
assert(fc.imag(z) < 0.000000001F)
assert(fc.multiply(fc.i, fc.i) === fc(-1f, 0f))
assert(fc.imag(fc(-1f, 0f)) === 0f)
}
test("try using FloatComplex") {
val fc = FastComplex
val a = FloatComplex(3.0, -2.0)
val b = FloatComplex(2.0, 1.0)
assert(a + b === FloatComplex(5.0, -1.0))
assert(a - b === FloatComplex(1.0, -3.0))
assert(a * b === FloatComplex(8.0, -1.0))
val i = FloatComplex.i
val one = FloatComplex.one
val e = FloatComplex(scala.math.E, 0.0)
val pi = FloatComplex(scala.math.Pi, 0.0)
val z = e.pow(i * pi) + one
assert(z.real == 0.0F)
assert(z.imag < 0.000000001F)
}
test("complex trigonometry") {
// these are just a spot check to avoid sign errors
assert(Complex(3.0, 5.0).sin == Complex(10.472508533940392, -73.46062169567367))
assert(Complex(3.0, -5.0).sin == Complex(10.472508533940392, 73.46062169567367))
assert(Complex(-3.0, 5.0).sin == Complex(-10.472508533940392, -73.46062169567367))
assert(Complex(-3.0, -5.0).sin == Complex(-10.472508533940392, 73.46062169567367))
assert(Complex(3.0, 5.0).cos == Complex(-73.46729221264526, -10.471557674805572))
assert(Complex(3.0, -5.0).cos == Complex(-73.46729221264526, 10.471557674805572))
assert(Complex(-3.0, 5.0).cos == Complex(-73.46729221264526, 10.471557674805572))
assert(Complex(-3.0, -5.0).cos == Complex(-73.46729221264526, -10.471557674805572))
}
test("complex norm") {
assert(Complex(3.0, 4.0).norm == 5.0)
// check against overflow
assert(Complex((3e20).toFloat, (4e20).toFloat).norm == (5e20).toFloat)
}
}
| tixxit/spire | tests/src/test/scala/spire/math/ComplexTest.scala | Scala | mit | 4,474 |
package com.sksamuel.elastic4s.requests.indexes
import com.sksamuel.elastic4s.Indexes
case class GetIndexTemplateRequest(indexes: Indexes)
| sksamuel/elastic4s | elastic4s-domain/src/main/scala/com/sksamuel/elastic4s/requests/indexes/GetIndexTemplateRequest.scala | Scala | apache-2.0 | 141 |
package vearth.animal
/**
* @author vitchyr
*/
import vearth.World
trait Ant extends Drawable {
protected val world: World
protected var _isAlive: Boolean = true
def isAlive = _isAlive
def update() = {
move()
}
val width = world.width
val height = world.height
override def toString() = s"Ant @ ($x, $y)"
/** Functions to implement */
def move()
}
| vpong/vearth | vearth/src/main/scala/animal/Ant.scala | Scala | mit | 378 |
package org.jetbrains.plugins.scala
package lang
package parameterInfo
package typeParameterInfo
import java.awt.Color
import java.io.File
import com.intellij.codeInsight.hint.{HintUtil, ShowParameterInfoContext}
import com.intellij.lang.parameterInfo.ParameterInfoUIContext
import com.intellij.openapi.fileEditor.{FileEditorManager, OpenFileDescriptor}
import com.intellij.openapi.util.io.FileUtil
import com.intellij.openapi.util.text.StringUtil
import com.intellij.openapi.vfs.{CharsetToolkit, LocalFileSystem}
import com.intellij.psi.PsiElement
import com.intellij.psi.util.PsiTreeUtil
import org.jetbrains.plugins.scala.base.ScalaLightPlatformCodeInsightTestCaseAdapter
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import _root_.scala.util.Sorting
import scala.collection.mutable.ArrayBuffer
/**
* @author Aleksander Podkhalyuzin
* @since 26.04.2009
*/
abstract class TypeParameterInfoTestBase extends ScalaLightPlatformCodeInsightTestCaseAdapter {
val caretMarker = "/*caret*/"
protected def folderPath = baseRootPath() + "parameterInfo/typeParameterInfo/"
protected def doTest() {
import _root_.junit.framework.Assert._
val filePath = folderPath + getTestName(false) + ".scala"
val file = LocalFileSystem.getInstance.findFileByPath(filePath.replace(File.separatorChar, '/'))
assert(file != null, "file " + filePath + " not found")
val fileText = StringUtil.convertLineSeparators(FileUtil.loadFile(new File(file.getCanonicalPath), CharsetToolkit.UTF8))
configureFromFileTextAdapter(getTestName(false) + ".scala", fileText)
val scalaFile = getFileAdapter.asInstanceOf[ScalaFile]
val offset = fileText.indexOf(caretMarker)
assert(offset != -1, "Not specified caret marker in test case. Use /*caret*/ in scala file for this.")
val fileEditorManager = FileEditorManager.getInstance(getProjectAdapter)
val editor = fileEditorManager.openTextEditor(new OpenFileDescriptor(getProjectAdapter, file, offset), false)
val context = new ShowParameterInfoContext(editor, getProjectAdapter, scalaFile, offset, -1)
val handler = new ScalaTypeParameterInfoHandler
val leafElement = scalaFile.findElementAt(offset)
val element = PsiTreeUtil.getParentOfType(leafElement, handler.getArgumentListClass)
handler.findElementForParameterInfo(context)
val items = new ArrayBuffer[String]
for (item <- context.getItemsToShow) {
val uiContext = new ParameterInfoUIContext {
def getDefaultParameterColor: Color = HintUtil.INFORMATION_COLOR
def setupUIComponentPresentation(text: String, highlightStartOffset: Int, highlightEndOffset: Int,
isDisabled: Boolean, strikeout: Boolean, isDisabledBeforeHighlight: Boolean,
background: Color): String = {
items.append(text)
text
}
def isUIComponentEnabled: Boolean = false
def getCurrentParameterIndex: Int = 0
def getParameterOwner: PsiElement = element
def setUIComponentEnabled(enabled: Boolean) {}
}
handler.updateUI(item, uiContext)
}
val itemsArray = items.toArray
Sorting.quickSort[String](itemsArray)
val res = new StringBuilder("")
for (item <- itemsArray) res.append(item).append("\n")
if (res.length > 0) res.replace(res.length - 1, res.length, "")
val lastPsi = scalaFile.findElementAt(scalaFile.getText.length - 1)
val text = lastPsi.getText
val output = lastPsi.getNode.getElementType match {
case ScalaTokenTypes.tLINE_COMMENT => text.substring(2).trim
case ScalaTokenTypes.tBLOCK_COMMENT | ScalaTokenTypes.tDOC_COMMENT =>
text.substring(2, text.length - 2).trim
case _ => assertTrue("Test result must be in last comment statement.", false)
}
assertEquals(output, res.toString())
}
}
| triggerNZ/intellij-scala | test/org/jetbrains/plugins/scala/lang/parameterInfo/typeParameterInfo/TypeParameterInfoTestBase.scala | Scala | apache-2.0 | 3,931 |
package com.szadowsz.spark.ml.feature
import com.szadowsz.spark.ml.params.HasIsInclusive
import org.apache.spark.ml.Transformer
import org.apache.spark.ml.param.ParamMap
import org.apache.spark.ml.param.shared.HasInputCols
import org.apache.spark.ml.util.Identifiable
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.{DataFrame, Dataset}
import org.slf4j.LoggerFactory
/**
* Created on 25/01/2017.
*/
class ColFilterTransformer(override val uid: String) extends Transformer with HasInputCols with HasIsInclusive {
protected val logger = LoggerFactory.getLogger("com.szadowsz.ulster.spark")
def this() = this(Identifiable.randomUID("colSelect"))
setDefault(isInclusive,false)
override def copy(extra: ParamMap): Transformer = defaultCopy(extra)
override def transformSchema(schema: StructType): StructType = {
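// Inclusive mode keeps only the configured input columns; exclusive mode (the default) drops them from the schema.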
StructType(if ($(isInclusive)) schema.filter(f => $(inputCols).contains(f.name)) else schema.filterNot(f => $(inputCols).contains(f.name)))
}
override def transform(dataset: Dataset[_]): DataFrame = {
logger.info("Executing stage {}",uid)
transformSchema(dataset.schema)
if ($(isInclusive))
dataset.select($(inputCols).head,$(inputCols).tail:_*)
else
dataset.drop($(inputCols):_*)
}
}
| zakski/project-cadisainmduit | module/spark/src/main/scala/com/szadowsz/spark/ml/feature/ColFilterTransformer.scala | Scala | apache-2.0 | 1,277 |
import scala.language.{ implicitConversions }
import scala.reflect.runtime.universe._
import scala.tools.reflect.Eval
object Test extends App {
reify {
object lazyLib {
/** Delay the evaluation of an expression until it is needed. */
def delay[A](value: => A): Susp[A] = new SuspImpl[A](value)
/** Get the value of a delayed expression. */
implicit def force[A](s: Susp[A]): A = s()
/**
* Data type of suspended computations. (The name comes from ML.)
*/
abstract class Susp[+A] extends Function0[A]
/**
* Implementation of suspended computations, separated from the
* abstract class so that the type parameter can be invariant.
*/
class SuspImpl[A](lazyValue: => A) extends Susp[A] {
private var maybeValue: Option[A] = None
override def apply() = maybeValue match {
case None =>
val value = lazyValue
maybeValue = Some(value)
value
case Some(value) =>
value
}
override def toString() = maybeValue match {
case None => "Susp(?)"
case Some(value) => "Susp(" + value + ")"
}
}
}
import lazyLib._
val s: Susp[Int] = delay { println("evaluating..."); 3 }
println("s = " + s) // show that s is unevaluated
println("s() = " + s()) // evaluate s
println("s = " + s) // show that the value is saved
println("2 + s = " + (2 + s)) // implicit call to force()
val sl = delay { Some(3) }
val sl1: Susp[Some[Int]] = sl
val sl2: Susp[Option[Int]] = sl1 // the type is covariant
println("sl2 = " + sl2)
println("sl2() = " + sl2())
println("sl2 = " + sl2)
}.eval
}
| som-snytt/dotty | tests/disabled/macro/run/reify_lazyevaluation.scala | Scala | apache-2.0 | 1,770 |
package im.mange.driveby
import java.io.File
trait FluentDriver extends BrowserAware {
def assert(condition: Condition, message: String = ""): this.type = { browser.assert(condition, message); this }
def clear(by: By): this.type = { browser.clear(by); this }
def click(by: By): this.type = { browser.click(by); this }
def enter(by: By, value: String, clear: Boolean = false): this.type = { browser.enter(by, value, clear); this }
def goto(url: String): this.type = { browser.goto(url); this }
def html: this.type = { browser.html; this }
def refresh(): this.type = { browser.refresh(); this }
def screenshot(file: File): this.type = { browser.screenshot(file); this }
def select(by: By, value: String): this.type = {browser.select(by, value); this}
def enterAndTabOut(id: String, value: String, clear: Boolean = false, tabOut: Boolean): this.type = {
enter(Id(id), if (tabOut) s"$value\\t" else value, clear); this
}
}
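// Hedged usage sketch (not part of the original trait): returning `this.type` lets calls chain
// fluently; the URL and element ids below are invented for the example.
//
//   driver.goto("http://example.com/login")
//     .enter(Id("username"), "bob")
//     .enter(Id("password"), "secret")
//     .click(Id("submit"))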
|
alltonp/driveby
|
src/main/scala/im/mange/driveby/FluentDriver.scala
|
Scala
|
apache-2.0
| 947
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.io.NotSerializableException
import java.util.Properties
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicInteger
import scala.collection.Map
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet, Stack}
import scala.concurrent.duration._
import scala.language.existentials
import scala.language.postfixOps
import scala.util.control.NonFatal
import org.apache.commons.lang3.SerializationUtils
import org.apache.spark._
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.partial.{ApproximateActionListener, ApproximateEvaluator, PartialResult}
import org.apache.spark.rdd.RDD
import org.apache.spark.rpc.RpcTimeout
import org.apache.spark.storage._
import org.apache.spark.util._
import org.apache.spark.storage.BlockManagerMessages.BlockManagerHeartbeat
/**
* The high-level scheduling layer that implements stage-oriented scheduling. It computes a DAG of
* stages for each job, keeps track of which RDDs and stage outputs are materialized, and finds a
* minimal schedule to run the job. It then submits stages as TaskSets to an underlying
* TaskScheduler implementation that runs them on the cluster. A TaskSet contains fully independent
* tasks that can run right away based on the data that's already on the cluster (e.g. map output
* files from previous stages), though it may fail if this data becomes unavailable.
*
* Spark stages are created by breaking the RDD graph at shuffle boundaries. RDD operations with
* "narrow" dependencies, like map() and filter(), are pipelined together into one set of tasks
* in each stage, but operations with shuffle dependencies require multiple stages (one to write a
* set of map output files, and another to read those files after a barrier). In the end, every
* stage will have only shuffle dependencies on other stages, and may compute multiple operations
* inside it. The actual pipelining of these operations happens in the RDD.compute() functions of
* various RDDs (MappedRDD, FilteredRDD, etc).
*
* In addition to coming up with a DAG of stages, the DAGScheduler also determines the preferred
* locations to run each task on, based on the current cache status, and passes these to the
* low-level TaskScheduler. Furthermore, it handles failures due to shuffle output files being
* lost, in which case old stages may need to be resubmitted. Failures *within* a stage that are
* not caused by shuffle file loss are handled by the TaskScheduler, which will retry each task
* a small number of times before cancelling the whole stage.
*
* When looking through this code, there are several key concepts:
*
* - Jobs (represented by [[ActiveJob]]) are the top-level work items submitted to the scheduler.
* For example, when the user calls an action, like count(), a job will be submitted through
* submitJob. Each Job may require the execution of multiple stages to build intermediate data.
*
* - Stages ([[Stage]]) are sets of tasks that compute intermediate results in jobs, where each
* task computes the same function on partitions of the same RDD. Stages are separated at shuffle
* boundaries, which introduce a barrier (where we must wait for the previous stage to finish to
* fetch outputs). There are two types of stages: [[ResultStage]], for the final stage that
* executes an action, and [[ShuffleMapStage]], which writes map output files for a shuffle.
* Stages are often shared across multiple jobs, if these jobs reuse the same RDDs.
*
* - Tasks are individual units of work, each sent to one machine.
*
* - Cache tracking: the DAGScheduler figures out which RDDs are cached to avoid recomputing them
* and likewise remembers which shuffle map stages have already produced output files to avoid
* redoing the map side of a shuffle.
*
* - Preferred locations: the DAGScheduler also computes where to run each task in a stage based
* on the preferred locations of its underlying RDDs, or the location of cached or shuffle data.
*
* - Cleanup: all data structures are cleared when the running jobs that depend on them finish,
* to prevent memory leaks in a long-running application.
*
* To recover from failures, the same stage might need to run multiple times, which are called
* "attempts". If the TaskScheduler reports that a task failed because a map output file from a
* previous stage was lost, the DAGScheduler resubmits that lost stage. This is detected through a
* CompletionEvent with FetchFailed, or an ExecutorLost event. The DAGScheduler will wait a small
* amount of time to see whether other nodes or tasks fail, then resubmit TaskSets for any lost
* stage(s) that compute the missing tasks. As part of this process, we might also have to create
* Stage objects for old (finished) stages where we previously cleaned up the Stage object. Since
* tasks from the old attempt of a stage could still be running, care must be taken to map any
* events received in the correct Stage object.
*
* Here's a checklist to use when making or reviewing changes to this class:
*
* - All data structures should be cleared when the jobs involving them end to avoid indefinite
* accumulation of state in long-running programs.
*
* - When adding a new data structure, update `DAGSchedulerSuite.assertDataStructuresEmpty` to
* include the new structure. This will help to catch memory leaks.
*/
private[spark]
class DAGScheduler(
private[scheduler] val sc: SparkContext,
private[scheduler] val taskScheduler: TaskScheduler,
listenerBus: LiveListenerBus,
mapOutputTracker: MapOutputTrackerMaster,
blockManagerMaster: BlockManagerMaster,
env: SparkEnv,
clock: Clock = new SystemClock())
extends Logging {
def this(sc: SparkContext, taskScheduler: TaskScheduler) = {
this(
sc,
taskScheduler,
sc.listenerBus,
sc.env.mapOutputTracker.asInstanceOf[MapOutputTrackerMaster],
sc.env.blockManager.master,
sc.env)
}
def this(sc: SparkContext) = this(sc, sc.taskScheduler)
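// Illustrative sketch (not part of Spark itself) of the class comment above, in user-level terms.
// The job/stage split is assumed to look roughly like this for a word-count style action: the
// shuffle introduced by reduceByKey() becomes a ShuffleMapStage, and the final count() becomes a
// ResultStage submitted through submitJob():
//
//   val conf   = new SparkConf().setAppName("dag-sketch").setMaster("local[2]")
//   val sc     = new SparkContext(conf)
//   val words  = sc.parallelize(Seq("a", "b", "a", "c"))
//   val counts = words.map(w => (w, 1)).reduceByKey(_ + _) // narrow map, then a shuffle dependency
//   counts.count()                                         // one action => one job => two stages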
private[scheduler] val metricsSource: DAGSchedulerSource = new DAGSchedulerSource(this)
private[scheduler] val nextJobId = new AtomicInteger(0)
private[scheduler] def numTotalJobs: Int = nextJobId.get()
private val nextStageId = new AtomicInteger(0)
private[scheduler] val jobIdToStageIds = new HashMap[Int, HashSet[Int]]
private[scheduler] val stageIdToStage = new HashMap[Int, Stage]
private[scheduler] val shuffleToMapStage = new HashMap[Int, ShuffleMapStage]
private[scheduler] val jobIdToActiveJob = new HashMap[Int, ActiveJob]
// Stages we need to run whose parents aren't done
private[scheduler] val waitingStages = new HashSet[Stage]
// Stages we are running right now
private[scheduler] val runningStages = new HashSet[Stage]
// Stages that must be resubmitted due to fetch failures
private[scheduler] val failedStages = new HashSet[Stage]
private[scheduler] val activeJobs = new HashSet[ActiveJob]
/**
* Contains the locations that each RDD's partitions are cached on. This map's keys are RDD ids
* and its values are arrays indexed by partition numbers. Each array value is the set of
* locations where that RDD partition is cached.
*
* All accesses to this map should be guarded by synchronizing on it (see SPARK-4454).
*/
private val cacheLocs = new HashMap[Int, IndexedSeq[Seq[TaskLocation]]]
// For tracking failed nodes, we use the MapOutputTracker's epoch number, which is sent with
// every task. When we detect a node failing, we note the current epoch number and failed
// executor, increment it for new tasks, and use this to ignore stray ShuffleMapTask results.
//
// TODO: Garbage collect information about failure epochs when we know there are no more
// stray messages to detect.
private val failedEpoch = new HashMap[String, Long]
private [scheduler] val outputCommitCoordinator = env.outputCommitCoordinator
// A closure serializer that we reuse.
// This is only safe because DAGScheduler runs in a single thread.
private val closureSerializer = SparkEnv.get.closureSerializer.newInstance()
/** If enabled, FetchFailed will not cause stage retry, in order to surface the problem. */
private val disallowStageRetryForTest = sc.getConf.getBoolean("spark.test.noStageRetry", false)
private val messageScheduler =
ThreadUtils.newDaemonSingleThreadScheduledExecutor("dag-scheduler-message")
private[scheduler] val eventProcessLoop = new DAGSchedulerEventProcessLoop(this)
taskScheduler.setDAGScheduler(this)
/**
* Called by the TaskSetManager to report task's starting.
*/
def taskStarted(task: Task[_], taskInfo: TaskInfo) {
eventProcessLoop.post(BeginEvent(task, taskInfo))
}
/**
* Called by the TaskSetManager to report that a task has completed
* and results are being fetched remotely.
*/
def taskGettingResult(taskInfo: TaskInfo) {
eventProcessLoop.post(GettingResultEvent(taskInfo))
}
/**
* Called by the TaskSetManager to report task completions or failures.
*/
def taskEnded(
task: Task[_],
reason: TaskEndReason,
result: Any,
accumUpdates: Map[Long, Any],
taskInfo: TaskInfo,
taskMetrics: TaskMetrics): Unit = {
eventProcessLoop.post(
CompletionEvent(task, reason, result, accumUpdates, taskInfo, taskMetrics))
}
/**
* Update metrics for in-progress tasks and let the master know that the BlockManager is still
* alive. Return true if the driver knows about the given block manager. Otherwise, return false,
* indicating that the block manager should re-register.
*/
def executorHeartbeatReceived(
execId: String,
taskMetrics: Array[(Long, Int, Int, TaskMetrics)], // (taskId, stageId, stageAttemptId, metrics)
blockManagerId: BlockManagerId): Boolean = {
listenerBus.post(SparkListenerExecutorMetricsUpdate(execId, taskMetrics))
blockManagerMaster.driverEndpoint.askWithRetry[Boolean](
BlockManagerHeartbeat(blockManagerId), new RpcTimeout(600 seconds, "BlockManagerHeartbeat"))
}
/**
* Called by TaskScheduler implementation when an executor fails.
*/
def executorLost(execId: String): Unit = {
eventProcessLoop.post(ExecutorLost(execId))
}
/**
* Called by TaskScheduler implementation when a host is added.
*/
def executorAdded(execId: String, host: String): Unit = {
eventProcessLoop.post(ExecutorAdded(execId, host))
}
/**
* Called by the TaskSetManager to cancel an entire TaskSet due to either repeated failures or
* cancellation of the job itself.
*/
def taskSetFailed(taskSet: TaskSet, reason: String, exception: Option[Throwable]): Unit = {
eventProcessLoop.post(TaskSetFailed(taskSet, reason, exception))
}
private[scheduler]
def getCacheLocs(rdd: RDD[_]): IndexedSeq[Seq[TaskLocation]] = cacheLocs.synchronized {
// Note: this doesn't use `getOrElse()` because this method is called O(num tasks) times
if (!cacheLocs.contains(rdd.id)) {
// Note: if the storage level is NONE, we don't need to get locations from block manager.
val locs: IndexedSeq[Seq[TaskLocation]] = if (rdd.getStorageLevel == StorageLevel.NONE) {
IndexedSeq.fill(rdd.partitions.length)(Nil)
} else {
val blockIds =
rdd.partitions.indices.map(index => RDDBlockId(rdd.id, index)).toArray[BlockId]
blockManagerMaster.getLocations(blockIds).map { bms =>
bms.map(bm => TaskLocation(bm.host, bm.executorId))
}
}
cacheLocs(rdd.id) = locs
}
cacheLocs(rdd.id)
}
private def clearCacheLocs(): Unit = cacheLocs.synchronized {
cacheLocs.clear()
}
/**
* Get or create a shuffle map stage for the given shuffle dependency's map side.
*/
private def getShuffleMapStage(
shuffleDep: ShuffleDependency[_, _, _],
firstJobId: Int): ShuffleMapStage = {
shuffleToMapStage.get(shuffleDep.shuffleId) match {
case Some(stage) => stage
case None =>
// We are going to register ancestor shuffle dependencies
getAncestorShuffleDependencies(shuffleDep.rdd).foreach { dep =>
shuffleToMapStage(dep.shuffleId) = newOrUsedShuffleStage(dep, firstJobId)
}
// Then register current shuffleDep
val stage = newOrUsedShuffleStage(shuffleDep, firstJobId)
shuffleToMapStage(shuffleDep.shuffleId) = stage
stage
}
}
/**
* Helper function to eliminate some code duplication when creating new stages.
*/
private def getParentStagesAndId(rdd: RDD[_], firstJobId: Int): (List[Stage], Int) = {
val parentStages = getParentStages(rdd, firstJobId)
val id = nextStageId.getAndIncrement()
(parentStages, id)
}
/**
* Create a ShuffleMapStage as part of the (re)-creation of a shuffle map stage in
* newOrUsedShuffleStage. The stage will be associated with the provided firstJobId.
* Production of shuffle map stages should always use newOrUsedShuffleStage, not
* newShuffleMapStage directly.
*/
private def newShuffleMapStage(
rdd: RDD[_],
numTasks: Int,
shuffleDep: ShuffleDependency[_, _, _],
firstJobId: Int,
callSite: CallSite): ShuffleMapStage = {
val (parentStages: List[Stage], id: Int) = getParentStagesAndId(rdd, firstJobId)
val stage: ShuffleMapStage = new ShuffleMapStage(id, rdd, numTasks, parentStages,
firstJobId, callSite, shuffleDep)
stageIdToStage(id) = stage
updateJobIdStageIdMaps(firstJobId, stage)
stage
}
/**
* Create a ResultStage associated with the provided jobId.
*/
private def newResultStage(
rdd: RDD[_],
func: (TaskContext, Iterator[_]) => _,
partitions: Array[Int],
jobId: Int,
callSite: CallSite): ResultStage = {
val (parentStages: List[Stage], id: Int) = getParentStagesAndId(rdd, jobId)
val stage = new ResultStage(id, rdd, func, partitions, parentStages, jobId, callSite)
stageIdToStage(id) = stage
updateJobIdStageIdMaps(jobId, stage)
stage
}
/**
* Create a shuffle map Stage for the given RDD. The stage will also be associated with the
* provided firstJobId. If a stage for the shuffleId existed previously so that the shuffleId is
* present in the MapOutputTracker, then the number and location of available outputs are
* recovered from the MapOutputTracker.
*/
private def newOrUsedShuffleStage(
shuffleDep: ShuffleDependency[_, _, _],
firstJobId: Int): ShuffleMapStage = {
val rdd = shuffleDep.rdd
val numTasks = rdd.partitions.length
val stage = newShuffleMapStage(rdd, numTasks, shuffleDep, firstJobId, rdd.creationSite)
if (mapOutputTracker.containsShuffle(shuffleDep.shuffleId)) {
val serLocs = mapOutputTracker.getSerializedMapOutputStatuses(shuffleDep.shuffleId)
val locs = MapOutputTracker.deserializeMapStatuses(serLocs)
(0 until locs.length).foreach { i =>
if (locs(i) ne null) {
// locs(i) will be null if missing
stage.addOutputLoc(i, locs(i))
}
}
} else {
// Kind of ugly: need to register RDDs with the cache and map output tracker here
// since we can't do it in the RDD constructor because # of partitions is unknown
logInfo("Registering RDD " + rdd.id + " (" + rdd.getCreationSite + ")")
mapOutputTracker.registerShuffle(shuffleDep.shuffleId, rdd.partitions.length)
}
stage
}
/**
* Get or create the list of parent stages for a given RDD. The new Stages will be created with
* the provided firstJobId.
*/
private def getParentStages(rdd: RDD[_], firstJobId: Int): List[Stage] = {
val parents = new HashSet[Stage]
val visited = new HashSet[RDD[_]]
// We are manually maintaining a stack here to prevent StackOverflowError
// caused by recursively visiting
val waitingForVisit = new Stack[RDD[_]]
def visit(r: RDD[_]) {
if (!visited(r)) {
visited += r
// Kind of ugly: need to register RDDs with the cache here since
// we can't do it in its constructor because # of partitions is unknown
for (dep <- r.dependencies) {
dep match {
case shufDep: ShuffleDependency[_, _, _] =>
parents += getShuffleMapStage(shufDep, firstJobId)
case _ =>
waitingForVisit.push(dep.rdd)
}
}
}
}
waitingForVisit.push(rdd)
while (waitingForVisit.nonEmpty) {
visit(waitingForVisit.pop())
}
parents.toList
}
/** Find ancestor shuffle dependencies that are not registered in shuffleToMapStage yet */
private def getAncestorShuffleDependencies(rdd: RDD[_]): Stack[ShuffleDependency[_, _, _]] = {
val parents = new Stack[ShuffleDependency[_, _, _]]
val visited = new HashSet[RDD[_]]
// We are manually maintaining a stack here to prevent StackOverflowError
// caused by recursively visiting
val waitingForVisit = new Stack[RDD[_]]
def visit(r: RDD[_]) {
if (!visited(r)) {
visited += r
for (dep <- r.dependencies) {
dep match {
case shufDep: ShuffleDependency[_, _, _] =>
if (!shuffleToMapStage.contains(shufDep.shuffleId)) {
parents.push(shufDep)
}
case _ =>
}
waitingForVisit.push(dep.rdd)
}
}
}
waitingForVisit.push(rdd)
while (waitingForVisit.nonEmpty) {
visit(waitingForVisit.pop())
}
parents
}
private def getMissingParentStages(stage: Stage): List[Stage] = {
val missing = new HashSet[Stage]
val visited = new HashSet[RDD[_]]
// We are manually maintaining a stack here to prevent StackOverflowError
// caused by recursively visiting
val waitingForVisit = new Stack[RDD[_]]
def visit(rdd: RDD[_]) {
if (!visited(rdd)) {
visited += rdd
val rddHasUncachedPartitions = getCacheLocs(rdd).contains(Nil)
if (rddHasUncachedPartitions) {
for (dep <- rdd.dependencies) {
dep match {
case shufDep: ShuffleDependency[_, _, _] =>
val mapStage = getShuffleMapStage(shufDep, stage.firstJobId)
if (!mapStage.isAvailable) {
missing += mapStage
}
case narrowDep: NarrowDependency[_] =>
waitingForVisit.push(narrowDep.rdd)
}
}
}
}
}
waitingForVisit.push(stage.rdd)
while (waitingForVisit.nonEmpty) {
visit(waitingForVisit.pop())
}
missing.toList
}
/**
* Registers the given jobId among the jobs that need the given stage and
* all of that stage's ancestors.
*/
private def updateJobIdStageIdMaps(jobId: Int, stage: Stage): Unit = {
def updateJobIdStageIdMapsList(stages: List[Stage]) {
if (stages.nonEmpty) {
val s = stages.head
s.jobIds += jobId
jobIdToStageIds.getOrElseUpdate(jobId, new HashSet[Int]()) += s.id
val parents: List[Stage] = getParentStages(s.rdd, jobId)
val parentsWithoutThisJobId = parents.filter { ! _.jobIds.contains(jobId) }
updateJobIdStageIdMapsList(parentsWithoutThisJobId ++ stages.tail)
}
}
updateJobIdStageIdMapsList(List(stage))
}
/**
* Removes state for job and any stages that are not needed by any other job. Does not
* handle cancelling tasks or notifying the SparkListener about finished jobs/stages/tasks.
*
* @param job The job whose state to cleanup.
*/
private def cleanupStateForJobAndIndependentStages(job: ActiveJob): Unit = {
val registeredStages = jobIdToStageIds.get(job.jobId)
if (registeredStages.isEmpty || registeredStages.get.isEmpty) {
logError("No stages registered for job " + job.jobId)
} else {
stageIdToStage.filterKeys(stageId => registeredStages.get.contains(stageId)).foreach {
case (stageId, stage) =>
val jobSet = stage.jobIds
if (!jobSet.contains(job.jobId)) {
logError(
"Job %d not registered for stage %d even though that stage was registered for the job"
.format(job.jobId, stageId))
} else {
def removeStage(stageId: Int) {
// data structures based on Stage
for (stage <- stageIdToStage.get(stageId)) {
if (runningStages.contains(stage)) {
logDebug("Removing running stage %d".format(stageId))
runningStages -= stage
}
for ((k, v) <- shuffleToMapStage.find(_._2 == stage)) {
shuffleToMapStage.remove(k)
}
if (waitingStages.contains(stage)) {
logDebug("Removing stage %d from waiting set.".format(stageId))
waitingStages -= stage
}
if (failedStages.contains(stage)) {
logDebug("Removing stage %d from failed set.".format(stageId))
failedStages -= stage
}
}
// data structures based on StageId
stageIdToStage -= stageId
logDebug("After removal of stage %d, remaining stages = %d"
.format(stageId, stageIdToStage.size))
}
jobSet -= job.jobId
if (jobSet.isEmpty) { // no other job needs this stage
removeStage(stageId)
}
}
}
}
jobIdToStageIds -= job.jobId
jobIdToActiveJob -= job.jobId
activeJobs -= job
job.finalStage match {
case r: ResultStage =>
r.resultOfJob = None
case m: ShuffleMapStage =>
m.mapStageJobs = m.mapStageJobs.filter(_ != job)
}
}
/**
* Submit an action job to the scheduler and get a JobWaiter object back. The JobWaiter object
* can be used to block until the job finishes executing or can be used to cancel the job.
*
* @param rdd target RDD to run tasks on
* @param func a function to run on each partition of the RDD
* @param partitions set of partitions to run on; some jobs may not want to compute on all
* partitions of the target RDD, e.g. for operations like first()
* @param callSite where in the user program this job was called
* @param resultHandler callback to pass each result to
* @param properties scheduler properties to attach to this job, e.g. fair scheduler pool name
*/
def submitJob[T, U](
rdd: RDD[T],
func: (TaskContext, Iterator[T]) => U,
partitions: Seq[Int],
callSite: CallSite,
resultHandler: (Int, U) => Unit,
properties: Properties): JobWaiter[U] = {
// Check to make sure we are not launching a task on a partition that does not exist.
val maxPartitions = rdd.partitions.length
partitions.find(p => p >= maxPartitions || p < 0).foreach { p =>
throw new IllegalArgumentException(
"Attempting to access a non-existent partition: " + p + ". " +
"Total number of partitions: " + maxPartitions)
}
val jobId = nextJobId.getAndIncrement()
if (partitions.size == 0) {
// Return immediately if the job is running 0 tasks
return new JobWaiter[U](this, jobId, 0, resultHandler)
}
assert(partitions.size > 0)
val func2 = func.asInstanceOf[(TaskContext, Iterator[_]) => _]
val waiter = new JobWaiter(this, jobId, partitions.size, resultHandler)
eventProcessLoop.post(JobSubmitted(
jobId, rdd, func2, partitions.toArray, callSite, waiter,
SerializationUtils.clone(properties)))
waiter
}
/**
* Run an action job on the given RDD and pass all the results to the resultHandler function as
* they arrive. Throws an exception if the job fails, or returns normally if successful.
*
* @param rdd target RDD to run tasks on
* @param func a function to run on each partition of the RDD
* @param partitions set of partitions to run on; some jobs may not want to compute on all
* partitions of the target RDD, e.g. for operations like first()
* @param callSite where in the user program this job was called
* @param resultHandler callback to pass each result to
* @param properties scheduler properties to attach to this job, e.g. fair scheduler pool name
*/
def runJob[T, U](
rdd: RDD[T],
func: (TaskContext, Iterator[T]) => U,
partitions: Seq[Int],
callSite: CallSite,
resultHandler: (Int, U) => Unit,
properties: Properties): Unit = {
val start = System.nanoTime
val waiter = submitJob(rdd, func, partitions, callSite, resultHandler, properties)
waiter.awaitResult() match {
case JobSucceeded =>
logInfo("Job %d finished: %s, took %f s".format
(waiter.jobId, callSite.shortForm, (System.nanoTime - start) / 1e9))
case JobFailed(exception: Exception) =>
logInfo("Job %d failed: %s, took %f s".format
(waiter.jobId, callSite.shortForm, (System.nanoTime - start) / 1e9))
// SPARK-8644: Include user stack trace in exceptions coming from DAGScheduler.
val callerStackTrace = Thread.currentThread().getStackTrace.tail
exception.setStackTrace(exception.getStackTrace ++ callerStackTrace)
throw exception
}
}
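// Hedged usage sketch (an assumption, not part of this file): user code typically reaches
// submitJob/runJob through SparkContext.runJob, e.g. summing each partition of an RDD:
//
//   val rdd          = sc.parallelize(1 to 100, numSlices = 4)
//   val perPartition = sc.runJob(rdd, (it: Iterator[Int]) => it.sum) // Array[Int], one value per partition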
/**
* Run an approximate job on the given RDD and pass all the results to an ApproximateEvaluator
* as they arrive. Returns a partial result object from the evaluator.
*
* @param rdd target RDD to run tasks on
* @param func a function to run on each partition of the RDD
* @param evaluator [[ApproximateEvaluator]] to receive the partial results
* @param callSite where in the user program this job was called
* @param timeout maximum time to wait for the job, in milliseconds
* @param properties scheduler properties to attach to this job, e.g. fair scheduler pool name
*/
def runApproximateJob[T, U, R](
rdd: RDD[T],
func: (TaskContext, Iterator[T]) => U,
evaluator: ApproximateEvaluator[U, R],
callSite: CallSite,
timeout: Long,
properties: Properties): PartialResult[R] = {
val listener = new ApproximateActionListener(rdd, func, evaluator, timeout)
val func2 = func.asInstanceOf[(TaskContext, Iterator[_]) => _]
val partitions = (0 until rdd.partitions.length).toArray
val jobId = nextJobId.getAndIncrement()
eventProcessLoop.post(JobSubmitted(
jobId, rdd, func2, partitions, callSite, listener, SerializationUtils.clone(properties)))
listener.awaitResult() // Will throw an exception if the job fails
}
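// Hedged usage sketch (an assumption, not part of this file): this path backs the approximate
// actions on RDD, e.g. a count that returns a partial result after at most 1000 ms:
//
//   val approx = rdd.countApprox(timeout = 1000L, confidence = 0.95)
//   println(approx.initialValue) // BoundedDouble with low/high bounds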
/**
* Submit a shuffle map stage to run independently and get a JobWaiter object back. The waiter
* can be used to block until the job finishes executing or can be used to cancel the job.
* This method is used for adaptive query planning, to run map stages and look at statistics
* about their outputs before submitting downstream stages.
*
* @param dependency the ShuffleDependency to run a map stage for
* @param callback function called with the result of the job, which in this case will be a
* single MapOutputStatistics object showing how much data was produced for each partition
* @param callSite where in the user program this job was submitted
* @param properties scheduler properties to attach to this job, e.g. fair scheduler pool name
*/
def submitMapStage[K, V, C](
dependency: ShuffleDependency[K, V, C],
callback: MapOutputStatistics => Unit,
callSite: CallSite,
properties: Properties): JobWaiter[MapOutputStatistics] = {
val rdd = dependency.rdd
val jobId = nextJobId.getAndIncrement()
if (rdd.partitions.length == 0) {
throw new SparkException("Can't run submitMapStage on RDD with 0 partitions")
}
// We create a JobWaiter with only one "task", which will be marked as complete when the whole
// map stage has completed, and will be passed the MapOutputStatistics for that stage.
// This makes it easier to avoid race conditions between the user code and the map output
// tracker that might result if we told the user the stage had finished, but then they queried
// the map output tracker after some node failures had caused the output statistics to be lost.
val waiter = new JobWaiter(this, jobId, 1, (i: Int, r: MapOutputStatistics) => callback(r))
eventProcessLoop.post(MapStageSubmitted(
jobId, dependency, callSite, waiter, SerializationUtils.clone(properties)))
waiter
}
/**
* Cancel a job that is running or waiting in the queue.
*/
def cancelJob(jobId: Int): Unit = {
logInfo("Asked to cancel job " + jobId)
eventProcessLoop.post(JobCancelled(jobId))
}
/**
* Cancel all jobs in the given job group ID.
*/
def cancelJobGroup(groupId: String): Unit = {
logInfo("Asked to cancel job group " + groupId)
eventProcessLoop.post(JobGroupCancelled(groupId))
}
/**
* Cancel all jobs that are running or waiting in the queue.
*/
def cancelAllJobs(): Unit = {
eventProcessLoop.post(AllJobsCancelled)
}
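// Hedged usage sketch (an assumption, not part of this file): these cancellation hooks are
// normally driven through SparkContext, e.g. tagging work with a job group on one thread and
// cancelling that group from another:
//
//   sc.setJobGroup("nightly-etl", "nightly ETL jobs", interruptOnCancel = true)
//   // ... run actions ...
//   sc.cancelJobGroup("nightly-etl") // ends up as a JobGroupCancelled event in this scheduler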
private[scheduler] def doCancelAllJobs() {
// Cancel all running jobs.
runningStages.map(_.firstJobId).foreach(handleJobCancellation(_,
reason = "as part of cancellation of all jobs"))
activeJobs.clear() // These should already be empty by this point,
jobIdToActiveJob.clear() // but just in case we lost track of some jobs...
submitWaitingStages()
}
/**
* Cancel all jobs associated with a running or scheduled stage.
*/
def cancelStage(stageId: Int) {
eventProcessLoop.post(StageCancelled(stageId))
}
/**
* Resubmit any failed stages. Ordinarily called after a small amount of time has passed since
* the last fetch failure.
*/
private[scheduler] def resubmitFailedStages() {
if (failedStages.size > 0) {
// Failed stages may be removed by job cancellation, so failedStages might be empty even if
// the ResubmitFailedStages event has been scheduled.
logInfo("Resubmitting failed stages")
clearCacheLocs()
val failedStagesCopy = failedStages.toArray
failedStages.clear()
for (stage <- failedStagesCopy.sortBy(_.firstJobId)) {
submitStage(stage)
}
}
submitWaitingStages()
}
/**
* Check for waiting or failed stages which are now eligible for resubmission.
* Ordinarily run on every iteration of the event loop.
*/
private def submitWaitingStages() {
// TODO: We might want to run this less often, when we are sure that something has become
// runnable that wasn't before.
logTrace("Checking for newly runnable parent stages")
logTrace("running: " + runningStages)
logTrace("waiting: " + waitingStages)
logTrace("failed: " + failedStages)
val waitingStagesCopy = waitingStages.toArray
waitingStages.clear()
for (stage <- waitingStagesCopy.sortBy(_.firstJobId)) {
submitStage(stage)
}
}
/** Finds the earliest-created active job that needs the stage */
// TODO: Probably should actually find among the active jobs that need this
// stage the one with the highest priority (highest-priority pool, earliest created).
// That should take care of at least part of the priority inversion problem with
// cross-job dependencies.
private def activeJobForStage(stage: Stage): Option[Int] = {
val jobsThatUseStage: Array[Int] = stage.jobIds.toArray.sorted
jobsThatUseStage.find(jobIdToActiveJob.contains)
}
private[scheduler] def handleJobGroupCancelled(groupId: String) {
// Cancel all jobs belonging to this job group.
// First finds all active jobs with this group id, and then kills stages for them.
val activeInGroup = activeJobs.filter { activeJob =>
Option(activeJob.properties).exists {
_.getProperty(SparkContext.SPARK_JOB_GROUP_ID) == groupId
}
}
val jobIds = activeInGroup.map(_.jobId)
jobIds.foreach(handleJobCancellation(_, "part of cancelled job group %s".format(groupId)))
submitWaitingStages()
}
private[scheduler] def handleBeginEvent(task: Task[_], taskInfo: TaskInfo) {
// Note that there is a chance that this task is launched after the stage is cancelled.
// In that case, we wouldn't have the stage anymore in stageIdToStage.
val stageAttemptId = stageIdToStage.get(task.stageId).map(_.latestInfo.attemptId).getOrElse(-1)
listenerBus.post(SparkListenerTaskStart(task.stageId, stageAttemptId, taskInfo))
submitWaitingStages()
}
private[scheduler] def handleTaskSetFailed(
taskSet: TaskSet,
reason: String,
exception: Option[Throwable]): Unit = {
stageIdToStage.get(taskSet.stageId).foreach { abortStage(_, reason, exception) }
submitWaitingStages()
}
private[scheduler] def cleanUpAfterSchedulerStop() {
for (job <- activeJobs) {
val error = new SparkException("Job cancelled because SparkContext was shut down")
job.listener.jobFailed(error)
// Tell the listeners that all of the running stages have ended. Don't bother
// cancelling the stages because if the DAG scheduler is stopped, the entire application
// is in the process of getting stopped.
val stageFailedMessage = "Stage cancelled because SparkContext was shut down"
// The `toArray` here is necessary so that we don't iterate over `runningStages` while
// mutating it.
runningStages.toArray.foreach { stage =>
markStageAsFinished(stage, Some(stageFailedMessage))
}
listenerBus.post(SparkListenerJobEnd(job.jobId, clock.getTimeMillis(), JobFailed(error)))
}
}
private[scheduler] def handleGetTaskResult(taskInfo: TaskInfo) {
listenerBus.post(SparkListenerTaskGettingResult(taskInfo))
submitWaitingStages()
}
private[scheduler] def handleJobSubmitted(jobId: Int,
finalRDD: RDD[_],
func: (TaskContext, Iterator[_]) => _,
partitions: Array[Int],
callSite: CallSite,
listener: JobListener,
properties: Properties) {
var finalStage: ResultStage = null
try {
// New stage creation may throw an exception if, for example, jobs are run on a
// HadoopRDD whose underlying HDFS files have been deleted.
finalStage = newResultStage(finalRDD, func, partitions, jobId, callSite)
} catch {
case e: Exception =>
logWarning("Creating new stage failed due to exception - job: " + jobId, e)
listener.jobFailed(e)
return
}
val job = new ActiveJob(jobId, finalStage, callSite, listener, properties)
clearCacheLocs()
logInfo("Got job %s (%s) with %d output partitions".format(
job.jobId, callSite.shortForm, partitions.length))
logInfo("Final stage: " + finalStage + " (" + finalStage.name + ")")
logInfo("Parents of final stage: " + finalStage.parents)
logInfo("Missing parents: " + getMissingParentStages(finalStage))
val jobSubmissionTime = clock.getTimeMillis()
jobIdToActiveJob(jobId) = job
activeJobs += job
finalStage.resultOfJob = Some(job)
val stageIds = jobIdToStageIds(jobId).toArray
val stageInfos = stageIds.flatMap(id => stageIdToStage.get(id).map(_.latestInfo))
listenerBus.post(
SparkListenerJobStart(job.jobId, jobSubmissionTime, stageInfos, properties))
submitStage(finalStage)
submitWaitingStages()
}
private[scheduler] def handleMapStageSubmitted(jobId: Int,
dependency: ShuffleDependency[_, _, _],
callSite: CallSite,
listener: JobListener,
properties: Properties) {
// Submitting this map stage might still require the creation of some parent stages, so make
// sure that happens.
var finalStage: ShuffleMapStage = null
try {
// New stage creation may throw an exception if, for example, jobs are run on a
// HadoopRDD whose underlying HDFS files have been deleted.
finalStage = getShuffleMapStage(dependency, jobId)
} catch {
case e: Exception =>
logWarning("Creating new stage failed due to exception - job: " + jobId, e)
listener.jobFailed(e)
return
}
val job = new ActiveJob(jobId, finalStage, callSite, listener, properties)
clearCacheLocs()
logInfo("Got map stage job %s (%s) with %d output partitions".format(
jobId, callSite.shortForm, dependency.rdd.partitions.size))
logInfo("Final stage: " + finalStage + " (" + finalStage.name + ")")
logInfo("Parents of final stage: " + finalStage.parents)
logInfo("Missing parents: " + getMissingParentStages(finalStage))
val jobSubmissionTime = clock.getTimeMillis()
jobIdToActiveJob(jobId) = job
activeJobs += job
finalStage.mapStageJobs = job :: finalStage.mapStageJobs
val stageIds = jobIdToStageIds(jobId).toArray
val stageInfos = stageIds.flatMap(id => stageIdToStage.get(id).map(_.latestInfo))
listenerBus.post(
SparkListenerJobStart(job.jobId, jobSubmissionTime, stageInfos, properties))
submitStage(finalStage)
// If the whole stage has already finished, tell the listener and remove it
if (finalStage.isAvailable) {
markMapStageJobAsFinished(job, mapOutputTracker.getStatistics(dependency))
}
submitWaitingStages()
}
/** Submits stage, but first recursively submits any missing parents. */
private def submitStage(stage: Stage) {
val jobId = activeJobForStage(stage)
if (jobId.isDefined) {
logDebug("submitStage(" + stage + ")")
if (!waitingStages(stage) && !runningStages(stage) && !failedStages(stage)) {
val missing = getMissingParentStages(stage).sortBy(_.id)
logDebug("missing: " + missing)
if (missing.isEmpty) {
logInfo("Submitting " + stage + " (" + stage.rdd + "), which has no missing parents")
submitMissingTasks(stage, jobId.get)
} else {
for (parent <- missing) {
submitStage(parent)
}
waitingStages += stage
}
}
} else {
abortStage(stage, "No active job for stage " + stage.id, None)
}
}
/** Called when a stage's parents are available and we can now submit its tasks. */
private def submitMissingTasks(stage: Stage, jobId: Int) {
logDebug("submitMissingTasks(" + stage + ")")
// Get our pending tasks and remember them in our pendingTasks entry
stage.pendingPartitions.clear()
// First figure out the indexes of partition ids to compute.
val partitionsToCompute: Seq[Int] = stage.findMissingPartitions()
// Create internal accumulators if the stage has no accumulators initialized.
// Reset internal accumulators only if this stage is not partially submitted
// Otherwise, we may override existing accumulator values from some tasks
if (stage.internalAccumulators.isEmpty || stage.numPartitions == partitionsToCompute.size) {
stage.resetInternalAccumulators()
}
val properties = jobIdToActiveJob.get(stage.firstJobId).map(_.properties).orNull
runningStages += stage
// SparkListenerStageSubmitted should be posted before testing whether tasks are
// serializable. If tasks are not serializable, a SparkListenerStageCompleted event
// will be posted, which should always come after a corresponding SparkListenerStageSubmitted
// event.
outputCommitCoordinator.stageStart(stage.id)
val taskIdToLocations = try {
stage match {
case s: ShuffleMapStage =>
partitionsToCompute.map { id => (id, getPreferredLocs(stage.rdd, id))}.toMap
case s: ResultStage =>
val job = s.resultOfJob.get
partitionsToCompute.map { id =>
val p = s.partitions(id)
(id, getPreferredLocs(stage.rdd, p))
}.toMap
}
} catch {
case NonFatal(e) =>
stage.makeNewStageAttempt(partitionsToCompute.size)
listenerBus.post(SparkListenerStageSubmitted(stage.latestInfo, properties))
abortStage(stage, s"Task creation failed: $e\n${e.getStackTraceString}", Some(e))
runningStages -= stage
return
}
stage.makeNewStageAttempt(partitionsToCompute.size, taskIdToLocations.values.toSeq)
listenerBus.post(SparkListenerStageSubmitted(stage.latestInfo, properties))
// TODO: Maybe we can keep the taskBinary in Stage to avoid serializing it multiple times.
// Broadcasted binary for the task, used to dispatch tasks to executors. Note that we broadcast
// the serialized copy of the RDD and for each task we will deserialize it, which means each
// task gets a different copy of the RDD. This provides stronger isolation between tasks that
// might modify state of objects referenced in their closures. This is necessary in Hadoop
// where the JobConf/Configuration object is not thread-safe.
var taskBinary: Broadcast[Array[Byte]] = null
try {
// For ShuffleMapTask, serialize and broadcast (rdd, shuffleDep).
// For ResultTask, serialize and broadcast (rdd, func).
val taskBinaryBytes: Array[Byte] = stage match {
case stage: ShuffleMapStage =>
closureSerializer.serialize((stage.rdd, stage.shuffleDep): AnyRef).array()
case stage: ResultStage =>
closureSerializer.serialize((stage.rdd, stage.func): AnyRef).array()
}
taskBinary = sc.broadcast(taskBinaryBytes)
} catch {
// In the case of a failure during serialization, abort the stage.
case e: NotSerializableException =>
abortStage(stage, "Task not serializable: " + e.toString, Some(e))
runningStages -= stage
// Abort execution
return
case NonFatal(e) =>
abortStage(stage, s"Task serialization failed: $e\n${e.getStackTraceString}", Some(e))
runningStages -= stage
return
}
val tasks: Seq[Task[_]] = try {
stage match {
case stage: ShuffleMapStage =>
partitionsToCompute.map { id =>
val locs = taskIdToLocations(id)
val part = stage.rdd.partitions(id)
new ShuffleMapTask(stage.id, stage.latestInfo.attemptId,
taskBinary, part, locs, stage.internalAccumulators)
}
case stage: ResultStage =>
val job = stage.resultOfJob.get
partitionsToCompute.map { id =>
val p: Int = stage.partitions(id)
val part = stage.rdd.partitions(p)
val locs = taskIdToLocations(id)
new ResultTask(stage.id, stage.latestInfo.attemptId,
taskBinary, part, locs, id, stage.internalAccumulators)
}
}
} catch {
case NonFatal(e) =>
abortStage(stage, s"Task creation failed: $e\n${e.getStackTraceString}", Some(e))
runningStages -= stage
return
}
if (tasks.size > 0) {
logInfo("Submitting " + tasks.size + " missing tasks from " + stage + " (" + stage.rdd + ")")
stage.pendingPartitions ++= tasks.map(_.partitionId)
logDebug("New pending partitions: " + stage.pendingPartitions)
taskScheduler.submitTasks(new TaskSet(
tasks.toArray, stage.id, stage.latestInfo.attemptId, stage.firstJobId, properties))
stage.latestInfo.submissionTime = Some(clock.getTimeMillis())
} else {
// Because we posted SparkListenerStageSubmitted earlier, we should mark
// the stage as completed here in case there are no tasks to run
markStageAsFinished(stage, None)
val debugString = stage match {
case stage: ShuffleMapStage =>
s"Stage ${stage} is actually done; " +
s"(available: ${stage.isAvailable}," +
s"available outputs: ${stage.numAvailableOutputs}," +
s"partitions: ${stage.numPartitions})"
case stage : ResultStage =>
s"Stage ${stage} is actually done; (partitions: ${stage.numPartitions})"
}
logDebug(debugString)
}
}
/** Merge updates from a task to our local accumulator values */
private def updateAccumulators(event: CompletionEvent): Unit = {
val task = event.task
val stage = stageIdToStage(task.stageId)
if (event.accumUpdates != null) {
try {
Accumulators.add(event.accumUpdates)
event.accumUpdates.foreach { case (id, partialValue) =>
// In this instance, although the reference in Accumulators.originals is a WeakRef,
// it's guaranteed to exist since the event.accumUpdates Map exists
val acc = Accumulators.originals(id).get match {
case Some(accum) => accum.asInstanceOf[Accumulable[Any, Any]]
case None => throw new NullPointerException("Non-existent reference to Accumulator")
}
// To avoid UI cruft, ignore cases where value wasn't updated
if (acc.name.isDefined && partialValue != acc.zero) {
val name = acc.name.get
val value = s"${acc.value}"
stage.latestInfo.accumulables(id) =
new AccumulableInfo(id, name, None, value, acc.isInternal)
event.taskInfo.accumulables +=
new AccumulableInfo(id, name, Some(s"$partialValue"), value, acc.isInternal)
}
}
} catch {
// If we see an exception during accumulator update, just log the
// error and move on.
case e: Exception =>
logError(s"Failed to update accumulators for $task", e)
}
}
}
/**
* Responds to a task finishing. This is called inside the event loop so it assumes that it can
* modify the scheduler's internal state. Use taskEnded() to post a task end event from outside.
*/
private[scheduler] def handleTaskCompletion(event: CompletionEvent) {
val task = event.task
val stageId = task.stageId
val taskType = Utils.getFormattedClassName(task)
outputCommitCoordinator.taskCompleted(
stageId,
task.partitionId,
event.taskInfo.attemptNumber, // this is a task attempt number
event.reason)
// The success case is dealt with separately below, since we need to compute accumulator
// updates before posting.
if (event.reason != Success) {
val attemptId = task.stageAttemptId
listenerBus.post(SparkListenerTaskEnd(stageId, attemptId, taskType, event.reason,
event.taskInfo, event.taskMetrics))
}
if (!stageIdToStage.contains(task.stageId)) {
// Skip all the actions if the stage has been cancelled.
return
}
val stage = stageIdToStage(task.stageId)
event.reason match {
case Success =>
listenerBus.post(SparkListenerTaskEnd(stageId, stage.latestInfo.attemptId, taskType,
event.reason, event.taskInfo, event.taskMetrics))
stage.pendingPartitions -= task.partitionId
task match {
case rt: ResultTask[_, _] =>
// Cast to ResultStage here because it's part of the ResultTask
// TODO Refactor this out to a function that accepts a ResultStage
val resultStage = stage.asInstanceOf[ResultStage]
resultStage.resultOfJob match {
case Some(job) =>
if (!job.finished(rt.outputId)) {
updateAccumulators(event)
job.finished(rt.outputId) = true
job.numFinished += 1
// If the whole job has finished, remove it
if (job.numFinished == job.numPartitions) {
markStageAsFinished(resultStage)
cleanupStateForJobAndIndependentStages(job)
listenerBus.post(
SparkListenerJobEnd(job.jobId, clock.getTimeMillis(), JobSucceeded))
}
// taskSucceeded runs some user code that might throw an exception. Make sure
// we are resilient against that.
try {
job.listener.taskSucceeded(rt.outputId, event.result)
} catch {
case e: Exception =>
// TODO: Perhaps we want to mark the resultStage as failed?
job.listener.jobFailed(new SparkDriverExecutionException(e))
}
}
case None =>
logInfo("Ignoring result from " + rt + " because its job has finished")
}
case smt: ShuffleMapTask =>
val shuffleStage = stage.asInstanceOf[ShuffleMapStage]
updateAccumulators(event)
val status = event.result.asInstanceOf[MapStatus]
val execId = status.location.executorId
logDebug("ShuffleMapTask finished on " + execId)
if (failedEpoch.contains(execId) && smt.epoch <= failedEpoch(execId)) {
logInfo(s"Ignoring possibly bogus $smt completion from executor $execId")
} else {
shuffleStage.addOutputLoc(smt.partitionId, status)
}
if (runningStages.contains(shuffleStage) && shuffleStage.pendingPartitions.isEmpty) {
markStageAsFinished(shuffleStage)
logInfo("looking for newly runnable stages")
logInfo("running: " + runningStages)
logInfo("waiting: " + waitingStages)
logInfo("failed: " + failedStages)
// We supply true to increment the epoch number here in case this is a
// recomputation of the map outputs. In that case, some nodes may have cached
// locations with holes (from when we detected the error) and will need the
// epoch incremented to refetch them.
// TODO: Only increment the epoch number if this is not the first time
// we registered these map outputs.
mapOutputTracker.registerMapOutputs(
shuffleStage.shuffleDep.shuffleId,
shuffleStage.outputLocs.map(_.headOption.orNull),
changeEpoch = true)
clearCacheLocs()
if (!shuffleStage.isAvailable) {
// Some tasks had failed; let's resubmit this shuffleStage
// TODO: Lower-level scheduler should also deal with this
logInfo("Resubmitting " + shuffleStage + " (" + shuffleStage.name +
") because some of its tasks had failed: " +
shuffleStage.outputLocs.zipWithIndex.filter(_._1.isEmpty)
.map(_._2).mkString(", "))
submitStage(shuffleStage)
} else {
// Mark any map-stage jobs waiting on this stage as finished
if (shuffleStage.mapStageJobs.nonEmpty) {
val stats = mapOutputTracker.getStatistics(shuffleStage.shuffleDep)
for (job <- shuffleStage.mapStageJobs) {
markMapStageJobAsFinished(job, stats)
}
}
}
// Note: newly runnable stages will be submitted below when we submit waiting stages
}
}
case Resubmitted =>
logInfo("Resubmitted " + task + ", so marking it as still running")
stage.pendingPartitions += task.partitionId
case FetchFailed(bmAddress, shuffleId, mapId, reduceId, failureMessage) =>
val failedStage = stageIdToStage(task.stageId)
val mapStage = shuffleToMapStage(shuffleId)
if (failedStage.latestInfo.attemptId != task.stageAttemptId) {
logInfo(s"Ignoring fetch failure from $task as it's from $failedStage attempt" +
s" ${task.stageAttemptId} and there is a more recent attempt for that stage " +
s"(attempt ID ${failedStage.latestInfo.attemptId}) running")
} else {
// It is likely that we receive multiple FetchFailed for a single stage (because we have
// multiple tasks running concurrently on different executors). In that case, it is
// possible the fetch failure has already been handled by the scheduler.
if (runningStages.contains(failedStage)) {
logInfo(s"Marking $failedStage (${failedStage.name}) as failed " +
s"due to a fetch failure from $mapStage (${mapStage.name})")
markStageAsFinished(failedStage, Some(failureMessage))
} else {
logDebug(s"Received fetch failure from $task, but its from $failedStage which is no " +
s"longer running")
}
if (disallowStageRetryForTest) {
abortStage(failedStage, "Fetch failure will not retry stage due to testing config",
None)
} else if (failedStage.failedOnFetchAndShouldAbort(task.stageAttemptId)) {
abortStage(failedStage, s"$failedStage (${failedStage.name}) " +
s"has failed the maximum allowable number of " +
s"times: ${Stage.MAX_CONSECUTIVE_FETCH_FAILURES}. " +
s"Most recent failure reason: ${failureMessage}", None)
} else if (failedStages.isEmpty) {
// Don't schedule an event to resubmit failed stages if failedStages isn't empty, because
// in that case the event will already have been scheduled.
// TODO: Cancel running tasks in the stage
logInfo(s"Resubmitting $mapStage (${mapStage.name}) and " +
s"$failedStage (${failedStage.name}) due to fetch failure")
messageScheduler.schedule(new Runnable {
override def run(): Unit = eventProcessLoop.post(ResubmitFailedStages)
}, DAGScheduler.RESUBMIT_TIMEOUT, TimeUnit.MILLISECONDS)
}
failedStages += failedStage
failedStages += mapStage
// Mark the map whose fetch failed as broken in the map stage
if (mapId != -1) {
mapStage.removeOutputLoc(mapId, bmAddress)
mapOutputTracker.unregisterMapOutput(shuffleId, mapId, bmAddress)
}
// TODO: mark the executor as failed only if there were lots of fetch failures on it
if (bmAddress != null) {
handleExecutorLost(bmAddress.executorId, fetchFailed = true, Some(task.epoch))
}
}
case commitDenied: TaskCommitDenied =>
// Do nothing here, left up to the TaskScheduler to decide how to handle denied commits
case exceptionFailure: ExceptionFailure =>
// Do nothing here, left up to the TaskScheduler to decide how to handle user failures
case TaskResultLost =>
// Do nothing here; the TaskScheduler handles these failures and resubmits the task.
case other =>
// Unrecognized failure - also do nothing. If the task fails repeatedly, the TaskScheduler
// will abort the job.
}
submitWaitingStages()
}
/**
* Responds to an executor being lost. This is called inside the event loop, so it assumes it can
* modify the scheduler's internal state. Use executorLost() to post a loss event from outside.
*
* We will also assume that we've lost all shuffle blocks associated with the executor if the
* executor serves its own blocks (i.e., we're not using external shuffle) OR a FetchFailed
* occurred, in which case we presume all shuffle data related to this executor to be lost.
*
* Optionally, the epoch during which the failure was caught can be passed to prevent stray
* fetch failures from retriggering the detection of a node as lost.
*/
private[scheduler] def handleExecutorLost(
execId: String,
fetchFailed: Boolean,
maybeEpoch: Option[Long] = None) {
val currentEpoch = maybeEpoch.getOrElse(mapOutputTracker.getEpoch)
if (!failedEpoch.contains(execId) || failedEpoch(execId) < currentEpoch) {
failedEpoch(execId) = currentEpoch
logInfo("Executor lost: %s (epoch %d)".format(execId, currentEpoch))
blockManagerMaster.removeExecutor(execId)
if (!env.blockManager.externalShuffleServiceEnabled || fetchFailed) {
// TODO: This will be really slow if we keep accumulating shuffle map stages
for ((shuffleId, stage) <- shuffleToMapStage) {
stage.removeOutputsOnExecutor(execId)
val locs = stage.outputLocs.map(_.headOption.orNull)
mapOutputTracker.registerMapOutputs(shuffleId, locs, changeEpoch = true)
}
if (shuffleToMapStage.isEmpty) {
mapOutputTracker.incrementEpoch()
}
clearCacheLocs()
}
} else {
logDebug("Additional executor lost message for " + execId +
"(epoch " + currentEpoch + ")")
}
submitWaitingStages()
}
private[scheduler] def handleExecutorAdded(execId: String, host: String) {
// remove from failedEpoch(execId) ?
if (failedEpoch.contains(execId)) {
logInfo("Host added was in lost list earlier: " + host)
failedEpoch -= execId
}
submitWaitingStages()
}
private[scheduler] def handleStageCancellation(stageId: Int) {
stageIdToStage.get(stageId) match {
case Some(stage) =>
val jobsThatUseStage: Array[Int] = stage.jobIds.toArray
jobsThatUseStage.foreach { jobId =>
handleJobCancellation(jobId, s"because Stage $stageId was cancelled")
}
case None =>
logInfo("No active jobs to kill for Stage " + stageId)
}
submitWaitingStages()
}
private[scheduler] def handleJobCancellation(jobId: Int, reason: String = "") {
if (!jobIdToStageIds.contains(jobId)) {
logDebug("Trying to cancel unregistered job " + jobId)
} else {
failJobAndIndependentStages(
jobIdToActiveJob(jobId), "Job %d cancelled %s".format(jobId, reason))
}
submitWaitingStages()
}
/**
* Marks a stage as finished and removes it from the list of running stages.
*/
private def markStageAsFinished(stage: Stage, errorMessage: Option[String] = None): Unit = {
val serviceTime = stage.latestInfo.submissionTime match {
case Some(t) => "%.03f".format((clock.getTimeMillis() - t) / 1000.0)
case _ => "Unknown"
}
if (errorMessage.isEmpty) {
logInfo("%s (%s) finished in %s s".format(stage, stage.name, serviceTime))
stage.latestInfo.completionTime = Some(clock.getTimeMillis())
// Clear failure count for this stage, now that it's succeeded.
// We only limit consecutive failures of stage attempts, so that if a stage is
// re-used many times in a long-running job, unrelated failures don't eventually cause the
// stage to be aborted.
stage.clearFailures()
} else {
stage.latestInfo.stageFailed(errorMessage.get)
logInfo("%s (%s) failed in %s s".format(stage, stage.name, serviceTime))
}
outputCommitCoordinator.stageEnd(stage.id)
listenerBus.post(SparkListenerStageCompleted(stage.latestInfo))
runningStages -= stage
}
/**
* Aborts all jobs depending on a particular Stage. This is called in response to a task set
* being canceled by the TaskScheduler. Use taskSetFailed() to inject this event from outside.
*/
private[scheduler] def abortStage(
failedStage: Stage,
reason: String,
exception: Option[Throwable]): Unit = {
if (!stageIdToStage.contains(failedStage.id)) {
// Skip all the actions if the stage has been removed.
return
}
val dependentJobs: Seq[ActiveJob] =
activeJobs.filter(job => stageDependsOn(job.finalStage, failedStage)).toSeq
failedStage.latestInfo.completionTime = Some(clock.getTimeMillis())
for (job <- dependentJobs) {
failJobAndIndependentStages(job, s"Job aborted due to stage failure: $reason", exception)
}
if (dependentJobs.isEmpty) {
logInfo("Ignoring failure of " + failedStage + " because all jobs depending on it are done")
}
}
/** Fails a job and all stages that are only used by that job, and cleans up relevant state. */
private def failJobAndIndependentStages(
job: ActiveJob,
failureReason: String,
exception: Option[Throwable] = None): Unit = {
val error = new SparkException(failureReason, exception.getOrElse(null))
var ableToCancelStages = true
val shouldInterruptThread =
if (job.properties == null) false
else job.properties.getProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, "false").toBoolean
// Cancel all independent, running stages.
val stages = jobIdToStageIds(job.jobId)
if (stages.isEmpty) {
logError("No stages registered for job " + job.jobId)
}
stages.foreach { stageId =>
val jobsForStage: Option[HashSet[Int]] = stageIdToStage.get(stageId).map(_.jobIds)
if (jobsForStage.isEmpty || !jobsForStage.get.contains(job.jobId)) {
logError(
"Job %d not registered for stage %d even though that stage was registered for the job"
.format(job.jobId, stageId))
} else if (jobsForStage.get.size == 1) {
if (!stageIdToStage.contains(stageId)) {
logError(s"Missing Stage for stage with id $stageId")
} else {
// This is the only job that uses this stage, so fail the stage if it is running.
val stage = stageIdToStage(stageId)
if (runningStages.contains(stage)) {
try { // cancelTasks will fail if a SchedulerBackend does not implement killTask
taskScheduler.cancelTasks(stageId, shouldInterruptThread)
markStageAsFinished(stage, Some(failureReason))
} catch {
case e: UnsupportedOperationException =>
logInfo(s"Could not cancel tasks for stage $stageId", e)
ableToCancelStages = false
}
}
}
}
}
if (ableToCancelStages) {
job.listener.jobFailed(error)
cleanupStateForJobAndIndependentStages(job)
listenerBus.post(SparkListenerJobEnd(job.jobId, clock.getTimeMillis(), JobFailed(error)))
}
}
/** Return true if one of stage's ancestors is target. */
private def stageDependsOn(stage: Stage, target: Stage): Boolean = {
if (stage == target) {
return true
}
val visitedRdds = new HashSet[RDD[_]]
// We are manually maintaining a stack here to prevent StackOverflowError
// caused by recursively visiting
val waitingForVisit = new Stack[RDD[_]]
def visit(rdd: RDD[_]) {
if (!visitedRdds(rdd)) {
visitedRdds += rdd
for (dep <- rdd.dependencies) {
dep match {
case shufDep: ShuffleDependency[_, _, _] =>
val mapStage = getShuffleMapStage(shufDep, stage.firstJobId)
if (!mapStage.isAvailable) {
waitingForVisit.push(mapStage.rdd)
} // Otherwise there's no need to follow the dependency back
case narrowDep: NarrowDependency[_] =>
waitingForVisit.push(narrowDep.rdd)
}
}
}
}
waitingForVisit.push(stage.rdd)
while (waitingForVisit.nonEmpty) {
visit(waitingForVisit.pop())
}
visitedRdds.contains(target.rdd)
}
/**
* Gets the locality information associated with a partition of a particular RDD.
*
* This method is thread-safe and is called from both DAGScheduler and SparkContext.
*
* @param rdd whose partitions are to be looked at
* @param partition to lookup locality information for
* @return list of machines that are preferred by the partition
*/
private[spark]
def getPreferredLocs(rdd: RDD[_], partition: Int): Seq[TaskLocation] = {
getPreferredLocsInternal(rdd, partition, new HashSet)
}
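// Hedged illustration (an assumption, not part of this file): the same locality information is
// visible to user code through RDD.preferredLocations, e.g.:
//
//   val rdd  = sc.textFile("hdfs://namenode/data/input.txt")
//   val locs = rdd.partitions.map(p => rdd.preferredLocations(p)) // preferred hosts per partition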
/**
* Recursive implementation for getPreferredLocs.
*
* This method is thread-safe because it only accesses DAGScheduler state through thread-safe
* methods (getCacheLocs()); please be careful when modifying this method, because any new
* DAGScheduler state accessed by it may require additional synchronization.
*/
private def getPreferredLocsInternal(
rdd: RDD[_],
partition: Int,
visited: HashSet[(RDD[_], Int)]): Seq[TaskLocation] = {
// If the partition has already been visited, no need to re-visit.
// This avoids exponential path exploration. SPARK-695
if (!visited.add((rdd, partition))) {
// Nil has already been returned for previously visited partitions.
return Nil
}
// If the partition is cached, return the cache locations
val cached = getCacheLocs(rdd)(partition)
if (cached.nonEmpty) {
return cached
}
// If the RDD has some placement preferences (as is the case for input RDDs), get those
val rddPrefs = rdd.preferredLocations(rdd.partitions(partition)).toList
if (rddPrefs.nonEmpty) {
return rddPrefs.map(TaskLocation(_))
}
// If the RDD has narrow dependencies, pick the first partition of the first narrow dependency
// that has any placement preferences. Ideally we would choose based on transfer sizes,
// but this will do for now.
rdd.dependencies.foreach {
case n: NarrowDependency[_] =>
for (inPart <- n.getParents(partition)) {
val locs = getPreferredLocsInternal(n.rdd, inPart, visited)
if (locs != Nil) {
return locs
}
}
case _ =>
}
Nil
}
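// Editorial summary of the lookup order above (not original code): already-visited
// partitions short-circuit to Nil, then cache locations win, then the RDD's own
// preferred locations, and finally the first narrow parent partition that reports any
// preference; a partition matching none of these yields Nil.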
/** Mark a map stage job as finished with the given output stats, and report to its listener. */
def markMapStageJobAsFinished(job: ActiveJob, stats: MapOutputStatistics): Unit = {
// In map stage jobs, we only create a single "task", which is to finish all of the stage
// (including reusing any previous map outputs, etc); so we just mark task 0 as done
job.finished(0) = true
job.numFinished += 1
job.listener.taskSucceeded(0, stats)
cleanupStateForJobAndIndependentStages(job)
listenerBus.post(SparkListenerJobEnd(job.jobId, clock.getTimeMillis(), JobSucceeded))
}
def stop() {
logInfo("Stopping DAGScheduler")
messageScheduler.shutdownNow()
eventProcessLoop.stop()
taskScheduler.stop()
}
// Start the event thread and register the metrics source at the end of the constructor
env.metricsSystem.registerSource(metricsSource)
eventProcessLoop.start()
}
private[scheduler] class DAGSchedulerEventProcessLoop(dagScheduler: DAGScheduler)
extends EventLoop[DAGSchedulerEvent]("dag-scheduler-event-loop") with Logging {
private[this] val timer = dagScheduler.metricsSource.messageProcessingTimer
/**
* The main event loop of the DAG scheduler.
*/
override def onReceive(event: DAGSchedulerEvent): Unit = {
val timerContext = timer.time()
try {
doOnReceive(event)
} finally {
timerContext.stop()
}
}
private def doOnReceive(event: DAGSchedulerEvent): Unit = event match {
case JobSubmitted(jobId, rdd, func, partitions, callSite, listener, properties) =>
dagScheduler.handleJobSubmitted(jobId, rdd, func, partitions, callSite, listener, properties)
case MapStageSubmitted(jobId, dependency, callSite, listener, properties) =>
dagScheduler.handleMapStageSubmitted(jobId, dependency, callSite, listener, properties)
case StageCancelled(stageId) =>
dagScheduler.handleStageCancellation(stageId)
case JobCancelled(jobId) =>
dagScheduler.handleJobCancellation(jobId)
case JobGroupCancelled(groupId) =>
dagScheduler.handleJobGroupCancelled(groupId)
case AllJobsCancelled =>
dagScheduler.doCancelAllJobs()
case ExecutorAdded(execId, host) =>
dagScheduler.handleExecutorAdded(execId, host)
case ExecutorLost(execId) =>
dagScheduler.handleExecutorLost(execId, fetchFailed = false)
case BeginEvent(task, taskInfo) =>
dagScheduler.handleBeginEvent(task, taskInfo)
case GettingResultEvent(taskInfo) =>
dagScheduler.handleGetTaskResult(taskInfo)
case completion @ CompletionEvent(task, reason, _, _, taskInfo, taskMetrics) =>
dagScheduler.handleTaskCompletion(completion)
case TaskSetFailed(taskSet, reason, exception) =>
dagScheduler.handleTaskSetFailed(taskSet, reason, exception)
case ResubmitFailedStages =>
dagScheduler.resubmitFailedStages()
}
override def onError(e: Throwable): Unit = {
logError("DAGSchedulerEventProcessLoop failed; shutting down SparkContext", e)
try {
dagScheduler.doCancelAllJobs()
} catch {
case t: Throwable => logError("DAGScheduler failed to cancel all jobs.", t)
}
dagScheduler.sc.stop()
}
override def onStop(): Unit = {
// Cancel any active jobs in postStop hook
dagScheduler.cleanUpAfterSchedulerStop()
}
}
private[spark] object DAGScheduler {
// The time, in millis, to wait for fetch failure events to stop coming in after one is detected;
// this is a simplistic way to avoid resubmitting tasks in the non-fetchable map stage one by one
// as more failure events come in
val RESUBMIT_TIMEOUT = 200
}
|
pronix/spark
|
core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala
|
Scala
|
apache-2.0
| 69,812
|
package de.choffmeister.microserviceutils.auth.utils
import java.security.SecureRandom
private[auth] object SecretGenerator {
private lazy val random = new SecureRandom()
def generate(bytesLength: Int): Array[Byte] = {
val bytes = new Array[Byte](bytesLength)
random.nextBytes(bytes)
bytes
}
def generateHex(bytesLength: Int): String = {
val bytes = generate(bytesLength)
val sb = new StringBuilder
bytes.foreach(b => sb.append(String.format("%02x", Byte.box(b))))
sb.toString
}
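// Editorial usage sketch (not part of the original file): 16 random bytes render
// as a 32-character lowercase hex string, e.g. for generating an API secret:
//   val secret = SecretGenerator.generateHex(16) // e.g. "9f86d081884c7d65..." (random)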
}
|
choffmeister/microservice-utils
|
microservice-utils-auth/src/main/scala/de/choffmeister/microserviceutils/auth/utils/SecretGenerator.scala
|
Scala
|
mit
| 523
|
package revisions
import scala.collection.concurrent
abstract class AbstractVersioned[T] extends Versioned {
protected val versions: concurrent.Map[Int, T] =
new concurrent.TrieMap[Int, T]
// a cache of the last read or write to this versioned object
// the lower 16 bits contain the version number of the segment
// the higher 16 bits contain the index of the value in the map
@volatile
protected var cache: Int = 0
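// Editorial illustration of the packing scheme above (not original code): for a map
// index of 3 and a segment version of 7 the cache word is
//   (3 << 16) | (7 & 0xFFFF) == 0x00030007
// so (cache & 0xFFFF) recovers the version and (cache >>> 16) recovers the index.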
@inline
final def get: T =
get(Revision.currentRevision.get())
@inline
final def get(rev: Revision[_]): T =
get(rev.current)
/* lookup the index in the map for the value in the revision
* history starting from the given segment.
* Returns -1 if there is no defined value (assumes that
* the global segment version counter starts from zero.)
*/
final protected def getIndex(seg: Segment): Int = {
val c = cache
if (seg.version == (c & 0xFFFF)) {
c >>> 16
} else {
var s = seg
while (!versions.contains(s.version)) {
s = s.parent
if (s eq null) return -1
}
cache = ((s.version << 16) | (seg.version & 0xFFFF))
s.version
}
}
/* throws NoSuchElementException if there is no
* value in the revision history starting from
* the given segment
*/
final protected def get(seg: Segment): T = {
val idx = getIndex(seg)
if (idx < 0)
throw new NoSuchElementException
else
versions(idx)
}
final protected def set(rev: Revision[_], x: T): Unit = {
val seg = rev.current
val v = seg.version
if (!versions.contains(v)) {
seg.written += this
}
cache = ((v << 16) | (v & 0xFFFF))
versions(v) = x
}
// release the value stored for a segment
final override def release(release: Segment): Unit =
versions.remove(release.version)
// collapse the value in the parent segment into the main revision
final override def collapse(main: Revision[_], parent: Segment): Unit = {
if (!versions.contains(main.current.version)) {
// if not already written in main, copy from parent
set(main, versions(parent.version))
}
// release value for parent
versions.remove(parent.version)
}
protected def computeMerge(main: Revision[_], joinRev: Revision[_], join: Segment): T
final override def merge(main: Revision[_], joinRev: Revision[_], join: Segment): Unit = {
require(versions.contains(join.version))
// walk back up the segment history of the revision to be joined
var s: Segment = joinRev.current
while (!versions.contains(s.version)) {
// while this value does not have a version in the segment
s = s.parent
}
if (s eq join) {
// only merge if the join segment was the last write
// in the segment history of the join revision
// merge the value into the master revision
set(main, computeMerge(main, joinRev, join))
}
}
}
|
dwhjames/scala-concurrent-revisions
|
src/main/scala/revisions/AbstractVersioned.scala
|
Scala
|
bsd-3-clause
| 2,924
|
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.serializer.ModuleSerializationTest
import scala.util.Random
class ViewSerialTest extends ModuleSerializationTest {
override def test(): Unit = {
val view = View[Float](Array(2, 5)).setName("view")
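// Editorial note: View is expected to reshape its input, so the 1 x 10 tensor created
// below should be presented as a 2 x 5 tensor during the serialization round trip.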
val input = Tensor[Float](1, 10).apply1(_ => Random.nextFloat())
runSerializationTest(view, input)
}
}
|
wzhongyuan/BigDL
|
spark/dl/src/test/scala/com/intel/analytics/bigdl/nn/ViewSpec.scala
|
Scala
|
apache-2.0
| 1,045
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.functions.utils
import org.apache.flink.api.common.functions.InvalidTypesException
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.common.typeutils.CompositeType
import org.apache.flink.api.java.typeutils.{PojoField, PojoTypeInfo, TypeExtractor}
import org.apache.flink.table.api.dataview._
import org.apache.flink.table.api.{TableException, ValidationException}
import org.apache.flink.table.calcite.FlinkTypeFactory
import org.apache.flink.table.dataview.{ListViewTypeInfo, MapViewTypeInfo}
import org.apache.flink.table.functions._
import org.apache.flink.table.plan.schema.FlinkTableFunctionImpl
import org.apache.flink.table.typeutils.FieldInfoUtils
import com.google.common.primitives.Primitives
import org.apache.calcite.rel.`type`.{RelDataType, RelDataTypeFactory}
import org.apache.calcite.sql.`type`.SqlOperandTypeChecker.Consistency
import org.apache.calcite.sql.`type`._
import org.apache.calcite.sql.{SqlCallBinding, SqlFunction, SqlOperandCountRange, SqlOperator}
import java.lang.reflect.{Method, Modifier}
import java.lang.{Integer => JInt, Long => JLong}
import java.sql.{Date, Time, Timestamp}
import java.util
import scala.collection.mutable
object UserDefinedFunctionUtils {
// ----------------------------------------------------------------------------------------------
// Utilities for user-defined methods
// ----------------------------------------------------------------------------------------------
/**
* Returns the signature of the eval method matching the given signature of [[TypeInformation]].
* Elements of the signature can be null (act as a wildcard).
*/
def getEvalMethodSignature(
function: UserDefinedFunction,
signature: Seq[TypeInformation[_]])
: Option[Array[Class[_]]] = {
getUserDefinedMethod(function, "eval", typeInfoToClass(signature)).map(_.getParameterTypes)
}
/**
* Returns the signature of the accumulate method matching the given signature
* of [[TypeInformation]]. Elements of the signature can be null (act as a wildcard).
*/
def getAccumulateMethodSignature(
function: ImperativeAggregateFunction[_, _],
signature: Seq[TypeInformation[_]])
: Option[Array[Class[_]]] = {
val accType = TypeExtractor.createTypeInfo(
function, classOf[ImperativeAggregateFunction[_, _]], function.getClass, 1)
val input = (Array(accType) ++ signature).toSeq
getUserDefinedMethod(
function,
"accumulate",
typeInfoToClass(input)).map(_.getParameterTypes)
}
def getParameterTypes(
function: UserDefinedFunction,
signature: Array[Class[_]]): Array[TypeInformation[_]] = {
signature.map { c =>
try {
TypeExtractor.getForClass(c)
} catch {
case ite: InvalidTypesException =>
throw new ValidationException(
s"Parameter types of function '${function.getClass.getCanonicalName}' cannot be " +
s"automatically determined. Please provide type information manually.")
}
}
}
/**
* Returns user defined method matching the given name and signature.
*
* @param function function instance
* @param methodName method name
* @param methodSignature an array of raw Java classes. We compare the raw Java classes not the
* TypeInformation. TypeInformation does not matter during runtime (e.g.
* within a MapFunction)
*/
def getUserDefinedMethod(
function: UserDefinedFunction,
methodName: String,
methodSignature: Array[Class[_]])
: Option[Method] = {
val methods = checkAndExtractMethods(function, methodName)
val filtered = methods
// go over all the methods and filter out matching methods
.filter {
case cur if !cur.isVarArgs =>
val signatures = cur.getParameterTypes
// match parameters of signature to actual parameters
methodSignature.length == signatures.length &&
signatures.zipWithIndex.forall { case (clazz, i) =>
parameterTypeApplicable(methodSignature(i), clazz)
}
case cur if cur.isVarArgs =>
val signatures = cur.getParameterTypes
methodSignature.zipWithIndex.forall {
// non-varargs
case (clazz, i) if i < signatures.length - 1 =>
parameterTypeApplicable(clazz, signatures(i))
// varargs
case (clazz, i) if i >= signatures.length - 1 =>
parameterTypeApplicable(clazz, signatures.last.getComponentType)
} || (methodSignature.isEmpty && signatures.length == 1) // empty varargs
}
// if there is a fixed method, compiler will call this method preferentially
val fixedMethodsCount = filtered.count(!_.isVarArgs)
val found = filtered.filter { cur =>
fixedMethodsCount > 0 && !cur.isVarArgs ||
fixedMethodsCount == 0 && cur.isVarArgs
}
val maximallySpecific = if (found.length > 1) {
implicit val methodOrdering = new scala.Ordering[Method] {
override def compare(x: Method, y: Method): Int = {
def specificThan(left: Method, right: Method) = {
// left parameter type is more specific than right parameter type
left.getParameterTypes.zip(right.getParameterTypes).forall {
case (leftParameterType, rightParameterType) =>
parameterTypeApplicable(leftParameterType, rightParameterType)
} &&
// non-equal
left.getParameterTypes.zip(right.getParameterTypes).exists {
case (leftParameterType, rightParameterType) =>
!parameterTypeEquals(leftParameterType, rightParameterType)
}
}
if (specificThan(x, y)) {
1
} else if (specificThan(y, x)) {
-1
} else {
0
}
}
}
val max = found.max
found.filter(methodOrdering.compare(max, _) == 0)
} else {
found
}
// check if there is a Scala varargs annotation
if (maximallySpecific.isEmpty &&
methods.exists { method =>
val signatures = method.getParameterTypes
signatures.zipWithIndex.forall {
case (clazz, i) if i < signatures.length - 1 =>
parameterTypeApplicable(methodSignature(i), clazz)
case (clazz, i) if i == signatures.length - 1 =>
clazz.getName.equals("scala.collection.Seq")
}
}) {
throw new ValidationException(
s"Scala-style variable arguments in '$methodName' methods are not supported. Please " +
s"add a @scala.annotation.varargs annotation.")
} else if (maximallySpecific.length > 1) {
throw new ValidationException(
s"Found multiple '$methodName' methods which match the signature.")
}
maximallySpecific.headOption
}
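/* Editorial example of the resolution rules above (hypothetical overloads): given
 * eval(a: Int, b: String) and eval(args: AnyRef*), a call with (Int, String) resolves
 * to the fixed-arity overload because fixed methods are preferred over varargs; among
 * several fixed matches the maximally specific signature (per parameterTypeApplicable)
 * wins, and a remaining ambiguity raises a ValidationException.
 */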
/**
* Checks if a given method exists in the given function
*/
def ifMethodExistInFunction(method: String, function: UserDefinedFunction): Boolean = {
val methods = function
.getClass
.getMethods
.filter {
m => m.getName == method
}
!methods.isEmpty
}
/**
* Extracts methods and throws a [[ValidationException]] if no implementation
* can be found, or implementation does not match the requirements.
*/
def checkAndExtractMethods(
function: UserDefinedFunction,
methodName: String): Array[Method] = {
val methods = function
.getClass
.getMethods
.filter { m =>
val modifiers = m.getModifiers
m.getName == methodName &&
Modifier.isPublic(modifiers) &&
!Modifier.isAbstract(modifiers) &&
!(function.isInstanceOf[TableFunction[_]] && Modifier.isStatic(modifiers))
}
if (methods.isEmpty) {
throw new ValidationException(
s"Function class '${function.getClass.getCanonicalName}' does not implement at least " +
s"one method named '$methodName' which is public, not abstract and " +
s"(in case of table functions) not static.")
}
methods
}
def getMethodSignatures(
function: UserDefinedFunction,
methodName: String): Array[Array[Class[_]]] = {
checkAndExtractMethods(function, methodName).map(_.getParameterTypes)
}
// ----------------------------------------------------------------------------------------------
// Utilities for SQL functions
// ----------------------------------------------------------------------------------------------
/**
* Creates [[SqlFunction]] for a [[ScalarFunction]]
*
* @param name function name
* @param function scalar function
* @param typeFactory type factory
* @return the ScalarSqlFunction
*/
def createScalarSqlFunction(
name: String,
displayName: String,
function: ScalarFunction,
typeFactory: FlinkTypeFactory)
: SqlFunction = {
new ScalarSqlFunction(name, displayName, function, typeFactory)
}
/**
* Creates [[SqlFunction]] for a [[TableFunction]]
*
* @param name function name
* @param tableFunction table function
* @param resultType the type information of returned table
* @param typeFactory type factory
* @return the TableSqlFunction
*/
def createTableSqlFunction(
name: String,
displayName: String,
tableFunction: TableFunction[_],
resultType: TypeInformation[_],
typeFactory: FlinkTypeFactory)
: SqlFunction = {
val (fieldNames, fieldIndexes, _) = UserDefinedFunctionUtils.getFieldInfo(resultType)
val function = new FlinkTableFunctionImpl(resultType, fieldIndexes, fieldNames)
new TableSqlFunction(name, displayName, tableFunction, resultType, typeFactory, function)
}
/**
* Creates [[SqlFunction]] for an [[AggregateFunction]]
*
* @param name function name
* @param aggFunction aggregate function
* @param typeFactory type factory
* @return the TableSqlFunction
*/
def createAggregateSqlFunction(
name: String,
displayName: String,
aggFunction: ImperativeAggregateFunction[_, _],
resultType: TypeInformation[_],
accTypeInfo: TypeInformation[_],
typeFactory: FlinkTypeFactory)
: SqlFunction = {
//check if a qualified accumulate method exists before create Sql function
checkAndExtractMethods(aggFunction, "accumulate")
AggSqlFunction(
name,
displayName,
aggFunction,
resultType,
accTypeInfo,
typeFactory)
}
/**
* Creates a [[SqlOperandTypeChecker]] for SQL validation of
* eval functions (scalar and table functions).
*/
def createEvalOperandMetadata(
name: String,
function: UserDefinedFunction)
: SqlOperandMetadata = {
val methods = checkAndExtractMethods(function, "eval")
new SqlOperandMetadata {
override def getAllowedSignatures(op: SqlOperator, opName: String): String = {
s"$opName[${signaturesToString(function, "eval")}]"
}
override def getOperandCountRange: SqlOperandCountRange = {
var min = 254
var max = -1
var isVarargs = false
methods.foreach( m => {
var len = m.getParameterTypes.length
if (len > 0 && m.isVarArgs && m.getParameterTypes()(len - 1).isArray) {
isVarargs = true
len = len - 1
}
max = Math.max(len, max)
min = Math.min(len, min)
})
if (isVarargs) {
// if eval method is varargs, set max to -1 to skip length check in Calcite
max = -1
}
SqlOperandCountRanges.between(min, max)
}
override def checkOperandTypes(
callBinding: SqlCallBinding,
throwOnFailure: Boolean)
: Boolean = {
val operandTypeInfo = getOperandTypeInfo(callBinding)
val foundSignature = getEvalMethodSignature(function, operandTypeInfo)
if (foundSignature.isEmpty) {
if (throwOnFailure) {
throw new ValidationException(
s"Given parameters of function '$name' do not match any signature. \\n" +
s"Actual: ${signatureToString(operandTypeInfo)} \\n" +
s"Expected: ${signaturesToString(function, "eval")}")
} else {
false
}
} else {
true
}
}
override def isOptional(i: Int): Boolean = false
override def getConsistency: Consistency = Consistency.NONE
override def paramTypes(typeFactory: RelDataTypeFactory): util.List[RelDataType] =
throw new UnsupportedOperationException("SqlOperandMetadata.paramTypes " +
"should never be invoked")
override def paramNames(): util.List[String] =
throw new UnsupportedOperationException("SqlOperandMetadata.paramNames " +
"should never be invoked")
}
}
/**
* Creates a [[SqlOperandTypeInference]] for the SQL validation of eval functions
* (scalar and table functions).
*/
def createEvalOperandTypeInference(
name: String,
function: UserDefinedFunction,
typeFactory: FlinkTypeFactory)
: SqlOperandTypeInference = {
new SqlOperandTypeInference {
override def inferOperandTypes(
callBinding: SqlCallBinding,
returnType: RelDataType,
operandTypes: Array[RelDataType]): Unit = {
val operandTypeInfo = getOperandTypeInfo(callBinding)
val foundSignature = getEvalMethodSignature(function, operandTypeInfo)
.getOrElse(throw new ValidationException(
s"Given parameters of function '$name' do not match any signature. \\n" +
s"Actual: ${signatureToString(operandTypeInfo)} \\n" +
s"Expected: ${signaturesToString(function, "eval")}"))
val inferredTypes = function match {
case sf: ScalarFunction =>
sf.getParameterTypes(foundSignature)
.map(typeFactory.createTypeFromTypeInfo(_, isNullable = true))
case tf: TableFunction[_] =>
tf.getParameterTypes(foundSignature)
.map(typeFactory.createTypeFromTypeInfo(_, isNullable = true))
case _ => throw new TableException("Unsupported function.")
}
for (i <- operandTypes.indices) {
if (i < inferredTypes.length - 1) {
operandTypes(i) = inferredTypes(i)
} else if (null != inferredTypes.last.getComponentType) {
// last argument is a collection, the array type
operandTypes(i) = inferredTypes.last.getComponentType
} else {
operandTypes(i) = inferredTypes.last
}
}
}
}
}
// ----------------------------------------------------------------------------------------------
// Utilities for user-defined functions
// ----------------------------------------------------------------------------------------------
/**
* Remove StateView fields from accumulator type information.
*
* @param index index of aggregate function
* @param acc accumulator
* @param accType accumulator type information, only support pojo type
* @param isStateBackedDataViews is data views use state backend
* @return mapping of accumulator type information and data view config which contains id,
* field name and state descriptor
*/
def removeStateViewFieldsFromAccTypeInfo[ACC](
index: Int,
acc: ACC,
accType: TypeInformation[_],
isStateBackedDataViews: Boolean)
: (TypeInformation[_], Option[Seq[DataViewSpec[_]]]) = {
/** Recursively checks if composite type includes a data view type. */
def includesDataView(ct: CompositeType[_]): Boolean = {
(0 until ct.getArity).exists(i =>
ct.getTypeAt(i) match {
case nestedCT: CompositeType[_] => includesDataView(nestedCT)
case t: TypeInformation[_] if t.getTypeClass == classOf[ListView[_]] => true
case t: TypeInformation[_] if t.getTypeClass == classOf[MapView[_, _]] => true
case _ => false
}
)
}
accType match {
case pojoType: PojoTypeInfo[_] if pojoType.getArity > 0 =>
val arity = pojoType.getArity
val newPojoFields = new util.ArrayList[PojoField]()
val accumulatorSpecs = new mutable.ArrayBuffer[DataViewSpec[_]]
for (i <- 0 until arity) {
val pojoField = pojoType.getPojoFieldAt(i)
val field = pojoField.getField
val fieldName = field.getName
field.setAccessible(true)
pojoField.getTypeInformation match {
case ct: CompositeType[_] if includesDataView(ct) =>
throw new TableException(
"MapView and ListView only supported at first level of accumulators of Pojo type.")
case map: MapViewTypeInfo[_, _] =>
val mapView = field.get(acc).asInstanceOf[MapView[_, _]]
if (mapView != null) {
val keyTypeInfo = mapView.keyType
val valueTypeInfo = mapView.valueType
val newTypeInfo = if (keyTypeInfo != null && valueTypeInfo != null) {
new MapViewTypeInfo(keyTypeInfo, valueTypeInfo)
} else {
map
}
// create map view specs with unique id (used as state name)
var spec = MapViewSpec(
"agg" + index + "$" + fieldName,
field,
newTypeInfo)
accumulatorSpecs += spec
if (!isStateBackedDataViews) {
// add data view field if it is not backed by a state backend.
// data view fields which are backed by state backend are not serialized.
newPojoFields.add(new PojoField(field, newTypeInfo))
}
}
case list: ListViewTypeInfo[_] =>
val listView = field.get(acc).asInstanceOf[ListView[_]]
if (listView != null) {
val elementTypeInfo = listView.elementType
val newTypeInfo = if (elementTypeInfo != null) {
new ListViewTypeInfo(elementTypeInfo)
} else {
list
}
// create list view specs with unique id (used as state name)
var spec = ListViewSpec(
"agg" + index + "$" + fieldName,
field,
newTypeInfo)
accumulatorSpecs += spec
if (!isStateBackedDataViews) {
// add data view field if it is not backed by a state backend.
// data view fields which are backed by state backend are not serialized.
newPojoFields.add(new PojoField(field, newTypeInfo))
}
}
case _ => newPojoFields.add(pojoField)
}
}
(new PojoTypeInfo(accType.getTypeClass, newPojoFields), Some(accumulatorSpecs))
case ct: CompositeType[_] if includesDataView(ct) =>
throw new TableException(
"MapView and ListView only supported in accumulators of POJO type.")
case _ => (accType, None)
}
}
/**
* Internal method of [[ScalarFunction#getResultType()]] that does some pre-checking and uses
* [[TypeExtractor]] as default return type inference.
*/
def getResultTypeOfScalarFunction(
function: ScalarFunction,
signature: Array[Class[_]])
: TypeInformation[_] = {
val userDefinedTypeInfo = function.getResultType(signature)
if (userDefinedTypeInfo != null) {
userDefinedTypeInfo
} else {
try {
TypeExtractor.getForClass(getResultTypeClassOfScalarFunction(function, signature))
} catch {
case ite: InvalidTypesException =>
throw new ValidationException(
s"Return type of scalar function '${function.getClass.getCanonicalName}' cannot be " +
s"automatically determined. Please provide type information manually.")
}
}
}
/**
* Returns the return type of the evaluation method matching the given signature.
*/
def getResultTypeClassOfScalarFunction(
function: ScalarFunction,
signature: Array[Class[_]])
: Class[_] = {
// find method for signature
val evalMethod = checkAndExtractMethods(function, "eval")
.find(m => signature.sameElements(m.getParameterTypes))
.getOrElse(throw new IllegalArgumentException("Given signature is invalid."))
evalMethod.getReturnType
}
// ----------------------------------------------------------------------------------------------
// Miscellaneous
// ----------------------------------------------------------------------------------------------
/**
* Returns field names and field positions for a given [[TypeInformation]].
*
* Field names are automatically extracted for
* [[org.apache.flink.api.common.typeutils.CompositeType]].
*
* @param inputType The TypeInformation to extract the field names and positions from.
* @return A tuple of two arrays holding the field names and corresponding field positions.
*/
def getFieldInfo(inputType: TypeInformation[_])
: (Array[String], Array[Int], Array[TypeInformation[_]]) = {
(FieldInfoUtils.getFieldNames(inputType),
FieldInfoUtils.getFieldIndices(inputType),
FieldInfoUtils.getFieldTypes(inputType))
}
/**
* Prints one signature consisting of classes.
*/
def signatureToString(signature: Array[Class[_]]): String =
signature.map { clazz =>
if (clazz == null) {
"null"
} else {
clazz.getCanonicalName
}
}.mkString("(", ", ", ")")
/**
* Prints one signature consisting of TypeInformation.
*/
def signatureToString(signature: Seq[TypeInformation[_]]): String = {
signatureToString(typeInfoToClass(signature))
}
/**
* Prints all signatures of methods with given name in a class.
*/
def signaturesToString(function: UserDefinedFunction, name: String): String = {
getMethodSignatures(function, name).map(signatureToString).mkString(", ")
}
/**
* Extracts type classes of [[TypeInformation]] in a null-aware way.
*/
def typeInfoToClass(typeInfos: Seq[TypeInformation[_]]): Array[Class[_]] =
typeInfos.map { typeInfo =>
if (typeInfo == null) {
null
} else {
typeInfo.getTypeClass
}
}.toArray
/**
* Compares parameter candidate classes with expected classes. If true, the parameters match.
* Candidate can be null (acts as a wildcard).
*/
private def parameterTypeApplicable(candidate: Class[_], expected: Class[_]): Boolean =
parameterTypeEquals(candidate, expected) ||
((expected != null && expected.isAssignableFrom(candidate)) ||
expected.isPrimitive && Primitives.wrap(expected).isAssignableFrom(candidate))
private def parameterTypeEquals(candidate: Class[_], expected: Class[_]): Boolean =
candidate == null ||
candidate == expected ||
expected.isPrimitive && Primitives.wrap(expected) == candidate ||
// time types
candidate == classOf[Date] && (expected == classOf[Int] || expected == classOf[JInt]) ||
candidate == classOf[Time] && (expected == classOf[Int] || expected == classOf[JInt]) ||
candidate == classOf[Timestamp] && (expected == classOf[Long] || expected == classOf[JLong]) ||
// arrays
(candidate.isArray && expected.isArray &&
(candidate.getComponentType == expected.getComponentType))
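// Editorial examples (not original code): a candidate java.lang.Integer is applicable
// where a primitive int is expected (primitives are compared against their boxed
// wrappers), and a null candidate acts as a wildcard that matches any expected class.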
def getOperandTypeInfo(callBinding: SqlCallBinding): Seq[TypeInformation[_]] = {
val operandTypes = for (i <- 0 until callBinding.getOperandCount)
yield callBinding.getOperandType(i)
operandTypes.map { operandType =>
if (operandType.getSqlTypeName == SqlTypeName.NULL) {
null
} else {
FlinkTypeFactory.toTypeInfo(operandType)
}
}
}
}
|
rmetzger/flink
|
flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/functions/utils/UserDefinedFunctionUtils.scala
|
Scala
|
apache-2.0
| 24,903
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming
import java.io.{InputStream, NotSerializableException}
import java.util.concurrent.atomic.{AtomicInteger, AtomicReference}
import scala.collection.Map
import scala.collection.mutable.Queue
import scala.reflect.ClassTag
import scala.util.control.NonFatal
import akka.actor.{Props, SupervisorStrategy}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.{BytesWritable, LongWritable, Text}
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat
import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat}
import org.apache.spark._
import org.apache.spark.annotation.{DeveloperApi, Experimental}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.input.FixedLengthBinaryInputFormat
import org.apache.spark.rdd.{RDD, RDDOperationScope}
import org.apache.spark.serializer.SerializationDebugger
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.StreamingContextState._
import org.apache.spark.streaming.dstream._
import org.apache.spark.streaming.receiver.{ActorReceiver, ActorSupervisorStrategy, Receiver}
import org.apache.spark.streaming.scheduler.{JobScheduler, StreamingListener}
import org.apache.spark.streaming.ui.{StreamingJobProgressListener, StreamingTab}
import org.apache.spark.util.{CallSite, ShutdownHookManager, ThreadUtils, Utils}
/**
* Main entry point for Spark Streaming functionality. It provides methods used to create
* [[org.apache.spark.streaming.dstream.DStream]]s from various input sources. It can be either
* created by providing a Spark master URL and an appName, or from a org.apache.spark.SparkConf
* configuration (see core Spark documentation), or from an existing org.apache.spark.SparkContext.
* The associated SparkContext can be accessed using `context.sparkContext`. After
* creating and transforming DStreams, the streaming computation can be started and stopped
* using `context.start()` and `context.stop()`, respectively.
* `context.awaitTermination()` allows the current thread to wait for the termination
* of the context by `stop()` or by an exception.
*/
class StreamingContext private[streaming] (
sc_ : SparkContext,
cp_ : Checkpoint,
batchDur_ : Duration
) extends Logging {
/**
* Create a StreamingContext using an existing SparkContext.
* @param sparkContext existing SparkContext
* @param batchDuration the time interval at which streaming data will be divided into batches
*/
def this(sparkContext: SparkContext, batchDuration: Duration) = {
this(sparkContext, null, batchDuration)
}
/**
* Create a StreamingContext by providing the configuration necessary for a new SparkContext.
* @param conf a org.apache.spark.SparkConf object specifying Spark parameters
* @param batchDuration the time interval at which streaming data will be divided into batches
*/
def this(conf: SparkConf, batchDuration: Duration) = {
this(StreamingContext.createNewSparkContext(conf), null, batchDuration)
}
/**
* Create a StreamingContext by providing the details necessary for creating a new SparkContext.
* @param master cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
* @param appName a name for your job, to display on the cluster web UI
* @param batchDuration the time interval at which streaming data will be divided into batches
*/
def this(
master: String,
appName: String,
batchDuration: Duration,
sparkHome: String = null,
jars: Seq[String] = Nil,
environment: Map[String, String] = Map()) = {
this(StreamingContext.createNewSparkContext(master, appName, sparkHome, jars, environment),
null, batchDuration)
}
/**
* Recreate a StreamingContext from a checkpoint file.
* @param path Path to the directory that was specified as the checkpoint directory
* @param hadoopConf Optional, configuration object if necessary for reading from
* HDFS compatible filesystems
*/
def this(path: String, hadoopConf: Configuration) =
this(null, CheckpointReader.read(path, new SparkConf(), hadoopConf).get, null)
/**
* Recreate a StreamingContext from a checkpoint file.
* @param path Path to the directory that was specified as the checkpoint directory
*/
def this(path: String) = this(path, SparkHadoopUtil.get.conf)
/**
* Recreate a StreamingContext from a checkpoint file using an existing SparkContext.
* @param path Path to the directory that was specified as the checkpoint directory
* @param sparkContext Existing SparkContext
*/
def this(path: String, sparkContext: SparkContext) = {
this(
sparkContext,
CheckpointReader.read(path, sparkContext.conf, sparkContext.hadoopConfiguration).get,
null)
}
if (sc_ == null && cp_ == null) {
throw new Exception("Spark Streaming cannot be initialized with " +
"both SparkContext and checkpoint as null")
}
private[streaming] val isCheckpointPresent = (cp_ != null)
private[streaming] val sc: SparkContext = {
if (sc_ != null) {
sc_
} else if (isCheckpointPresent) {
SparkContext.getOrCreate(cp_.createSparkConf())
} else {
throw new SparkException("Cannot create StreamingContext without a SparkContext")
}
}
if (sc.conf.get("spark.master") == "local" || sc.conf.get("spark.master") == "local[1]") {
logWarning("spark.master should be set as local[n], n > 1 in local mode if you have receivers" +
" to get data, otherwise Spark jobs will not get resources to process the received data.")
}
private[streaming] val conf = sc.conf
private[streaming] val env = sc.env
private[streaming] val graph: DStreamGraph = {
if (isCheckpointPresent) {
cp_.graph.setContext(this)
cp_.graph.restoreCheckpointData()
cp_.graph
} else {
require(batchDur_ != null, "Batch duration for StreamingContext cannot be null")
val newGraph = new DStreamGraph()
newGraph.setBatchDuration(batchDur_)
newGraph
}
}
private val nextInputStreamId = new AtomicInteger(0)
private[streaming] var checkpointDir: String = {
if (isCheckpointPresent) {
sc.setCheckpointDir(cp_.checkpointDir)
cp_.checkpointDir
} else {
null
}
}
private[streaming] val checkpointDuration: Duration = {
if (isCheckpointPresent) cp_.checkpointDuration else graph.batchDuration
}
private[streaming] val scheduler = new JobScheduler(this)
private[streaming] val waiter = new ContextWaiter
private[streaming] val progressListener = new StreamingJobProgressListener(this)
private[streaming] val uiTab: Option[StreamingTab] =
if (conf.getBoolean("spark.ui.enabled", true)) {
Some(new StreamingTab(this))
} else {
None
}
/* Initializing a streamingSource to register metrics */
private val streamingSource = new StreamingSource(this)
private var state: StreamingContextState = INITIALIZED
private val startSite = new AtomicReference[CallSite](null)
private[streaming] def getStartSite(): CallSite = startSite.get()
private var shutdownHookRef: AnyRef = _
// The streaming scheduler and other threads started by the StreamingContext
// should not inherit jobs group and job descriptions from the thread that
// start the context. This configuration allows jobs group and job description
// to be cleared in threads related to streaming. See SPARK-10649.
sparkContext.conf.set("spark.localProperties.clone", "true")
conf.getOption("spark.streaming.checkpoint.directory").foreach(checkpoint)
/**
* Return the associated Spark context
*/
def sparkContext: SparkContext = sc
/**
* Set each DStreams in this context to remember RDDs it generated in the last given duration.
* DStreams remember RDDs only for a limited duration of time and release them for garbage
* collection. This method allows the developer to specify how long to remember the RDDs (
* if the developer wishes to query old data outside the DStream computation).
* @param duration Minimum duration that each DStream should remember its RDDs
*/
def remember(duration: Duration) {
graph.remember(duration)
}
/**
* Set the context to periodically checkpoint the DStream operations for driver
* fault-tolerance.
* @param directory HDFS-compatible directory where the checkpoint data will be reliably stored.
* Note that this must be a fault-tolerant file system like HDFS so that
* checkpoint data can be recovered after a driver failure.
*/
def checkpoint(directory: String) {
if (directory != null) {
val path = new Path(directory)
val fs = path.getFileSystem(sparkContext.hadoopConfiguration)
fs.mkdirs(path)
val fullPath = fs.getFileStatus(path).getPath().toString
sc.setCheckpointDir(fullPath)
checkpointDir = fullPath
} else {
checkpointDir = null
}
}
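// Editorial usage sketch (hypothetical path):
//   ssc.checkpoint("hdfs://namenode:8020/checkpoints/myApp")
// creates the directory if needed and records its fully qualified URI as the checkpoint dir.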
private[streaming] def isCheckpointingEnabled: Boolean = {
checkpointDir != null
}
private[streaming] def initialCheckpoint: Checkpoint = {
if (isCheckpointPresent) cp_ else null
}
private[streaming] def getNewInputStreamId() = nextInputStreamId.getAndIncrement()
/**
* Execute a block of code in a scope such that all new DStreams created in this body will
* be part of the same scope. For more detail, see the comments in `doCompute`.
*
* Note: Return statements are NOT allowed in the given body.
*/
private[streaming] def withScope[U](body: => U): U = sparkContext.withScope(body)
/**
* Execute a block of code in a scope such that all new DStreams created in this body will
* be part of the same scope. For more detail, see the comments in `doCompute`.
*
* Note: Return statements are NOT allowed in the given body.
*/
private[streaming] def withNamedScope[U](name: String)(body: => U): U = {
RDDOperationScope.withScope(sc, name, allowNesting = false, ignoreParent = false)(body)
}
/**
* Create an input stream with any arbitrary user implemented receiver.
* Find more details at: http://spark.apache.org/docs/latest/streaming-custom-receivers.html
* @param receiver Custom implementation of Receiver
*
* @deprecated As of 1.0.0", replaced by `receiverStream`.
*/
@deprecated("Use receiverStream", "1.0.0")
def networkStream[T: ClassTag](receiver: Receiver[T]): ReceiverInputDStream[T] = {
withNamedScope("network stream") {
receiverStream(receiver)
}
}
/**
* Create an input stream with any arbitrary user implemented receiver.
* Find more details at: http://spark.apache.org/docs/latest/streaming-custom-receivers.html
* @param receiver Custom implementation of Receiver
*/
def receiverStream[T: ClassTag](receiver: Receiver[T]): ReceiverInputDStream[T] = {
withNamedScope("receiver stream") {
new PluggableInputDStream[T](this, receiver)
}
}
/**
* Create an input stream with any arbitrary user implemented actor receiver.
* Find more details at: http://spark.apache.org/docs/latest/streaming-custom-receivers.html
* @param props Props object defining creation of the actor
* @param name Name of the actor
* @param storageLevel RDD storage level (default: StorageLevel.MEMORY_AND_DISK_SER_2)
*
* @note An important point to note:
* Since the Actor may exist outside the Spark framework, it is the user's responsibility
* to ensure type safety, i.e. the parametrized type of the data received and of actorStream
* should be the same.
*/
def actorStream[T: ClassTag](
props: Props,
name: String,
storageLevel: StorageLevel = StorageLevel.MEMORY_AND_DISK_SER_2,
supervisorStrategy: SupervisorStrategy = ActorSupervisorStrategy.defaultStrategy
): ReceiverInputDStream[T] = withNamedScope("actor stream") {
receiverStream(new ActorReceiver[T](props, name, storageLevel, supervisorStrategy))
}
/**
* Create an input stream from a TCP source hostname:port. Data is received using
* a TCP socket and the received bytes are interpreted as UTF8-encoded, `\n`-delimited
* lines.
* @param hostname Hostname to connect to for receiving data
* @param port Port to connect to for receiving data
* @param storageLevel Storage level to use for storing the received objects
* (default: StorageLevel.MEMORY_AND_DISK_SER_2)
*/
def socketTextStream(
hostname: String,
port: Int,
storageLevel: StorageLevel = StorageLevel.MEMORY_AND_DISK_SER_2
): ReceiverInputDStream[String] = withNamedScope("socket text stream") {
socketStream[String](hostname, port, SocketReceiver.bytesToLines, storageLevel)
}
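// Editorial usage sketch (assumes a StreamingContext `ssc` and a line-oriented text
// server listening on localhost:9999):
//   val lines = ssc.socketTextStream("localhost", 9999)
//   lines.flatMap(_.split(" ")).map((_, 1)).reduceByKey(_ + _).print()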
/**
* Create an input stream from a TCP source hostname:port. Data is received using
* a TCP socket and the received bytes are interpreted as objects using the given
* converter.
* @param hostname Hostname to connect to for receiving data
* @param port Port to connect to for receiving data
* @param converter Function to convert the byte stream to objects
* @param storageLevel Storage level to use for storing the received objects
* @tparam T Type of the objects received (after converting bytes to objects)
*/
def socketStream[T: ClassTag](
hostname: String,
port: Int,
converter: (InputStream) => Iterator[T],
storageLevel: StorageLevel
): ReceiverInputDStream[T] = {
new SocketInputDStream[T](this, hostname, port, converter, storageLevel)
}
/**
* Create an input stream from a network source hostname:port, where data is received
* as serialized blocks (serialized using the Spark's serializer) that can be directly
* pushed into the block manager without deserializing them. This is the most efficient
* way to receive data.
* @param hostname Hostname to connect to for receiving data
* @param port Port to connect to for receiving data
* @param storageLevel Storage level to use for storing the received objects
* (default: StorageLevel.MEMORY_AND_DISK_SER_2)
* @tparam T Type of the objects in the received blocks
*/
def rawSocketStream[T: ClassTag](
hostname: String,
port: Int,
storageLevel: StorageLevel = StorageLevel.MEMORY_AND_DISK_SER_2
): ReceiverInputDStream[T] = withNamedScope("raw socket stream") {
new RawInputDStream[T](this, hostname, port, storageLevel)
}
/**
* Create an input stream that monitors a Hadoop-compatible filesystem
* for new files and reads them using the given key-value types and input format.
* Files must be written to the monitored directory by "moving" them from another
* location within the same file system. File names starting with . are ignored.
* @param directory HDFS directory to monitor for new files
* @tparam K Key type for reading HDFS file
* @tparam V Value type for reading HDFS file
* @tparam F Input format for reading HDFS file
*/
def fileStream[
K: ClassTag,
V: ClassTag,
F <: NewInputFormat[K, V]: ClassTag
] (directory: String): InputDStream[(K, V)] = {
new FileInputDStream[K, V, F](this, directory)
}
/**
* Create an input stream that monitors a Hadoop-compatible filesystem
* for new files and reads them using the given key-value types and input format.
* Files must be written to the monitored directory by "moving" them from another
* location within the same file system.
* @param directory HDFS directory to monitor for new files
* @param filter Function to filter paths to process
* @param newFilesOnly Should process only new files and ignore existing files in the directory
* @tparam K Key type for reading HDFS file
* @tparam V Value type for reading HDFS file
* @tparam F Input format for reading HDFS file
*/
def fileStream[
K: ClassTag,
V: ClassTag,
F <: NewInputFormat[K, V]: ClassTag
] (directory: String, filter: Path => Boolean, newFilesOnly: Boolean): InputDStream[(K, V)] = {
new FileInputDStream[K, V, F](this, directory, filter, newFilesOnly)
}
/**
* Create an input stream that monitors a Hadoop-compatible filesystem
* for new files and reads them using the given key-value types and input format.
* Files must be written to the monitored directory by "moving" them from another
* location within the same file system. File names starting with . are ignored.
* @param directory HDFS directory to monitor for new files
* @param filter Function to filter paths to process
* @param newFilesOnly Should process only new files and ignore existing files in the directory
* @param conf Hadoop configuration
* @tparam K Key type for reading HDFS file
* @tparam V Value type for reading HDFS file
* @tparam F Input format for reading HDFS file
*/
def fileStream[
K: ClassTag,
V: ClassTag,
F <: NewInputFormat[K, V]: ClassTag
] (directory: String,
filter: Path => Boolean,
newFilesOnly: Boolean,
conf: Configuration): InputDStream[(K, V)] = {
new FileInputDStream[K, V, F](this, directory, filter, newFilesOnly, Option(conf))
}
/**
* Create an input stream that monitors a Hadoop-compatible filesystem
* for new files and reads them as text files (using key as LongWritable, value
* as Text and input format as TextInputFormat). Files must be written to the
* monitored directory by "moving" them from another location within the same
* file system. File names starting with . are ignored.
* @param directory HDFS directory to monitor for new files
*/
def textFileStream(directory: String): DStream[String] = withNamedScope("text file stream") {
fileStream[LongWritable, Text, TextInputFormat](directory).map(_._2.toString)
}
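// Editorial usage sketch (hypothetical directory, assumes a StreamingContext `ssc`):
//   val logs = ssc.textFileStream("hdfs://namenode/user/me/incoming/")
// emits the text lines of each file that is atomically moved into that directory.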
/**
* :: Experimental ::
*
* Create an input stream that monitors a Hadoop-compatible filesystem
* for new files and reads them as flat binary files, assuming a fixed length per record,
* generating one byte array per record. Files must be written to the monitored directory
* by "moving" them from another location within the same file system. File names
* starting with . are ignored.
*
* '''Note:''' We ensure that the byte array for each record in the
* resulting RDDs of the DStream has the provided record length.
*
* @param directory HDFS directory to monitor for new files
* @param recordLength length of each record in bytes
*/
@Experimental
def binaryRecordsStream(
directory: String,
recordLength: Int): DStream[Array[Byte]] = withNamedScope("binary records stream") {
val conf = sc_.hadoopConfiguration
conf.setInt(FixedLengthBinaryInputFormat.RECORD_LENGTH_PROPERTY, recordLength)
val br = fileStream[LongWritable, BytesWritable, FixedLengthBinaryInputFormat](
directory, FileInputDStream.defaultFilter: Path => Boolean, newFilesOnly = true, conf)
val data = br.map { case (k, v) =>
val bytes = v.getBytes
require(bytes.length == recordLength, "Byte array does not have correct length. " +
s"${bytes.length} did not equal recordLength: $recordLength")
bytes
}
data
}
/**
* Create an input stream from a queue of RDDs. In each batch,
* it will process either one or all of the RDDs returned by the queue.
*
* NOTE: Since arbitrary RDDs can be added to `queueStream`, there is no way to recover the
* data of those RDDs, so `queueStream` doesn't support checkpointing.
*
* @param queue Queue of RDDs
* @param oneAtATime Whether only one RDD should be consumed from the queue in every interval
* @tparam T Type of objects in the RDD
*/
def queueStream[T: ClassTag](
queue: Queue[RDD[T]],
oneAtATime: Boolean = true
): InputDStream[T] = {
queueStream(queue, oneAtATime, sc.makeRDD(Seq[T](), 1))
}
/**
* Create an input stream from a queue of RDDs. In each batch,
* it will process either one or all of the RDDs returned by the queue.
*
* NOTE: Since arbitrary RDDs can be added to `queueStream`, there is no way to recover the
* data of those RDDs, so `queueStream` doesn't support checkpointing.
*
* @param queue Queue of RDDs
* @param oneAtATime Whether only one RDD should be consumed from the queue in every interval
* @param defaultRDD Default RDD is returned by the DStream when the queue is empty.
* Set as null if no RDD should be returned when empty
* @tparam T Type of objects in the RDD
*/
def queueStream[T: ClassTag](
queue: Queue[RDD[T]],
oneAtATime: Boolean,
defaultRDD: RDD[T]
): InputDStream[T] = {
new QueueInputDStream(this, queue, oneAtATime, defaultRDD)
}
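// Editorial usage sketch for tests (assumes a StreamingContext `ssc`):
//   val rddQueue = new scala.collection.mutable.Queue[RDD[Int]]()
//   val ints = ssc.queueStream(rddQueue)           // consumes one queued RDD per batch
//   rddQueue += ssc.sparkContext.makeRDD(1 to 100)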
/**
* Create a unified DStream from multiple DStreams of the same type and same slide duration.
*/
def union[T: ClassTag](streams: Seq[DStream[T]]): DStream[T] = withScope {
new UnionDStream[T](streams.toArray)
}
/**
* Create a new DStream in which each RDD is generated by applying a function on RDDs of
* the DStreams.
*/
def transform[T: ClassTag](
dstreams: Seq[DStream[_]],
transformFunc: (Seq[RDD[_]], Time) => RDD[T]
): DStream[T] = withScope {
new TransformedDStream[T](dstreams, sparkContext.clean(transformFunc))
}
/** Add a [[org.apache.spark.streaming.scheduler.StreamingListener]] object for
* receiving system events related to streaming.
*/
def addStreamingListener(streamingListener: StreamingListener) {
scheduler.listenerBus.addListener(streamingListener)
}
private def validate() {
assert(graph != null, "Graph is null")
graph.validate()
require(
!isCheckpointingEnabled || checkpointDuration != null,
"Checkpoint directory has been set, but the graph checkpointing interval has " +
"not been set. Please use StreamingContext.checkpoint() to set the interval."
)
// Verify whether the DStream checkpoint is serializable
if (isCheckpointingEnabled) {
val checkpoint = new Checkpoint(this, Time.apply(0))
try {
Checkpoint.serialize(checkpoint, conf)
} catch {
case e: NotSerializableException =>
throw new NotSerializableException(
"DStream checkpointing has been enabled but the DStreams with their functions " +
"are not serializable\\n" +
SerializationDebugger.improveException(checkpoint, e).getMessage()
)
}
}
if (Utils.isDynamicAllocationEnabled(sc.conf)) {
logWarning("Dynamic Allocation is enabled for this application. " +
"Enabling Dynamic allocation for Spark Streaming applications can cause data loss if " +
"Write Ahead Log is not enabled for non-replayable sources like Flume. " +
"See the programming guide for details on how to enable the Write Ahead Log")
}
}
/**
* :: DeveloperApi ::
*
* Return the current state of the context. The context can be in three possible states -
* - StreamingContextState.INITIALIZED - The context has been created, but not yet started.
* Input DStreams, transformations and output operations can be created on the context.
* - StreamingContextState.ACTIVE - The context has been started, and not yet stopped.
* Input DStreams, transformations and output operations cannot be created on the context.
* - StreamingContextState.STOPPED - The context has been stopped and cannot be used any more.
*/
@DeveloperApi
def getState(): StreamingContextState = synchronized {
state
}
/**
* Start the execution of the streams.
*
* @throws IllegalStateException if the StreamingContext is already stopped.
*/
def start(): Unit = synchronized {
state match {
case INITIALIZED =>
startSite.set(DStream.getCreationSite())
StreamingContext.ACTIVATION_LOCK.synchronized {
StreamingContext.assertNoOtherContextIsActive()
try {
validate()
// Start the streaming scheduler in a new thread, so that thread local properties
// like call sites and job groups can be reset without affecting those of the
// current thread.
ThreadUtils.runInNewThread("streaming-start") {
sparkContext.setCallSite(startSite.get)
sparkContext.clearJobGroup()
sparkContext.setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, "false")
scheduler.start()
}
state = StreamingContextState.ACTIVE
} catch {
case NonFatal(e) =>
logError("Error starting the context, marking it as stopped", e)
scheduler.stop(false)
state = StreamingContextState.STOPPED
throw e
}
StreamingContext.setActiveContext(this)
}
shutdownHookRef = ShutdownHookManager.addShutdownHook(
StreamingContext.SHUTDOWN_HOOK_PRIORITY)(stopOnShutdown)
// Registering Streaming Metrics at the start of the StreamingContext
assert(env.metricsSystem != null)
env.metricsSystem.registerSource(streamingSource)
uiTab.foreach(_.attach())
logInfo("StreamingContext started")
case ACTIVE =>
logWarning("StreamingContext has already been started")
case STOPPED =>
throw new IllegalStateException("StreamingContext has already been stopped")
}
}
/**
* Wait for the execution to stop. Any exceptions that occur during the execution
* will be thrown in this thread.
*/
def awaitTermination() {
waiter.waitForStopOrError()
}
/**
* Wait for the execution to stop. Any exceptions that occur during the execution
* will be thrown in this thread.
* @param timeout time to wait in milliseconds
*
* @deprecated As of 1.3.0, replaced by `awaitTerminationOrTimeout(Long)`.
*/
@deprecated("Use awaitTerminationOrTimeout(Long) instead", "1.3.0")
def awaitTermination(timeout: Long) {
waiter.waitForStopOrError(timeout)
}
/**
* Wait for the execution to stop. Any exceptions that occur during the execution
* will be thrown in this thread.
*
* @param timeout time to wait in milliseconds
* @return `true` if it's stopped; or throw the reported error during the execution; or `false`
* if the waiting time elapsed before returning from the method.
*/
def awaitTerminationOrTimeout(timeout: Long): Boolean = {
waiter.waitForStopOrError(timeout)
}
/**
* Stop the execution of the streams immediately (does not wait for all received data
* to be processed). By default, if `stopSparkContext` is not specified, the underlying
* SparkContext will also be stopped. This implicit behavior can be configured using the
* SparkConf configuration spark.streaming.stopSparkContextByDefault.
*
* @param stopSparkContext If true, stops the associated SparkContext. The underlying SparkContext
* will be stopped regardless of whether this StreamingContext has been
* started.
*/
def stop(
stopSparkContext: Boolean = conf.getBoolean("spark.streaming.stopSparkContextByDefault", true)
): Unit = synchronized {
stop(stopSparkContext, false)
}
/**
* Stop the execution of the streams, with option of ensuring all received data
* has been processed.
*
* @param stopSparkContext if true, stops the associated SparkContext. The underlying SparkContext
* will be stopped regardless of whether this StreamingContext has been
* started.
* @param stopGracefully if true, stops gracefully by waiting for the processing of all
* received data to be completed
*/
def stop(stopSparkContext: Boolean, stopGracefully: Boolean): Unit = {
var shutdownHookRefToRemove: AnyRef = null
synchronized {
try {
state match {
case INITIALIZED =>
logWarning("StreamingContext has not been started yet")
case STOPPED =>
logWarning("StreamingContext has already been stopped")
case ACTIVE =>
scheduler.stop(stopGracefully)
// Removing the streamingSource to de-register the metrics on stop()
env.metricsSystem.removeSource(streamingSource)
uiTab.foreach(_.detach())
StreamingContext.setActiveContext(null)
waiter.notifyStop()
if (shutdownHookRef != null) {
shutdownHookRefToRemove = shutdownHookRef
shutdownHookRef = null
}
logInfo("StreamingContext stopped successfully")
}
} finally {
// The state should always be Stopped after calling `stop()`, even if we haven't started yet
state = STOPPED
}
}
if (shutdownHookRefToRemove != null) {
ShutdownHookManager.removeShutdownHook(shutdownHookRefToRemove)
}
// Even if we have already stopped, we still need to attempt to stop the SparkContext because
// a user might stop(stopSparkContext = false) and then call stop(stopSparkContext = true).
if (stopSparkContext) sc.stop()
}
private def stopOnShutdown(): Unit = {
val stopGracefully = conf.getBoolean("spark.streaming.stopGracefullyOnShutdown", false)
logInfo(s"Invoking stop(stopGracefully=$stopGracefully) from shutdown hook")
// Do not stop SparkContext, let its own shutdown hook stop it
stop(stopSparkContext = false, stopGracefully = stopGracefully)
}
}
/**
* StreamingContext object contains a number of utility functions related to the
* StreamingContext class.
*/
object StreamingContext extends Logging {
/**
* Lock that guards activation of a StreamingContext as well as access to the singleton active
* StreamingContext in getActiveOrCreate().
*/
private val ACTIVATION_LOCK = new Object()
private val SHUTDOWN_HOOK_PRIORITY = ShutdownHookManager.SPARK_CONTEXT_SHUTDOWN_PRIORITY + 1
private val activeContext = new AtomicReference[StreamingContext](null)
private def assertNoOtherContextIsActive(): Unit = {
ACTIVATION_LOCK.synchronized {
if (activeContext.get() != null) {
throw new IllegalStateException(
"Only one StreamingContext may be started in this JVM. " +
"Currently running StreamingContext was started at" +
activeContext.get.getStartSite().longForm)
}
}
}
private def setActiveContext(ssc: StreamingContext): Unit = {
ACTIVATION_LOCK.synchronized {
activeContext.set(ssc)
}
}
/**
* :: Experimental ::
*
* Get the currently active context, if there is one. Active means started but not stopped.
*/
@Experimental
def getActive(): Option[StreamingContext] = {
ACTIVATION_LOCK.synchronized {
Option(activeContext.get())
}
}
/**
* @deprecated As of 1.3.0, replaced by implicit functions in the DStream companion object.
* This is kept here only for backward compatibility.
*/
@deprecated("Replaced by implicit functions in the DStream companion object. This is " +
"kept here only for backward compatibility.", "1.3.0")
def toPairDStreamFunctions[K, V](stream: DStream[(K, V)])
(implicit kt: ClassTag[K], vt: ClassTag[V], ord: Ordering[K] = null)
: PairDStreamFunctions[K, V] = {
DStream.toPairDStreamFunctions(stream)(kt, vt, ord)
}
/**
* :: Experimental ::
*
* Either return the "active" StreamingContext (that is, started but not stopped), or create a
* new StreamingContext by calling the provided `creatingFunc`.
* @param creatingFunc Function to create a new StreamingContext
*/
@Experimental
def getActiveOrCreate(creatingFunc: () => StreamingContext): StreamingContext = {
ACTIVATION_LOCK.synchronized {
getActive().getOrElse { creatingFunc() }
}
}
/**
* :: Experimental ::
*
* Either get the currently active StreamingContext (that is, started but not stopped),
* OR recreate a StreamingContext from checkpoint data in the given path. If checkpoint data
* does not exist in the provided `checkpointPath`, then create a new StreamingContext by calling the provided
* `creatingFunc`.
*
* @param checkpointPath Checkpoint directory used in an earlier StreamingContext program
* @param creatingFunc Function to create a new StreamingContext
* @param hadoopConf Optional Hadoop configuration if necessary for reading from the
* file system
* @param createOnError Optional, whether to create a new StreamingContext if there is an
* error in reading checkpoint data. By default, an exception will be
* thrown on error.
*/
@Experimental
def getActiveOrCreate(
checkpointPath: String,
creatingFunc: () => StreamingContext,
hadoopConf: Configuration = SparkHadoopUtil.get.conf,
createOnError: Boolean = false
): StreamingContext = {
ACTIVATION_LOCK.synchronized {
getActive().getOrElse { getOrCreate(checkpointPath, creatingFunc, hadoopConf, createOnError) }
}
}
/**
* Either recreate a StreamingContext from checkpoint data or create a new StreamingContext.
* If checkpoint data exists in the provided `checkpointPath`, then StreamingContext will be
* recreated from the checkpoint data. If the data does not exist, then the StreamingContext
* will be created by calling the provided `creatingFunc`.
*
* @param checkpointPath Checkpoint directory used in an earlier StreamingContext program
* @param creatingFunc Function to create a new StreamingContext
* @param hadoopConf Optional Hadoop configuration if necessary for reading from the
* file system
* @param createOnError Optional, whether to create a new StreamingContext if there is an
* error in reading checkpoint data. By default, an exception will be
* thrown on error.
*/
def getOrCreate(
checkpointPath: String,
creatingFunc: () => StreamingContext,
hadoopConf: Configuration = SparkHadoopUtil.get.conf,
createOnError: Boolean = false
): StreamingContext = {
val checkpointOption = CheckpointReader.read(
checkpointPath, new SparkConf(), hadoopConf, createOnError)
checkpointOption.map(new StreamingContext(null, _, null)).getOrElse(creatingFunc())
}
/**
* Find the JAR from which a given class was loaded, to make it easy for users to pass
* their JARs to StreamingContext.
*/
def jarOfClass(cls: Class[_]): Option[String] = SparkContext.jarOfClass(cls)
private[streaming] def createNewSparkContext(conf: SparkConf): SparkContext = {
new SparkContext(conf)
}
private[streaming] def createNewSparkContext(
master: String,
appName: String,
sparkHome: String,
jars: Seq[String],
environment: Map[String, String]
): SparkContext = {
val conf = SparkContext.updatedConf(
new SparkConf(), master, appName, sparkHome, jars, environment)
new SparkContext(conf)
}
private[streaming] def rddToFileName[T](prefix: String, suffix: String, time: Time): String = {
if (prefix == null) {
time.milliseconds.toString
} else if (suffix == null || suffix.length == 0) {
prefix + "-" + time.milliseconds
} else {
prefix + "-" + time.milliseconds + "." + suffix
}
}
}
|
practice-vishnoi/dev-spark-1
|
streaming/src/main/scala/org/apache/spark/streaming/StreamingContext.scala
|
Scala
|
apache-2.0
| 36,416
|
/*
* Copyright 2017-2022 John Snow Labs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.johnsnowlabs.ml.tensorflow
import com.johnsnowlabs.ml.tensorflow.sentencepiece._
import com.johnsnowlabs.ml.tensorflow.sign.{ModelSignatureConstants, ModelSignatureManager}
import com.johnsnowlabs.nlp.{Annotation, AnnotatorType}
import com.johnsnowlabs.nlp.annotators.tokenizer.normalizer.MosesPunctNormalizer
import scala.collection.JavaConverters._
/** MarianTransformer: Fast Neural Machine Translation
*
* MarianTransformer uses models trained by MarianNMT.
*
* Marian is an efficient, free Neural Machine Translation framework written in pure C++ with minimal dependencies.
* It is mainly being developed by the Microsoft Translator team. Many academic (most notably the University of Edinburgh and in the past the Adam Mickiewicz University in Poznań) and commercial contributors help with its development.
*
* It is currently the engine behind the Microsoft Translator Neural Machine Translation services and being deployed by many companies, organizations and research projects (see below for an incomplete list).
*
* '''Sources''' :
* MarianNMT [[https://marian-nmt.github.io/]]
* Marian: Fast Neural Machine Translation in C++ [[https://www.aclweb.org/anthology/P18-4020/]]
*
* @param tensorflow Marian model wrapper with TensorFlow Wrapper
* @param configProtoBytes Configuration for TensorFlow session
* @param sppSrc Contains the vocabulary for the source language.
* @param sppTrg Contains the vocabulary for the target language.
*/
class TensorflowMarian(val tensorflow: TensorflowWrapper,
val sppSrc: SentencePieceWrapper,
val sppTrg: SentencePieceWrapper,
configProtoBytes: Option[Array[Byte]] = None,
signatures: Option[Map[String, String]] = None
) extends Serializable {
val _tfMarianSignatures: Map[String, String] = signatures.getOrElse(ModelSignatureManager.apply())
private val langCodeRe = ">>.+<<".r
private def sessionWarmup(): Unit = {
val dummyInput = Array.fill(1)(0)
tag(Seq(dummyInput), 0, 0, 0, 1)
}
sessionWarmup()
def tag(batch: Seq[Array[Int]],
maxOutputLength: Int,
paddingTokenId: Int,
eosTokenId: Int,
vocabSize: Int,
ignoreTokenIds: Array[Int] = Array()): Array[Array[Int]] = {
/* Actual size of each sentence to skip padding in the TF model */
val sequencesLength = batch.map(x => x.length).toArray
val maxSentenceLength = sequencesLength.max
//Run encoder
val tensorEncoder = new TensorResources()
val inputDim = batch.length * maxSentenceLength
val encoderInputIdsBuffers = tensorEncoder.createIntBuffer(batch.length * maxSentenceLength)
val encoderAttentionMaskBuffers = tensorEncoder.createIntBuffer(batch.length * maxSentenceLength)
val decoderAttentionMaskBuffers = tensorEncoder.createIntBuffer(batch.length * maxSentenceLength)
val shape = Array(batch.length.toLong, maxSentenceLength)
batch.zipWithIndex.foreach { case (tokenIds, idx) =>
// this one marks the beginning of each sentence in the flattened structure
val offset = idx * maxSentenceLength
val diff = maxSentenceLength - tokenIds.length
val s = tokenIds.take(maxSentenceLength) ++ Array.fill[Int](diff)(paddingTokenId)
encoderInputIdsBuffers.offset(offset).write(s)
val mask = s.map(x => if (x != paddingTokenId) 1 else 0)
encoderAttentionMaskBuffers.offset(offset).write(mask)
decoderAttentionMaskBuffers.offset(offset).write(mask)
}
val encoderInputIdsTensors = tensorEncoder.createIntBufferTensor(shape, encoderInputIdsBuffers)
val encoderAttentionMaskKeyTensors = tensorEncoder.createIntBufferTensor(shape, encoderAttentionMaskBuffers)
val decoderAttentionMaskTensors = tensorEncoder.createIntBufferTensor(shape, decoderAttentionMaskBuffers)
val session = tensorflow.getTFSessionWithSignature(configProtoBytes = configProtoBytes, initAllTables = false, savedSignatures = signatures)
val runner = session.runner
runner
.feed(_tfMarianSignatures.getOrElse(ModelSignatureConstants.EncoderInputIds.key, "missing_encoder_input_ids"), encoderInputIdsTensors)
.feed(_tfMarianSignatures.getOrElse(ModelSignatureConstants.EncoderAttentionMask.key, "missing_encoder_attention_mask"), encoderAttentionMaskKeyTensors)
.fetch(_tfMarianSignatures.getOrElse(ModelSignatureConstants.EncoderOutput.key, "missing_last_hidden_state"))
val encoderOuts = runner.run().asScala
val encoderOutsFloats = TensorResources.extractFloats(encoderOuts.head)
val dim = encoderOutsFloats.length / inputDim
val encoderOutsBatch = encoderOutsFloats.grouped(dim).toArray.grouped(maxSentenceLength).toArray
encoderOuts.foreach(_.close())
tensorEncoder.clearSession(encoderOuts)
// Run decoder
val decoderEncoderStateBuffers = tensorEncoder.createFloatBuffer(batch.length * maxSentenceLength * dim)
batch.zipWithIndex.foreach { case (_, index) =>
var offset = index * maxSentenceLength * dim
encoderOutsBatch(index).foreach(encoderOutput => {
decoderEncoderStateBuffers.offset(offset).write(encoderOutput)
offset += dim
})
}
val decoderEncoderStateTensors = tensorEncoder.createFloatBufferTensor(
Array(batch.length.toLong, maxSentenceLength, dim),
decoderEncoderStateBuffers)
var decoderInputs = batch.map(_ => Array(paddingTokenId)).toArray
var modelOutputs = batch.map(_ => Array(paddingTokenId)).toArray
var stopDecoder = false
while (!stopDecoder) {
val decoderInputLength = decoderInputs.head.length
val tensorDecoder = new TensorResources()
val decoderInputBuffers = tensorDecoder.createIntBuffer(batch.length * decoderInputLength)
decoderInputs.zipWithIndex.foreach { case (pieceIds, idx) =>
val offset = idx * decoderInputLength
decoderInputBuffers.offset(offset).write(pieceIds)
}
val decoderInputTensors = tensorDecoder.createIntBufferTensor(
Array(batch.length.toLong, decoderInputLength), decoderInputBuffers)
val runner = session.runner
runner
.feed(_tfMarianSignatures.getOrElse(ModelSignatureConstants.DecoderEncoderInputIds.key, "missing_encoder_state"), decoderEncoderStateTensors)
.feed(_tfMarianSignatures.getOrElse(ModelSignatureConstants.DecoderInputIds.key, "missing_decoder_input_ids"), decoderInputTensors)
.feed(_tfMarianSignatures.getOrElse(ModelSignatureConstants.DecoderAttentionMask.key, "missing_encoder_attention_mask"), decoderAttentionMaskTensors)
.fetch(_tfMarianSignatures.getOrElse(ModelSignatureConstants.DecoderOutput.key, "missing_output_0"))
val decoderOuts = runner.run().asScala
val decoderOutputs = TensorResources.extractFloats(decoderOuts.head)
.grouped(vocabSize).toArray.grouped(decoderInputLength).toArray
val outputIds = decoderOutputs.map(
batch => batch.map(input => {
var maxArg = -1
var maxValue = Float.MinValue
input.indices.foreach(i => {
if ((input(i) >= maxValue) && (!ignoreTokenIds.contains(i))) {
maxArg = i
maxValue = input(i)
}
})
maxArg
}).last)
decoderInputs = decoderInputs.zip(outputIds).map(x => x._1 ++ Array(x._2))
modelOutputs = modelOutputs.zip(outputIds).map(x => {
if (x._1.contains(eosTokenId)) {
x._1
} else {
x._1 ++ Array(x._2)
}
})
decoderOuts.foreach(_.close())
tensorDecoder.clearTensors()
tensorDecoder.clearSession(decoderOuts)
decoderInputTensors.close()
stopDecoder = !modelOutputs.exists(o => o.last != eosTokenId) ||
(modelOutputs.head.length > math.max(maxOutputLength, maxSentenceLength))
}
decoderAttentionMaskTensors.close()
decoderEncoderStateTensors.close()
tensorEncoder.clearTensors()
modelOutputs.map(x => x.filter(y => y != eosTokenId && y != paddingTokenId))
}
def decode(sentences: Array[Array[Int]], vocabsArray: Array[String]): Seq[String] = {
sentences.map { s =>
val filteredPads = s.filter(x => x != 0)
val pieceTokens = filteredPads.map {
pieceId =>
vocabsArray(pieceId)
}
sppTrg.getSppModel.decodePieces(pieceTokens.toList.asJava)
}
}
def encode(sentences: Seq[Annotation], normalizer: MosesPunctNormalizer, maxSeqLength: Int, vocabsArray: Array[String],
langId: Int, unknownTokenId: Int, eosTokenId: Int): Seq[Array[Int]] = {
sentences.map { s =>
// remove language code from the source text
val sentWithoutLangId = langCodeRe.replaceFirstIn(s.result, "").trim
val normalizedSent = normalizer.normalize(sentWithoutLangId)
val pieceTokens = sppSrc.getSppModel.encodeAsPieces(normalizedSent).toArray.map(x => x.toString)
val pieceIds = pieceTokens.map {
piece =>
val pieceId = vocabsArray.indexOf(piece)
if (pieceId > 0) {
pieceId
} else {
unknownTokenId
}
}
if (langId > 0)
Array(langId) ++ pieceIds.take(maxSeqLength - 2) ++ Array(eosTokenId)
else
pieceIds.take(maxSeqLength - 1) ++ Array(eosTokenId)
}
}
/**
* generate seq2seq via encoding, generating, and decoding
*
* @param sentences non-empty sequence of Annotations
* @param batchSize size of batches to be processed at the same time
* @param maxInputLength maximum length for input
* @param maxOutputLength maximum length for output
* @param vocabs list of all vocabs
* @param langId language id for multi-lingual models
* @return translated sentences as Annotations
*/
def predict(sentences: Seq[Annotation],
batchSize: Int = 1,
maxInputLength: Int,
maxOutputLength: Int,
vocabs: Array[String],
langId: String,
ignoreTokenIds: Array[Int] = Array()
): Array[Annotation] = {
val normalizer = new MosesPunctNormalizer()
val paddingTokenId = vocabs.indexOf("<pad>")
val unknownTokenId = vocabs.indexOf("<unk>")
val eosTokenId = vocabs.indexOf("</s>")
val ignoreTokenIdsWithPadToken = ignoreTokenIds ++ Array(paddingTokenId)
val vocabSize = vocabs.toSeq.length
val langIdPieceId = if (langId.nonEmpty) {
vocabs.indexOf(langId)
} else {
val lang = langCodeRe.findFirstIn(sentences.head.result.trim).getOrElse(-1L)
vocabs.indexOf(lang)
}
val batchDecoder = sentences.grouped(batchSize).toArray.flatMap { batch =>
val batchSP = encode(batch, normalizer, maxInputLength, vocabs, langIdPieceId, unknownTokenId, eosTokenId)
val spIds = tag(batchSP, maxOutputLength, paddingTokenId, eosTokenId, vocabSize, ignoreTokenIdsWithPadToken)
decode(spIds, vocabs)
}
var sentBegin, nextSentEnd = 0
batchDecoder.zip(sentences).map {
case (content, sent) =>
nextSentEnd += content.length - 1
val annots = new Annotation(
annotatorType = AnnotatorType.DOCUMENT,
begin = sentBegin,
end = nextSentEnd,
result = content,
metadata = sent.metadata)
sentBegin += nextSentEnd + 1
annots
}
}
}
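/*
 * A rough calling sketch, assuming an already constructed `marian: TensorflowMarian`
 * and a `vocabs: Array[String]` matching the exported model (loading the TensorFlow
 * and SentencePiece wrappers is outside the scope of this file; the sentence text and
 * lengths below are illustrative):
 *
 * {{{
 *   import com.johnsnowlabs.nlp.{Annotation, AnnotatorType}
 *
 *   val sentences = Seq(new Annotation(
 *     annotatorType = AnnotatorType.DOCUMENT,
 *     begin = 0,
 *     end = 17,
 *     result = ">>fr<< Hello world",
 *     metadata = Map.empty[String, String]))
 *
 *   val translated: Array[Annotation] = marian.predict(
 *     sentences = sentences,
 *     batchSize = 1,
 *     maxInputLength = 40,
 *     maxOutputLength = 40,
 *     vocabs = vocabs,
 *     langId = "")  // empty: let the ">>fr<<" prefix in the text select the target language
 * }}}
 */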
|
JohnSnowLabs/spark-nlp
|
src/main/scala/com/johnsnowlabs/ml/tensorflow/TensorflowMarian.scala
|
Scala
|
apache-2.0
| 12,036
|
package io.vamp.model.reader
import io.vamp.model.artifact._
import io.vamp.model.notification.UnsupportedProtocolError
import io.vamp.model.reader.YamlSourceReader._
import io.vamp.model.reader.ComposeWriter._
import scala.util.Try
import scala.language.higherKinds
/**
* Pairs a result from a YAML compose parse with a list of comments.
* Comments are accumulated and later added to the YAML output as YAML comments.
*/
case class ComposeWriter[A](result: A, comments: List[String]) {
def map[B](f: A ⇒ B): ComposeWriter[B] =
ComposeWriter.map(this)(f)
def flatMap[B](f: A ⇒ ComposeWriter[B]): ComposeWriter[B] =
ComposeWriter.flatMap(this)(f)
def run: (A, List[String]) = tupled(this)
}
object ComposeWriter {
/** Extracts the comments from a ComposeWriter **/
def extractComments[A](composeWriter: ComposeWriter[A]): List[String] = composeWriter.comments
/** Creates a ComposeWriter from A without any comments **/
def lift[A](a: A): ComposeWriter[A] = ComposeWriter(a, List())
/** Retrieve the result from ComposeWriter **/
def get[A](composeWriter: ComposeWriter[A]): A = composeWriter.result
/** Change the result of ComposeWriter without touching the comments **/
def map[A, B](composeWriter: ComposeWriter[A])(f: A ⇒ B): ComposeWriter[B] =
composeWriter.copy(result = f(composeWriter.result))
/** Sequence two ComposeWriter actions together **/
def flatMap[A, B](composeWriter: ComposeWriter[A])(f: A ⇒ ComposeWriter[B]): ComposeWriter[B] =
f(composeWriter.result) match {
case cr: ComposeWriter[B] ⇒ cr.copy(comments = composeWriter.comments ++ cr.comments)
}
/** Extract the results of ComposeWriter in a tuple **/
def tupled[A](composeWriter: ComposeWriter[A]): (A, List[String]) =
(composeWriter.result, composeWriter.comments)
/** Add comment to a parsed A **/
def add[A](a: A)(addComment: A ⇒ String): ComposeWriter[A] =
ComposeWriter(a, List(addComment(a)))
def add[A](as: List[A])(addComments: List[A] ⇒ List[String]) =
ComposeWriter(as, addComments(as))
/** Or combinator: first tries `read`; if that fails, it tries `readTwo` **/
def or[A](read: YamlSourceReader ⇒ ComposeWriter[A])(readTwo: YamlSourceReader ⇒ ComposeWriter[A])(implicit source: YamlSourceReader): ComposeWriter[A] =
Try(read(source)).getOrElse(readTwo(source))
/** Sequences a List of ComposeWriter[A] to a ComposeWriter of List[A] **/
def sequence[A](composeWriter: List[ComposeWriter[A]]): ComposeWriter[List[A]] =
ComposeWriter(composeWriter.map(get), composeWriter.flatMap(extractComments))
}
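/*
 * A small sketch of how results and comments accumulate through `flatMap`, using only
 * the combinators defined above (the values are illustrative):
 *
 * {{{
 *   import io.vamp.model.reader.ComposeWriter._
 *
 *   val written: ComposeWriter[Int] =
 *     add(1)(n => s"started with $n").flatMap(n => add(n + 1)(m => s"incremented to $m"))
 *
 *   val (result, comments) = written.run
 *   // result   == 2
 *   // comments == List("started with 1", "incremented to 2")
 * }}}
 */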
/**
* Reads a Blueprint from a docker-compose yaml
*/
object ComposeBlueprintReader extends YamlReader[ComposeWriter[Blueprint]] {
/**
* Adds the name to the defined blueprints
* Use this function instead of parse, since name is not available in YamlReader.parse
*/
def fromDockerCompose(name: String)(implicit source: String): ComposeWriter[Blueprint] =
this.read(source).map {
case defaultBlueprint: DefaultBlueprint ⇒ defaultBlueprint.copy(name = name)
case other ⇒ other
}
/**
* Flattens and comments unused values from the docker-compose yaml
*/
private def flattenUnusedValues(implicit source: YamlSourceReader): ComposeWriter[Unit] =
sequence(source.flattenNotConsumed().toList.map {
case (key, value) ⇒
add(())(_ ⇒ s"Did not include unsupported field: '$key' with value: '$value'")
}).map(_ ⇒ source.flatten())
override protected def parse(implicit source: YamlSourceReader): ComposeWriter[Blueprint] =
for {
_ ← add(<<?[String]("version"))(v ⇒ s"Compose version: ${v.getOrElse("undefined")}.")
clusters ← ComposeClusterReader.read
_ ← flattenUnusedValues
} yield DefaultBlueprint(
name = "", // will be replaced by fromDockerCompose function
metadata = Map(),
clusters = clusters,
gateways = List(),
environmentVariables = List(),
dialects = Map())
}
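/*
 * A minimal invocation sketch, assuming a hypothetical two-service compose file; the
 * accumulated comments report ignored fields, host ports and similar conversions:
 *
 * {{{
 *   implicit val composeYaml: String =
 *     """version: '2'
 *       |services:
 *       |  web:
 *       |    image: nginx
 *       |    ports:
 *       |      - "8080:80"
 *       |    depends_on:
 *       |      - db
 *       |  db:
 *       |    image: postgres
 *       |""".stripMargin
 *
 *   val (blueprint, comments) =
 *     ComposeBlueprintReader.fromDockerCompose(name = "my-blueprint").run
 * }}}
 */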
object ComposeClusterReader extends YamlReader[ComposeWriter[List[Cluster]]] {
override protected def parse(implicit source: YamlSourceReader): ComposeWriter[List[Cluster]] =
for {
clusters ← parseClusters
clustersWithDependencies ← resolveDependencies(clusters, clusters.map(c ⇒ c.name))
clustersWithDependenciesAndPorts ← addPorts(clustersWithDependencies.map(_._1), clustersWithDependencies.flatMap(_._2))
} yield clustersWithDependenciesAndPorts
private def parseClusters(implicit source: YamlSourceReader): ComposeWriter[List[Cluster]] =
sequence(<<?[YamlSourceReader]("services") match {
case Some(yaml) ⇒ yaml
.pull()
.toList
.flatMap {
case (name: String, yaml: YamlSourceReader) ⇒
Some(ComposeServicesReader.parseService(name)(yaml).map { service ⇒
Cluster(
name = name,
metadata = Map(),
services = List(service),
gateways = List(),
healthChecks = None,
network = None,
sla = None,
dialects = Map())
})
case _ ⇒ None
}
case None ⇒ List()
})
private def getBreedField[F[_], A](breed: Breed)(default: F[A])(fromBreed: DefaultBreed ⇒ F[A]): F[A] =
breed match {
case defaultBreed: DefaultBreed ⇒ fromBreed(defaultBreed)
case _ ⇒ default
}
private def setBreedValue(setValue: DefaultBreed ⇒ Breed)(breed: Breed): Breed =
breed match {
case defaultBreed: DefaultBreed ⇒ setValue(defaultBreed)
case _ ⇒ breed
}
private def replaceValueOpt(clusterNames: List[String])(environmentVariable: EnvironmentVariable) =
environmentVariable
.value
.flatMap { (environmentValue: String) ⇒
clusterNames.map(cn ⇒ cn → s"($cn):(\\\\d+)".r).flatMap {
case (cn, pattern) ⇒
environmentValue match {
case pattern(name, port) ⇒ Some(Tuple3(
cn,
environmentVariable.copy(value = Some("$" + cn + ".host:" + "$" + cn + ".ports.port_" + port)),
port))
case xs ⇒ None
}
}.headOption
}
private def resolveDependencies(clusters: List[Cluster], clusterNames: List[String]): ComposeWriter[List[(Cluster, List[(String, String)])]] =
clusters.foldRight(lift(List.empty[(Cluster, List[(String, String)])])) { (cluster, acc) ⇒
acc.flatMap { clusters ⇒
addEnvironmentVariables(cluster.services.head, clusterNames).map {
case (service, portsWithCluster) ⇒
(cluster.copy(services = List(service)) → portsWithCluster) +: clusters
}
}
}
private def addEnvironmentVariables(service: Service, clusterNames: List[String]): ComposeWriter[(Service, List[(String, String)])] =
getBreedField[List, EnvironmentVariable](service.breed)(List())(_.environmentVariables)
.flatMap(ev ⇒ replaceValueOpt(clusterNames)(ev))
.foldRight(lift(service → List.empty[(String, String)])) {
case ((cn, env, port), serviceWriter) ⇒
serviceWriter.flatMap {
case (s, portsWithCluster) ⇒
val newEnvironmentVars = getBreedField[List, EnvironmentVariable](s.breed)(List())(_.environmentVariables)
.foldRight(List.empty[EnvironmentVariable]) { (ev, acc) ⇒
if (ev.name == env.name) env +: acc
else ev +: acc
}
val newDependencies: Map[String, Breed] =
getBreedField[({ type F[A] = Map[String, A] })#F, Breed](service.breed)(Map())(_.dependencies) +
(cn → BreedReference(s"$cn:1.0.0"))
add(service.copy(
breed = setBreedValue(_.copy(
environmentVariables = newEnvironmentVars,
dependencies = newDependencies))(s.breed)) → ((cn, port) +: portsWithCluster)
) { _ ⇒
s"Created port reference in environment variable: '${env.name}'"
}
}
}
private def addPorts(clusters: List[Cluster], portsWithCluster: List[(String, String)]): ComposeWriter[List[Cluster]] =
clusters.foldRight(lift(List.empty[Cluster])) { (cluster, acc) ⇒
acc.flatMap { clusters ⇒
portsWithCluster
.filter(_._1 == cluster.name)
.foldRight(lift(cluster)) {
case ((cn, port), clusterWriter) ⇒
clusterWriter.flatMap { c: Cluster ⇒
val newService = c.services
.head
.copy(
breed = setBreedValue(b ⇒
b.copy(
ports = b.ports :+ Port(s"port_$port", None, Some(port), port.toInt, Port.Type.Http))
)(c.services.head.breed))
add(cluster.copy(services = List(newService)))(_ ⇒ s"Created port: 'port_$port' in breed: '$cn:1.0.0'.")
}
}.map { newCluster ⇒
newCluster +: clusters
}
}
}
}
/**
* Reads the services from a docker-compose yaml
*/
object ComposeServicesReader extends YamlReader[ComposeWriter[Service]] {
def parseService(name: String)(yaml: YamlSourceReader): ComposeWriter[Service] =
parse(yaml).map { service ⇒
service.copy(breed = service.breed match {
case defaultBreed: DefaultBreed ⇒ defaultBreed.copy(name = s"$name:1.0.0")
})
}
protected def parse(implicit yaml: YamlSourceReader): ComposeWriter[Service] =
for {
deployable ← lift(Deployable(Some("container/docker"), <<))
ports ← ComposePortReader.read
dialects ← lift(<<?[String]("command").map(c ⇒ Map("docker" → c)).getOrElse(Map()))
dependencies ← lift(<<?[List[String]]("depends_on").getOrElse(List()).map(d ⇒ d → BreedReference(s"$d:1.0.0")).toMap)
environmentVariables ← or(s ⇒ ComposeEnvironmentReaderList.read(s))(s ⇒ ComposeEnvironmentReaderMap.read(s))
} yield Service(
breed = DefaultBreed(
name = "",
metadata = Map(),
deployable = deployable,
ports = ports,
environmentVariables = environmentVariables,
constants = List(),
arguments = List(),
dependencies = dependencies,
healthChecks = None),
environmentVariables = List(),
scale = None,
arguments = List(),
healthChecks = None,
network = None,
dialects = dialects,
health = None)
}
/**
* Reads a list of ports from a docker-compose yaml
*/
object ComposePortReader extends YamlReader[ComposeWriter[List[Port]]] {
override protected def parse(implicit source: YamlSourceReader): ComposeWriter[List[Port]] =
sequence(<<?[List[String]]("ports").map { ports ⇒
ports.map { portString ⇒
val composePort = portString.split(":") match {
case Array(singlePort) ⇒ lift(parsePort(singlePort))
case Array(hostPort, containerPort) ⇒
add(parsePort(containerPort))(_ ⇒ s"Ignored host port: $hostPort.")
case Array(ip, hostPort, containerPort) ⇒
add(parsePort(containerPort))(_ ⇒ s"Ignored ip: $ip and host port: $hostPort.")
}
composePort.flatMap {
case SinglePort(port) ⇒ lift(Port(s"port_$port", None, Some(port.toString), port, Port.Type.Http))
case PortRange(start, end) ⇒ add(Port(s"port_$start", None, Some(start.toString), start, Port.Type.Http))(_ ⇒ s"Ignored port ranges to: $end.")
case PortWithType(port, pt) ⇒
val portType = if (pt.equalsIgnoreCase("tcp")) Port.Type.Tcp
else if (pt.equalsIgnoreCase("http")) Port.Type.Http
else throwException(UnsupportedProtocolError(pt))
lift(Port(s"port_$port", None, Some(port.toString), port, portType))
}
}
}.getOrElse(List()))
// Private ADT for differentiating types of ports
private sealed trait ComposePort
private case class SinglePort(port: Int) extends ComposePort
private case class PortRange(start: Int, end: Int) extends ComposePort
private case class PortWithType(port: Int, portType: String) extends ComposePort
private def parsePort(portString: String): ComposePort = portString.split("-") match {
case Array(port) ⇒ portType(port)
case Array(startPort, endPort) ⇒ PortRange(startPort.toInt, endPort.toInt)
}
private def portType(s: String): ComposePort = s.split("/") match {
case Array(port) ⇒ SinglePort(port.toInt)
case Array(port, portType) ⇒ PortWithType(port.toInt, portType)
}
}
/**
* Reads a list of environment values that are defined in the docker-compose yaml as a list
*/
object ComposeEnvironmentReaderList extends YamlReader[ComposeWriter[List[EnvironmentVariable]]] {
override protected def parse(implicit source: YamlSourceReader): ComposeWriter[List[EnvironmentVariable]] =
lift(<<?[List[String]]("environment", silent = true).map { environments ⇒
environments.map(_.split("=") match {
case Array(key, value) ⇒ EnvironmentVariable(key, None, Some(value), None)
})
}.getOrElse(List()))
}
/**
* Reads a list of environment values that are defined in the docker-compose yaml as key-value pairs
*/
object ComposeEnvironmentReaderMap extends YamlReader[ComposeWriter[List[EnvironmentVariable]]] {
override protected def parse(implicit source: YamlSourceReader): ComposeWriter[List[EnvironmentVariable]] =
lift(<<?[YamlSourceReader]("environment", silent = true)
.map {
_.pull()
.toList
.map {
case (key, value) ⇒
EnvironmentVariable(key, None, Some(value.asInstanceOf[String]), None)
}
}.getOrElse(List()))
}
|
magneticio/vamp
|
model/src/main/scala/io/vamp/model/reader/ComposeBlueprintReader.scala
|
Scala
|
apache-2.0
| 14,045
|
/**
* Caches features on a per-item basis. Basically a simple wrapper around a
* thread-safe hash map.
*/
package edu.berkeley.veloxms
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicLong
/**
* @tparam K type of the item whose features are being stored.
*/
// TODO make sure that K is hashable
class FeatureCache[K, V](cacheActivated: Boolean) {
// TODO: maybe we should universally represent features in JBLAS format
// so I don't have to keep transforming them between vectors and arrays
private val _cache = new ConcurrentHashMap[K, V]()
private val hits = new AtomicLong()
private val misses = new AtomicLong()
def addItem(data: K, features: V) {
if (cacheActivated) {
_cache.putIfAbsent(data, features)
}
}
def getItem(data: K): Option[V] = {
if (cacheActivated) {
val result = Option(_cache.get(data))
result match {
case Some(_) => hits.incrementAndGet()
case None => misses.incrementAndGet()
}
result
} else {
misses.incrementAndGet()
None
}
}
def getCacheHitRate: (Long, Long) = {
(hits.get(), misses.get())
}
}
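/*
 * Typical usage sketch (key and value types below are illustrative): the cache is shared
 * across request threads, so a lookup either hits or falls through to feature computation.
 *
 * {{{
 *   val cache = new FeatureCache[Long, Array[Double]](cacheActivated = true)
 *
 *   val features: Array[Double] = cache.getItem(42L).getOrElse {
 *     val computed = Array(0.1, 0.2, 0.3)  // stand-in for a real feature computation
 *     cache.addItem(42L, computed)
 *     computed
 *   }
 *
 *   val (hits, misses) = cache.getCacheHitRate
 * }}}
 */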
// object FeatureCache {
//
// // Totally arbitrary placeholder until we figure out what
// // a cache budget means
// val tempBudget = 100
//
// }
|
kcompher/velox-modelserver
|
veloxms-core/src/main/scala/edu/berkeley/veloxms/FeatureCache.scala
|
Scala
|
apache-2.0
| 1,327
|
package io.buoyant.linkerd
import com.fasterxml.jackson.annotation.{JsonIgnore, JsonProperty, JsonTypeInfo}
import com.twitter.conversions.DurationOps._
import com.twitter.finagle._
import com.twitter.finagle.naming.buoyant.DstBindingFactory
import com.twitter.finagle.naming.NameInterpreter
import com.twitter.finagle.service._
import com.twitter.finagle.buoyant.ParamsMaybeWith
import com.twitter.util.Closable
import io.buoyant.namer.{DefaultInterpreterConfig, InterpreterConfig}
import io.buoyant.router.{ClassifiedRetries, Originator, RoutingFactory}
/**
* A router configuration builder api.
*
* Each router must have a [[ProtocolInitializer protocol]] that
* assists in the parsing and initialization of a router and its
* services.
*
* `params` contains all params configured on this router, including
* (in order of ascending preference):
* - protocol-specific default router parameters
* - linker default parameters
* - router-specific params.
*
* Each router must have one or more [[Server Servers]].
*
* Concrete implementations are provided by a [[ProtocolInitializer]].
*/
trait Router {
def protocol: ProtocolInitializer
// configuration
def params: Stack.Params
protected def _withParams(ps: Stack.Params): Router
protected def configureServer(s: Server): Server
def withParams(ps: Stack.Params): Router = {
val routerWithParams = _withParams(ps)
routerWithParams.withServers(routerWithParams.servers.map(routerWithParams.configureServer))
}
def configured[P: Stack.Param](p: P): Router = withParams(params + p)
def configured(ps: Stack.Params): Router = withParams(params ++ ps)
// helper aliases
def label: String = params[param.Label].label
// servers
def servers: Seq[Server]
protected def withServers(servers: Seq[Server]): Router
/** Return a router with an additional server. */
def serving(s: Server): Router =
withServers(servers :+ configureServer(s))
def serving(ss: Seq[Server]): Router = ss.foldLeft(this)(_ serving _)
def withAnnouncers(announcers: Seq[(Path, Announcer)]): Router
/**
* Initialize a router by instantiating a downstream router client
* so that its upstream `servers` may be bound.
*/
def initialize(): Router.Initialized
def interpreter: NameInterpreter = params[DstBindingFactory.Namer].interpreter
}
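/*
 * The builder is normally driven through `RouterConfig.router` below; a rough sketch of
 * the chaining, assuming a `protocol: ProtocolInitializer`, `params: Stack.Params` and
 * `server: Server` are already in hand:
 *
 * {{{
 *   val router: Router =
 *     protocol.router
 *       .configured(params)
 *       .serving(server)
 *
 *   val initialized: Router.Initialized = router.initialize()
 * }}}
 */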
object Router {
/**
* A [[Router]] that has been configured and initialized.
*
* Concrete implementations
*/
trait Initialized extends Closable {
def protocol: ProtocolInitializer
def params: Stack.Params
def servers: Seq[Server.Initializer]
def announcers: Seq[(Path, Announcer)]
}
}
@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, include = JsonTypeInfo.As.PROPERTY, property = "protocol")
trait RouterConfig {
// RouterConfig subtypes are required to implement these so that they may
// refine to more specific config types.
def servers: Seq[ServerConfig]
def service: Option[Svc]
def client: Option[Client]
var dtab: Option[Dtab] = None
var originator: Option[Boolean] = None
var dstPrefix: Option[String] = None
@JsonProperty("announcers")
var _announcers: Option[Seq[AnnouncerConfig]] = None
@JsonProperty("label")
var _label: Option[String] = None
@JsonIgnore
def label = _label.getOrElse(protocol.name)
/*
* interpreter controls how names are bound.
*/
@JsonProperty("interpreter")
var _interpreter: Option[InterpreterConfig] = None
protected[this] def defaultInterpreter: InterpreterConfig =
new DefaultInterpreterConfig
@JsonIgnore
def interpreter: InterpreterConfig =
_interpreter.getOrElse(defaultInterpreter)
/*
* bindingTimeoutMs limits name resolution.
*/
@JsonProperty("bindingTimeoutMs")
var _bindingTimeoutMs: Option[Int] = None
@JsonIgnore
def bindingTimeout = _bindingTimeoutMs.map(_.millis).getOrElse(10.seconds)
/*
* binding cache size
*/
var bindingCache: Option[BindingCacheConfig] = None
@JsonIgnore protected[this] def defaultResponseClassifier: ResponseClassifier =
ClassifiedRetries.Default
/**
* This property must be set to true in order to use this router if it
* is experimental.
*/
@JsonProperty("experimental")
var _experimentalEnabled: Option[Boolean] = None
/**
* True if this protocol is experimental but the `experimental` property
* has not been set.
*/
@JsonIgnore
def disabled = protocol.experimentalRequired && !_experimentalEnabled.contains(true)
@JsonIgnore
def routerParams(params: Stack.Params) = (params +
param.ResponseClassifier(defaultResponseClassifier) +
FailureAccrualConfig.default)
.maybeWith(dtab.map(dtab => RoutingFactory.BaseDtab(() => dtab)))
.maybeWith(originator.map(Originator.Param(_)))
.maybeWith(dstPrefix.map(pfx => RoutingFactory.DstPrefix(Path.read(pfx))))
.maybeWith(bindingCache.map(_.capacity))
.maybeWith(client.map(_.clientParams))
.maybeWith(service.map(_.pathParams))
.maybeWith(bindingCache.flatMap(_.idleTtl)) +
param.Label(label) +
DstBindingFactory.BindingTimeout(bindingTimeout)
@JsonIgnore
def router(params: Stack.Params): Router = {
val prms = params ++ routerParams(params)
val param.Label(label) = prms[param.Label]
val announcers = _announcers.toSeq.flatten.map { announcer =>
announcer.prefix -> announcer.mk(params)
}
protocol.router.configured(prms)
.serving(servers.map(_.mk(protocol, label)))
.withAnnouncers(announcers)
}
@JsonIgnore
def protocol: ProtocolInitializer
}
case class BindingCacheConfig(
paths: Option[Int],
trees: Option[Int],
bounds: Option[Int],
clients: Option[Int],
idleTtlSecs: Option[Int]
) {
private[this] val default = DstBindingFactory.Capacity.default
def capacity = DstBindingFactory.Capacity(
paths = paths.getOrElse(default.paths),
trees = trees.getOrElse(default.trees),
bounds = bounds.getOrElse(default.bounds),
clients = clients.getOrElse(default.clients)
)
def idleTtl: Option[DstBindingFactory.IdleTtl] = idleTtlSecs.map { t =>
DstBindingFactory.IdleTtl(t.seconds)
}
}
|
linkerd/linkerd
|
linkerd/core/src/main/scala/io/buoyant/linkerd/Router.scala
|
Scala
|
apache-2.0
| 6,134
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.optimization
import scala.collection.mutable.ArrayBuffer
import breeze.linalg.{DenseVector => BDV, axpy}
import breeze.optimize.{CachedDiffFunction, DiffFunction, LBFGS => BreezeLBFGS}
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.mllib.linalg.{Vectors, Vector}
/**
* :: DeveloperApi ::
* Class used to solve an optimization problem using Limited-memory BFGS.
* Reference: [[http://en.wikipedia.org/wiki/Limited-memory_BFGS]]
* @param gradient Gradient function to be used.
* @param updater Updater to be used to update weights after every iteration.
*/
@DeveloperApi
class LBFGS(private var gradient: Gradient, private var updater: Updater)
extends Optimizer with Logging {
private var numCorrections = 10
private var convergenceTol = 1E-4
private var maxNumIterations = 100
private var regParam = 0.0
private var miniBatchFraction = 1.0
/**
* Set the number of corrections used in the LBFGS update. Default 10.
* Values of numCorrections less than 3 are not recommended; large values
* of numCorrections will result in excessive computing time.
* 3 < numCorrections < 10 is recommended.
* Restriction: numCorrections > 0
*/
def setNumCorrections(corrections: Int): this.type = {
assert(corrections > 0)
this.numCorrections = corrections
this
}
/**
* Set fraction of data to be used for each L-BFGS iteration. Default 1.0.
*/
def setMiniBatchFraction(fraction: Double): this.type = {
this.miniBatchFraction = fraction
this
}
/**
* Set the convergence tolerance of iterations for L-BFGS. Default 1E-4.
* Smaller value will lead to higher accuracy with the cost of more iterations.
*/
def setConvergenceTol(tolerance: Double): this.type = {
this.convergenceTol = tolerance
this
}
/**
* Set the maximal number of iterations for L-BFGS. Default 100.
*/
def setMaxNumIterations(iters: Int): this.type = {
this.maxNumIterations = iters
this
}
/**
* Set the regularization parameter. Default 0.0.
*/
def setRegParam(regParam: Double): this.type = {
this.regParam = regParam
this
}
/**
* Set the gradient function (of the loss function of one single data example)
* to be used for L-BFGS.
*/
def setGradient(gradient: Gradient): this.type = {
this.gradient = gradient
this
}
/**
* Set the updater function to actually perform a gradient step in a given direction.
* The updater is responsible to perform the update from the regularization term as well,
* and therefore determines what kind of regularization is used, if any.
*/
def setUpdater(updater: Updater): this.type = {
this.updater = updater
this
}
override def optimize(data: RDD[(Double, Vector)], initialWeights: Vector): Vector = {
val (weights, _) = LBFGS.runMiniBatchLBFGS(
data,
gradient,
updater,
numCorrections,
convergenceTol,
maxNumIterations,
regParam,
miniBatchFraction,
initialWeights)
weights
}
}
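/*
 * A bare-bones wiring sketch, assuming `data: RDD[(Double, Vector)]` and
 * `initialWeights: Vector` are already in hand; the gradient/updater pair below
 * (logistic loss with squared-L2 regularization) is one concrete choice from this package:
 *
 * {{{
 *   val optimizer = new LBFGS(new LogisticGradient(), new SquaredL2Updater())
 *     .setNumCorrections(10)
 *     .setConvergenceTol(1e-4)
 *     .setMaxNumIterations(50)
 *     .setRegParam(0.1)
 *
 *   val weights: Vector = optimizer.optimize(data, initialWeights)
 * }}}
 */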
/**
* :: DeveloperApi ::
* Top-level method to run L-BFGS.
*/
@DeveloperApi
object LBFGS extends Logging {
/**
* Run Limited-memory BFGS (L-BFGS) in parallel using mini batches.
* In each iteration, we sample a subset (fraction miniBatchFraction) of the total data
* in order to compute a gradient estimate.
* Sampling, and averaging the subgradients over this subset is performed using one standard
* spark map-reduce in each iteration.
*
* @param data - Input data for L-BFGS. RDD of the set of data examples, each of
* the form (label, [feature values]).
* @param gradient - Gradient object (used to compute the gradient of the loss function of
* one single data example)
* @param updater - Updater function to actually perform a gradient step in a given direction.
* @param numCorrections - The number of corrections used in the L-BFGS update.
* @param convergenceTol - The convergence tolerance of iterations for L-BFGS
* @param maxNumIterations - Maximal number of iterations that L-BFGS can be run.
* @param regParam - Regularization parameter
* @param miniBatchFraction - Fraction of the input data set that should be used for
* one iteration of L-BFGS. Default value 1.0.
*
* @return A tuple containing two elements. The first element is a column matrix containing
* weights for every feature, and the second element is an array containing the loss
* computed for every iteration.
*/
def runMiniBatchLBFGS(
data: RDD[(Double, Vector)],
gradient: Gradient,
updater: Updater,
numCorrections: Int,
convergenceTol: Double,
maxNumIterations: Int,
regParam: Double,
miniBatchFraction: Double,
initialWeights: Vector): (Vector, Array[Double]) = {
val lossHistory = new ArrayBuffer[Double](maxNumIterations)
val numExamples = data.count()
val miniBatchSize = numExamples * miniBatchFraction
val costFun =
new CostFun(data, gradient, updater, regParam, miniBatchFraction, lossHistory, miniBatchSize)
val lbfgs = new BreezeLBFGS[BDV[Double]](maxNumIterations, numCorrections, convergenceTol)
val weights = Vectors.fromBreeze(
lbfgs.minimize(new CachedDiffFunction(costFun), initialWeights.toBreeze.toDenseVector))
logInfo("LBFGS.runMiniBatchSGD finished. Last 10 losses %s".format(
lossHistory.takeRight(10).mkString(", ")))
(weights, lossHistory.toArray)
}
/**
* CostFun implements Breeze's DiffFunction[T], which returns the loss and gradient
* at a particular point (weights). It's used in Breeze's convex optimization routines.
*/
private class CostFun(
data: RDD[(Double, Vector)],
gradient: Gradient,
updater: Updater,
regParam: Double,
miniBatchFraction: Double,
lossHistory: ArrayBuffer[Double],
miniBatchSize: Double) extends DiffFunction[BDV[Double]] {
private var i = 0
override def calculate(weights: BDV[Double]) = {
// Have a local copy to avoid the serialization of CostFun object which is not serializable.
val localData = data
val localGradient = gradient
val (gradientSum, lossSum) = localData.sample(false, miniBatchFraction, 42 + i)
.aggregate((BDV.zeros[Double](weights.size), 0.0))(
seqOp = (c, v) => (c, v) match { case ((grad, loss), (label, features)) =>
val l = localGradient.compute(
features, label, Vectors.fromBreeze(weights), Vectors.fromBreeze(grad))
(grad, loss + l)
},
combOp = (c1, c2) => (c1, c2) match { case ((grad1, loss1), (grad2, loss2)) =>
(grad1 += grad2, loss1 + loss2)
})
/**
* regVal is the sum of weight squares if it's the L2 updater;
* for other updaters, the same logic is followed.
*/
val regVal = updater.compute(
Vectors.fromBreeze(weights),
Vectors.dense(new Array[Double](weights.size)), 0, 1, regParam)._2
val loss = lossSum / miniBatchSize + regVal
/**
* It will return the gradient part of regularization using updater.
*
* Given the input parameters, the updater basically does the following,
*
* w' = w - thisIterStepSize * (gradient + regGradient(w))
* Note that regGradient is function of w
*
* If we set gradient = 0, thisIterStepSize = 1, then
*
* regGradient(w) = w - w'
*
* TODO: We need to clean it up by separating the logic of regularization out
* from updater to regularizer.
*/
// The following gradientTotal is actually the regularization part of gradient.
// Will add the gradientSum computed from the data with weights in the next step.
val gradientTotal = weights - updater.compute(
Vectors.fromBreeze(weights),
Vectors.dense(new Array[Double](weights.size)), 1, 1, regParam)._1.toBreeze
// gradientTotal = gradientSum / miniBatchSize + gradientTotal
axpy(1.0 / miniBatchSize, gradientSum, gradientTotal)
/**
* NOTE: lossSum and loss are computed using the weights from the previous iteration
* and regVal is the regularization value computed in the previous iteration as well.
*/
lossHistory.append(loss)
i += 1
(loss, gradientTotal)
}
}
}
|
zhangjunfang/eclipse-dir
|
spark/mllib/src/main/scala/org/apache/spark/mllib/optimization/LBFGS.scala
|
Scala
|
bsd-2-clause
| 9,357
|
package eu.liderproject.functionalsparql
import com.hp.hpl.jena.graph.NodeFactory
import com.hp.hpl.jena.sparql.core.Quad
import java.io.File
import java.net.URL
import org.apache.jena.riot.RDFDataMgr
import org.scalatest._
import scala.collection.JavaConversions._
import scala.xml.XML
class DAWGTests extends FlatSpec with Matchers {
val testsToSkip = Set(
// TODO: File Bug Report @ Jena
"http://www.w3.org/2001/sw/DataAccess/tests/data-r2/basic/manifest#term-6",
"http://www.w3.org/2001/sw/DataAccess/tests/data-r2/basic/manifest#term-7",
// Known issue
"http://www.w3.org/2001/sw/DataAccess/tests/data-r2/optional-filter/manifest#dawg-optional-filter-005-not-simplified",
"http://www.w3.org/2001/sw/DataAccess/tests/data-r2/algebra/manifest#opt-filter-2",
// Unapproved
"http://www.w3.org/2001/sw/DataAccess/tests/data-r2/open-world/manifest#date-1")
for(dir <- new File("src/test/resources/test-suite-archive/data-r2/").listFiles() if dir.isDirectory) {
for(file <- dir.listFiles() if file.getName() == "manifest.ttl") {
val model = RDFDataMgr.loadModel("file:" + file.getPath())
val testListTriples = model.listResourcesWithProperty(
model.createProperty("http://www.w3.org/1999/02/22-rdf-syntax-ns#type"),
model.createResource("http://www.w3.org/2001/sw/DataAccess/tests/test-manifest#QueryEvaluationTest")
)
for(test <- testListTriples if !testsToSkip.exists(test.toString.contains(_))) {
//for(test <- testListTriples if test.toString == "http://www.w3.org/2001/sw/DataAccess/tests/data-r2/solution-seq/manifest#offset-1") {
//for(test <- testListTriples if test.toString.contains("opt-filter-2") || test.toString.contains("nested-opt-1") || test.toString.contains("filter-scope-1")) {
val expResult = test.getProperty(model.createProperty("http://www.w3.org/2001/sw/DataAccess/tests/test-manifest#result")).getObject().asResource()
val queries = test.getProperty(model.createProperty("http://www.w3.org/2001/sw/DataAccess/tests/test-manifest#action")).getObject().asResource().
listProperties(model.createProperty("http://www.w3.org/2001/sw/DataAccess/tests/test-query#query")).map { st =>
st.getObject().asResource()
}
val dataset = Option(test.getProperty(model.createProperty("http://www.w3.org/2001/sw/DataAccess/tests/test-manifest#action")).getObject().asResource().
getProperty(model.createProperty("http://www.w3.org/2001/sw/DataAccess/tests/test-query#data"))).map(_.getObject().asResource())
val givenGraphDataset = test.getProperty(model.createProperty("http://www.w3.org/2001/sw/DataAccess/tests/test-manifest#action")).getObject().asResource().
listProperties(model.createProperty("http://www.w3.org/2001/sw/DataAccess/tests/test-query#graphData")).map(_.getObject().asNode())
test.toString should "execute" in {
for(query <- queries) {
//println("Processing " + query.toString)
val plan = functionalsparql.processQuery(scala.io.Source.fromURL(query.toString)(scala.io.Codec.UTF8).getLines.mkString("\n"), query.toString)
val data = dataset match {
case Some(ds) => RDFDataMgr.loadModel(
ds.getURI()).listStatements().toList.map(s => new Quad(Quad.defaultGraphIRI, s.asTriple))
case None => Nil
}
val graphDataset = Set() ++ givenGraphDataset ++ plan.graphs ++ plan.namedGraphs
val graphData = graphDataset flatMap { ds =>
val model = RDFDataMgr.loadModel(ds.getURI())
model.listStatements().toList.map(s => new Quad(ds, s.asTriple))
}
require(dataset.isEmpty || data.size > 0)
plan match {
case p : SelectPlan =>
val result = p.execute(new SimpleDistCollection(data ++ graphData))
if(expResult.getURI().endsWith(".srx")) {
SparqlXMLResults.verify(new URL(expResult.getURI()), p, result)
} else if(expResult.getURI().endsWith(".ttl")
|| expResult.getURI().endsWith(".rdf")) {
SparqlTurtleResults.verify(new URL(expResult.getURI()), p, result)
} else {
fail("Unknown results" + expResult)
}
case p : AskPlan =>
val result = p.execute(new SimpleDistCollection(data ++ graphData))
if(expResult.getURI().endsWith(".srx")) {
SparqlXMLResults.verify(new URL(expResult.getURI()), p, result)
} else if(expResult.getURI().endsWith(".ttl")) {
result should be (expResult.getURI().endsWith("true.ttl"))
//SparqlTurtleResults.verify(new URL(expResult.getURI()), p, result)
} else {
fail("Unknown results" + expResult)
}
case _ =>
System.err.println("TODO")
}
}
}//}
}
}
}
object SparqlTurtleResults {
def verify(resultFile : java.net.URL, plan : Plan[DistCollection[Match]],
resultDC : DistCollection[Match]) {
val sparqlTTL = RDFDataMgr.loadGraph(resultFile.toString())
val statements = sparqlTTL.find(null,
NodeFactory.createURI("http://www.w3.org/2001/sw/DataAccess/tests/result-set#solution"),
null)
val results = new scala.collection.mutable.ArrayBuffer[Match]() ++ resultDC.toIterable
for(result <- statements) {
val bindingStats = sparqlTTL.find(result.getObject(),
NodeFactory.createURI("http://www.w3.org/2001/sw/DataAccess/tests/result-set#binding"),
null)
val r2 = Seq() ++ (for(binding <- bindingStats) yield {
val k = sparqlTTL.find(binding.getObject(),
NodeFactory.createURI("http://www.w3.org/2001/sw/DataAccess/tests/result-set#variable"),
null).next().getObject().getLiteralLexicalForm()
val v = sparqlTTL.find(binding.getObject(),
NodeFactory.createURI("http://www.w3.org/2001/sw/DataAccess/tests/result-set#value"),
null).next().getObject()
k -> v
})
val resultMatch = results.find { result =>
result.binding.size == r2.size &&
r2.forall {
case (k, v) =>
result.binding.contains(k) && (result.binding(k) == v ||
result.binding(k).isBlank() && v.isBlank())
}
}
resultMatch match {
case Some(s) => results.remove(results.indexOf(s))
case None => fail("not found:" + r2)
}
}
for(result <- results) {
fail("not a result:" + result.toString)
}
}
}
object SparqlXMLResults {
def toXML(plan : Plan[_], m : DistCollection[Match]) = {
<sparql xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:xs="http://www.w3.org/2001/XMLSchema#" xmlns="http://www.w3.org/2005/sparql-results#">
<head>
{head(plan)}
</head>
</sparql>
}
private def head(plan : Plan[_]) = {
plan.vars.map { v => <variable name={v}></variable> }
}
def verify(resultFile : java.net.URL, plan : Plan[DistCollection[Match]], resultDC : DistCollection[Match]) {
val sparqlXML = XML.load(resultFile)
val head = sparqlXML \\ "head"
plan.vars.size should be ((head \ "variable").size)
for(variable <- (head \ "variable")) {
require(plan.vars.contains((variable \ "@name").text))
}
val results = new scala.collection.mutable.ArrayBuffer[Match]() ++ resultDC.toIterable
for(result <- (sparqlXML \ "results" \ "result")) {
val r2 = (for(binding <- (result \ "binding")) yield {
val k = (binding \ "@name").text
val v = (if((binding \ "uri").size > 0) {
(binding \ "uri").text
} else if((binding \ "bnode").size > 0) {
"_:"
} else if((binding \ "literal").size > 0) {
if((binding \ "literal" \ "@{http://www.w3.org/XML/1998/namespace}lang").size > 0) {
"\"" + (binding \ "literal").text + "\"@" + (binding \ "literal" \ "@{http://www.w3.org/XML/1998/namespace}lang").text
} else if((binding \ "literal" \ "@datatype").size > 0) {
"\"" + (binding \ "literal").text + "\"^^" + (binding \ "literal" \ "@datatype").text
} else {
"\"" + (binding \ "literal").text + "\""//^^http://www.w3.org/2001/XMLSchema#string"
}
} else {
throw new RuntimeException()
})
k -> v
})
val resultMatch = results.find { result =>
result.binding.size == r2.size &&
r2.forall {
case (k, v) =>
result.binding.contains(k) && (result.binding(k).toString == v ||
(v == "_:" && result.binding(k).isBlank()))
}
}
resultMatch match {
case Some(s) => results.remove(results.indexOf(s))
case None => fail("not found:" + r2)
}
}
for(result <- results) {
fail("not a result:" + result.toString)
}
}
def verify(resultFile : java.net.URL, plan : Plan[Boolean], result : Boolean) {
val sparqlXML = XML.load(resultFile)
(sparqlXML \ "boolean").text match {
case "true" => require(result)
case "false" => require(!result)
case other => throw new IllegalArgumentException("Bad Boolean value " + other)
}
}
}
}
|
jmccrae/functionalsparql
|
src/test/scala/functionalsparql/DAWGTests.scala
|
Scala
|
apache-2.0
| 9,965
|
package com.dataintuitive.luciuscore.filters
/**
* A basic representation of a filter with a key and a value
*/
case class Filter(key: String, value:String) extends Serializable
/**
* A representation for a filter query: one key, multiple (possible) values
*/
case class QFilter(val key: String, val values: Seq[String]) extends Serializable
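/*
 * Illustrative values (the key and values below are hypothetical): a concrete filter and
 * a query filter that accepts any of several values for the same key.
 *
 * {{{
 *   val filter  = Filter(key = "organism", value = "human")
 *   val qFilter = QFilter(key = "organism", values = Seq("human", "rat"))
 * }}}
 */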
|
data-intuitive/LuciusCore
|
src/main/scala/com/dataintuitive/luciuscore/filters/Filters.scala
|
Scala
|
apache-2.0
| 348
|
package text.similarity
import text.vector.FrequencyVector
import util.Config
/**
* @author K.Sakamoto
* Created on 2016/05/23
*/
object SimilarityCalculator {
def calculate(v1: FrequencyVector, v2: FrequencyVector): Double = {
Similarity.calculate(v1, v2, Config.similarity)
}
}
/**
* @author K.Sakamoto
* @param vector frequency vector
*/
class SimilarityCalculator(val vector: FrequencyVector) {
def calculate(v2: FrequencyVector): Double = {
Similarity.calculate(vector, v2, Config.similarity)
}
}
/**
* @author K.Sakamoto
* @param vectors frequency vectors
*/
class AverageSimilarityCalculator(val vectors: Seq[FrequencyVector]) {
val calculators: Seq[SimilarityCalculator] = {
for (vector <- vectors) yield {
new SimilarityCalculator(vector)
}
}
val size: Int = calculators.size
def calculate(v2: FrequencyVector): Double = {
var score: Double = 0D
calculators foreach {
calculator: SimilarityCalculator =>
score += calculator.calculate(v2)
}
Divider.divide(score, size)
}
}
|
ktr-skmt/FelisCatusZero
|
src/main/scala/text/similarity/SimilarityCalculator.scala
|
Scala
|
apache-2.0
| 1,080
|
package com.github.cocagne
package object composable_paxos {
type NetworkUID = String
}
|
cocagne/scala-composable-paxos
|
src/main/scala/com/github/cocagne/composable_paxos/package.scala
|
Scala
|
mit
| 94
|
import play.sbt.PlayImport._
import sbt._, Keys._
object Dependencies {
val lilaMaven = "lila-maven" at "https://raw.githubusercontent.com/ornicar/lila-maven/master"
val scalalib = "com.github.ornicar" %% "scalalib" % "7.0.2"
val hasher = "com.roundeights" %% "hasher" % "1.2.1"
val jodaTime = "joda-time" % "joda-time" % "2.10.12"
val chess = "org.lichess" %% "scalachess" % "10.2.11"
val compression = "org.lichess" %% "compression" % "1.6"
val maxmind = "com.sanoma.cda" %% "maxmind-geoip2-scala" % "1.3.1-THIB"
val prismic = "io.prismic" %% "scala-kit" % "1.2.19-THIB213"
val scaffeine = "com.github.blemale" %% "scaffeine" % "5.1.1" % "compile"
val googleOAuth = "com.google.auth" % "google-auth-library-oauth2-http" % "1.2.2"
val scalaUri = "io.lemonlabs" %% "scala-uri" % "3.6.0"
val scalatags = "com.lihaoyi" %% "scalatags" % "0.10.0"
val lettuce = "io.lettuce" % "lettuce-core" % "6.1.5.RELEASE"
val epoll = "io.netty" % "netty-transport-native-epoll" % "4.1.65.Final" classifier "linux-x86_64"
val autoconfig = "io.methvin.play" %% "autoconfig-macros" % "0.3.2" % "provided"
val scalatest = "org.scalatest" %% "scalatest" % "3.1.0" % Test
val uaparser = "org.uaparser" %% "uap-scala" % "0.13.0"
val specs2 = "org.specs2" %% "specs2-core" % "4.13.0" % Test
val apacheText = "org.apache.commons" % "commons-text" % "1.9"
val bloomFilter = "com.github.alexandrnikitin" %% "bloom-filter" % "0.13.1"
object flexmark {
val version = "0.62.2"
val bundle =
("com.vladsch.flexmark" % "flexmark" % version) ::
List("ext-tables", "ext-autolink", "ext-gfm-strikethrough").map { ext =>
"com.vladsch.flexmark" % s"flexmark-$ext" % version
}
}
object macwire {
val version = "2.4.2"
val macros = "com.softwaremill.macwire" %% "macros" % version % "provided"
val util = "com.softwaremill.macwire" %% "util" % version % "provided"
val tagging = "com.softwaremill.common" %% "tagging" % "2.3.1"
def bundle = Seq(macros, util, tagging)
}
object reactivemongo {
val version = "1.0.7"
val driver = "org.reactivemongo" %% "reactivemongo" % version
val stream = "org.reactivemongo" %% "reactivemongo-akkastream" % version
val epoll = "org.reactivemongo" % "reactivemongo-shaded-native" % s"$version-linux-x86-64"
val kamon = "org.reactivemongo" %% "reactivemongo-kamon" % "1.0.7"
def bundle = Seq(driver, stream)
}
object play {
val version = "2.8.8-lila_1.8"
val api = "com.typesafe.play" %% "play" % version
val json = "com.typesafe.play" %% "play-json" % "2.9.2"
val mailer = "com.typesafe.play" %% "play-mailer" % "8.0.1"
}
object playWs {
val version = "2.1.3"
val ahc = "com.typesafe.play" %% "play-ahc-ws-standalone" % version
val json = "com.typesafe.play" %% "play-ws-standalone-json" % version
val bundle = Seq(ahc, json)
}
object kamon {
val version = "2.2.3"
val core = "io.kamon" %% "kamon-core" % version
val influxdb = "io.kamon" %% "kamon-influxdb" % version
val metrics = "io.kamon" %% "kamon-system-metrics" % version
val prometheus = "io.kamon" %% "kamon-prometheus" % version
}
object akka {
val version = "2.6.16"
val akka = "com.typesafe.akka" %% "akka-actor" % version
val akkaTyped = "com.typesafe.akka" %% "akka-actor-typed" % version
val akkaStream = "com.typesafe.akka" %% "akka-stream" % version
val akkaSlf4j = "com.typesafe.akka" %% "akka-slf4j" % version
val testkit = "com.typesafe.akka" %% "akka-testkit" % version % Test
def bundle = List(akka, akkaTyped, akkaStream, akkaSlf4j)
}
}
|
luanlv/lila
|
project/Dependencies.scala
|
Scala
|
mit
| 4,431
|
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations
import org.joda.time.LocalDate
import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.computations.Validators.TradingLossesValidation
import uk.gov.hmrc.ct.computations.retriever.ComputationsBoxRetriever
case class CPQ17(value: Option[Boolean]) extends CtBoxIdentifier(name = "Trading losses not used from previous accounting periods?")
with CtOptionalBoolean
with Input
with ValidatableBox[ComputationsBoxRetriever]
with TradingLossesValidation {
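// CPQ17 must be answered when unused prior-period trading losses exist (CP117 > 0),
// must be left blank when CP117 is zero, and cannot be answered together with CPQ19.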
override def validate(boxRetriever: ComputationsBoxRetriever): Set[CtValidation] = {
collectErrors(
requiredErrorIf({ value.isEmpty && boxRetriever.cp117().value > 0 }),
cannotExistErrorIf(value.nonEmpty && boxRetriever.cp117().value == 0),
(value, boxRetriever.cpQ19().value) match {
case (Some(_), Some(_)) => Set(CtValidation(Some(boxId), "error.CPQ17.cannot.exist.cpq19"))
case _ => Set.empty[CtValidation]
}
)
}
}
object CPQ17 {
val lossReform2017 = LocalDate.parse("2017-04-01")
}
|
liquidarmour/ct-calculations
|
src/main/scala/uk/gov/hmrc/ct/computations/CPQ17.scala
|
Scala
|
apache-2.0
| 1,638
|
package com.thangiee.lolhangouts.ui.search
import android.content.Context
import android.support.v4.view.MenuItemCompat
import android.support.v7.widget.{LinearLayoutManager, RecyclerView}
import android.text.Editable
import android.view._
import android.widget._
import com.balysv.materialmenu.MaterialMenuDrawable
import com.skocken.efficientadapter.lib.adapter.AbsViewHolderAdapter.OnItemClickListener
import com.skocken.efficientadapter.lib.adapter.{AbsViewHolderAdapter, SimpleAdapter}
import com.thangiee.lolchat.region.BR
import com.thangiee.lolhangouts.R
import com.thangiee.lolhangouts.data.usecases._
import com.thangiee.lolhangouts.data.usecases.entities.SummSearchHist
import com.thangiee.lolhangouts.ui.core.Container
import com.thangiee.lolhangouts.ui.utils.{Bad, Good, _}
import scala.collection.JavaConversions._
import scala.concurrent.ExecutionContext.Implicits.global
abstract class SummSearchContainer(implicit ctx: Context) extends RelativeLayout(ctx) with Container
with OnItemClickListener[SummSearchHist] {
private var regionSpinner : Spinner = _
private var searchMenuIcon: MenuItem = _
private val searchSuggestions = scala.collection.mutable.ArrayBuffer[SummSearchHist]()
private val suggestionAdapter = new SimpleAdapter[SummSearchHist](R.layout.line_item_summ_search_result,
classOf[SummSearchHistViewHolder], searchSuggestions)
private lazy val searchViewContainer = layoutInflater.inflate(R.layout.search_view_container, this, false)
private lazy val searchView = searchViewContainer.find[EditText](R.id.search_view)
private lazy val suggestionsView = this.find[RecyclerView](R.id.rv_suggestions)
private val getAppDataUseCase = GetAppDataUseCaseImpl()
private val checkSummExistUseCase = CheckSummExistUseCaseImpl()
private val loadUser = GetUserUseCaseImpl().loadUser()
private val loadSummSearchHist = ManageSearchHistUseCaseImpl().loadSummSearchHist()
private val regions = R.array.regions.r2StringArray
override def getView: View = this
def displayView: Option[View]
def onSearchCompleted(query: String, region: String): Unit
override def onAttachedToWindow(): Unit = {
super.onAttachedToWindow()
navIcon.setIconState(MaterialMenuDrawable.IconState.BURGER)
addView(layoutInflater.inflate(R.layout.summ_search_container, this, false))
displayView.foreach(view => this.find[FrameLayout](R.id.content).addView(view))
suggestionsView.setLayoutManager(new LinearLayoutManager(ctx))
suggestionsView.setHasFixedSize(false)
suggestionsView.setAdapter(suggestionAdapter.asInstanceOf[RecyclerView.Adapter[SummSearchHistViewHolder]])
suggestionAdapter.setOnItemClickListener(this)
toolbar.addView(searchViewContainer)
searchView.afterTextChanged(updateSuggestions _)
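// Submit the search when the Enter key is pressed, using the region currently selected in the spinner.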
searchView.onKey((v, actionId, event) =>
if (event.getAction == KeyEvent.ACTION_DOWN && event.getKeyCode == KeyEvent.KEYCODE_ENTER) {
val i = regionSpinner.getSelectedItemPosition
onSearchSubmit(searchView.txt2str, regions(i), isNewSearch = true)
true
} else {
false
}
)
// set the username as default in the search view
loadUser.onSuccess {
case Good(user) => runOnUiThread(searchView.setText(user.inGameName))
case Bad(_) => // leave it as blank
}
val clearSearchBtn = searchViewContainer.find[ImageView](R.id.search_clear)
clearSearchBtn.onClick(searchView.setText(""))
}
override def onCreateOptionsMenu(menuInflater: MenuInflater, menu: Menu): Boolean = {
menuInflater.inflate(R.menu.search, menu)
searchMenuIcon = menu.findItem(R.id.menu_search)
// setup regions spinner
val regionAdapter = ArrayAdapter.createFromResource(ctx, R.array.regions, R.layout.spinner_item)
regionAdapter.setDropDownViewResource(R.layout.spinner_drop_down_item)
regionSpinner = MenuItemCompat.getActionView(menu.findItem(R.id.menu_spinner_regions)).asInstanceOf[Spinner]
regionSpinner.setAdapter(regionAdapter)
// set the default spinner selection to the user region
getAppDataUseCase.loadAppData().onSuccess {
case data => runOnUiThread {
regionSpinner.setSelection(regions.indexOf(data.selectedRegion.getOrElse(BR).id.toUpperCase))
regionSpinner.setVisibility(View.GONE)
}
}
displaySearchView(visible = true)
true
}
override def onNavIconClick(): Boolean = {
if (navIcon.getIconState == MaterialMenuDrawable.IconState.ARROW) {
displaySearchView(visible = false)
true // consume
} else {
super.onNavIconClick()
}
}
override def onOptionsItemSelected(item: MenuItem): Boolean = {
item.getItemId match {
case R.id.menu_search =>
displaySearchView(visible = true)
true
case _ => super.onOptionsItemSelected(item)
}
}
override def onItemClick(adp: AbsViewHolderAdapter[SummSearchHist], v: View, res: SummSearchHist, p: Int): Unit = {
delay(mills = 500) {
onSearchSubmit(res.name, res.regionId, isNewSearch = false)
searchView.setText(res.name)
}
}
private def onSearchSubmit(query: String, regionId: String, isNewSearch: Boolean): Unit = {
info(s"[*] query submitted: $query - $regionId")
inputMethodManager.hideSoftInputFromWindow(getWindowToken, 0) // hide keyboard
checkSummExistUseCase.checkExists(query, regionId).map { isExists =>
if (isExists) {
if (isNewSearch) ManageSearchHistUseCaseImpl().saveSummSearchHist(query, regionId)
onSearchCompleted(query, regionId)
} else {
SnackBar(R.string.err_summ_search.r2String.format(query, regionId)).show()
}
}
}
private def updateSuggestions(query: Editable): Unit = {
searchSuggestions.clear()
loadSummSearchHist.onSuccess {
case result =>
searchSuggestions ++= result.filter(_.name.toLowerCase.contains(query.toString.toLowerCase)) // filter names by query
runOnUiThread(suggestionAdapter.notifyDataSetChanged())
}
}
private def displaySearchView(visible: Boolean): Unit = {
if (visible) {
navIcon.setIconState(MaterialMenuDrawable.IconState.ARROW)
// hide title, spinner, and search icon
toolbar.setTitle(null)
searchMenuIcon.setVisible(false)
regionSpinner.setVisibility(View.GONE)
// show the search view, suggestions, and other related things
searchViewContainer.setVisibility(View.VISIBLE)
suggestionsView.setVisibility(View.VISIBLE)
searchView.requestFocus()
searchView.setSelection(searchView.txt2str.length) // move cursor to the end
inputMethodManager.showSoftInput(searchView, 0) // show keyboard
} else {
navIcon.setIconState(MaterialMenuDrawable.IconState.BURGER)
// show title, spinner, and search icon
toolbar.setTitle(R.string.app_name)
searchMenuIcon.setVisible(true)
regionSpinner.setVisibility(View.VISIBLE)
// hide search container and suggestions
searchViewContainer.setVisibility(View.GONE)
suggestionsView.setVisibility(View.GONE)
inputMethodManager.hideSoftInputFromWindow(getWindowToken, 0) // hide keyboard
}
}
}
|
Thangiee/LoL-Hangouts
|
src/com/thangiee/lolhangouts/ui/search/SummSearchContainer.scala
|
Scala
|
apache-2.0
| 7,216
|
package org.higherstate.jameson.parsers
import org.higherstate.jameson.validators.Validator
import org.higherstate.jameson.tokenizers.Tokenizer
import org.higherstate.jameson.Path
import org.higherstate.jameson.failures._
case class ValidatorParser[+T](parser:Parser[T], validators:List[Validator]) extends Parser[T] {
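// Parse with the wrapped parser, then run every validator over the result;
// the first reported validation failure (if any) fails the parse, otherwise the parsed value is returned.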
def parse(tokenizer: Tokenizer, path: Path): Valid[T] =
parser.parse(tokenizer, path).flatMap { value =>
validators
.flatMap(_.apply(value, path))
.headOption
.fold(Success(value))(Failure(_))
}
override def default = parser.default
def schema = parser.schema ++ validators.flatMap(_.schema)
}
|
HigherState/jameson
|
src/main/scala/org/higherstate/jameson/parsers/ValidatorParser.scala
|
Scala
|
apache-2.0
| 660
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.system.kafka_deprecated
import kafka.api.TopicMetadata
import kafka.api.PartitionMetadata
import kafka.cluster.Broker
import kafka.common.TopicAndPartition
import kafka.message.Message
import kafka.message.MessageAndOffset
import org.apache.kafka.common.protocol.Errors
import org.apache.samza.system.IncomingMessageEnvelope
import org.apache.samza.system.SystemStreamPartition
import org.apache.samza.Partition
import org.apache.samza.util.TopicMetadataStore
import org.junit.Test
import org.junit.Assert._
import org.apache.samza.system.SystemAdmin
import org.mockito.Mockito._
import org.mockito.Matchers._
class TestKafkaSystemConsumer {
val systemAdmin: SystemAdmin = mock(classOf[KafkaSystemAdmin])
private val SSP: SystemStreamPartition = new SystemStreamPartition("test", "test", new Partition(0))
private val envelope: IncomingMessageEnvelope = new IncomingMessageEnvelope(SSP, null, null, null)
private val envelopeWithSize: IncomingMessageEnvelope = new IncomingMessageEnvelope(SSP, null, null, null, 100)
private val clientId = "TestClientId"
@Test
def testFetchThresholdShouldDivideEvenlyAmongPartitions {
val metadataStore = new MockMetadataStore
val consumer = new KafkaSystemConsumer("", systemAdmin, new KafkaSystemConsumerMetrics, metadataStore, clientId, fetchThreshold = 50000) {
override def refreshBrokers {
}
}
for (i <- 0 until 50) {
consumer.register(new SystemStreamPartition("test-system", "test-stream", new Partition(i)), "0")
}
consumer.start
assertEquals(1000, consumer.perPartitionFetchThreshold)
}
@Test
def testBrokerCreationShouldTriggerStart {
val systemName = "test-system"
val streamName = "test-stream"
val metrics = new KafkaSystemConsumerMetrics
// Lie and tell the store that the partition metadata is empty. We can't
// use partition metadata because it has Broker in its constructor, which
// is package private to Kafka.
val metadataStore = new MockMetadataStore(Map(streamName -> TopicMetadata(streamName, Seq.empty, Errors.NONE)))
var hosts = List[String]()
var getHostPortCount = 0
val consumer = new KafkaSystemConsumer(systemName, systemAdmin, metrics, metadataStore, clientId) {
override def getLeaderHostPort(partitionMetadata: Option[PartitionMetadata]): Option[(String, Int)] = {
// Generate a unique host every time getHostPort is called.
getHostPortCount += 1
Some("localhost-%s" format getHostPortCount, 0)
}
override def createBrokerProxy(host: String, port: Int): BrokerProxy = {
new BrokerProxy(host, port, systemName, "", metrics, sink) {
override def addTopicPartition(tp: TopicAndPartition, nextOffset: Option[String]) = {
// Skip this since we normally do verification of offsets, which
// tries to connect to Kafka. Rather than mock that, just forget it.
nextOffsets.size
}
override def start {
hosts :+= host
}
}
}
}
consumer.register(new SystemStreamPartition(systemName, streamName, new Partition(0)), "1")
assertEquals(0, hosts.size)
consumer.start
assertEquals(List("localhost-1"), hosts)
// Should trigger a refresh with a new host.
consumer.sink.abdicate(new TopicAndPartition(streamName, 0), 2)
assertEquals(List("localhost-1", "localhost-2"), hosts)
}
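// When the same SSP is registered more than once, the consumer keeps the oldest (smallest) offset.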
@Test
def testConsumerRegisterOlderOffsetOfTheSamzaSSP {
when(systemAdmin.offsetComparator(anyString, anyString)).thenCallRealMethod()
val metadataStore = new MockMetadataStore
val consumer = new KafkaSystemConsumer("", systemAdmin, new KafkaSystemConsumerMetrics, metadataStore, clientId, fetchThreshold = 50000)
val ssp0 = new SystemStreamPartition("test-system", "test-stream", new Partition(0))
val ssp1 = new SystemStreamPartition("test-system", "test-stream", new Partition(1))
val ssp2 = new SystemStreamPartition("test-system", "test-stream", new Partition(2))
consumer.register(ssp0, "0")
consumer.register(ssp0, "5")
consumer.register(ssp1, "2")
consumer.register(ssp1, "3")
consumer.register(ssp2, "0")
assertEquals("0", consumer.topicPartitionsAndOffsets(KafkaSystemConsumer.toTopicAndPartition(ssp0)))
assertEquals("2", consumer.topicPartitionsAndOffsets(KafkaSystemConsumer.toTopicAndPartition(ssp1)))
assertEquals("0", consumer.topicPartitionsAndOffsets(KafkaSystemConsumer.toTopicAndPartition(ssp2)))
}
@Test
def testFetchThresholdBytesShouldDivideEvenlyAmongPartitions {
val metadataStore = new MockMetadataStore
val consumer = new KafkaSystemConsumer("", systemAdmin, new KafkaSystemConsumerMetrics, metadataStore, clientId,
fetchThreshold = 50000, fetchThresholdBytes = 60000L, fetchLimitByBytesEnabled = true) {
override def refreshBrokers {
}
}
for (i <- 0 until 10) {
consumer.register(new SystemStreamPartition("test-system", "test-stream", new Partition(i)), "0")
}
consumer.start
assertEquals(5000, consumer.perPartitionFetchThreshold)
assertEquals(3000, consumer.perPartitionFetchThresholdBytes)
}
@Test
def testFetchThresholdBytes {
val metadataStore = new MockMetadataStore
val consumer = new KafkaSystemConsumer("test-system", systemAdmin, new KafkaSystemConsumerMetrics, metadataStore, clientId,
fetchThreshold = 50000, fetchThresholdBytes = 60000L, fetchLimitByBytesEnabled = true) {
override def refreshBrokers {
}
}
for (i <- 0 until 10) {
consumer.register(new SystemStreamPartition("test-system", "test-stream", new Partition(i)), "0")
}
consumer.start
val msg = Array[Byte](5, 112, 9, 126)
val msgAndOffset: MessageAndOffset = MessageAndOffset(new Message(msg), 887654)
// 4 bytes of payload + 22 bytes of Message overhead (magic v1) + 80 bytes of IncomingMessageEnvelope overhead = 106
consumer.sink.addMessage(new TopicAndPartition("test-stream", 0), msgAndOffset, 887354)
assertEquals(106, consumer.getMessagesSizeInQueue(new SystemStreamPartition("test-system", "test-stream", new Partition(0))))
}
@Test
def testFetchThresholdBytesDisabled {
val metadataStore = new MockMetadataStore
val consumer = new KafkaSystemConsumer("", systemAdmin, new KafkaSystemConsumerMetrics, metadataStore, clientId,
fetchThreshold = 50000, fetchThresholdBytes = 60000L) {
override def refreshBrokers {
}
}
for (i <- 0 until 10) {
consumer.register(new SystemStreamPartition("test-system", "test-stream", new Partition(i)), "0")
}
consumer.start
assertEquals(5000, consumer.perPartitionFetchThreshold)
assertEquals(0, consumer.perPartitionFetchThresholdBytes)
assertEquals(0, consumer.getMessagesSizeInQueue(new SystemStreamPartition("test-system", "test-stream", new Partition(0))))
}
}
class MockMetadataStore(var metadata: Map[String, TopicMetadata] = Map()) extends TopicMetadataStore {
def getTopicInfo(topics: Set[String]): Map[String, TopicMetadata] = metadata
}
|
bharathkk/samza
|
samza-kafka/src/test/scala/org/apache/samza/system/kafka_deprecated/TestKafkaSystemConsumer.scala
|
Scala
|
apache-2.0
| 7,891
|
package com.tuvistavie.xserver.backend.model
abstract class Keymap {
val minCode: Int
val maxCode: Int
}
|
tuvistavie/scala-x-server
|
backend/src/main/scala/com/tuvistavie/xserver/model/Keymap.scala
|
Scala
|
mit
| 110
|
/*
* Copyright (c) 2011-2017 Interfaculty Department of Geoinformatics, University of
* Salzburg (Z_GIS) & Institute of Geological and Nuclear Sciences Limited (GNS Science)
* in the SMART Aquifer Characterisation (SAC) programme funded by the New Zealand
* Ministry of Business, Innovation and Employment (MBIE)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package services
import javax.inject.Inject
import akka.actor.{Actor, Props}
import models.csw.{CswGetRecordsRequest, CswGetRecordsResponse}
import models.gmd.MdMetadataSet
import org.apache.lucene.document.Document
import org.apache.lucene.index.{IndexWriter, IndexWriterConfig}
import org.apache.lucene.store.{Directory, RAMDirectory}
import play.api.{Configuration, Environment, Mode}
import play.api.http.Status
import play.api.libs.ws.WSClient
import services.LuceneIndexBuilderActor._
import utils.ClassnameLogger
import scala.concurrent._
import scala.concurrent.duration._
import scala.util.{Failure, Success}
object LuceneIndexBuilderActor {
def props: Props = Props[LuceneIndexBuilderActor]
trait Factory {
def apply(): Actor
}
case class IndexCatalogue(catalogueName: String, catalogueUrl: String)
case class IndexResponseDocument(cswGetRecordsResponse: CswGetRecordsResponse, catalogueName: String, isLast: Boolean)
// case class MergeLuceneIndexLocal(directory: Directory, catalogueName: String)
case class QueryCatalogueFirst(catalogueName: String, catalogueUrl: String)
case class QueryCatalogueNext(catalogueName: String, catalogueUrl: String, startDocument: Int, documentsToFetch: Int)
case class ShutdownActor(catalogueName: String)
}
/**
* Builds a Lucene Index from a given CswGetRecordsResponse object
*/
class LuceneIndexBuilderActor @Inject()(configuration: Configuration,
environment: Environment,
wsClient: WSClient,
luceneService: LuceneService)(implicit ec: ExecutionContext) extends Actor with
ClassnameLogger {
logger.debug("Reading configuration: csw.maxDocs ")
private val CSW_MAX_RECORDS = 500
private val maxDocsPerFetch = configuration.getInt("csw.maxDocs").getOrElse(CSW_MAX_RECORDS)
private val indexBaseFolder = configuration.getString("searcher.indexCacheDir").getOrElse(".")
// private val catalogueDirectory = new RAMDirectory()
private var luceneDocs = scala.collection.mutable.Map[String, Document]()
/**
* Defines the initial Actor behaviour. Following behaviours are implemented:
* - [[IndexCatalogue]]
*
* @return
*/
override def receive: Receive = {
case IndexCatalogue(catalogueName, catalogueUrl) => {
logger.info(s"Initialising to index catalogue $catalogueName")
context.become(indexing)
self ! QueryCatalogueFirst(catalogueName, catalogueUrl)
}
//FIXME some kind of default behaviour?
}
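// Indexing behaviour: QueryCatalogueFirst/QueryCatalogueNext page through the CSW endpoint,
// each IndexResponseDocument adds the extracted Lucene documents to the local document map,
// and ShutdownActor merges the accumulated documents back into the main LuceneService.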
def indexing: Receive = {
case QueryCatalogueFirst(catalogueName, catalogueUrl) => {
logger.info(s"Querying $catalogueName on url $catalogueUrl")
queryCatalogue(catalogueName, catalogueUrl, 1, maxDocsPerFetch)
}
case QueryCatalogueNext(catalogueName, catalogueUrl, startDocument, documentsToFetch) => {
val secDelay = if (environment.mode.equals(Mode.Prod)) 5 + scala.util.Random.nextInt(6) else 1
logger.info(s"Querying next round $catalogueName on url $catalogueUrl in ${secDelay}s")
context.system.scheduler.scheduleOnce(secDelay.seconds) {
queryCatalogue(catalogueName, catalogueUrl, startDocument, documentsToFetch)
}
}
case IndexResponseDocument(cswGetRecordsResponse, catalogueName, isLast) => {
val intermediateMapOfDocuments = extractLuceneDocsFromResponse(cswGetRecordsResponse, catalogueName)
luceneDocs ++= intermediateMapOfDocuments
// val directory = buildIndexFor(intermediateMapOfDocuments, catalogueName)
// sender() ! MergeLuceneIndexLocal(directory, catalogueName)
// its important to send the shutdown from here, otherwise the last merge will not be executed, because shutdown
// will be received before this merge message
if (isLast) {
logger.info(s"Last response indexed. Sending shutdown for $catalogueName")
sender() ! ShutdownActor(catalogueName)
}
}
// case MergeLuceneIndexLocal(directory, catalogueName) => {
// logger.info(s"Merging index inside actor of $catalogueName")
// val config = new IndexWriterConfig()
// config.setCommitOnClose(true)
//
// val iwriter = new IndexWriter(catalogueDirectory, config)
// try {
// iwriter.addIndexes(directory)
// iwriter.commit()
// logger.info(s"Done merging index for $catalogueName")
// }
// finally {
// iwriter.close()
// }
// }
case ShutdownActor(catalogueName) => {
// if (catalogueDirectory.listAll().length > 0) {
// logger.info(s"Merging index for $catalogueName back to main service")
// luceneService.mergeOverWriteIndex(catalogueDirectory, catalogueName)
// }
if (luceneDocs.nonEmpty) {
logger.info(s"Merging index (${luceneDocs.size} records) for $catalogueName back to main service")
luceneService.mergeUpdateDocsIndex(luceneDocs.toMap, catalogueName)
}
logger.info(s"Shutting down actor for $catalogueName")
context.stop(self)
}
//FIXME some kind of default behaviour?
}
/**
* Builds a seq of Lucene Documents from a given CswGetRecordsResponse object
*
* @param cswGetRecordsResponse
* @param catalogueName
* @return
*/
private def extractLuceneDocsFromResponse(cswGetRecordsResponse: CswGetRecordsResponse, catalogueName: String): Map[String, Document] = {
logger.info(s"Start building (partial) index for ${catalogueName}...")
val mdMetadataSet = (cswGetRecordsResponse.xml \\ "MD_Metadata").map(mdMetadataNode => {
logger.debug(f"Preparing($catalogueName): ${(mdMetadataNode \ "fileIdentifier" \ "CharacterString").text}")
logger.trace(mdMetadataNode.toString)
MdMetadataSet.fromXml(mdMetadataNode, catalogueName, luceneService.getCatalogueUrl(catalogueName))
}).filter(item => item.isDefined).map(item => item.get) //filter out all None values
mdMetadataSet.map(md => (md.fileIdentifier, md.asLuceneDocument)).toMap
}
/**
* Builds a Lucene Index from a given CswGetRecordsResponse object
*
* @param intermediateSetofDocuments Set[Document] to build an index from
* @param catalogueName String containing the catalogue name
* @return [[Directory]] containing the index
*/
@deprecated
private def buildIndexFor(intermediateSetofDocuments: Map[String, Document], catalogueName: String): Directory = {
val directory = new RAMDirectory()
val config = new IndexWriterConfig()
config.setCommitOnClose(true)
//FIXME SR use SCALA_ARM (automated resource management)?
val iwriter = new IndexWriter(directory, config)
try {
iwriter.deleteAll()
iwriter.commit()
intermediateSetofDocuments.foreach(tup => iwriter.addDocument(tup._2))
iwriter.commit()
logger.info(s"Partial index ready for $catalogueName")
}
finally {
iwriter.close()
}
directory
}
/**
* The actual CSW paging happens here; a @tailrec formulation could have been tried to keep the heap constant.
*
* @param catalogueName
* @param catalogueUrl
* @param startDocument
* @param documentsToFetch
*/
private def queryCatalogue(catalogueName: String, catalogueUrl: String, startDocument: Int,
documentsToFetch: Int): Unit = {
val wsClientResponseFuture =
wsClient.url(catalogueUrl)
// .withRequestTimeout(20.seconds)
.withHeaders("Content-Type" -> "application/xml")
.post(CswGetRecordsRequest(startDocument, documentsToFetch))
wsClientResponseFuture.onComplete {
case Success(wsClientResponse) => {
wsClientResponse.status match {
case Status.OK => {
logger.debug(f"Response Content Type: ${wsClientResponse.allHeaders.getOrElse("Content-Type", "Unknown")}")
logger.debug(f"Response-Length: ${wsClientResponse.body.length}")
logger.trace(f"Response-Body: ${wsClientResponse.body.toString}")
wsClientResponse.xml.label match {
case "ExceptionReport" => {
logger.warn(
f"Got XML Exception Response. Text: ${(wsClientResponse.xml \ "Exception" \ "ExceptionText").text}")
}
case "GetRecordsResponse" => {
val cswGetRecResp = CswGetRecordsResponse(wsClientResponse.xml)
logger.info(
f"c: $catalogueName nextRecord: ${cswGetRecResp.nextRecord}, " +
f"numberOfRec ${cswGetRecResp.numberOfRecordsMatched}, " +
f"recordsReturned ${cswGetRecResp.numberOfRecordsReturned}")
if ((cswGetRecResp.nextRecord > cswGetRecResp.numberOfRecordsMatched) ||
(cswGetRecResp.nextRecord == 0) ||
(cswGetRecResp.numberOfRecordsReturned == 0)) {
logger.info(s"Sending IndexResponseDocument - is last! ($catalogueName)")
self ! IndexResponseDocument(cswGetRecResp, catalogueName, true)
}
else {
logger.info(s"Sending IndexResponseDocument - and start another query round. ($catalogueName)")
self ! IndexResponseDocument(cswGetRecResp, catalogueName, false)
self ! QueryCatalogueNext(catalogueName, catalogueUrl, cswGetRecResp.nextRecord, documentsToFetch)
}
}
case _ => {
logger.warn(f"Unknown response content. Body: ${wsClientResponse.xml.toString} ($catalogueName)")
self ! ShutdownActor(catalogueName)
}
}
}
case _ => {
self ! ShutdownActor(catalogueName)
}
}
}
case Failure(ex) => {
logger.warn(s"Exception while querying CSW $catalogueName (${ex.getClass.getCanonicalName}): ${ex.getMessage}", ex)
self ! ShutdownActor(catalogueName)
}
}
}
}
|
ZGIS/smart-csw-ingester
|
app/services/LuceneIndexBuilderActor.scala
|
Scala
|
apache-2.0
| 10,917
|
package cromwell.services
import java.io.{ByteArrayOutputStream, PrintStream}
import java.sql.Connection
import java.time.OffsetDateTime
import javax.sql.rowset.serial.{SerialBlob, SerialClob, SerialException}
import better.files._
import com.typesafe.config.ConfigFactory
import cromwell.core.Tags._
import cromwell.core.WorkflowId
import cromwell.database.migration.liquibase.LiquibaseUtils
import cromwell.database.slick.{EngineSlickDatabase, MetadataSlickDatabase, SlickDatabase}
import cromwell.database.sql.SqlConverters._
import cromwell.database.sql.joins.JobStoreJoin
import cromwell.database.sql.tables.WorkflowStoreEntry.WorkflowStoreState
import cromwell.database.sql.tables.{JobStoreEntry, JobStoreSimpletonEntry, WorkflowStoreEntry}
import liquibase.diff.DiffResult
import liquibase.diff.output.DiffOutputControl
import liquibase.diff.output.changelog.DiffToChangeLog
import org.hsqldb.persist.HsqlDatabaseProperties
import org.scalactic.StringNormalizations
import org.scalatest.concurrent.PatienceConfiguration.Timeout
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.{FlatSpec, Matchers}
import slick.jdbc.JdbcProfile
import slick.jdbc.meta._
import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.util.Try
import scala.xml._
class ServicesStoreSpec extends FlatSpec with Matchers with ScalaFutures with StringNormalizations {
import ServicesStoreSpec._
implicit val ec = ExecutionContext.global
implicit val defaultPatience = PatienceConfig(timeout = Span(5, Seconds), interval = Span(100, Millis))
behavior of "ServicesStore"
it should "not deadlock" in {
// Test based on https://github.com/kwark/slick-deadlock/blob/82525fc/src/main/scala/SlickDeadlock.scala
val databaseConfig = ConfigFactory.parseString(
s"""|db.url = "jdbc:hsqldb:mem:$${uniqueSchema};shutdown=false;hsqldb.tx=mvcc"
|db.driver = "org.hsqldb.jdbcDriver"
|db.connectionTimeout = 3000
|db.numThreads = 2
|profile = "slick.jdbc.HsqldbProfile$$"
|""".stripMargin)
import ServicesStore.EnhancedSqlDatabase
for {
database <- new EngineSlickDatabase(databaseConfig)
.initialized(EngineServicesStore.EngineLiquibaseSettings).autoClosed
} {
val futures = 1 to 20 map { _ =>
val workflowUuid = WorkflowId.randomId().toString
val callFqn = "call.fqn"
val jobIndex = 1
val jobAttempt = 1
val jobSuccessful = false
val jobStoreEntry = JobStoreEntry(workflowUuid, callFqn, jobIndex, jobAttempt, jobSuccessful, None, None, None)
val jobStoreJoins = Seq(JobStoreJoin(jobStoreEntry, Seq()))
// NOTE: This test just needs to repeatedly read/write from a table that acts as a PK for a FK.
for {
_ <- database.addJobStores(jobStoreJoins, 10)
queried <- database.queryJobStores(workflowUuid, callFqn, jobIndex, jobAttempt)
_ = queried.get.jobStoreEntry.workflowExecutionUuid should be(workflowUuid)
} yield ()
}
Future.sequence(futures).futureValue(Timeout(10.seconds))
}
}
"Singleton Slick" should behave like testSchemaManager("singleton", "slick")
"Singleton Liquibase" should behave like testSchemaManager("singleton", "liquibase")
"Metadata Slick" should behave like testSchemaManager("metadata", "slick")
"Metadata Liquibase" should behave like testSchemaManager("metadata", "liquibase")
def testSchemaManager(databaseType: String, schemaManager: String): Unit = {
val otherSchemaManager = if (schemaManager == "slick") "liquibase" else "slick"
it should s"have the same schema as $otherSchemaManager" in {
for {
actualDatabase <- databaseForSchemaManager(databaseType, schemaManager).autoClosed
expectedDatabase <- databaseForSchemaManager(databaseType, otherSchemaManager).autoClosed
} {
compare(
actualDatabase.dataAccess.driver, actualDatabase.database,
expectedDatabase.dataAccess.driver, expectedDatabase.database) { diffResult =>
import cromwell.database.migration.liquibase.DiffResultFilter._
/*
NOTE: Unique indexes no longer need to be filtered, as WE SHOULD NOT BE USING THEM!
See notes at the bottom of changelog.xml
*/
val diffFilters = StandardTypeFilters
val filteredDiffResult = diffResult
.filterChangeLogs
.filterLiquibaseObjects
.filterChangedObjects(diffFilters)
val totalChanged =
filteredDiffResult.getChangedObjects.size +
filteredDiffResult.getMissingObjects.size +
filteredDiffResult.getUnexpectedObjects.size
if (totalChanged > 0) {
val outputStream = new ByteArrayOutputStream
val printStream = new PrintStream(outputStream, true)
val diffOutputControl = new DiffOutputControl(false, false, false, Array.empty)
val diffToChangeLog = new DiffToChangeLog(filteredDiffResult, diffOutputControl)
diffToChangeLog.print(printStream)
val changeSetsScoped = XML.loadString(outputStream.toString) \\ "changeSet" \\ "_"
val changeSets = changeSetsScoped map stripNodeScope
fail(changeSets.mkString(
s"The following changes are in $schemaManager but not in $otherSchemaManager:\\n ",
"\\n ",
"\\nEnsure that the columns/fields exist, with the same lengths in " +
s"$schemaManager and $otherSchemaManager and synchronize the two."))
}
}
}
}
it should "match expected generated names" in {
var schemaMetadata: SchemaMetadata = null
for {
slickDatabase <- databaseForSchemaManager(databaseType, schemaManager).autoClosed
} {
import slickDatabase.dataAccess.driver.api._
val schemaMetadataFuture =
for {
tables <- slickDatabase.database.run(MTable.getTables(Option("PUBLIC"), Option("PUBLIC"), None, None))
workingTables = tables
.filterNot(_.name.name.contains("DATABASECHANGELOG"))
// NOTE: MetadataEntry column names are perma-busted due to the large size of the table.
.filterNot(_.name.name == "METADATA_ENTRY")
columns <- slickDatabase.database.run(DBIO.sequence(workingTables.map(_.getColumns)))
indexes <- slickDatabase.database.run(DBIO.sequence(workingTables.map(_.getIndexInfo())))
primaryKeys <- slickDatabase.database.run(DBIO.sequence(workingTables.map(_.getPrimaryKeys)))
foreignKeys <- slickDatabase.database.run(DBIO.sequence(workingTables.map(_.getExportedKeys)))
} yield SchemaMetadata(tables, columns.flatten, indexes.flatten.filterNot(isGenerated),
primaryKeys.flatten.filterNot(isGenerated), foreignKeys.flatten)
schemaMetadata = schemaMetadataFuture.futureValue
}
var misnamed = Seq.empty[String]
schemaMetadata.primaryKeyMetadata foreach { primaryKey =>
val actual = primaryKey.pkName.get
val expected = s"PK_${primaryKey.table.name}"
if (actual != expected) {
misnamed :+=
s"""| PrimaryKey: $actual
| Should be: $expected
|""".stripMargin
}
}
schemaMetadata.foreignKeyMetadata foreach { foreignKey =>
val actual = foreignKey.fkName.get
val expected = s"FK_${foreignKey.fkTable.name}_${foreignKey.fkColumn}"
if (actual != expected) {
misnamed :+=
s"""| ForeignKey: $actual
| Should be: $expected
|""".stripMargin
}
}
schemaMetadata.indexMetadata.groupBy(getIndexName) foreach {
case (indexName, indexColumns) =>
val index = indexColumns.head
val prefix = if (index.nonUnique) "IX" else "UC"
val tableName = index.table.name
val sortedColumns = indexColumns.sortBy(_.ordinalPosition)
val abbrColumns = sortedColumns.map(indexColumn => snakeAbbreviate(indexColumn.column.get))
val actual = indexName
val expected = abbrColumns.mkString(s"${prefix}_${tableName}_", "_", "")
if (actual != expected) {
misnamed :+=
s"""| Index: $actual
| Should be: $expected
|""".stripMargin
}
}
var missing = Seq.empty[String]
schemaMetadata.columns foreach { column =>
if (!schemaMetadata.existsTableItem(column)) {
missing :+= s" ${tableClassName(column.tableName)}.${column.itemName}"
}
}
schemaMetadata.slickItems foreach { databaseItem =>
if (!schemaMetadata.existsSlickMapping(databaseItem)) {
missing :+= s" ${slickClassName(databaseItem.tableName)}.${databaseItem.itemName}"
}
}
if (missing.nonEmpty || misnamed.nonEmpty) {
var failMessage = ""
if (misnamed.nonEmpty) {
failMessage += misnamed.mkString(s"The following items are misnamed in $schemaManager:\\n", "\\n", "\\n")
}
if (missing.nonEmpty) {
failMessage += missing.mkString(
s"Based on the schema in $schemaManager, please ensure that the following tables/columns exist:\\n",
"\\n", "\\n")
}
fail(failMessage)
}
}
}
"SlickDatabase (hsqldb)" should behave like testWith("database")
"SlickDatabase (mysql)" should behave like testWith("database-test-mysql")
def testWith(configPath: String): Unit = {
import ServicesStore.EnhancedSqlDatabase
lazy val databaseConfig = ConfigFactory.load.getConfig(configPath)
lazy val dataAccess = new EngineSlickDatabase(databaseConfig)
.initialized(EngineServicesStore.EngineLiquibaseSettings)
lazy val getProduct = {
import dataAccess.dataAccess.driver.api._
SimpleDBIO[String](_.connection.getMetaData.getDatabaseProductName)
}
it should "(if hsqldb) have transaction isolation mvcc" taggedAs DbmsTest in {
import dataAccess.dataAccess.driver.api._
//noinspection SqlDialectInspection
val getHsqldbTx = sql"""SELECT PROPERTY_VALUE
FROM INFORMATION_SCHEMA.SYSTEM_PROPERTIES
WHERE PROPERTY_NAME = 'hsqldb.tx'""".as[String].head
(for {
product <- dataAccess.database.run(getProduct)
_ <- product match {
case HsqlDatabaseProperties.PRODUCT_NAME =>
dataAccess.database.run(getHsqldbTx) map { hsqldbTx =>
(hsqldbTx shouldEqual "mvcc") (after being lowerCased)
}
case _ => Future.successful(())
}
} yield ()).futureValue
}
it should "fail to store and retrieve empty clobs" taggedAs DbmsTest in {
// See notes in StringToClobOption
val emptyClob = new SerialClob(Array.empty[Char])
val workflowUuid = WorkflowId.randomId().toString
val callFqn = "call.fqn"
val jobIndex = 1
val jobAttempt = 1
val jobSuccessful = false
val jobStoreEntry = JobStoreEntry(workflowUuid, callFqn, jobIndex, jobAttempt, jobSuccessful, None, None, None)
val jobStoreSimpletonEntries = Seq(JobStoreSimpletonEntry("empty", Option(emptyClob), "WdlString"))
val jobStoreJoins = Seq(JobStoreJoin(jobStoreEntry, jobStoreSimpletonEntries))
val future = for {
product <- dataAccess.database.run(getProduct)
_ <- product match {
case "HSQL Database Engine" =>
// HSQLDB doesn't crash because it calls getCharacterStream instead of getSubString.
dataAccess.addJobStores(jobStoreJoins, 1)
case "MySQL" =>
dataAccess.addJobStores(jobStoreJoins, 1).failed map { exception =>
exception should be(a[SerialException])
exception.getMessage should be("Invalid position in SerialClob object set")
}
}
} yield ()
future.futureValue
}
it should "fail to store and retrieve empty blobs" taggedAs DbmsTest in {
// See notes in BytesToBlobOption
import eu.timepit.refined.auto._
import eu.timepit.refined.collection._
val clob = "".toClob(default = "{}")
val clobOption = "{}".toClobOption
val emptyBlob = new SerialBlob(Array.empty[Byte])
val workflowUuid = WorkflowId.randomId().toString
val workflowStoreEntry = WorkflowStoreEntry(
workflowExecutionUuid = workflowUuid,
workflowType = WdlWorkflowType,
workflowTypeVersion = None,
workflowDefinition = clobOption,
workflowInputs = clobOption,
workflowOptions = clobOption,
workflowState = WorkflowStoreState.Submitted,
restarted = false,
submissionTime = OffsetDateTime.now.toSystemTimestamp,
importsZip = Option(emptyBlob),
customLabels = clob)
val workflowStoreEntries = Seq(workflowStoreEntry)
val future = for {
product <- dataAccess.database.run(getProduct)
_ <- product match {
case "HSQL Database Engine" =>
// HSQLDB doesn't crash because it calls getBinaryStream instead of getBytes.
dataAccess.addWorkflowStoreEntries(workflowStoreEntries)
case "MySQL" =>
dataAccess.addWorkflowStoreEntries(workflowStoreEntries).failed map { exception =>
exception should be(a[SerialException])
exception.getMessage should
be("Invalid arguments: position cannot be less than 1 or greater than the length of the SerialBlob")
}
}
} yield ()
future.futureValue
}
it should "store and retrieve empty clobs" taggedAs DbmsTest in {
// See notes in StringToClobOption
val workflowUuid = WorkflowId.randomId().toString
val callFqn = "call.fqn"
val jobIndex = 1
val jobAttempt = 1
val jobSuccessful = false
val jobStoreEntry = JobStoreEntry(workflowUuid, callFqn, jobIndex, jobAttempt, jobSuccessful, None, None, None)
val jobStoreSimpletonEntries = Seq(
JobStoreSimpletonEntry("empty", "".toClobOption, "WdlString"),
JobStoreSimpletonEntry("aEntry", "a".toClobOption, "WdlString")
)
val jobStoreJoins = Seq(JobStoreJoin(jobStoreEntry, jobStoreSimpletonEntries))
val future = for {
_ <- dataAccess.addJobStores(jobStoreJoins, 1)
queried <- dataAccess.queryJobStores(workflowUuid, callFqn, jobIndex, jobAttempt)
_ = {
val jobStoreJoin = queried.get
jobStoreJoin.jobStoreEntry.workflowExecutionUuid should be(workflowUuid)
val emptyEntry = jobStoreJoin.jobStoreSimpletonEntries.find(_.simpletonKey == "empty").get
emptyEntry.simpletonValue.toRawString should be("")
val aEntry = jobStoreJoin.jobStoreSimpletonEntries.find(_.simpletonKey == "aEntry").get
aEntry.simpletonValue.toRawString should be("a")
}
_ <- dataAccess.removeJobStores(Seq(workflowUuid))
} yield ()
future.futureValue
}
it should "store and retrieve empty blobs" taggedAs DbmsTest in {
// See notes in BytesToBlobOption
import eu.timepit.refined.auto._
import eu.timepit.refined.collection._
val testWorkflowState = WorkflowStoreState.Submitted
val clob = "".toClob(default = "{}")
val clobOption = "{}".toClobOption
val emptyWorkflowUuid = WorkflowId.randomId().toString
val emptyWorkflowStoreEntry = WorkflowStoreEntry(
workflowExecutionUuid = emptyWorkflowUuid,
workflowType = WdlWorkflowType,
workflowTypeVersion = None,
workflowDefinition = clobOption,
workflowInputs = clobOption,
workflowOptions = clobOption,
workflowState = testWorkflowState,
restarted = false,
submissionTime = OffsetDateTime.now.toSystemTimestamp,
importsZip = Option(Array.empty[Byte]).toBlobOption,
customLabels = clob)
val noneWorkflowUuid = WorkflowId.randomId().toString
val noneWorkflowStoreEntry = WorkflowStoreEntry(
workflowExecutionUuid = noneWorkflowUuid,
workflowType = WdlWorkflowType,
workflowTypeVersion = None,
workflowDefinition = clobOption,
workflowInputs = clobOption,
workflowOptions = clobOption,
workflowState = testWorkflowState,
restarted = false,
submissionTime = OffsetDateTime.now.toSystemTimestamp,
importsZip = None,
customLabels = clob)
val aByte = 'a'.toByte
val aByteWorkflowUuid = WorkflowId.randomId().toString
val aByteWorkflowStoreEntry = WorkflowStoreEntry(
workflowExecutionUuid = aByteWorkflowUuid,
workflowType = WdlWorkflowType,
workflowTypeVersion = None,
workflowDefinition = clobOption,
workflowInputs = clobOption,
workflowOptions = clobOption,
workflowState = testWorkflowState,
restarted = false,
submissionTime = OffsetDateTime.now.toSystemTimestamp,
importsZip = Option(Array(aByte)).toBlobOption,
customLabels = clob)
val workflowStoreEntries = Seq(emptyWorkflowStoreEntry, noneWorkflowStoreEntry, aByteWorkflowStoreEntry)
val future = for {
_ <- dataAccess.addWorkflowStoreEntries(workflowStoreEntries)
queried <- dataAccess.fetchStartableWorkflows(Int.MaxValue)
_ = {
val emptyEntry = queried.find(_.workflowExecutionUuid == emptyWorkflowUuid).get
emptyEntry.importsZip.toBytesOption should be(None)
val noneEntry = queried.find(_.workflowExecutionUuid == noneWorkflowUuid).get
noneEntry.importsZip.toBytesOption should be(None)
val aByteEntry = queried.find(_.workflowExecutionUuid == aByteWorkflowUuid).get
aByteEntry.importsZip.toBytesOption.get.toSeq should be(Seq(aByte))
}
_ <- dataAccess.removeWorkflowStoreEntry(emptyWorkflowUuid)
_ <- dataAccess.removeWorkflowStoreEntry(noneWorkflowUuid)
_ <- dataAccess.removeWorkflowStoreEntry(aByteWorkflowUuid)
} yield ()
future.futureValue
}
it should "close the database" taggedAs DbmsTest in {
dataAccess.close()
}
}
}
object ServicesStoreSpec {
// strip the namespace from elems and their children
private def stripNodeScope(node: Node): Node = {
node match {
case elem: Elem => elem.copy(scope = TopScope, child = elem.child map stripNodeScope)
case other => other
}
}
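// Creates an in-memory HSQLDB database of the requested type and applies its schema with either Liquibase or Slick.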
private def databaseForSchemaManager(databaseType: String, schemaManager: String): SlickDatabase = {
val databaseConfig = ConfigFactory.parseString(
s"""
|db.url = "jdbc:hsqldb:mem:$${uniqueSchema};shutdown=false;hsqldb.tx=mvcc"
|db.driver = "org.hsqldb.jdbcDriver"
|db.connectionTimeout = 3000
|profile = "slick.jdbc.HsqldbProfile$$"
|liquibase.updateSchema = false
|""".stripMargin)
val (database, settings) = databaseType match {
case "singleton" =>
(new EngineSlickDatabase(databaseConfig), EngineServicesStore.EngineLiquibaseSettings)
case "metadata" => (new MetadataSlickDatabase(databaseConfig), MetadataServicesStore.MetadataLiquibaseSettings)
}
schemaManager match {
case "liquibase" =>
database withConnection LiquibaseUtils.updateSchema(settings)
case "slick" =>
SlickDatabase.createSchema(database)
}
database
}
private def compare[ReferenceProfile <: JdbcProfile, ComparisonProfile <: JdbcProfile, T]
(referenceProfile: ReferenceProfile,
referenceDatabase: ReferenceProfile#Backend#Database,
comparisonProfile: ComparisonProfile,
comparisonDatabase: ComparisonProfile#Backend#Database)(block: DiffResult => T): T = {
withConnections(referenceProfile, referenceDatabase, comparisonProfile, comparisonDatabase) {
LiquibaseUtils.compare(_, _)(block)
}
}
/**
* Lends a connection to a block of code.
*
* @param profile The slick jdbc profile for accessing the database.
* @param database The database to use for the connection.
* @param block The block of code to run over the connection.
* @tparam Profile The slick jdbc profile for accessing the database.
* @tparam T The return type of the block.
* @return The return value of the block.
*/
private def withConnection[Profile <: JdbcProfile, T](profile: Profile, database: Profile#Backend#Database)
(block: Connection => T): T = {
/*
TODO: Should this withConnection() method have an (implicit?) timeout parameter that it passes on to Await.result?
If we run completely asynchronously, nest calls to withConnection, and then call flatMap, the outer connection may
already be closed before an inner block finishes running.
*/
Await.result(database.run(profile.api.SimpleDBIO(context => block(context.connection))), Duration.Inf)
}
/**
* Lends two connections to a block of code.
*
* @param profile1 The slick jdbc profile for accessing the first database.
* @param database1 The database to use for the first connection.
* @param profile2 The slick jdbc profile for accessing the second database.
* @param database2 The database to use for the second connection.
* @param block The block of code to run over the first and second connections.
* @tparam Profile1 The slick jdbc profile for accessing the first database.
* @tparam Profile2 The slick jdbc profile for accessing the second database.
* @tparam T The return type of the block.
* @return The return value of the block.
*/
private def withConnections[Profile1 <: JdbcProfile, Profile2 <: JdbcProfile, T]
(profile1: Profile1, database1: Profile1#Backend#Database, profile2: Profile2, database2: Profile2#Backend#Database)
(block: (Connection, Connection) => T): T = {
withConnection(profile1, database1) { connection1 =>
withConnection(profile2, database2) { connection2 =>
block(connection1, connection2)
}
}
}
private val SnakeRegex = "_([a-z])".r
private def snakeToCamel(value: String): String = {
SnakeRegex.replaceAllIn(value.toLowerCase, _.group(1).toUpperCase)
}
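// e.g. snakeAbbreviate("workflow_execution_uuid") == "WEU"; used to build the expected index name suffixes.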
private def snakeAbbreviate(value: String): String = {
SnakeRegex.findAllMatchIn("_" + value.toLowerCase).map(_.group(1)).mkString("").toUpperCase
}
private val SlickPrimaryKeyRegex = """SYS_PK_\d+""".r
private def isGenerated(primaryKey: MPrimaryKey): Boolean = {
primaryKey.pkName.get match {
case SlickPrimaryKeyRegex(_*) => true
case _ => false
}
}
private val LiquibasePrimaryKeyIndexRegex = """SYS_IDX_PK_[A-Z_]+_\d+""".r
private val SlickPrimaryKeyIndexRegex = """SYS_IDX_SYS_PK_\d+_\d+""".r
private val SlickForeignKeyIndexRegex = """SYS_IDX_\d+""".r
private def isGenerated(index: MIndexInfo): Boolean = {
index.indexName.get match {
case LiquibasePrimaryKeyIndexRegex(_*) => true
case SlickPrimaryKeyIndexRegex(_*) => true
case SlickForeignKeyIndexRegex(_*) => true
case _ => false
}
}
private def tableClassName(tableName: String) = s"cromwell.database.sql.tables.$tableName"
private def slickClassName(tableName: String) =
s"cromwell.database.slick.tables.${tableName}Component$$${tableName.replace("Entry", "Entries")}"
private def getIndexName(index: MIndexInfo) = index.indexName.get.replaceAll("(^SYS_IDX_|_\\d+$)", "")
case class TableClass(tableName: String) {
private def getClass(name: String): Try[Class[_]] = Try(Class.forName(name))
private lazy val tableColumns = getClass(tableClassName(tableName)).map(_.getDeclaredFields).getOrElse(Array.empty)
private lazy val slickMapping = getClass(slickClassName(tableName)).map(_.getDeclaredMethods).getOrElse(Array.empty)
def existsTableField(name: String): Boolean = tableColumns.exists(_.getName == name)
def existsSlickMapping(name: String): Boolean = slickMapping.exists(_.getName == name)
}
case class DatabaseItem(tableName: String, itemName: String)
case class SchemaMetadata(tableMetadata: Seq[MTable], columnMetadata: Seq[MColumn], indexMetadata: Seq[MIndexInfo],
primaryKeyMetadata: Seq[MPrimaryKey], foreignKeyMetadata: Seq[MForeignKey]) {
lazy val tables: Seq[TableClass] = tableMetadata.map({ table =>
val tableName = snakeToCamel(table.name.name).capitalize
TableClass(tableName)
}).distinct
lazy val columns: Seq[DatabaseItem] = columnMetadata.map({ column =>
val tableName = snakeToCamel(column.table.name).capitalize
val columnName = snakeToCamel(column.name)
DatabaseItem(tableName, columnName)
}).distinct
lazy val indexes: Seq[DatabaseItem] = indexMetadata.map({ index =>
val tableName = snakeToCamel(index.table.name).capitalize
val indexName = snakeToCamel(getIndexName(index))
DatabaseItem(tableName, indexName)
}).distinct
lazy val foreignKeys: Seq[DatabaseItem] = foreignKeyMetadata.map({ foreignKey =>
val tableName = snakeToCamel(foreignKey.fkTable.name).capitalize
val indexName = snakeToCamel(foreignKey.fkName.get)
DatabaseItem(tableName, indexName)
}).distinct
lazy val slickItems: Seq[DatabaseItem] = columns ++ indexes ++ foreignKeys
def existsTableItem(tableItem: DatabaseItem): Boolean = {
tables.find(_.tableName == tableItem.tableName).exists(_.existsTableField(tableItem.itemName))
}
def existsSlickMapping(tableItem: DatabaseItem): Boolean = {
tables.find(_.tableName == tableItem.tableName).exists(_.existsSlickMapping(tableItem.itemName))
}
}
private val WdlWorkflowType = Option("WDL")
}
|
ohsu-comp-bio/cromwell
|
services/src/test/scala/cromwell/services/ServicesStoreSpec.scala
|
Scala
|
bsd-3-clause
| 26,080
|
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.fs.tools.status
import com.beust.jcommander.Parameters
import org.locationtech.geomesa.fs.FileSystemDataStore
import org.locationtech.geomesa.fs.tools.FsDataStoreCommand
import org.locationtech.geomesa.fs.tools.FsDataStoreCommand.FsParams
import org.locationtech.geomesa.fs.tools.status.FsGetTypeNamesCommand.FsGetTypeNamesParams
import org.locationtech.geomesa.tools.status.GetTypeNamesCommand
class FsGetTypeNamesCommand extends GetTypeNamesCommand[FileSystemDataStore] with FsDataStoreCommand {
override val params = new FsGetTypeNamesParams()
}
object FsGetTypeNamesCommand {
@Parameters(commandDescription = "List GeoMesa feature type for a given Fs resource")
class FsGetTypeNamesParams extends FsParams
}
|
jahhulbert-ccri/geomesa
|
geomesa-fs/geomesa-fs-tools/src/main/scala/org/locationtech/geomesa/fs/tools/status/FsGetTypeNamesCommand.scala
|
Scala
|
apache-2.0
| 1,219
|
package com.taig.tmpltr.engine.html
import com.taig.tmpltr._
import play.api.mvc.Content
class p( val attributes: Attributes, val content: Content )
extends markup.p
with Tag.Body[p, Content]
object p
extends Tag.Body.Appliable[p, Content]
|
Taig/Play-Tmpltr
|
app/com/taig/tmpltr/engine/html/p.scala
|
Scala
|
mit
| 243
|
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package org.scalajs.nscplugin.test
import org.scalajs.nscplugin.test.util._
import org.scalajs.nscplugin.test.util.VersionDependentUtils.methodSig
import org.junit.Assume._
import org.junit.Test
// scalastyle:off line.size.limit
class JSExportTest extends DirectTest with TestHelpers {
override def extraArgs: List[String] =
super.extraArgs ::: List("-deprecation")
override def preamble: String =
"""import scala.scalajs.js, js.annotation._
"""
@Test
def warnOnDuplicateExport(): Unit = {
"""
class A {
@JSExport
@JSExport
def a = 1
}
""" hasWarns
"""
|newSource1.scala:6: warning: Found duplicate @JSExport
| def a = 1
| ^
"""
"""
class A {
@JSExport
@JSExport("a")
def a = 1
}
""" hasWarns
"""
|newSource1.scala:6: warning: Found duplicate @JSExport
| def a = 1
| ^
"""
"""
class A {
@JSExport("a")
@JSExport("a")
def a = 1
}
""" hasWarns
"""
|newSource1.scala:6: warning: Found duplicate @JSExport
| def a = 1
| ^
"""
// special case for @JSExportAll and 2 or more @JSExport("apply")
// since @JSExportAll and single @JSExport("apply") should not be warned (see other tests)
"""
@JSExportAll
class A {
@JSExport("apply")
@JSExport("apply")
def apply(): Int = 1
}
""" hasWarns
"""
|newSource1.scala:7: warning: Found duplicate @JSExport
| def apply(): Int = 1
| ^
"""
"""
@JSExportAll
class A {
@JSExport
def a = 1
}
""" hasWarns
"""
|newSource1.scala:6: warning: Found duplicate @JSExport
| def a = 1
| ^
"""
}
@Test
def noWarnOnUniqueExplicitName(): Unit = {
"""
class A {
@JSExport("a")
@JSExport("b")
def c = 1
}
""".hasNoWarns()
}
@Test
def noJSExportClass(): Unit = {
"""
@JSExport
class A
@JSExport("Foo")
class B
""" hasErrors
"""
|newSource1.scala:3: error: @JSExport is forbidden on objects and classes. Use @JSExportTopLevel instead.
| @JSExport
| ^
|newSource1.scala:6: error: @JSExport is forbidden on objects and classes. Use @JSExportTopLevel instead.
| @JSExport("Foo")
| ^
"""
}
@Test
def noJSExportObject(): Unit = {
"""
@JSExport
object A
@JSExport("Foo")
object B
""" hasErrors
"""
|newSource1.scala:3: error: @JSExport is forbidden on objects and classes. Use @JSExportTopLevel instead.
| @JSExport
| ^
|newSource1.scala:6: error: @JSExport is forbidden on objects and classes. Use @JSExportTopLevel instead.
| @JSExport("Foo")
| ^
"""
}
@Test
def noDoubleUnderscoreExport(): Unit = {
"""
class A {
@JSExport(name = "__")
def foo = 1
@JSExport
def bar__(x: Int) = x
}
""" hasErrors
"""
|newSource1.scala:4: error: An exported name may not contain a double underscore (`__`)
| @JSExport(name = "__")
| ^
|newSource1.scala:8: error: An exported name may not contain a double underscore (`__`)
| def bar__(x: Int) = x
| ^
"""
}
@Test
def doubleUnderscoreOKInTopLevelExport(): Unit = {
"""
@JSExportTopLevel("__A")
class A
@JSExportTopLevel("__B")
object B
object Container {
@JSExportTopLevel("__c")
def c(): Int = 4
@JSExportTopLevel("__d")
val d: Boolean = true
}
""".hasNoWarns()
}
@Test
def noConflictingExport(): Unit = {
"""
class Confl {
@JSExport("value")
def hello = "foo"
@JSExport("value")
def world = "bar"
}
""" hasErrors
"""
|newSource1.scala:7: error: double definition:
|def $js$exported$prop$value: Any at line 4 and
|def $js$exported$prop$value: Any at line 7
|have same type
| @JSExport("value")
| ^
"""
"""
class Confl {
class Box[T](val x: T)
@JSExport
def ub(x: Box[String]): String = x.x
@JSExport
def ub(x: Box[Int]): Int = x.x
}
""" hasErrors
s"""
|newSource1.scala:8: error: double definition:
|def ${"$js$exported$meth$ub"}(x: Confl.this.Box[String]): Any at line 6 and
|def ${"$js$exported$meth$ub"}(x: Confl.this.Box[Int]): Any at line 8
|have same type after erasure: ${methodSig("(x: Confl#Box)", "Object")}
| @JSExport
| ^
"""
"""
class Confl {
@JSExport
def rtType(x: js.Any) = x
@JSExport
def rtType(x: js.Dynamic) = x
}
""" hasErrors
s"""
|newSource1.scala:7: error: Cannot disambiguate overloads for exported method rtType with types
| ${methodSig("(x: scala.scalajs.js.Any)", "Object")}
| ${methodSig("(x: scala.scalajs.js.Dynamic)", "Object")}
| @JSExport
| ^
"""
"""
class Confl {
@JSExport
def foo(x: Int)(ys: Int*) = x
@JSExport
def foo(x: Int*) = x
}
""" hasErrors
s"""
|newSource1.scala:7: error: Cannot disambiguate overloads for exported method foo with types
| ${methodSig("(x: Int, ys: Seq)", "Object")}
| ${methodSig("(x: Seq)", "Object")}
| @JSExport
| ^
"""
"""
class Confl {
@JSExport
def foo(x: Int = 1) = x
@JSExport
def foo(x: String*) = x
}
""" hasErrors
s"""
|newSource1.scala:6: error: Cannot disambiguate overloads for exported method foo with types
| ${methodSig("(x: Int)", "Object")}
| ${methodSig("(x: Seq)", "Object")}
| @JSExport
| ^
"""
"""
class Confl {
@JSExport
def foo(x: Double, y: String)(z: Int = 1) = x
@JSExport
def foo(x: Double, y: String)(z: String*) = x
}
""" hasErrors
s"""
|newSource1.scala:6: error: Cannot disambiguate overloads for exported method foo with types
| ${methodSig("(x: Double, y: String, z: Int)", "Object")}
| ${methodSig("(x: Double, y: String, z: Seq)", "Object")}
| @JSExport
| ^
"""
"""
class A {
@JSExport
def a(x: scala.scalajs.js.Any) = 1
@JSExport
def a(x: Any) = 2
}
""" hasErrors
s"""
|newSource1.scala:7: error: Cannot disambiguate overloads for exported method a with types
| ${methodSig("(x: Object)", "Object")}
| ${methodSig("(x: scala.scalajs.js.Any)", "Object")}
| @JSExport
| ^
"""
}
@Test
def noExportLocal(): Unit = {
// Local class
"""
class A {
def method = {
@JSExport
class A
@JSExport
class B extends js.Object
}
}
""" hasErrors
"""
|newSource1.scala:5: error: @JSExport is forbidden on objects and classes. Use @JSExportTopLevel instead.
| @JSExport
| ^
|newSource1.scala:8: error: @JSExport is forbidden on objects and classes. Use @JSExportTopLevel instead.
| @JSExport
| ^
"""
// Local object
"""
class A {
def method = {
@JSExport
object A
@JSExport
object B extends js.Object
}
}
""" hasErrors
"""
|newSource1.scala:5: error: @JSExport is forbidden on objects and classes. Use @JSExportTopLevel instead.
| @JSExport
| ^
|newSource1.scala:8: error: @JSExport is forbidden on objects and classes. Use @JSExportTopLevel instead.
| @JSExport
| ^
"""
// Local method
"""
class A {
def method = {
@JSExport
def foo = 1
}
}
""" hasErrors
"""
|newSource1.scala:5: error: You may not export a local definition
| @JSExport
| ^
"""
// Local val
"""
class A {
def method = {
@JSExport
val x = 1
}
}
""" hasErrors
"""
|newSource1.scala:5: error: You may not export a local definition
| @JSExport
| ^
"""
// Local var
"""
class A {
def method = {
@JSExport
var x = 1
}
}
""" hasErrors
"""
|newSource1.scala:5: error: You may not export a local definition
| @JSExport
| ^
"""
}
@Test
def noMiddleVarArg(): Unit = {
"""
class A {
@JSExport
def method(xs: Int*)(ys: String) = 1
}
""" hasErrors
"""
|newSource1.scala:4: error: In an exported method, a *-parameter must come last (through all parameter lists)
| @JSExport
| ^
"""
}
@Test
def noMiddleDefaultParam(): Unit = {
"""
class A {
@JSExport
def method(x: Int = 1)(y: String) = 1
}
""" hasErrors
"""
|newSource1.scala:4: error: In an exported method, all parameters with defaults must be at the end
| @JSExport
| ^
"""
}
@Test
def noExportAbstractClass(): Unit = {
"""
@JSExportTopLevel("A")
abstract class A
abstract class B(x: Int) {
@JSExportTopLevel("B")
def this() = this(5)
}
""" hasErrors
"""
|newSource1.scala:3: error: You may not export an abstract class
| @JSExportTopLevel("A")
| ^
|newSource1.scala:7: error: You may not export an abstract class
| @JSExportTopLevel("B")
| ^
"""
}
@Test
def noJSExportOnTrait(): Unit = {
"""
@JSExport
trait Test
@JSExport
trait Test2 extends js.Object
@JSExport
@js.native
trait Test3 extends js.Object
""" hasErrors
"""
|newSource1.scala:3: error: You may not export a trait
| @JSExport
| ^
|newSource1.scala:6: error: You may not export a trait
| @JSExport
| ^
|newSource1.scala:9: error: You may not export a trait
| @JSExport
| ^
"""
}
@Test
def noExportNonPublicClassOrObject(): Unit = {
"""
@JSExportTopLevel("A")
private class A
@JSExportTopLevel("B")
protected[this] class B
@JSExportTopLevel("C")
private class C extends js.Object
@JSExportTopLevel("D")
protected[this] class D extends js.Object
""" hasErrors
"""
|newSource1.scala:3: error: You may only export public and protected classes
| @JSExportTopLevel("A")
| ^
|newSource1.scala:6: error: You may only export public and protected classes
| @JSExportTopLevel("B")
| ^
|newSource1.scala:9: error: You may only export public and protected classes
| @JSExportTopLevel("C")
| ^
|newSource1.scala:12: error: You may only export public and protected classes
| @JSExportTopLevel("D")
| ^
"""
"""
@JSExportTopLevel("A")
private object A
@JSExportTopLevel("B")
protected[this] object B
@JSExportTopLevel("C")
private object C extends js.Object
@JSExportTopLevel("D")
protected[this] object D extends js.Object
""" hasErrors
"""
|newSource1.scala:3: error: You may only export public and protected objects
| @JSExportTopLevel("A")
| ^
|newSource1.scala:6: error: You may only export public and protected objects
| @JSExportTopLevel("B")
| ^
|newSource1.scala:9: error: You may only export public and protected objects
| @JSExportTopLevel("C")
| ^
|newSource1.scala:12: error: You may only export public and protected objects
| @JSExportTopLevel("D")
| ^
"""
}
@Test
def noExportNonPublicMember(): Unit = {
"""
class A {
@JSExport
private def foo = 1
@JSExport
protected[this] def bar = 2
}
""" hasErrors
"""
|newSource1.scala:4: error: You may only export public and protected methods
| @JSExport
| ^
|newSource1.scala:7: error: You may only export public and protected methods
| @JSExport
| ^
"""
}
@Test
def noExportNestedClass(): Unit = {
"""
class A {
@JSExport
class Nested {
@JSExport
def this(x: Int) = this()
}
@JSExport
class Nested2 extends js.Object
}
""" hasErrors
"""
|newSource1.scala:4: error: @JSExport is forbidden on objects and classes. Use @JSExportTopLevel instead.
| @JSExport
| ^
|newSource1.scala:6: error: @JSExport is forbidden on objects and classes. Use @JSExportTopLevel instead.
| @JSExport
| ^
|newSource1.scala:10: error: @JSExport is forbidden on objects and classes. Use @JSExportTopLevel instead.
| @JSExport
| ^
"""
}
@Test
def noNestedExportClass(): Unit = {
"""
object A {
@JSExport
class Nested {
@JSExport
def this(x: Int) = this()
}
@JSExport
class Nested2 extends js.Object
}
""" hasErrors
"""
|newSource1.scala:4: error: @JSExport is forbidden on objects and classes. Use @JSExportTopLevel instead.
| @JSExport
| ^
|newSource1.scala:6: error: @JSExport is forbidden on objects and classes. Use @JSExportTopLevel instead.
| @JSExport
| ^
|newSource1.scala:10: error: @JSExport is forbidden on objects and classes. Use @JSExportTopLevel instead.
| @JSExport
| ^
"""
}
@Test
def noNestedExportObject(): Unit = {
"""
object A {
@JSExport
object Nested
@JSExport
object Nested2 extends js.Object
}
""" hasErrors
"""
|newSource1.scala:4: error: @JSExport is forbidden on objects and classes. Use @JSExportTopLevel instead.
| @JSExport
| ^
|newSource1.scala:7: error: @JSExport is forbidden on objects and classes. Use @JSExportTopLevel instead.
| @JSExport
| ^
"""
}
@Test
def noExportTopLevelNestedObject(): Unit = {
"""
class A {
@JSExportTopLevel("Nested")
object Nested
@JSExportTopLevel("Nested2")
object Nested2 extends js.Object
}
""" hasErrors
"""
|newSource1.scala:4: error: You may not export a nested object
| @JSExportTopLevel("Nested")
| ^
|newSource1.scala:7: error: You may not export a nested object
| @JSExportTopLevel("Nested2")
| ^
"""
}
@Test
def noExportJSNative(): Unit = {
"""
import scala.scalajs.js
@JSExportTopLevel("A")
@js.native
@JSGlobal("Dummy")
object A extends js.Object
""" hasErrors
"""
|newSource1.scala:5: error: You may not export a native JS object
| @JSExportTopLevel("A")
| ^
"""
"""
import scala.scalajs.js
@JSExportTopLevel("A")
@js.native
trait A extends js.Object
""" hasErrors
"""
|newSource1.scala:5: error: You may not export a trait
| @JSExportTopLevel("A")
| ^
"""
"""
import scala.scalajs.js
@JSExportTopLevel("A")
@js.native
@JSGlobal("Dummy")
class A extends js.Object {
@JSExportTopLevel("A")
def this(x: Int) = this()
}
""" hasErrors
"""
|newSource1.scala:5: error: You may not export a native JS class
| @JSExportTopLevel("A")
| ^
|newSource1.scala:9: error: You may not export a constructor of a subclass of js.Any
| @JSExportTopLevel("A")
| ^
"""
}
@Test
def noExportJSMember(): Unit = {
"""
import scala.scalajs.js
@js.native
@JSGlobal("Dummy")
class A extends js.Object {
@JSExport
def foo: Int = js.native
}
""" hasErrors
"""
|newSource1.scala:8: error: You may not export a method of a subclass of js.Any
| @JSExport
| ^
"""
"""
import scala.scalajs.js
class A extends js.Object {
@JSExport
def foo: Int = js.native
}
""" hasErrors
"""
|newSource1.scala:6: error: You may not export a method of a subclass of js.Any
| @JSExport
| ^
"""
}
@Test
def noBadSetterType(): Unit = {
// Bad param list
"""
class A {
@JSExport
def foo_=(x: Int, y: Int) = ()
}
""" hasErrors
"""
|newSource1.scala:4: error: Exported setters must have exactly one argument
| @JSExport
| ^
"""
// Bad return type
"""
class A {
@JSExport
def foo_=(x: Int) = "string"
}
""" hasErrors
"""
|newSource1.scala:4: error: Exported setters must return Unit
| @JSExport
| ^
"""
// Varargs
"""
class A {
@JSExport
def foo_=(x: Int*) = ()
}
""" hasErrors
"""
|newSource1.scala:4: error: Exported setters may not have repeated params
| @JSExport
| ^
"""
// Default arguments
"""
class A {
@JSExport
def foo_=(x: Int = 1) = ()
}
""" hasErrors
"""
|newSource1.scala:4: error: Exported setters may not have default params
| @JSExport
| ^
"""
}
@Test
def noBadToStringExport(): Unit = {
"""
class A {
@JSExport("toString")
def a(): Int = 5
}
""" hasErrors
"""
|newSource1.scala:4: error: You may not export a zero-argument method named other than 'toString' under the name 'toString'
| @JSExport("toString")
| ^
"""
}
@Test
def noBadNameExportAll(): Unit = {
"""
@JSExportAll
class A {
val __f = 1
def a_= = 2
}
""" hasErrors
"""
|newSource1.scala:5: error: An exported name may not contain a double underscore (`__`)
| val __f = 1
| ^
|newSource1.scala:3: error: Exported setters must return Unit
| @JSExportAll
| ^
"""
}
@Test
def noConflictingMethodAndProperty(): Unit = {
// Basic case
"""
class A {
@JSExport("a")
def bar() = 2
@JSExport("a")
val foo = 1
}
""" hasErrors
"""
|newSource1.scala:4: error: Exported property a conflicts with A.$js$exported$meth$a
| @JSExport("a")
| ^
|newSource1.scala:7: error: Exported method a conflicts with A.$js$exported$prop$a
| @JSExport("a")
| ^
"""
// Inherited case
"""
class A {
@JSExport("a")
def bar() = 2
}
class B extends A {
@JSExport("a")
def foo_=(x: Int): Unit = ()
@JSExport("a")
val foo = 1
}
""" hasErrors
"""
|newSource1.scala:4: error: Exported property a conflicts with A.$js$exported$meth$a
| @JSExport("a")
| ^
"""
}
@Test
def gracefulDoubleDefaultFail(): Unit = {
// This used to blow up (i.e. not just fail), because PrepJSExports asked
// for the symbol of the default parameter getter of [[y]], and asserted that it is
// not overloaded. Since the Scala compiler only fails later on this, the
// assert got triggered and made the compiler crash
"""
class A {
@JSExport
def foo(x: String, y: String = "hello") = x
def foo(x: Int, y: String = "bar") = x
}
""" hasErrors
"""
|newSource1.scala:3: error: in class A, multiple overloaded alternatives of method foo define default arguments.
| class A {
| ^
"""
}
@Test
def noNonLiteralExportNames(): Unit = {
"""
object A {
val a = "Hello"
final val b = "World"
}
class B {
@JSExport(A.a)
def foo = 1
@JSExport(A.b)
def bar = 1
}
""" hasErrors
"""
|newSource1.scala:9: error: The argument to JSExport must be a literal string
| @JSExport(A.a)
| ^
"""
}
@Test
def noNonLiteralModuleID(): Unit = {
"""
object A {
val a = "Hello"
final val b = "World"
}
object B {
@JSExportTopLevel("foo", A.a)
def foo() = 1
@JSExportTopLevel("foo", A.b)
def bar() = 1
}
""" hasErrors
"""
|newSource1.scala:9: error: moduleID must be a literal string
| @JSExportTopLevel("foo", A.a)
| ^
"""
}
@Test
def noExportImplicitApply(): Unit = {
"""
class A {
@JSExport
def apply(): Int = 1
}
""" hasErrors
"""
|newSource1.scala:4: error: A member cannot be exported to function application. Add @JSExport("apply") to export under the name apply.
| @JSExport
| ^
"""
"""
@JSExportAll
class A {
def apply(): Int = 1
}
""" hasErrors
"""
|newSource1.scala:5: error: A member cannot be exported to function application. Add @JSExport("apply") to export under the name apply.
| def apply(): Int = 1
| ^
"""
"""
@JSExportAll
class A {
@JSExport("foo")
def apply(): Int = 1
}
""" hasErrors
"""
|newSource1.scala:6: error: A member cannot be exported to function application. Add @JSExport("apply") to export under the name apply.
| def apply(): Int = 1
| ^
"""
"""
@JSExportAll
class A {
@JSExport("apply")
def apply(): Int = 1
}
""".hasNoWarns()
}
@Test
def exportObjectAsToString(): Unit = {
"""
@JSExportTopLevel("toString")
object ExportAsToString
""".succeeds()
}
private def since(v: String): String = {
val version = scala.util.Properties.versionNumberString
if (version.startsWith("2.11.")) ""
else s" (since $v)"
}
@Test
def noExportTopLevelTrait(): Unit = {
"""
@JSExportTopLevel("foo")
trait A
@JSExportTopLevel("bar")
trait B extends js.Object
""" hasErrors
"""
|newSource1.scala:3: error: You may not export a trait
| @JSExportTopLevel("foo")
| ^
|newSource1.scala:6: error: You may not export a trait
| @JSExportTopLevel("bar")
| ^
"""
"""
object Container {
@JSExportTopLevel("foo")
trait A
@JSExportTopLevel("bar")
trait B extends js.Object
}
""" hasErrors
"""
|newSource1.scala:4: error: You may not export a trait
| @JSExportTopLevel("foo")
| ^
|newSource1.scala:7: error: You may not export a trait
| @JSExportTopLevel("bar")
| ^
"""
}
@Test
def noExportTopLevelLazyVal(): Unit = {
"""
object A {
@JSExportTopLevel("foo")
lazy val a: Int = 1
}
""" hasErrors
"""
|newSource1.scala:4: error: You may not export a lazy val to the top level
| @JSExportTopLevel("foo")
| ^
"""
}
@Test
def noExportTopLevelInvalidJSIdentifier(): Unit = {
"""
@JSExportTopLevel("not-a-valid-JS-identifier-1")
object A
@JSExportTopLevel("not-a-valid-JS-identifier-2")
class B
object C {
@JSExportTopLevel("not-a-valid-JS-identifier-3")
val a: Int = 1
@JSExportTopLevel("not-a-valid-JS-identifier-4")
var b: Int = 1
@JSExportTopLevel("not-a-valid-JS-identifier-5")
def c(): Int = 1
}
@JSExportTopLevel("")
object D
""" hasErrors
"""
|newSource1.scala:3: error: The top-level export name must be a valid JavaScript identifier name
| @JSExportTopLevel("not-a-valid-JS-identifier-1")
| ^
|newSource1.scala:6: error: The top-level export name must be a valid JavaScript identifier name
| @JSExportTopLevel("not-a-valid-JS-identifier-2")
| ^
|newSource1.scala:10: error: The top-level export name must be a valid JavaScript identifier name
| @JSExportTopLevel("not-a-valid-JS-identifier-3")
| ^
|newSource1.scala:13: error: The top-level export name must be a valid JavaScript identifier name
| @JSExportTopLevel("not-a-valid-JS-identifier-4")
| ^
|newSource1.scala:16: error: The top-level export name must be a valid JavaScript identifier name
| @JSExportTopLevel("not-a-valid-JS-identifier-5")
| ^
|newSource1.scala:20: error: The top-level export name must be a valid JavaScript identifier name
| @JSExportTopLevel("")
| ^
"""
}
@Test
def noExportTopLevelNamespaced(): Unit = {
"""
@JSExportTopLevel("namespaced.export1")
object A
@JSExportTopLevel("namespaced.export2")
class B
object C {
@JSExportTopLevel("namespaced.export3")
val a: Int = 1
@JSExportTopLevel("namespaced.export4")
var b: Int = 1
@JSExportTopLevel("namespaced.export5")
def c(): Int = 1
}
""" hasErrors
"""
|newSource1.scala:3: error: The top-level export name must be a valid JavaScript identifier name
| @JSExportTopLevel("namespaced.export1")
| ^
|newSource1.scala:5: error: The top-level export name must be a valid JavaScript identifier name
| @JSExportTopLevel("namespaced.export2")
| ^
|newSource1.scala:8: error: The top-level export name must be a valid JavaScript identifier name
| @JSExportTopLevel("namespaced.export3")
| ^
|newSource1.scala:10: error: The top-level export name must be a valid JavaScript identifier name
| @JSExportTopLevel("namespaced.export4")
| ^
|newSource1.scala:12: error: The top-level export name must be a valid JavaScript identifier name
| @JSExportTopLevel("namespaced.export5")
| ^
"""
}
@Test
def noExportTopLevelGetter(): Unit = {
"""
object A {
@JSExportTopLevel("foo")
def a: Int = 1
}
""" hasErrors
"""
|newSource1.scala:4: error: You may not export a getter or a setter to the top level
| @JSExportTopLevel("foo")
| ^
"""
}
@Test
def noExportTopLevelSetter(): Unit = {
"""
object A {
@JSExportTopLevel("foo")
def a_=(x: Int): Unit = ()
}
""" hasErrors
"""
|newSource1.scala:4: error: You may not export a getter or a setter to the top level
| @JSExportTopLevel("foo")
| ^
"""
}
@Test
def noExportTopLevelFieldsWithSameName(): Unit = {
"""
object A {
@JSExportTopLevel("foo")
val a: Int = 1
@JSExportTopLevel("foo")
var b: Int = 1
}
""" hasErrors
"""
|newSource1.scala:4: error: export overload conflicts with export of variable b: a field may not share its exported name with another export
| @JSExportTopLevel("foo")
| ^
"""
}
@Test
def noExportTopLevelFieldsAndMethodsWithSameName(): Unit = {
"""
object A {
@JSExportTopLevel("foo")
val a: Int = 1
@JSExportTopLevel("foo")
def b(x: Int): Int = x + 1
}
""" hasErrors
"""
|newSource1.scala:4: error: export overload conflicts with export of method b: they are of different types (Field / Method)
| @JSExportTopLevel("foo")
| ^
"""
"""
object A {
@JSExportTopLevel("foo")
def a(x: Int): Int = x + 1
@JSExportTopLevel("foo")
val b: Int = 1
}
""" hasErrors
"""
|newSource1.scala:4: error: export overload conflicts with export of value b: they are of different types (Method / Field)
| @JSExportTopLevel("foo")
| ^
"""
}
@Test
def noExportTopLevelNonStatic(): Unit = {
"""
class A {
@JSExportTopLevel("foo")
def a(): Unit = ()
}
""" hasErrors
"""
|newSource1.scala:4: error: Only static objects may export their members to the top level
| @JSExportTopLevel("foo")
| ^
"""
"""
class A {
object B {
@JSExportTopLevel("foo")
def a(): Unit = ()
}
}
""" hasErrors
"""
|newSource1.scala:5: error: Only static objects may export their members to the top level
| @JSExportTopLevel("foo")
| ^
"""
"""
class A {
@JSExportTopLevel("Foo")
object B
}
""" hasErrors
"""
|newSource1.scala:4: error: You may not export a nested object
| @JSExportTopLevel("Foo")
| ^
"""
"""
class A {
@JSExportTopLevel("Foo")
object B extends js.Object
}
""" hasErrors
"""
|newSource1.scala:4: error: You may not export a nested object
| @JSExportTopLevel("Foo")
| ^
"""
"""
class A {
@JSExportTopLevel("Foo")
class B extends js.Object
}
""" hasErrors
"""
|newSource1.scala:4: error: You may not export a nested class. Create an exported factory method in the outer class to work around this limitation.
| @JSExportTopLevel("Foo")
| ^
"""
"""
class A {
@JSExportTopLevel("Foo")
class B
}
""" hasErrors
"""
|newSource1.scala:4: error: You may not export a nested class. Create an exported factory method in the outer class to work around this limitation.
| @JSExportTopLevel("Foo")
| ^
"""
}
@Test
def noExportTopLevelLocal(): Unit = {
// Local class
"""
class A {
def method = {
@JSExportTopLevel("A")
class A
@JSExportTopLevel("B")
class B extends js.Object
}
}
""" hasErrors
"""
|newSource1.scala:5: error: You may not export a local class
| @JSExportTopLevel("A")
| ^
|newSource1.scala:8: error: You may not export a local class
| @JSExportTopLevel("B")
| ^
"""
// Local object
"""
class A {
def method = {
@JSExportTopLevel("A")
object A
@JSExportTopLevel("B")
object B extends js.Object
}
}
""" hasErrors
"""
|newSource1.scala:5: error: You may not export a local object
| @JSExportTopLevel("A")
| ^
|newSource1.scala:8: error: You may not export a local object
| @JSExportTopLevel("B")
| ^
"""
}
@Test
def noExportTopLevelJSModule(): Unit = {
"""
object A extends js.Object {
@JSExportTopLevel("foo")
def a(): Unit = ()
}
""" hasErrors
"""
|newSource1.scala:4: error: You may not export a method of a subclass of js.Any
| @JSExportTopLevel("foo")
| ^
"""
}
@Test
def noExportStaticModule(): Unit = {
"""
class StaticContainer extends js.Object
object StaticContainer {
@JSExportStatic
object A
}
""" hasErrors
"""
|newSource1.scala:6: error: Implementation restriction: cannot export a class or object as static
| @JSExportStatic
| ^
"""
}
@Test
def noExportStaticTrait(): Unit = {
"""
class StaticContainer extends js.Object
object StaticContainer {
@JSExportStatic
trait A
}
""" hasErrors
"""
|newSource1.scala:6: error: You may not export a trait as static.
| @JSExportStatic
| ^
"""
}
@Test
def noExportStaticClass(): Unit = {
"""
class StaticContainer extends js.Object
object StaticContainer {
@JSExportStatic
class A
}
""" hasErrors
"""
|newSource1.scala:6: error: Implementation restriction: cannot export a class or object as static
| @JSExportStatic
| ^
"""
"""
class StaticContainer extends js.Object
object StaticContainer {
class A {
@JSExportStatic
def this(x: Int) = this()
}
}
""" hasErrors
"""
|newSource1.scala:7: error: Implementation restriction: cannot export a class or object as static
| @JSExportStatic
| ^
"""
}
@Test
def noExportStaticValTwice(): Unit = {
"""
class StaticContainer extends js.Object
object StaticContainer {
@JSExportStatic
@JSExportStatic("b")
val a: Int = 1
}
""" hasErrors
"""
|newSource1.scala:7: error: Fields (val or var) cannot be exported as static more than once
| @JSExportStatic("b")
| ^
"""
}
@Test
def noExportStaticVarTwice(): Unit = {
"""
class StaticContainer extends js.Object
object StaticContainer {
@JSExportStatic
@JSExportStatic("b")
var a: Int = 1
}
""" hasErrors
"""
|newSource1.scala:7: error: Fields (val or var) cannot be exported as static more than once
| @JSExportStatic("b")
| ^
"""
}
@Test
def noExportStaticLazyVal(): Unit = {
"""
class StaticContainer extends js.Object
object StaticContainer {
@JSExportStatic
lazy val a: Int = 1
}
""" hasErrors
"""
|newSource1.scala:6: error: You may not export a lazy val as static
| @JSExportStatic
| ^
"""
}
@Test
def noExportValAsStaticAndTopLevel(): Unit = {
"""
class StaticContainer extends js.Object
object StaticContainer {
@JSExportStatic
@JSExportTopLevel("foo")
val a: Int = 1
}
""" hasErrors
"""
|newSource1.scala:7: error: Fields (val or var) cannot be exported both as static and at the top-level
| @JSExportTopLevel("foo")
| ^
"""
}
@Test
def noExportVarAsStaticAndTopLevel(): Unit = {
"""
class StaticContainer extends js.Object
object StaticContainer {
@JSExportStatic
@JSExportTopLevel("foo")
var a: Int = 1
}
""" hasErrors
"""
|newSource1.scala:7: error: Fields (val or var) cannot be exported both as static and at the top-level
| @JSExportTopLevel("foo")
| ^
"""
}
@Test
def noExportSetterWithBadSetterType(): Unit = {
"""
class StaticContainer extends js.Object
object StaticContainer {
@JSExportStatic
def a_=(x: Int, y: Int): Unit = ()
}
""" hasErrors
"""
|newSource1.scala:6: error: Exported setters must have exactly one argument
| @JSExportStatic
| ^
"""
}
@Test
def noExportStaticCollapsingMethods(): Unit = {
"""
class StaticContainer extends js.Object
object StaticContainer {
@JSExportStatic
def foo(x: Int): Int = x
@JSExportStatic("foo")
def bar(x: Int): Int = x + 1
}
""" hasErrors
s"""
|newSource1.scala:10: error: Cannot disambiguate overloads for exported method foo with types
| ${methodSig("(x: Int)", "Int")}
| ${methodSig("(x: Int)", "Int")}
| def bar(x: Int): Int = x + 1
| ^
"""
}
@Test
def noExportStaticCollapsingGetters(): Unit = {
"""
class StaticContainer extends js.Object
object StaticContainer {
@JSExportStatic
def foo: Int = 1
@JSExportStatic("foo")
def bar: Int = 2
}
""" hasErrors
s"""
|newSource1.scala:10: error: Cannot disambiguate overloads for exported getter foo with types
| ${methodSig("()", "Int")}
| ${methodSig("()", "Int")}
| def bar: Int = 2
| ^
"""
}
@Test
def noExportStaticCollapsingSetters(): Unit = {
"""
class StaticContainer extends js.Object
object StaticContainer {
@JSExportStatic
def foo_=(v: Int): Unit = ()
@JSExportStatic("foo")
def bar_=(v: Int): Unit = ()
}
""" hasErrors
s"""
|newSource1.scala:10: error: Cannot disambiguate overloads for exported setter foo with types
| ${methodSig("(v: Int)", "Unit")}
| ${methodSig("(v: Int)", "Unit")}
| def bar_=(v: Int): Unit = ()
| ^
"""
}
@Test
def noExportStaticFieldsWithSameName(): Unit = {
"""
class StaticContainer extends js.Object
object StaticContainer {
@JSExportStatic
val a: Int = 1
@JSExportStatic("a")
var b: Int = 1
}
""" hasErrors
"""
|newSource1.scala:6: error: export overload conflicts with export of variable b: a field may not share its exported name with another export
| @JSExportStatic
| ^
"""
}
@Test
def noExportStaticFieldsAndMethodsWithSameName(): Unit = {
"""
class StaticContainer extends js.Object
object StaticContainer {
@JSExportStatic
val a: Int = 1
@JSExportStatic("a")
def b(x: Int): Int = x + 1
}
""" hasErrors
"""
|newSource1.scala:6: error: export overload conflicts with export of method b: they are of different types (Field / Method)
| @JSExportStatic
| ^
"""
"""
class StaticContainer extends js.Object
object StaticContainer {
@JSExportStatic
def a(x: Int): Int = x + 1
@JSExportStatic("a")
val b: Int = 1
}
""" hasErrors
"""
|newSource1.scala:6: error: export overload conflicts with export of value b: they are of different types (Method / Field)
| @JSExportStatic
| ^
"""
}
@Test
def noExportStaticFieldsAndPropertiesWithSameName(): Unit = {
"""
class StaticContainer extends js.Object
object StaticContainer {
@JSExportStatic
val a: Int = 1
@JSExportStatic("a")
def b: Int = 2
}
""" hasErrors
"""
|newSource1.scala:6: error: export overload conflicts with export of method b: they are of different types (Field / Property)
| @JSExportStatic
| ^
"""
"""
class StaticContainer extends js.Object
object StaticContainer {
@JSExportStatic
def a: Int = 1
@JSExportStatic("a")
val b: Int = 2
}
""" hasErrors
"""
|newSource1.scala:6: error: export overload conflicts with export of value b: they are of different types (Property / Field)
| @JSExportStatic
| ^
"""
}
@Test
def noExportStaticPropertiesAndMethodsWithSameName(): Unit = {
"""
class StaticContainer extends js.Object
object StaticContainer {
@JSExportStatic
def a: Int = 1
@JSExportStatic("a")
def b(x: Int): Int = x + 1
}
""" hasErrors
"""
|newSource1.scala:6: error: export overload conflicts with export of method b: they are of different types (Property / Method)
| @JSExportStatic
| ^
"""
"""
class StaticContainer extends js.Object
object StaticContainer {
@JSExportStatic
def a(x: Int): Int = x + 1
@JSExportStatic("a")
def b: Int = 1
}
""" hasErrors
"""
|newSource1.scala:6: error: export overload conflicts with export of method b: they are of different types (Method / Property)
| @JSExportStatic
| ^
"""
}
@Test
def noExportStaticNonStatic(): Unit = {
"""
class A {
class StaticContainer extends js.Object
object StaticContainer {
@JSExportStatic
def a(): Unit = ()
}
}
""" hasErrors
"""
|newSource1.scala:7: error: Only a static object whose companion class is a non-native JS class may export its members as static.
| @JSExportStatic
| ^
"""
}
@Test
def noExportStaticInJSModule(): Unit = {
"""
class StaticContainer extends js.Object
object StaticContainer extends js.Object {
@JSExportStatic
def a(): Unit = ()
}
""" hasErrors
"""
|newSource1.scala:6: error: You may not export a method of a subclass of js.Any
| @JSExportStatic
| ^
"""
"""
class StaticContainer extends js.Object
@js.native
@JSGlobal("Dummy")
object StaticContainer extends js.Object {
@JSExportStatic
def a(): Unit = js.native
}
""" hasErrors
"""
|newSource1.scala:8: error: You may not export a method of a subclass of js.Any
| @JSExportStatic
| ^
"""
}
@Test
def noExportStaticIfWrongCompanionType(): Unit = {
"""
class StaticContainer
object StaticContainer {
@JSExportStatic
def a(): Unit = ()
}
""" hasErrors
"""
|newSource1.scala:6: error: Only a static object whose companion class is a non-native JS class may export its members as static.
| @JSExportStatic
| ^
"""
"""
trait StaticContainer extends js.Object
object StaticContainer {
@JSExportStatic
def a(): Unit = ()
}
""" hasErrors
"""
|newSource1.scala:6: error: Only a static object whose companion class is a non-native JS class may export its members as static.
| @JSExportStatic
| ^
"""
"""
@js.native
@JSGlobal("Dummy")
class StaticContainer extends js.Object
object StaticContainer {
@JSExportStatic
def a(): Unit = ()
}
""" hasErrors
"""
|newSource1.scala:8: error: Only a static object whose companion class is a non-native JS class may export its members as static.
| @JSExportStatic
| ^
"""
}
@Test
def noExportStaticFieldAfterStatOrNonStaticField(): Unit = {
for {
offendingDecl <- Seq(
"val a: Int = 1",
"var a: Int = 1",
"""println("foo")"""
)
}
s"""
class StaticContainer extends js.Object
object StaticContainer {
$offendingDecl
@JSExportStatic
val b: Int = 1
@JSExportStatic
var c: Int = 1
@JSExportStatic
def d: Int = 1
@JSExportStatic
def d_=(v: Int): Unit = ()
@JSExportStatic
def e(): Int = 1
}
""" hasErrors
"""
|newSource1.scala:9: error: @JSExportStatic vals and vars must be defined before any other val/var, and before any constructor statement.
| val b: Int = 1
| ^
|newSource1.scala:12: error: @JSExportStatic vals and vars must be defined before any other val/var, and before any constructor statement.
| var c: Int = 1
| ^
"""
for {
validDecl <- Seq(
"@JSExportStatic val a: Int = 1",
"@JSExportStatic var a: Int = 1",
"lazy val a: Int = 1",
"def a: Int = 1",
"def a_=(v: Int): Unit = ()",
"def a(): Int = 1",
"@JSExportStatic def a: Int = 1",
"@JSExportStatic def a_=(v: Int): Unit = ()",
"@JSExportStatic def a(): Int = 1",
"class A",
"object A",
"trait A",
"type A = Int"
)
}
s"""
class StaticContainer extends js.Object
object StaticContainer {
$validDecl
@JSExportStatic
val b: Int = 1
@JSExportStatic
var c: Int = 1
}
""".succeeds()
}
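// Hedged, illustrative addition (not part of the original scala-js test suite): a member export
// that respects the rules exercised above (public member, literal name without a double
// underscore, no conflicting overloads) is expected to compile without errors or warnings.
@Test
def validMemberExportSketch(): Unit = {
"""
class A {
@JSExport
def foo(x: Int): Int = x + 1
@JSExport("bar")
val b: Int = 1
}
""".hasNoWarns()
}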
}
|
scala-js/scala-js
|
compiler/src/test/scala/org/scalajs/nscplugin/test/JSExportTest.scala
|
Scala
|
apache-2.0
| 44,004
|
package extruder.cats.effect
import cats.{Functor, MonadError}
import cats.data.EitherT
import cats.effect._
import cats.syntax.either._
import extruder.core.ValidationErrorsToThrowable
import extruder.data.{ValidationErrors, ValidationT}
trait ConcurrentEffectInstances extends ConcurrentInstances with EffectInstances {
implicit def effectValidationConcurrentEffect[F[_]: ConcurrentEffect](
implicit c: Concurrent[EitherT[F, ValidationErrors, ?]],
tt: ValidationErrorsToThrowable
): ConcurrentEffect[EffectValidation[F, ?]] =
new EffectValidationConcurrentEffect[F] {
override def F: Concurrent[EitherT[F, ValidationErrors, ?]] = Concurrent[EitherT[F, ValidationErrors, ?]]
override def FF: MonadError[ValidationT[F, ?], Throwable] = MonadError[ValidationT[F, ?], Throwable]
override protected def FFF: Functor[EitherT[F, ValidationErrors, ?]] = F
override def FFFF: ConcurrentEffect[F] = ConcurrentEffect[F]
override def toThrowable: ValidationErrorsToThrowable = tt
}
private[effect] trait EffectValidationConcurrentEffect[F[_]]
extends ConcurrentEffect[EffectValidation[F, ?]]
with EffectValidationConcurrent[F]
with EffectValidationEffect[F] {
implicit def FFFF: ConcurrentEffect[F]
def toThrowable: ValidationErrorsToThrowable
override def runCancelable[A](
fa: EffectValidation[F, A]
)(cb: Either[Throwable, A] => IO[Unit]): SyncIO[CancelToken[EffectValidation[F, ?]]] =
FFFF
.runCancelable(fa.a.value)(cb.compose(_.right.flatMap(_.leftMap(toThrowable.convertErrors))))
.map(ct => EffectValidation(EitherT.liftF(ct)(FFFF)))
}
}
|
janstenpickle/extruder
|
cats-effect/src/main/scala/extruder/cats/effect/ConcurrentEffectInstances.scala
|
Scala
|
mit
| 1,657
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature
import java.util.NoSuchElementException
import scala.collection.mutable
import org.apache.spark.SparkException
import org.apache.spark.annotation.Since
import org.apache.spark.ml.Transformer
import org.apache.spark.ml.attribute.{Attribute, AttributeGroup, NumericAttribute, UnresolvedAttribute}
import org.apache.spark.ml.linalg.{Vector, Vectors, VectorUDT}
import org.apache.spark.ml.param.{Param, ParamMap, ParamValidators}
import org.apache.spark.ml.param.shared._
import org.apache.spark.ml.util._
import org.apache.spark.sql.{DataFrame, Dataset, Row}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
/**
* A feature transformer that merges multiple columns into a vector column.
*
* This requires one pass over the entire dataset. In case we need to infer column lengths from the
* data we require an additional call to the 'first' Dataset method, see 'handleInvalid' parameter.
*/
@Since("1.4.0")
class VectorAssembler @Since("1.4.0") (@Since("1.4.0") override val uid: String)
extends Transformer with HasInputCols with HasOutputCol with HasHandleInvalid
with DefaultParamsWritable {
@Since("1.4.0")
def this() = this(Identifiable.randomUID("vecAssembler"))
/** @group setParam */
@Since("1.4.0")
def setInputCols(value: Array[String]): this.type = set(inputCols, value)
/** @group setParam */
@Since("1.4.0")
def setOutputCol(value: String): this.type = set(outputCol, value)
/** @group setParam */
@Since("2.4.0")
def setHandleInvalid(value: String): this.type = set(handleInvalid, value)
/**
* Param for how to handle invalid data (NULL and NaN values). Options are 'skip' (filter out rows
* with invalid data), 'error' (throw an error), or 'keep' (return the relevant number of NaN in
* the output). Column lengths are taken from the size of the ML Attribute Group, which can be set
* using `VectorSizeHint` in a pipeline before `VectorAssembler`. Column lengths can also be
* inferred from the first row of the data, but only with 'error' or 'skip', where doing so is safe.
* Default: "error"
* @group param
*/
@Since("2.4.0")
override val handleInvalid: Param[String] = new Param[String](this, "handleInvalid",
"""Param for how to handle invalid data (NULL and NaN values). Options are 'skip' (filter out
|rows with invalid data), 'error' (throw an error), or 'keep' (return relevant number of NaN
|in the output). Column lengths are taken from the size of ML Attribute Group, which can be
|set using `VectorSizeHint` in a pipeline before `VectorAssembler`. Column lengths can also
|be inferred from first rows of the data since it is safe to do so but only in case of 'error'
      |or 'skip'.""".stripMargin.replaceAll("\n", " "),
ParamValidators.inArray(VectorAssembler.supportedHandleInvalids))
setDefault(handleInvalid, VectorAssembler.ERROR_INVALID)
@Since("2.0.0")
override def transform(dataset: Dataset[_]): DataFrame = {
transformSchema(dataset.schema, logging = true)
// Schema transformation.
val schema = dataset.schema
val vectorCols = $(inputCols).filter { c =>
schema(c).dataType match {
case _: VectorUDT => true
case _ => false
}
}
val vectorColsLengths = VectorAssembler.getLengths(dataset, vectorCols, $(handleInvalid))
val featureAttributesMap = $(inputCols).map { c =>
val field = schema(c)
field.dataType match {
case DoubleType =>
val attribute = Attribute.fromStructField(field)
attribute match {
case UnresolvedAttribute =>
Seq(NumericAttribute.defaultAttr.withName(c))
case _ =>
Seq(attribute.withName(c))
}
case _: NumericType | BooleanType =>
// If the input column type is a compatible scalar type, assume numeric.
Seq(NumericAttribute.defaultAttr.withName(c))
case _: VectorUDT =>
val attributeGroup = AttributeGroup.fromStructField(field)
if (attributeGroup.attributes.isDefined) {
attributeGroup.attributes.get.zipWithIndex.toSeq.map { case (attr, i) =>
if (attr.name.isDefined) {
// TODO: Define a rigorous naming scheme.
attr.withName(c + "_" + attr.name.get)
} else {
attr.withName(c + "_" + i)
}
}
} else {
// Otherwise, treat all attributes as numeric. If we cannot get the number of attributes
// from metadata, check the first row.
(0 until vectorColsLengths(c)).map { i =>
NumericAttribute.defaultAttr.withName(c + "_" + i)
}
}
case otherType =>
throw new SparkException(s"VectorAssembler does not support the $otherType type")
}
}
val featureAttributes = featureAttributesMap.flatten[Attribute]
val lengths = featureAttributesMap.map(a => a.length)
val metadata = new AttributeGroup($(outputCol), featureAttributes).toMetadata()
val filteredDataset = $(handleInvalid) match {
case VectorAssembler.SKIP_INVALID => dataset.na.drop($(inputCols))
case VectorAssembler.KEEP_INVALID | VectorAssembler.ERROR_INVALID => dataset
}
val keepInvalid = $(handleInvalid) == VectorAssembler.KEEP_INVALID
// Data transformation.
val assembleFunc = udf { r: Row =>
VectorAssembler.assemble(lengths, keepInvalid)(r.toSeq: _*)
}.asNondeterministic()
val args = $(inputCols).map { c =>
schema(c).dataType match {
case DoubleType => dataset(c)
case _: VectorUDT => dataset(c)
case _: NumericType | BooleanType => dataset(c).cast(DoubleType).as(s"${c}_double_$uid")
}
}
filteredDataset.select(col("*"), assembleFunc(struct(args: _*)).as($(outputCol), metadata))
}
@Since("1.4.0")
override def transformSchema(schema: StructType): StructType = {
val inputColNames = $(inputCols)
val outputColName = $(outputCol)
val incorrectColumns = inputColNames.flatMap { name =>
schema(name).dataType match {
case _: NumericType | BooleanType => None
case t if t.isInstanceOf[VectorUDT] => None
case other => Some(s"Data type ${other.catalogString} of column $name is not supported.")
}
}
if (incorrectColumns.nonEmpty) {
throw new IllegalArgumentException(incorrectColumns.mkString("\n"))
}
if (schema.fieldNames.contains(outputColName)) {
throw new IllegalArgumentException(s"Output column $outputColName already exists.")
}
StructType(schema.fields :+ new StructField(outputColName, new VectorUDT, true))
}
@Since("1.4.1")
override def copy(extra: ParamMap): VectorAssembler = defaultCopy(extra)
}
@Since("1.6.0")
object VectorAssembler extends DefaultParamsReadable[VectorAssembler] {
private[feature] val SKIP_INVALID: String = "skip"
private[feature] val ERROR_INVALID: String = "error"
private[feature] val KEEP_INVALID: String = "keep"
private[feature] val supportedHandleInvalids: Array[String] =
Array(SKIP_INVALID, ERROR_INVALID, KEEP_INVALID)
/**
* Infers lengths of vector columns from the first row of the dataset
* @param dataset the dataset
* @param columns name of vector columns whose lengths need to be inferred
* @return map of column names to lengths
*/
private[feature] def getVectorLengthsFromFirstRow(
dataset: Dataset[_],
columns: Seq[String]): Map[String, Int] = {
try {
val first_row = dataset.toDF().select(columns.map(col): _*).first()
columns.zip(first_row.toSeq).map {
case (c, x) => c -> x.asInstanceOf[Vector].size
}.toMap
} catch {
case e: NullPointerException => throw new NullPointerException(
s"""Encountered null value while inferring lengths from the first row. Consider using
|VectorSizeHint to add metadata for columns: ${columns.mkString("[", ", ", "]")}. """
.stripMargin.replaceAll("\n", " ") + e.toString)
case e: NoSuchElementException => throw new NoSuchElementException(
s"""Encountered empty dataframe while inferring lengths from the first row. Consider using
|VectorSizeHint to add metadata for columns: ${columns.mkString("[", ", ", "]")}. """
.stripMargin.replaceAll("\n", " ") + e.toString)
}
}
private[feature] def getLengths(
dataset: Dataset[_],
columns: Seq[String],
handleInvalid: String): Map[String, Int] = {
val groupSizes = columns.map { c =>
c -> AttributeGroup.fromStructField(dataset.schema(c)).size
}.toMap
val missingColumns = groupSizes.filter(_._2 == -1).keys.toSeq
val firstSizes = (missingColumns.nonEmpty, handleInvalid) match {
case (true, VectorAssembler.ERROR_INVALID) =>
getVectorLengthsFromFirstRow(dataset, missingColumns)
case (true, VectorAssembler.SKIP_INVALID) =>
getVectorLengthsFromFirstRow(dataset.na.drop(missingColumns), missingColumns)
case (true, VectorAssembler.KEEP_INVALID) => throw new RuntimeException(
s"""Can not infer column lengths with handleInvalid = "keep". Consider using VectorSizeHint
|to add metadata for columns: ${columns.mkString("[", ", ", "]")}."""
.stripMargin.replaceAll("\n", " "))
case (_, _) => Map.empty
}
groupSizes ++ firstSizes
}
@Since("1.6.0")
override def load(path: String): VectorAssembler = super.load(path)
/**
* Returns a function that has the required information to assemble each row.
* @param lengths an array of lengths of input columns, whose size should be equal to the number
* of cells in the row (vv)
* @param keepInvalid indicates whether to keep invalid entries (emitting NaN) instead of throwing an error on seeing a null in the rows
* @return a udf that can be applied on each row
*/
private[feature] def assemble(lengths: Array[Int], keepInvalid: Boolean)(vv: Any*): Vector = {
val indices = mutable.ArrayBuilder.make[Int]
val values = mutable.ArrayBuilder.make[Double]
var featureIndex = 0
var inputColumnIndex = 0
vv.foreach {
case v: Double =>
if (v.isNaN && !keepInvalid) {
throw new SparkException(
s"""Encountered NaN while assembling a row with handleInvalid = "error". Consider
|removing NaNs from dataset or using handleInvalid = "keep" or "skip"."""
.stripMargin)
} else if (v != 0.0) {
indices += featureIndex
values += v
}
inputColumnIndex += 1
featureIndex += 1
case vec: Vector =>
vec.foreachActive { case (i, v) =>
if (v != 0.0) {
indices += featureIndex + i
values += v
}
}
inputColumnIndex += 1
featureIndex += vec.size
case null =>
if (keepInvalid) {
val length: Int = lengths(inputColumnIndex)
Array.range(0, length).foreach { i =>
indices += featureIndex + i
values += Double.NaN
}
inputColumnIndex += 1
featureIndex += length
} else {
throw new SparkException(
s"""Encountered null while assembling a row with handleInvalid = "keep". Consider
|removing nulls from dataset or using handleInvalid = "keep" or "skip"."""
.stripMargin)
}
case o =>
throw new SparkException(s"$o of type ${o.getClass.getName} is not supported.")
}
Vectors.sparse(featureIndex, indices.result(), values.result()).compressed
}
}
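// Hedged usage sketch (not part of the Spark source above; the column names and the passed-in
// SparkSession are illustrative): assembling two numeric columns into a single "features"
// vector while keeping invalid rows as NaN entries.
object VectorAssemblerUsageSketch {
  import org.apache.spark.sql.SparkSession

  def example(spark: SparkSession): Unit = {
    import spark.implicits._
    // A toy dataset with a NaN value to exercise handleInvalid = "keep".
    val df = Seq((1.0, 2.0), (3.0, Double.NaN)).toDF("height", "weight")
    val assembler = new VectorAssembler()
      .setInputCols(Array("height", "weight"))
      .setOutputCol("features")
      .setHandleInvalid("keep") // "error" (the default) or "skip" are the other options
    assembler.transform(df).show(truncate = false)
  }
}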
|
aosagie/spark
|
mllib/src/main/scala/org/apache/spark/ml/feature/VectorAssembler.scala
|
Scala
|
apache-2.0
| 12,472
|
package com.github.tarao
package slickjdbc
package interpolation
import util.NonEmpty
import scala.annotation.implicitNotFound
import slick.jdbc.{SetParameter => SP, PositionedParameters}
trait CompoundParameter {
implicit val createSetProduct: SP[Product] = SetProduct
@inline implicit
def createSetList[T](implicit c: SP[T]): SetList[T, NonEmpty[T]] =
new SetList[T, NonEmpty[T]](c)
}
object CompoundParameter extends CompoundParameter
/** SetParameter for non-empty list types. */
class SetList[S, -T <: NonEmpty[S]](val c: SP[S]) extends SP[T] {
def apply(param: T, pp: PositionedParameters): Unit = {
param.foreach { item => c.asInstanceOf[SP[Any]](item, pp) }
}
}
/** SetParameter for product types especially for case classes. */
object SetProduct extends SP[Product] {
def apply(prod: Product, pp: PositionedParameters): Unit =
for (v <- prod.productIterator) v match {
case p: Product => SetProduct(p, pp)
case v => SP.SetSimpleProduct(Tuple1(v), pp)
}
}
@implicitNotFound(msg = "Unsupported parameter type: ${T}.\n" +
"[NOTE] You need an implicit of slick.jdbc.SetParameter[${T}] to pass a value of the type.")
sealed trait CheckParameter[-T]
object CheckParameter {
implicit def valid[T](implicit c: SP[T]): CheckParameter[T] =
new CheckParameter[T] {}
}
@implicitNotFound(msg = "A product is passed.\n" +
"[NOTE] Use interpolation.CompoundParameter trait to enable passing a product.")
sealed trait CheckProduct[-T]
object CheckProduct {
implicit def valid[T](implicit c: SP[T]): CheckProduct[T] =
new CheckProduct[T] {}
}
@implicitNotFound(msg = "Illegal parameter type: ${T}.\n" +
"[NOTE] A list is not allowed since it may be empty and breaks the query.\n" +
"[NOTE] Pass a util.NonEmpty[] if you know that it is not empty.")
sealed trait CheckList[-T]
object CheckList {
implicit def valid[T](implicit c: SP[T]): CheckList[T] =
new CheckList[T] {}
}
@implicitNotFound(msg = "A non-empty list is passed.\n" +
"[NOTE] Use interpolation.CompoundParameter trait to enable passing a non-empty list.")
sealed trait CheckNonEmpty[-T]
object CheckNonEmpty {
implicit def valid[T](implicit c: SP[T]): CheckNonEmpty[T] =
new CheckNonEmpty[T] {}
}
sealed trait NoOptionNonEmpty[-T]
object NoOptionNonEmpty {
implicit def valid[T]: NoOptionNonEmpty[T] = new NoOptionNonEmpty[T] {}
// $COVERAGE-OFF$
implicit def ambig1[T]: NoOptionNonEmpty[Option[NonEmpty[T]]] =
sys.error("unexpected")
implicit def ambig2[T]: NoOptionNonEmpty[Option[NonEmpty[T]]] =
sys.error("unexpected")
// $COVERAGE-ON$
}
@implicitNotFound(msg = "A maybe-non-empty list is passed.\n" +
"[NOTE] Break it into Some(_) or None to confirm that it is not empty.")
sealed trait CheckOptionNonEmpty[-T]
object CheckOptionNonEmpty {
implicit def valid[T](implicit
check: NoOptionNonEmpty[T],
c: SP[T]
): CheckOptionNonEmpty[T] = new CheckOptionNonEmpty[T] {}
}
sealed trait NoOption[-T]
object NoOption {
implicit def valid[T]: NoOption[T] = new NoOption[T] {}
// $COVERAGE-OFF$
implicit def ambig1[S]: NoOption[Option[S]] = sys.error("unexpected")
implicit def ambig2[S]: NoOption[Option[S]] = sys.error("unexpected")
// $COVERAGE-ON$
}
@implicitNotFound(msg = "Illegal parameter type: ${T}\n" +
"[NOTE] An option is not allowed since it may be none and breaks the query.\n" +
"[NOTE] Break it into Some(_) or None to confirm that it has a value.")
sealed trait CheckOption[-T]
object CheckOption {
implicit def valid[T](implicit
check: NoOption[T],
c: SP[T]
): CheckOption[T] = new CheckOption[T] {}
}
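// Hedged, illustrative sketch (the object and case class below are hypothetical, not part of this
// library): CompoundParameter is meant to be mixed into the code that builds queries, so that
// products (case classes) and util.NonEmpty lists gain SetParameter instances and satisfy the
// Check* evidence declared above.
object CompoundParameterSketch extends CompoundParameter {
  final case class UserKey(tenant: Int, name: String)
  // createSetProduct supplies SP[Product], so a UserKey can be bound positionally;
  // createSetList does the same for NonEmpty[Int], NonEmpty[String], and so on.
  val productSetter: SP[Product] = createSetProduct
  val listSetter: SetList[Int, NonEmpty[Int]] = createSetList[Int]
}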
|
TimothyKlim/slick-jdbc-extension-scala
|
src/main/scala/com/github/tarao/slickjdbc/interpolation/SetParameter.scala
|
Scala
|
mit
| 3,617
|
/*
* Copyright 2010 Twitter, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter
package object logging {
type HandlerFactory = (() => Handler)
}
|
travisbrown/util
|
util-logging/src/main/scala/com/twitter/logging/package.scala
|
Scala
|
apache-2.0
| 686
|
/*
* Copyright (C) 2016 VSCT
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package fr.vsct.dt.maze.topology
import fr.vsct.dt.maze.core.Execution
import scala.concurrent.duration.FiniteDuration
/**
* This is an abstraction to describe a node of a cluster, handled by docker.
*
* For convenient use, see com.vsct.dt.dsl.MultipleContainerClusterNode or com.vsct.dt.dsl.SingleContainerClusterNode.
*/
trait ClusterNode {
/**
* Start node
*/
def start(): Unit
/**
* Stop node
*/
def stop(): Unit
/**
* Trash the node: stop it (do not expect it to be stopped cleanly) and remove its metadata.
*
* A cleared node cannot be restarted.
*/
def clear(): Unit
/**
* Stop and start a node
*/
def restart(): Unit = {
stop()
start()
}
/**
* Kill a node, sending a given signal
*
* @param signal the unix signal to send, by default SIGTERM
*/
def kill(signal: String = "SIGTERM"): Unit
/**
* Stop cleanly the node by calling the shutdown script.
*/
def stopCleanly(): Unit
/**
* Violently crash the process.
*/
def crash(): Unit
/**
* Return the complete logs for this node
*
* @return a way to retrieve the logs, as an array
*/
def logs: Execution[Array[String]]
/**
* Execute some command on a shell
*
* @param command the command to execute, every parameter is a word (for instance: "ps", "aux")
* @return the result of the command as a list of lines
*/
def shellExecution(command: String*): Execution[Array[String]]
/**
* Add network lag of the given duration to the whole node.
*
* @param duration of the added lag
*/
def lag(duration: FiniteDuration): Unit
/**
* Fill the filesystem by creating a file the size of the remaining space at the given path.
*
* @param path the path to the partition to fill
*/
def fillFilesystem(path: String): Unit = shellExecution("dd", "if=/dev/zero", s"of=$path/fill-it", "bs=$((1024*1024))", "count=$((1024*1024))").execute()
/**
* Getter for the hostname of this node
*
* @return the hostname, used to call this node from inside the network
*/
var hostname: String
/**
* The service port is the main port of the application. For instance, on tomcat, this port usually is 8080.
* This is the port that will be used, for instance, to create connection strings
*
* @return the service port
*/
def servicePort: Int
/**
* The IP of the node
*/
def ip: String
/**
* Create a file with a given content on the container
*
* @param path the path of the file to create
* @param content the content of the file
*/
def createFile(path: String, content: String): Unit
override def toString: String = hostname
}
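// Hedged usage sketch (not part of the library; `node` stands for any concrete ClusterNode
// implementation, e.g. a Docker-backed node): restart a node and then inspect its processes
// through the shellExecution/Execution machinery used above in fillFilesystem.
object ClusterNodeUsageSketch {
  def restartAndListProcesses(node: ClusterNode): Unit = {
    node.restart()                             // stop() followed by start()
    node.shellExecution("ps", "aux").execute() // each word of the command is a separate argument
  }
}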
|
voyages-sncf-technologies/maze
|
src/main/scala/fr/vsct/dt/maze/topology/ClusterNode.scala
|
Scala
|
apache-2.0
| 3,309
|
package co.spendabit.webapp.forms.ui.bootstrap
import co.spendabit.webapp.forms.ui.FormRenderer
/** Implements a form using Bootstrap, as seen in the "basic example", here:
* http://getbootstrap.com/css/#forms-example
*/
class BasicForm extends FormRenderer {
def formElem(labeledControls: xml.NodeSeq): xml.Elem =
<form>
{ labeledControls }
{ submitSection }
</form>
def labeledControl(label: String, control: xml.NodeSeq): xml.NodeSeq =
if (isCheckbox(control)) {
<div class="checkbox"> <label>{ control } { label }</label> </div>
} else {
val widget =
if (isFileInput(control))
control
else
withAttr(control, "class", "form-control")
<div class="form-group">
<label>{ label }</label> { widget }
</div>
}
protected def submitSection: xml.NodeSeq =
<button type="submit" class="btn btn-primary">{ submitButtonLabel }</button>
protected def submitButtonLabel: String = "Submit"
}
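// Hedged, illustrative sketch (the field markup below is ad hoc, not from this project):
// rendering a labeled text input and a checkbox with the BasicForm renderer defined above.
object BasicFormSketch {
  private val renderer = new BasicForm
  val rendered: xml.Elem = renderer.formElem(
    renderer.labeledControl("Email address", <input type="text" name="email"/>) ++
    renderer.labeledControl("Remember me", <input type="checkbox" name="remember"/>)
  )
}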
|
spendabit/webapp-tools
|
src/co/spendabit/webapp/forms/ui/bootstrap/BasicForm.scala
|
Scala
|
unlicense
| 1,004
|
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.eval.internal
import monix.eval.Task.Context
import monix.execution.Callback
import monix.eval.Task
import monix.execution.Ack.Stop
import monix.execution.Scheduler
import monix.execution.atomic.PaddingStrategy.LeftRight128
import monix.execution.atomic.{Atomic, AtomicAny}
import monix.execution.internal.exceptions.matchError
import scala.annotation.tailrec
import scala.util.control.NonFatal
private[eval] object TaskMapBoth {
/**
* Implementation for `Task.mapBoth`.
*/
def apply[A1, A2, R](fa1: Task[A1], fa2: Task[A2])(f: (A1, A2) => R): Task[R] = {
TracedAsync(new Register(fa1, fa2, f), trampolineBefore = true, trampolineAfter = true, restoreLocals = true, traceKey = f)
}
// Implementing Async's "start" via `ForkedStart` in order to signal
// that this is a task that forks on evaluation.
//
// N.B. the contract is that the injected callback gets called after
// a full async boundary!
private final class Register[A1, A2, R](fa1: Task[A1], fa2: Task[A2], f: (A1, A2) => R) extends ForkedRegister[R] {
/* For signaling the values after the successful completion of both tasks. */
def sendSignal(mainConn: TaskConnection, cb: Callback[Throwable, R], a1: A1, a2: A2)(
implicit s: Scheduler): Unit = {
var streamErrors = true
try {
val r = f(a1, a2)
streamErrors = false
mainConn.pop()
cb.onSuccess(r)
} catch {
case NonFatal(ex) if streamErrors =>
// Both tasks completed by this point, so we don't need
// to worry about the `state` being a `Stop`
mainConn.pop()
cb.onError(ex)
}
}
/* For signaling an error. */
@tailrec def sendError(
mainConn: TaskConnection,
state: AtomicAny[AnyRef],
cb: Callback[Throwable, R],
ex: Throwable)(implicit s: Scheduler): Unit = {
// Guarding the contract of the callback, as we cannot send an error
// if an error has already happened because of the other task
state.get() match {
case Stop =>
// We've got nowhere to send the error, so report it
s.reportFailure(ex)
case other =>
if (!state.compareAndSet(other, Stop))
sendError(mainConn, state, cb, ex)(s) // retry
else {
mainConn.pop().map(_ => cb.onError(ex)).runAsyncAndForget
}
}
}
def apply(context: Context, cb: Callback[Throwable, R]): Unit = {
implicit val s = context.scheduler
val mainConn = context.connection
// for synchronizing the results
val state = Atomic.withPadding(null: AnyRef, LeftRight128)
val task1 = TaskConnection()
val task2 = TaskConnection()
val context1 = context.withConnection(task1)
val context2 = context.withConnection(task2)
mainConn.pushConnections(task1, task2)
// Light asynchronous boundary; with most scheduler implementations
// it will not fork a new (logical) thread!
Task.unsafeStartEnsureAsync(
fa1,
context1,
new Callback[Throwable, A1] {
@tailrec def onSuccess(a1: A1): Unit =
state.get() match {
case null => // null means this is the first task to complete
if (!state.compareAndSet(null, Left(a1))) onSuccess(a1)
case Right(a2) => // the other task completed, so we can send
sendSignal(mainConn, cb, a1, a2.asInstanceOf[A2])(s)
case Stop => // the other task triggered an error
() // do nothing
case s @ Left(_) =>
// This task has triggered multiple onSuccess calls
// violating the protocol. Should never happen.
onError(new IllegalStateException(s.toString))
case other =>
// $COVERAGE-OFF$
matchError(other)
// $COVERAGE-ON$
}
def onError(ex: Throwable): Unit =
sendError(mainConn, state, cb, ex)(s)
}
)
// The same light asynchronous boundary for the second task, so both are evaluated in parallel
Task.unsafeStartEnsureAsync(
fa2,
context2,
new Callback[Throwable, A2] {
@tailrec def onSuccess(a2: A2): Unit =
state.get() match {
case null => // null means this is the first task to complete
if (!state.compareAndSet(null, Right(a2))) onSuccess(a2)
case Left(a1) => // the other task completed, so we can send
sendSignal(mainConn, cb, a1.asInstanceOf[A1], a2)(s)
case Stop => // the other task triggered an error
() // do nothing
case s @ Right(_) =>
// This task has triggered multiple onSuccess calls
// violating the protocol. Should never happen.
onError(new IllegalStateException(s.toString))
case other =>
// $COVERAGE-OFF$
matchError(other)
// $COVERAGE-ON$
}
def onError(ex: Throwable): Unit =
sendError(mainConn, state, cb, ex)(s)
}
)
}
}
}
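// Hedged usage sketch (illustrative values, not part of the monix sources): Task.mapBoth, which
// is backed by TaskMapBoth.apply above, evaluates both tasks in parallel and combines their
// results; the first error, if any, wins (see sendError above).
object TaskMapBothUsageSketch {
  import monix.eval.Task

  val parallelSum: Task[Int] =
    Task.mapBoth(Task(1 + 1), Task(2 + 2))(_ + _)
  // parallelSum.runToFuture (given an implicit Scheduler) eventually yields 6.
}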
|
monixio/monix
|
monix-eval/shared/src/main/scala/monix/eval/internal/TaskMapBoth.scala
|
Scala
|
apache-2.0
| 5,901
|
package io.finch
import cats.Eq
import cats.effect.{ContextShift, IO}
import com.twitter.io.Buf
import scala.concurrent.ExecutionContext
/**
* Type class instances for non-Finch types.
*/
trait MissingInstances {
implicit def eqEither[A](implicit A: Eq[A]): Eq[Either[Throwable, A]] = Eq.instance {
case (Right(a), Right(b)) => A.eqv(a, b)
case (Left(x), Left(y)) => x == y
case _ => false
}
implicit def eqBuf: Eq[Buf] = Eq.fromUniversalEquals
implicit val shift: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
}
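// Hedged, test-only sketch (not part of the original file): with MissingInstances mixed in,
// Buf values and Either results can be compared through cats.Eq in property checks.
object MissingInstancesSketch extends MissingInstances {
  import cats.implicits._

  val sameBuf: Boolean = eqBuf.eqv(Buf.Utf8("finch"), Buf.Utf8("finch")) // true
  val sameEither: Boolean = eqEither[Int].eqv(Right(1), Right(1))        // true
}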
|
ImLiar/finch
|
core/src/test/scala/io/finch/MissingInstances.scala
|
Scala
|
apache-2.0
| 553
|
/*
mls: basic machine learning algorithms for Scala
Copyright (C) 2014 Davi Pereira dos Santos
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package ml.models
import ml.Pattern
trait Model {
def JS(pattern: Pattern): Double
val L: Int
def predict(instance: Pattern): Double
/**
* Shows how many times the row's class (true label) was predicted as the column's class.
* @param patts
*/
def confusion(patts: Seq[Pattern]) = if (patts.isEmpty) {
println("Empty list of patterns at confusion matrix.")
sys.exit(1)
} else {
val nc = patts.head.nclasses
val n = patts.size
val res = Array.fill(nc)(Array.fill(nc)(0))
var i = 0
while (i < n) {
val p = patts(i)
res(p.label.toInt)(predict(p).toInt) += 1
i += 1
}
res
}
def distribution(instance: Pattern): Array[Double]
protected def log(x: Double) = if (x == 0) 0d else math.log(x)
protected def normalized_entropy(P: Array[Double]) = -P.map(x => x * log(x)).sum / log(P.length)
protected def media_desvioPadrao(items: Vector[Double]) = {
val s = items.sum
val l = items.length.toDouble
val m = s / l
val v0 = (items map {
x =>
val di = x - m
di * di
}).sum / (l - 1)
val v = if (v0.isNaN) 0 else v0
val d = math.sqrt(v)
(m, d)
}
def predictionEntropy(patts: Seq[Pattern]) = if (patts.isEmpty) {
println("Empty list of patterns at predictionEntropy.")
sys.exit(1)
} else {
val ents = patts.map(x => normalized_entropy(distribution(x)))
media_desvioPadrao(ents.toVector)
}
def output(instance: Pattern): Array[Double]
// {
// val dist = distribution(instance)
// val nclasses = instance.nclasses
// var c = 0
// var max = 0d
// var cmax = 0
// while (c < nclasses) {
// val v = dist(c)
// if (v > max) {
// max = v
// cmax = c
// }
// c += 1
// }
// cmax
// }
def hit(instance: Pattern) = instance.label == predict(instance)
def hits(patterns: Seq[Pattern]) = patterns.count(hit) //weka is not thread-safe to parallelize hits()
def accuracy(patterns: Seq[Pattern], n: Double = -1) = {
hits(patterns) / (if (n == -1) patterns.length.toDouble else n)
}
def hits_and_qtd_per_class(patterns: Seq[Pattern]) = {
??? //inefficient
(0 until patterns.head.nclasses) map {
c =>
val hits_for_this_class = patterns.filter(_.label == c)
val hits = (hits_for_this_class map hit) count (_ == true)
(hits, hits_for_this_class.length)
}
}
}
//trait IncrementalModel extends Model
trait BatchModel extends Model {
val training_set: Vector[Pattern]
}
// val size: Double
// def distributions(instance: Pattern): Seq[Array[Double]]
// def distribution(instance: Pattern) = {
// val dists = distributions(instance)
// val dist = dists(0)
// val nclasses = instance.nclasses
// var c = 0
// while (c < nclasses) {
// var d = 1
// while (d < size) {
// dist(c) += dists(d)(c)
// d += 1
// }
// dist(c) /= size
// c += 1
// }
// dist
// // distributions(instance).transpose.map(_.sum / size).toArray
// } //average between distributions (is it the same as adding and normalizing?)
// /**
// * Hard prediction for a given instance.
// * In the case of ensembles, hard vote will be performed.
// * @param instance
// * @return
// */
// def predict(instance: Pattern) = {
// val dists = distributions(instance) //weka classifyInstance() also internally falls back to distributionForInstance()
// val nclasses = instance.nclasses
// val votes = new Array[Int](nclasses)
// var d = 0
// while (d < size) {
// var c = 0
// var max = 0d
// var cmax = 0
// while (c < nclasses) {
// val v = dists(d)(c)
// if (v > max) {
// max = v
// cmax = c
// }
// c += 1
// }
// votes(cmax) += 1
// d += 1
// }
// var c = 0
// var max = 0
// var cmax = 0
// while (c < nclasses) {
// val v = votes(c)
// if (v > max) {
// max = v
// cmax = c
// }
// c += 1
// }
// cmax
// }
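// ----------------------------------------------------------------------------------
// Editor's note: `confusion` above counts, for each (true label, predicted label)
// pair, how many times the row's class was predicted as the column's class, i.e.
// res(actual)(predicted). The standalone sketch below reproduces that counting
// scheme on plain (actual, predicted) integer pairs, without depending on Pattern
// or any model; it is an illustration only and not part of this library.
object ConfusionCountingSketch {
  def confusion(pairs: Seq[(Int, Int)], nClasses: Int): Array[Array[Int]] = {
    val res = Array.fill(nClasses)(Array.fill(nClasses)(0))
    pairs.foreach { case (actual, predicted) => res(actual)(predicted) += 1 }
    res
  }

  def main(args: Array[String]): Unit = {
    // three instances of class 0 (two predicted correctly, one predicted as 1)
    // and one instance of class 1 (predicted correctly)
    val m = confusion(Seq((0, 0), (0, 0), (0, 1), (1, 1)), nClasses = 2)
    println(m.map(_.mkString(" ")).mkString("\n")) // prints "2 1" then "0 1"
  }
}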
|
javadba/mls
|
src/main/scala/ml/models/Model.scala
|
Scala
|
gpl-3.0
| 4,950
|
/*
* This file is part of AckCord, licensed under the MIT License (MIT).
*
* Copyright (c) 2019 Katrix
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package ackcord.voice
import java.nio.ByteBuffer
import scala.annotation.unchecked.uncheckedVariance
import scala.collection.immutable
import ackcord.data.{RawSnowflake, UserId}
import akka.stream._
import akka.stream.scaladsl.{BidiFlow, GraphDSL, Source}
import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler}
import akka.util.ByteString
import com.iwebpp.crypto.TweetNaclFast
case class BidiShapeWithExtraIn[-In1, +Out1, -In2, +Out2, -EIn](
in1: Inlet[In1 @uncheckedVariance],
out1: Outlet[Out1 @uncheckedVariance],
in2: Inlet[In2 @uncheckedVariance],
out2: Outlet[Out2 @uncheckedVariance],
extraIn: Inlet[EIn @uncheckedVariance]
) extends Shape {
override def inlets: immutable.Seq[Inlet[_]] = immutable.Seq(in1, in2, extraIn)
override def outlets: immutable.Seq[Outlet[_]] = immutable.Seq(out1, out2)
override def deepCopy(): Shape =
BidiShapeWithExtraIn(in1.carbonCopy(), out1.carbonCopy(), in2.carbonCopy(), out2.carbonCopy(), extraIn.carbonCopy())
}
class NaclBidiFlow(ssrc: Int, serverId: RawSnowflake, userId: UserId)
extends GraphStage[
BidiShapeWithExtraIn[ByteString, ByteString, ByteString, AudioAPIMessage.ReceivedData, Option[ByteString]]
] {
val in1: Inlet[ByteString] = Inlet("NaclBidiFlow.in1")
val out1: Outlet[ByteString] = Outlet("NaclBidiFlow.out1")
val in2: Inlet[ByteString] = Inlet("NaclBidiFlow.in2")
val out2: Outlet[AudioAPIMessage.ReceivedData] = Outlet("NaclBidiFlow.out2")
val secretKeysIn: Inlet[Option[ByteString]] = Inlet("NaclBidiFlow.secretKeysIn")
override def shape
: BidiShapeWithExtraIn[ByteString, ByteString, ByteString, AudioAPIMessage.ReceivedData, Option[ByteString]] =
BidiShapeWithExtraIn(in1, out1, in2, out2, secretKeysIn)
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) {
private val nonceEncryptBuffer = ByteBuffer.allocate(24)
private val nonceDecryptBuffer = ByteBuffer.allocate(24)
var sequence: Short = 0
var timestamp = 0
var currentSecretKey: Option[ByteString] = None
private def consuming[A, B](in: Inlet[A], out: Outlet[B]) = new InHandler with OutHandler {
override def onPush(): Unit = grab(in)
override def onPull(): Unit = pull(in)
}
setHandlers(in1, out1, consuming(in1, out1))
setHandlers(in2, out2, consuming(in2, out2))
setHandler(
secretKeysIn,
new InHandler {
override def onPush(): Unit = {
val newKey = grab(secretKeysIn)
if (currentSecretKey != newKey) {
currentSecretKey = newKey
newKey match {
case Some(someKey) => gotNewSecretKey(someKey)
case None =>
setHandlers(in1, out1, consuming(in1, out1))
setHandlers(in2, out2, consuming(in2, out2))
}
}
}
}
)
override def preStart(): Unit =
pull(secretKeysIn)
def gotNewSecretKey(secretKey: ByteString): Unit = {
val secret = new TweetNaclFast.SecretBox(secretKey.toArray)
setHandlers(
in1,
out1,
new InHandler with OutHandler {
override def onPush(): Unit = {
val data = grab(in1)
val header = RTPHeader(sequence, timestamp, ssrc)
sequence = (sequence + 1).toShort
timestamp += VoiceUDPFlow.FrameSize
nonceEncryptBuffer.clear()
header.nonceToBuffer(nonceEncryptBuffer)
val encrypted = secret.box(data.toArray, nonceEncryptBuffer.array())
push(out1, header.byteString ++ ByteString.fromArrayUnsafe(encrypted))
}
override def onPull(): Unit = pull(in1)
}
)
setHandlers(
in2,
out2,
new InHandler with OutHandler {
override def onPush(): Unit = {
val data = grab(in2)
val (rtpHeader, voice) = RTPHeader.fromBytes(data)
if (voice.length >= 16 && rtpHeader.version != -55 && rtpHeader.version != -56) { //FIXME: These break stuff
rtpHeader.nonceToBuffer(nonceDecryptBuffer)
nonceDecryptBuffer.clear()
val decryptedData = secret.open(voice.toArray, nonceDecryptBuffer.array())
if (decryptedData != null) {
val byteStringDecrypted = ByteString(decryptedData)
push(out2, AudioAPIMessage.ReceivedData(byteStringDecrypted, rtpHeader, serverId, userId))
} else {
failStage(new Exception(s"Failed to decrypt voice data Header: $rtpHeader Received voice: $voice"))
}
}
}
override def onPull(): Unit = pull(in2)
}
)
}
}
}
object NaclBidiFlow {
def bidiFlow[Mat](
ssrc: Int,
serverId: RawSnowflake,
userId: UserId,
secretKeys: Source[Option[ByteString], Mat]
): BidiFlow[ByteString, ByteString, ByteString, AudioAPIMessage.ReceivedData, Mat] = {
val graph = GraphDSL.create(secretKeys) { implicit b => keys =>
import GraphDSL.Implicits._
val naclBidiFlow = b.add(new NaclBidiFlow(ssrc, serverId, userId))
keys ~> naclBidiFlow.extraIn
BidiShape(naclBidiFlow.in1, naclBidiFlow.out1, naclBidiFlow.in2, naclBidiFlow.out2)
}
BidiFlow.fromGraph(graph)
}
}
|
Katrix-/AckCord
|
voice/src/main/scala/ackcord/voice/NaclBidiFlow.scala
|
Scala
|
mit
| 6,634
|
package calctest.assignment
import scala.util.parsing.combinator._
object Spreadsheet {
  // Cell contents are sequences of these token kinds: Cell reference (C), Operator (O), or positive decimal Value (V).
object CCT extends Enumeration { type CCT = Value; val C, O, V = Value }
import CCT._
type CellExpr = List[(CCT.Value, String)]
object CellParsers extends JavaTokenParsers {
def cellref: Parser[String] = """[A-Z][1-9]\\d*""".r
def op: Parser[String] = "++" | "--" | "+" | "-" | "*" | "/"
def term: Parser[(CCT.Value, String)] =
floatingPointNumber ^^ { (V, _) } |
op ^^ { (O, _) } |
cellref ^^ { (C, _) }
def expr: Parser[CellExpr] = term ~ rep(term) ^^ { case a ~ list => List(a) ++ list }
}
import CellParsers._
def rowColNumbers(line: String): (Int, Int) = {
val Array(r, c) = line.split(' '); (r.toInt, c.toInt)
}
def readExpressions(lines: Array[String]): Array[CellExpr] = {
lines.map(parseAll(expr, _).get)
}
def cellOfIndex(i: Int, rowN: Int, colN: Int): String = {
val row = i / colN
val col = i % colN + 1
('A' + row).toChar.toString + col.toString
}
def indexOfCell(cell: String, rowN: Int, colN: Int): Int = {
val row:Int = cell.head - 'A'
val col:Int = cell.tail.toInt - 1
colN * row + col
}
def readDependencies(expressions: Array[CellExpr], rowN: Int, colN: Int): Map[String, List[String]] = {
expressions.zipWithIndex.map{ case (a, b) =>
cellOfIndex(b, rowN, colN) -> a.filter(_._1 == C).map(_._2)
}
.toMap
}
def allCells(cellMap: Map[String, List[String]]): Set[String] =
(cellMap.keys ++ cellMap.values.flatMap(identity)).toSet
def allCells(rowN: Int, colN: Int): Set[String] = {
(0 until rowN * colN).map(cellOfIndex(_, rowN, colN)).toSet
}
def revertDependencies(dMap: Map[String, List[String]]): Map[String, List[String]] = {
dMap.toList
.flatMap{ x => Array.fill(x._2.size)(x._1).zip(x._2) }
.map(_.swap)
.groupBy(_._1)
.mapValues(_.map(_._2))
}
def findProcessingOrder(dependencyMap: Map[String, List[String]], rowN: Int, colN: Int): Option[List[String]] = {
val reverseMap = revertDependencies(dependencyMap)
val nodependencyCells = reverseMap.keys.toSet -- dependencyMap.keys
var set = nodependencyCells
var list = List.empty[String]
var edges = reverseMap.toList.flatMap{ x => Array.fill(x._2.size)(x._1).zip(x._2) }.toSet
while (set.nonEmpty) {
val n = set.head
set = set - n
list = n :: list
reverseMap.get(n).foreach { l =>
l.map { m =>
edges = edges - ((n, m))
if (!edges.map(_._2).toSet(m)) set = set + m
}
}
}
if (edges.nonEmpty) None else Some(list.reverse)
}
def evaluate(expr: CellExpr, resolutions: Map[String, Double]): Double = {
var stack = List.empty[Double]
expr.foreach {
case (C, c) => stack = resolutions(c) :: stack
case (V, v) => stack = v.toDouble :: stack
case (O, o) => {
if (o == "++") stack = (stack.head + 1) :: stack.tail
else if (o == "--") stack = (stack.head - 1) :: stack.tail
else stack match {
case x :: y :: xs => if (o == "+") stack = (y + x) :: stack.drop(2)
else if (o == "-") stack = (y - x) :: stack.drop(2)
else if (o == "*") stack = (y * x) :: stack.drop(2)
else if (o == "/") stack = (if (x != 0) (y / x) else 0.0f) :: stack.drop(2)
case _ =>
}
}
}
assert (stack.size == 1)
stack.head
}
def main(args: Array[String]): Unit = {
val lines = io.Source.stdin.getLines.toArray
val (colN, rowN) = rowColNumbers(lines.head)
val expressions = readExpressions(lines.tail)
val dependencyMap = readDependencies(expressions, rowN, colN)
val processingOrder = findProcessingOrder(dependencyMap, rowN, colN)
if (processingOrder.isEmpty) {
println("Spreadsheet cells have cyclical dependency")
sys.exit(1)
}
val cells = allCells(rowN, colN)
val dependencyOrder = processingOrder.get
val nodependencyCells = cells -- dependencyOrder
val order = nodependencyCells.toList ++ dependencyOrder
var resolutions = Map[String, Double]()
order.foreach{ cell =>
val index = indexOfCell(cell, rowN, colN)
val expr = expressions(index)
val result = evaluate(expr, resolutions)
resolutions = resolutions + (cell -> result)
}
    val result = resolutions.toList.map{ case (k, v) => indexOfCell(k, rowN, colN) -> "%.5f".format(v) }.sortBy(_._1).map(_._2).mkString("\n")
println(result)
}
}
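// ----------------------------------------------------------------------------------
// Editor's note: `evaluate` above is a plain postfix (RPN) evaluator: resolved cell
// references and literal values are pushed onto a stack, and each operator pops its
// operands. The hypothetical example below evaluates the postfix expression
// "A2 3 *" (i.e. A2 * 3) with A2 already resolved to 2.0; it is illustrative only.
object SpreadsheetEvaluateExample {
  import Spreadsheet.CCT._

  def main(args: Array[String]): Unit = {
    val expr = List((C, "A2"), (V, "3"), (O, "*")) // postfix form of A2 * 3
    val resolved = Map("A2" -> 2.0)                // A2 was computed in an earlier step
    println(Spreadsheet.evaluate(expr, resolved))  // prints 6.0
  }
}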
|
shyamendra/spreadsheet
|
src/main/scala/calctest/assignment/Spreadsheet.scala
|
Scala
|
mit
| 4,742
|
package dbtarzan.db
import java.sql.Connection
import dbtarzan.config.connections.ConnectionData
trait ConnectionProvider {
def getConnection(data : ConnectionData) : Connection
}
|
aferrandi/dbtarzan
|
src/main/scala/dbtarzan/db/ConnectionProvider.scala
|
Scala
|
apache-2.0
| 185
|
package apsu.util
import scala.collection.mutable
class EnumRegistry[E <: EnumLike[E]] {
private val nameRegistry = mutable.OpenHashMap[String, E]()
def register(e: E): Unit = {
nameRegistry.get(e.name) match {
case Some(e2) => throw new IllegalArgumentException(s"Can't register $e; ${e.name} already exists: $e2")
case _ => nameRegistry(e.name) = e
}
}
def forName(name: String): Option[E] = nameRegistry.get(name)
def all: Iterable[E] = {
nameRegistry.values.toBuffer[E].sortWith((t1, t2) => t1.name < t2.name)
}
}
|
chronodm/apsu-util-scala
|
src/main/scala/apsu/util/EnumRegistry.scala
|
Scala
|
mit
| 561
|
package gapt.proofs.lk.transformations
import gapt.expr.formula.Bottom
import gapt.expr.formula.Formula
import gapt.expr.formula.Top
import gapt.proofs.lk.LKProof
import gapt.proofs.lk.rules.AndLeftRule
import gapt.proofs.lk.rules.AndRightRule
import gapt.proofs.lk.rules.BottomAxiom
import gapt.proofs.lk.rules.ContractionLeftRule
import gapt.proofs.lk.rules.ContractionRightRule
import gapt.proofs.lk.rules.CutRule
import gapt.proofs.lk.rules.EqualityLeftRule
import gapt.proofs.lk.rules.EqualityRightRule
import gapt.proofs.lk.rules.ExistsLeftRule
import gapt.proofs.lk.rules.ExistsRightRule
import gapt.proofs.lk.rules.ForallLeftRule
import gapt.proofs.lk.rules.ForallRightRule
import gapt.proofs.lk.rules.ImpLeftRule
import gapt.proofs.lk.rules.ImpRightRule
import gapt.proofs.lk.rules.LogicalAxiom
import gapt.proofs.lk.rules.NegLeftRule
import gapt.proofs.lk.rules.NegRightRule
import gapt.proofs.lk.rules.OrLeftRule
import gapt.proofs.lk.rules.OrRightRule
import gapt.proofs.lk.rules.ReflexivityAxiom
import gapt.proofs.lk.rules.TopAxiom
import gapt.proofs.lk.rules.WeakeningLeftRule
import gapt.proofs.lk.rules.WeakeningRightRule
import gapt.proofs.lk.rules.macros
import gapt.proofs.lk.rules.macros.AndLeftMacroRule
import gapt.proofs.lk.rules.macros.ContractionMacroRule
import gapt.proofs.lk.rules.macros.ImpRightMacroRule
import gapt.proofs.lk.rules.macros.OrRightMacroRule
import gapt.proofs.lk.rules.macros.WeakeningLeftMacroRule
object MG3iToLJ {
private def mkProjs( fs: List[Formula] ): ( Formula, Map[Formula, LKProof] ) =
fs match {
case Nil => ( Bottom(), Map.empty )
case f :: Nil =>
( f, Map( f -> LogicalAxiom( f ) ) )
case f :: fs_ =>
val ( d, ps ) = mkProjs( fs_ )
( d | f, Map( f -> OrRightMacroRule( LogicalAxiom( f ), d, f ) ) ++ ps.view.mapValues( OrRightMacroRule( _, d, f ) ).toMap )
}
def apply( proof: LKProof ): LKProof = proof.conclusion.succedent match {
case Seq() =>
val q = CutRule( apply( proof, Bottom(), Map.empty ), BottomAxiom, Bottom() )
require( q.conclusion.isSubsetOf( proof.conclusion ) )
q
case Seq( f ) =>
val q = apply( proof, f, Map( f -> LogicalAxiom( f ) ) )
require( q.conclusion.isSubsetOf( proof.conclusion ) )
q
case fs =>
val ( newSuc, projs ) = mkProjs( fs.toList )
val q = apply( proof, newSuc, projs )
require( q.conclusion.isSubsetOf( proof.conclusion.copy( succedent = Vector( newSuc ) ) ) )
q
}
def apply( proof: LKProof, goal: Formula, projections: Map[Formula, LKProof] ): LKProof = {
def withAddGoal( p: LKProof, addGoal: Formula, r: LKProof ): LKProof =
if ( !r.conclusion.antecedent.contains( addGoal ) ) r
else if ( p.conclusion.succedent.forall( _ == addGoal ) ) {
val q = apply( p, addGoal, Map( addGoal -> LogicalAxiom( addGoal ) ) )
val res = ContractionMacroRule( CutRule( q, r, addGoal ) )
if ( res.conclusion.succedent.isEmpty ) WeakeningRightRule( res, goal ) else res
} else {
val newGoal = goal | addGoal
val q = apply( p, newGoal, Map() ++
projections.view.mapValues( pr => CutRule( pr, OrRightMacroRule( LogicalAxiom( goal ), goal, addGoal ), goal ) ).toMap +
( addGoal -> OrRightMacroRule( LogicalAxiom( addGoal ), goal, addGoal ) ) )
ContractionMacroRule( CutRule( q, OrLeftRule( LogicalAxiom( goal ), r, newGoal ), newGoal ) )
}
def rightChain( relativeProjs: ( Formula, LKProof )* ): Map[Formula, LKProof] =
projections ++ relativeProjs.map {
case ( f, pr ) =>
val Seq( g ) = pr.conclusion.succedent
f -> ContractionMacroRule( CutRule( pr, projections( g ), g ) )
}
macros.ContractionMacroRule( proof match {
case LogicalAxiom( atom ) => projections( atom )
case proof @ ReflexivityAxiom( _ ) => CutRule( proof, projections( proof.mainFormula ), proof.mainFormula )
case ContractionLeftRule( p, _, _ ) => apply( p, goal, projections )
case ContractionRightRule( p, _, _ ) => apply( p, goal, projections )
case WeakeningRightRule( p, _ ) => apply( p, goal, projections )
case WeakeningLeftRule( p, _ ) => apply( p, goal, projections )
case proof @ CutRule( p1, _, p2, _ ) =>
val q2 = apply( p2, goal, projections )
if ( !q2.conclusion.antecedent.contains( proof.cutFormula ) ) q2
else withAddGoal( p1, proof.cutFormula, q2 )
case BottomAxiom => WeakeningRightRule( BottomAxiom, goal )
case TopAxiom => CutRule( TopAxiom, projections( Top() ), Top() )
case proof @ EqualityLeftRule( p, _, _, cx ) =>
val q = apply( p, goal, projections )
if ( !q.conclusion.antecedent.contains( proof.auxFormula ) ) q else
EqualityLeftRule( WeakeningLeftMacroRule( q, proof.equation ), proof.equation, proof.auxFormula, cx )
case proof @ EqualityRightRule( p, _, _, cx ) =>
apply( p, goal, projections + ( proof.auxFormula ->
EqualityLeftRule( WeakeningLeftRule( projections( proof.mainFormula ), proof.equation ), proof.equation, proof.mainFormula, cx ) ) )
case proof @ AndLeftRule( p, _, _ ) =>
val q = apply( p, goal, projections )
if ( q.conclusion.antecedent.contains( proof.leftConjunct ) || q.conclusion.antecedent.contains( proof.rightConjunct ) )
AndLeftMacroRule( q, proof.leftConjunct, proof.rightConjunct )
else q
case proof @ OrLeftRule( p1, _, p2, _ ) =>
val q1 = apply( p1, goal, projections )
if ( !q1.conclusion.antecedent.contains( proof.leftDisjunct ) ) q1 else {
val q2 = apply( p2, goal, projections )
if ( !q2.conclusion.antecedent.contains( proof.rightDisjunct ) ) q2 else
OrLeftRule( q1, proof.leftDisjunct, q2, proof.rightDisjunct )
}
case proof @ ImpLeftRule( p1, _, p2, _ ) =>
val q2 = apply( p2, goal, projections )
if ( !q2.conclusion.antecedent.contains( proof.impConclusion ) ) q2
else withAddGoal( p1, proof.impPremise, ImpLeftRule( LogicalAxiom( proof.impPremise ), proof.impPremise, q2, proof.impConclusion ) )
case proof @ NegLeftRule( p, _ ) =>
withAddGoal( p, proof.auxFormula, NegLeftRule( LogicalAxiom( proof.auxFormula ), proof.auxFormula ) )
case proof @ AndRightRule( p1, _, p2, _ ) =>
val q2 = apply( p2, goal, rightChain( proof.rightConjunct ->
AndRightRule( LogicalAxiom( proof.leftConjunct ), LogicalAxiom( proof.rightConjunct ), proof.mainFormula ) ) )
withAddGoal( p1, proof.leftConjunct, q2 )
case proof @ OrRightRule( p1, _, _ ) =>
apply( p1, goal, rightChain(
proof.leftDisjunct ->
OrRightMacroRule( LogicalAxiom( proof.leftDisjunct ), proof.leftDisjunct, proof.rightDisjunct ),
proof.rightDisjunct ->
OrRightMacroRule( LogicalAxiom( proof.rightDisjunct ), proof.leftDisjunct, proof.rightDisjunct ) ) )
case proof @ ExistsRightRule( p, _, _, _, _ ) =>
apply( p, goal, rightChain( proof.auxFormula ->
ExistsRightRule( LogicalAxiom( proof.auxFormula ), proof.mainFormula, proof.term ) ) )
case proof @ ExistsLeftRule( p, _, _, _ ) =>
val q = apply( p, goal, projections )
if ( !q.conclusion.antecedent.contains( proof.auxFormula ) ) q else
ExistsLeftRule( q, proof.mainFormula, proof.eigenVariable )
case proof @ ForallLeftRule( p, _, _, _, _ ) =>
val q = apply( p, goal, projections )
if ( !q.conclusion.antecedent.contains( proof.auxFormula ) ) q else
ForallLeftRule( q, proof.mainFormula, proof.term )
case proof @ NegRightRule( p, _ ) =>
require( p.conclusion.succedent.isEmpty )
val q = CutRule( apply( p, Bottom(), Map() ), BottomAxiom, Bottom() )
CutRule(
if ( !q.conclusion.antecedent.contains( proof.auxFormula ) ) q else NegRightRule( q, proof.auxFormula ),
projections( proof.mainFormula ), proof.mainFormula )
case proof @ ImpRightRule( p, _, _ ) =>
require( p.conclusion.succedent.size == 1 )
val q = apply( p, proof.impConclusion, Map( proof.impConclusion -> LogicalAxiom( proof.impConclusion ) ) )
CutRule( ImpRightMacroRule( q, proof.impPremise, proof.impConclusion ), projections( proof.mainFormula ), proof.mainFormula )
case proof @ ForallRightRule( p, _, _, _ ) =>
require( p.conclusion.succedent.size == 1 )
val q = apply( p, proof.auxFormula, Map( proof.auxFormula -> LogicalAxiom( proof.auxFormula ) ) )
CutRule( ForallRightRule( q, proof.mainFormula, proof.eigenVariable ), projections( proof.mainFormula ), proof.mainFormula )
} )
}
}
|
gapt/gapt
|
core/src/main/scala/gapt/proofs/lk/transformations/MG3iToLJ.scala
|
Scala
|
gpl-3.0
| 8,760
|
// scalac: -unchecked -Xfatal-warnings
//
// constructors used to drop outer fields when they were not accessed
// however, how can you know (respecting separate compilation) that they're not accessed!?
class Outer { final class Inner }
// the matches below require Inner's outer pointer
// until scala/bug#4440 is fixed properly, we can't make this a run test
// in principle, the output should be "a\nb", but without outer checks it's "b\na"
object Test extends App {
val a = new Outer
val b = new Outer
(new a.Inner: Any) match {
case _: b.Inner => println("b")
case _: a.Inner => println("a") // this is the case we want
}
(new b.Inner: Any) match {
case _: a.Inner => println("a")
case _: b.Inner => println("b") // this is the case we want
}
}
|
martijnhoekstra/scala
|
test/files/neg/t4440.scala
|
Scala
|
apache-2.0
| 779
|
package org.eso.ias.asce.transfer
import java.util.Optional
import org.eso.ias.asce.ComputingElement
import org.eso.ias.logging.IASLogger
import org.eso.ias.types.{IASTypes, Identifier, InOut}
import scala.util.Try
/**
* <code>ScalaTransfer</code> calls the scala
* transfer function provided by the user.
*
* Note that the Validity of the output is not set by the transfer function
* but automatically implemented by the ASCE
*/
trait ScalaTransfer[T] extends ComputingElement[T] {
/**
* The programming language of this TF
*/
val tfLanguage = TransferFunctionLanguage.scala
/** The logger */
private val logger = IASLogger.getLogger(this.getClass)
/**
* Runs the scala transfer function
*
* @param inputs The actual inputs
* @param id The identifier
* @param actualOutput the actual output
   * @return the new output computed by the transfer function, wrapped in a Try
*/
def transfer(
inputs: Map[String, InOut[_]],
id: Identifier,
actualOutput: InOut[T]): Try[InOut[T]] = {
val ins: Map[String, IasIO[_]] = inputs.mapValues( inout => new IasIO(inout))
val out: IasIO[T] = new IasIO(actualOutput)
Try(tfSetting.transferExecutor.get.asInstanceOf[ScalaTransferExecutor[T]].eval(ins,out).inOut)
}
/**
* Initialize the scala transfer function
*
* @param inputsInfo The IDs and types of the inputs
   * @param outputInfo The ID and type of the output
   * @param instance the index of the template instance, if the ASCE is templated
   * @return a Try signalling whether the initialization succeeded
*/
def initTransferFunction(
inputsInfo: Set[IasioInfo],
outputInfo: IasioInfo,
instance: Option[Int]): Try[Unit] = {
require(Option(inputsInfo).isDefined && inputsInfo.nonEmpty,"Invalid empty set of IDs of inputs")
    require(Option(outputInfo).isDefined,"Invalid empty ID of output")
    require(Option(instance).isDefined,"Unknown if it is templated or not")
    logger.debug("Initializing the transfer function with inputs {} and output {}",
inputsInfo.map(_.iasioId).mkString(","),outputInfo.iasioId)
instance.foreach(i => logger.debug("This TF is templated with index {}",i))
if(instance.isDefined) {
tfSetting.transferExecutor.get.setTemplateInstance(Optional.of(Int.box(instance.get)))
} else {
      tfSetting.transferExecutor.get.setTemplateInstance(Optional.empty())
}
Try(tfSetting.transferExecutor.get.asInstanceOf[ScalaTransferExecutor[T]].
initialize(inputsInfo, outputInfo))
}
}
|
IntegratedAlarmSystem-Group/ias
|
CompElement/src/main/scala/org/eso/ias/asce/transfer/ScalaTransfer.scala
|
Scala
|
lgpl-3.0
| 2,508
|
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package model
import model.CivilServantAndInternshipType.CivilServantAndInternshipType
import play.api.libs.json.Json
import reactivemongo.bson.Macros
case class CivilServiceExperienceDetails(
applicable: Boolean,
civilServantAndInternshipTypes: Option[Seq[CivilServantAndInternshipType]] = None,
edipYear: Option[String] = None,
sdipYear: Option[String] = None,
otherInternshipName: Option[String] = None,
otherInternshipYear: Option[String] = None,
fastPassReceived: Option[Boolean] = None,
fastPassAccepted: Option[Boolean] = None,
certificateNumber: Option[String] = None
) {
override def toString = {
s"applicable=$applicable," +
s"civilServantAndInternshipTypes=$civilServantAndInternshipTypes," +
s"edipYear=$edipYear," +
s"sdipYear=$sdipYear," +
s"otherInternshipName=$otherInternshipName," +
s"otherInternshipYear=$otherInternshipYear," +
s"fastPassReceived=$fastPassReceived," +
s"fastPassAccepted=$fastPassAccepted," +
s"certificateNumber=$certificateNumber"
}
}
object CivilServiceExperienceDetails {
implicit val civilServiceExperienceDetailsFormat = Json.format[CivilServiceExperienceDetails]
implicit val civilServiceExperienceDetailsHandler = Macros.handler[CivilServiceExperienceDetails]
}
|
hmrc/fset-faststream
|
app/model/CivilServiceExperienceDetails.scala
|
Scala
|
apache-2.0
| 1,893
|
package fivelangs.lambda
import fivelangs._
object ExtensibleLambda {
// In this file, I try to solve the Expression Problem.
// I don't expect it to work.
import ExtensibleLambdaParsing._
import molt.syntax.cfg.parsable._
import molt.syntax.cfg._
case class GlobalExpSpec(
expSpecMakers: List[(GlobalExpSpec => ExpSpec)]) {
lazy val expSpecs: Set[ExpSpec] = {
val expSpecQueue = collection.mutable.Set.empty[ExpSpec]
expSpecQueue ++= expSpecMakers.map(_.apply(this))
val allExpSpecs = collection.mutable.Set.empty[ExpSpec]
while(!expSpecQueue.isEmpty) {
val next = expSpecQueue.head
expSpecQueue -= next
if(!allExpSpecs(next)) {
allExpSpecs += next
expSpecQueue ++= next.dependencies
}
}
allExpSpecs.toSet
}
// this crazy thing is to work around the fact that
// constructors can't have dependent method types
sealed trait Exp {
val expSpec: ExpSpec
val exp: expSpec.E
override def toString: String = expSpec.toStringExp(exp)
}
def makeExp(thisExpSpec: ExpSpec)(thisExp: thisExpSpec.E) = new Exp {
val expSpec = thisExpSpec
// compiler couldn't tell that expSpec == thisExpSpec
// hopefully this will run ok
val exp = thisExp.asInstanceOf[expSpec.E]
}
sealed trait Type {
val expSpec: ExpSpec
val typ: expSpec.T
override def toString: String = expSpec.toStringType(typ)
}
def makeType(thisExpSpec: ExpSpec)(thisType: thisExpSpec.T) = new Type {
val expSpec = thisExpSpec
val typ = thisType.asInstanceOf[expSpec.T]
}
type TypeCheck = Either[TypeError, Type]
type Environment = Map[String, Type]
def isValue(e: Exp): Boolean = e.expSpec.isValue(e.exp)
def freeVars(e: Exp): Set[String] = e.expSpec.freeVars(e.exp)
def isClosed(t: Exp): Boolean = freeVars(t).isEmpty
// the unfortunate casts WILL be accurate because of
// the appropriate things being equal
def substitute(sub: Exp, name: String, target: Exp): Exp = {
target.expSpec.substitute(
sub.asInstanceOf[target.expSpec.g.Exp],
name,
target.exp).asInstanceOf[Exp]
}
def typeof(t: Exp): TypeCheck = typeWithEnv(Map.empty[String, Type], t)
def typeWithEnv(env: Environment, t: Exp): TypeCheck =
t.expSpec.typeWithEnv(
env.asInstanceOf[t.expSpec.g.Environment],
t.exp
).asInstanceOf[TypeCheck]
def step(t: Exp): Exp =
if(isValue(t)) t
else t.expSpec.step(t.exp).asInstanceOf[Exp]
def smallStepEval(t: Exp): Either[TypeError, Exp] = for {
_ <- typeof(t).right
} yield {
var term = t
var nextTerm = step(t)
while(term != nextTerm) {
term = nextTerm
nextTerm = step(nextTerm)
}
term
}
val expParser: CFGParsable[Exp] = makeExpParser(this)
val typeParser: CFGParsable[Type] = makeTypeParser(this)
}
abstract class ExpSpec(val g: GlobalExpSpec) {
val dependencies: List[ExpSpec] = Nil
type E // the type of expressions
type T // the type of the new types (if any; otherwise g.Type)
// convenience method
final def makeExp(e: E): g.Exp = g.makeExp(this)(e)
final def makeType(t: T): g.Type = g.makeType(this)(t)
def isValue(e: E): Boolean
def freeVars(e: E): Set[String]
def substitute(sub: g.Exp, name: String, target: E): g.Exp
def typeWithEnv(env: g.Environment, t: E): g.TypeCheck
def step(t: E): g.Exp
def toStringExp(e: E): String
def toStringType(t: T): String
val expParser: CFGParsable[E]
val typeParser: Option[CFGParsable[T]]
}
case class VarSpec(override val g: GlobalExpSpec) extends ExpSpec(g) {
case class Var(x: String)
type E = Var
type T = g.Type
def isValue(e: E): Boolean = false
def freeVars(e: E): Set[String] = e match {
case Var(name) => Set(name)
}
def substitute(sub: g.Exp, name: String, target: E): g.Exp = target match {
case Var(y) if y == name => sub
case v@Var(_) => g.makeExp(this)(v)
}
def typeWithEnv(env: g.Environment, t: E): g.TypeCheck = t match {
case Var(x) => env.get(x) match {
case Some(typ) => Right(typ)
case None => Left(TypeError(s"free variable $x"))
}
}
def step(t: E): g.Exp = ???
override def toStringExp(e: E): String = e match { case Var(x) => x }
def toStringType(t: T): String = t.toString
val expParser: CFGParsable[E] = makeVarExpParser(this)
val typeParser: Option[CFGParsable[T]] = None
}
case class FuncSpec(override val g: GlobalExpSpec) extends ExpSpec(g) {
private[this] val innerVarSpec = VarSpec(g)
override val dependencies = List(innerVarSpec)
sealed trait FuncTerm
case class Lam(param: String, paramType: g.Type, body: g.Exp) extends FuncTerm
case class App(t1: g.Exp, t2: g.Exp) extends FuncTerm
case class TFunc(a: g.Type, b: g.Type)
type E = FuncTerm
type T = TFunc
def isValue(e: E): Boolean = e match {
case Lam(_, _, _) => true
case App(_, _) => false
}
def freeVars(e: E): Set[String] = e match {
case Lam(param, typ, body) => g.freeVars(body) - param
case App(t1, t2) => g.freeVars(t1) ++ g.freeVars(t2)
}
private[this] def alpha(prohib: Set[String], lam: Lam): Lam = lam match {
case Lam(param, typ, t) if(prohib(param)) =>
val newVar = freshVar(prohib)
Lam(newVar, typ,
g.substitute(g.makeExp(innerVarSpec)(innerVarSpec.Var(newVar)), param, t))
case x => x
}
def substitute(sub: g.Exp, name: String, target: E): g.Exp = {
def doSub(x: g.Exp) = g.substitute(sub, name, x)
target match {
case l@Lam(_, _, _) =>
val Lam(newP, typ, newT) = alpha(g.freeVars(sub) ++ freeVars(l), l)
makeExp(Lam(newP, typ, doSub(newT)))
case App(t1, t2) => g.makeExp(this)(App(doSub(t1), doSub(t2)))
}
}
def typeWithEnv(env: g.Environment, t: E): g.TypeCheck = t match {
case Lam(p, typ, body) => for {
returnType <- g.typeWithEnv(env + (p -> typ), body).right
} yield makeType(TFunc(typ, returnType))
case App(t1, t2) => for {
apperType <- g.typeWithEnv(env, t1).right
appeeType <- g.typeWithEnv(env, t2).right
resultType <- (apperType.typ match {
case TFunc(ante, consq) if ante.typ == appeeType.typ => Right(consq)
case x => Left(TypeError(s"tried to apply type $x to $appeeType"))
}).right
} yield resultType
}
def step(t: E): g.Exp = t match {
case App(t1, t2) if !g.isValue(t1) => makeExp(App(g.step(t1), t2))
case App(v1, t2) if !g.isValue(t2) => makeExp(App(v1, g.step(t2)))
case App(v1, v2) => v1.exp match {
case Lam(p, _, t) => g.substitute(v2, p, t)
}
}
override def toStringExp(e: E): String = e match {
case Lam(p, typ, body) => s"(\\\\$p: $typ. $body)"
case App(t1, t2) => s"($t1 $t2)"
}
def toStringType(t: T): String = t match {
      case TFunc(t1, t2) => s"$t1 -> $t2"
}
val expParser: CFGParsable[E] = makeFuncExpParser(this)
val typeParser: Some[CFGParsable[T]] = Some(makeFuncTypeParser(this))
}
case class BoolSpec(override val g: GlobalExpSpec) extends ExpSpec(g) {
sealed trait BoolExp
case class BoolLiteral(b: Boolean) extends BoolExp
case class And(a: g.Exp, b: g.Exp) extends BoolExp
case class Or(a: g.Exp, b: g.Exp) extends BoolExp
case class Not(t: g.Exp) extends BoolExp
object TBool
type E = BoolExp
type T = TBool.type
def isValue(e: E): Boolean = e match {
case BoolLiteral(_) => true
case _ => false
}
def freeVars(e: E): Set[String] = e match {
case BoolLiteral(_) => Set.empty[String]
case And(a, b) => g.freeVars(a) ++ g.freeVars(b)
case Or(a, b) => g.freeVars(a) ++ g.freeVars(b)
case Not(a) => g.freeVars(a)
}
def substitute(sub: g.Exp, name: String, target: E): g.Exp = {
def doSub(x: g.Exp) = g.substitute(sub, name, x)
target match {
case b@BoolLiteral(_) => makeExp(b)
case And(a, b) => makeExp(And(doSub(a), doSub(b)))
case Or(a, b) => makeExp(Or(doSub(a), doSub(b)))
case Not(a) => makeExp(Not(doSub(a)))
}
}
def typeWithEnv(env: g.Environment, t: E): g.TypeCheck = t match {
case b@BoolLiteral(_) => Right(makeType(TBool))
case And(l, r) => for {
leftType <- g.typeWithEnv(env, l).right
rightType <- g.typeWithEnv(env, r).right
result <- (if(leftType.typ == TBool && rightType.typ == TBool) Right(makeType(TBool))
else Left(TypeError(s"tried to && terms of type $leftType and $rightType"))).right
} yield result
case Or(l, r) => for {
leftType <- g.typeWithEnv(env, l).right
rightType <- g.typeWithEnv(env, r).right
result <- (if(leftType.typ == TBool && rightType.typ == TBool) Right(makeType(TBool))
else Left(TypeError(s"tried to || terms of type $leftType and $rightType"))).right
} yield result
case Not(t) => for {
innerType <- g.typeWithEnv(env, t).right
result <- (if(innerType.typ == TBool) Right(makeType(TBool))
else Left(TypeError(s"tried to ! term of type $innerType"))).right
} yield result
}
def step(t: E): g.Exp = makeExp(t match {
case And(t1, t2) if !g.isValue(t1) => And(g.step(t1), t2)
case And(v1, t2) if !g.isValue(t2) => And(v1, g.step(t2))
case And(v1, v2) => (v1.exp, v2.exp) match {
case (BoolLiteral(b1), BoolLiteral(b2)) => BoolLiteral(b1 && b2)
}
case Or(t1, t2) if !g.isValue(t1) => Or(g.step(t1), t2)
case Or(v1, t2) if !g.isValue(t2) => Or(v1, g.step(t2))
case Or(v1, v2) => (v1.exp, v2.exp) match {
case (BoolLiteral(b1), BoolLiteral(b2)) => BoolLiteral(b1 || b2)
}
case Not(t) if !g.isValue(t) => Not(g.step(t))
case Not(v) => v.exp match {
case BoolLiteral(b) => BoolLiteral(!b)
}
})
def toStringExp(e: E): String = e match {
case BoolLiteral(false) => "False"
case BoolLiteral(true) => "True"
case And(a, b) => s"$a && $b"
case Or(a, b) => s"$a || $b"
case Not(t) => s"!$t"
}
def toStringType(t: T): String = "Bool"
val expParser: CFGParsable[E] = makeBoolExpParser(this)
val typeParser: Option[CFGParsable[T]] = Some(makeBoolTypeParser(this))
}
case class CondSpec(override val g: GlobalExpSpec) extends ExpSpec(g) {
private[this] val innerBoolSpec = BoolSpec(g)
override val dependencies = List(innerBoolSpec)
case class Cond(cond: g.Exp, body: g.Exp, otherwise: g.Exp)
type E = Cond
type T = g.Type
def isValue(e: E): Boolean = false
def freeVars(e: E): Set[String] = e match {
case Cond(c, b, ow) => g.freeVars(c) ++ g.freeVars(b) ++ g.freeVars(ow)
}
def substitute(sub: g.Exp, name: String, target: E): g.Exp = {
def doSub(x: g.Exp) = g.substitute(sub, name, x)
target match { case Cond(c, b, ow) =>
makeExp(Cond(doSub(c), doSub(b), doSub(ow)))
}
}
def typeWithEnv(env: g.Environment, t: E): g.TypeCheck = t match {
case Cond(cond, body, ow) => for {
condType <- g.typeWithEnv(env, cond).right
_ <- (if(condType.typ == innerBoolSpec.TBool) Right(condType)
else Left(TypeError(s"condition $cond not of type Bool"))).right
bodyType <- g.typeWithEnv(env, body).right
elseType <- g.typeWithEnv(env, ow).right
resultType <- (if(bodyType == elseType) Right(bodyType)
else Left(TypeError(s"mismatched types in if-else: $bodyType and $elseType"))).right
} yield resultType
}
def step(t: E): g.Exp = t match {
case Cond(cond, body, ow) if !g.isValue(cond) =>
makeExp(Cond(g.step(cond), body, ow))
case Cond(v, body, ow) => v.exp match {
case innerBoolSpec.BoolLiteral(true) => body
case innerBoolSpec.BoolLiteral(false) => ow
}
}
def toStringExp(e: E): String = e match {
case Cond(c, b, ow) => s"if $c then $b else $ow"
}
def toStringType(t: T): String = t.toString
val expParser: CFGParsable[E] = makeCondExpParser(this)
val typeParser: Option[CFGParsable[T]] = None
}
case class UnitSpec(override val g: GlobalExpSpec) extends ExpSpec(g) {
case object Unit
case object TUnit
type E = Unit.type
type T = TUnit.type
def isValue(e: E): Boolean = true
def freeVars(e: E): Set[String] = Set.empty[String]
def substitute(sub: g.Exp, name: String, target: E): g.Exp = makeExp(Unit)
def typeWithEnv(env: g.Environment, t: E): g.TypeCheck = Right(makeType(TUnit))
def step(t: E): g.Exp = ???
def toStringExp(e: E): String = "()"
def toStringType(t: T): String = "Unit"
val expParser: CFGParsable[E] = makeUnitExpParser(this)
val typeParser: Some[CFGParsable[T]] = Some(makeUnitTypeParser(this))
}
case class ProdSpec(override val g: GlobalExpSpec) extends ExpSpec(g) {
sealed trait ProdExp
case class Pair(t1: g.Exp, t2: g.Exp) extends ProdExp
case class Pi1(t: g.Exp) extends ProdExp
case class Pi2(t: g.Exp) extends ProdExp
case class TProd(l: g.Type, r: g.Type)
type E = ProdExp
type T = TProd
def isValue(e: E): Boolean = e match {
case Pair(v1, v2) => g.isValue(v1) && g.isValue(v2)
case _ => false
}
def freeVars(e: E): Set[String] = e match {
case Pair(t1, t2) => g.freeVars(t1) ++ g.freeVars(t2)
case Pi1(t) => g.freeVars(t)
case Pi2(t) => g.freeVars(t)
}
def substitute(sub: g.Exp, name: String, target: E): g.Exp = {
def doSub(x: g.Exp) = g.substitute(sub, name, x)
target match {
case Pair(t1, t2) => makeExp(Pair(doSub(t1), doSub(t2)))
case Pi1(t) => makeExp(Pi1(doSub(t)))
case Pi2(t) => makeExp(Pi2(doSub(t)))
}
}
def typeWithEnv(env: g.Environment, t: E): g.TypeCheck = t match {
case Pair(t1, t2) => for {
leftType <- g.typeWithEnv(env, t1).right
rightType <- g.typeWithEnv(env, t2).right
} yield makeType(TProd(leftType, rightType))
case Pi1(t) => for {
innerType <- g.typeWithEnv(env, t).right
resultType <- (innerType.typ match {
case TProd(l, r) => Right(l)
case x => Left(TypeError(s"projection π1 operating on type $x"))
}).right
} yield resultType
case Pi2(t) => for {
innerType <- g.typeWithEnv(env, t).right
resultType <- (innerType.typ match {
case TProd(l, r) => Right(r)
case x => Left(TypeError(s"projection π2 operating on type $x"))
}).right
} yield resultType
}
def step(t: E): g.Exp = t match {
case Pair(t1, t2) if !g.isValue(t1) => makeExp(Pair(g.step(t1), t2))
case Pair(v1, t2) if !g.isValue(t2) => makeExp(Pair(v1, g.step(t2)))
case Pi1(t) if !g.isValue(t) => makeExp(Pi1(g.step(t)))
case Pi1(v) => v.exp match { case Pair(v1, v2) => v1 }
case Pi2(t) if !g.isValue(t) => makeExp(Pi2(g.step(t)))
case Pi2(v) => v.exp match { case Pair(v1, v2) => v2 }
}
def toStringExp(e: E): String = e match {
case Pair(t1, t2) => s"($t1, $t2)"
case Pi1(t) => s"π1 $t"
case Pi2(t) => s"π2 $t"
}
def toStringType(t: T): String = t match {
      case TProd(t1, t2) => s"$t1 x $t2"
}
val expParser: CFGParsable[E] = makeProdExpParser(this)
val typeParser: Option[CFGParsable[T]] = Some(makeProdTypeParser(this))
}
case class CoprodSpec(override val g: GlobalExpSpec) extends ExpSpec(g) {
private[this] val innerVarSpec = VarSpec(g)
override val dependencies = List(innerVarSpec)
sealed trait CoprodExp
case class Inl(t: g.Exp, rType: g.Type) extends CoprodExp
case class Inr(t: g.Exp, lType: g.Type) extends CoprodExp
case class Case(t: g.Exp, lName: String, lBody: g.Exp,
rName: String, rBody: g.Exp) extends CoprodExp
case class TCoprod(t1: g.Type, t2: g.Type)
type E = CoprodExp
type T = TCoprod
def isValue(e: E): Boolean = e match {
case Inl(v, _) => g.isValue(v)
case Inr(v, _) => g.isValue(v)
case _ => false
}
def freeVars(e: E): Set[String] = e match {
case Inl(t, _) => g.freeVars(t)
case Inr(t, _) => g.freeVars(t)
case Case(t, lName, lBody, rName, rBody) =>
g.freeVars(t) ++ (g.freeVars(lBody) - lName) ++ (g.freeVars(rBody) - rName)
}
private[this] def alpha(prohib: Set[String], cas: Case): Case = cas match {
case Case(t, lName, lBody, rName, rBody) =>
val newName = freshVar(prohib)
val (newLName, newLBody) =
if(prohib(lName)) {
(newName,
g.substitute(
innerVarSpec.makeExp(innerVarSpec.Var(newName)).asInstanceOf[g.Exp],
lName,
lBody))
} else {
(lName, lBody)
}
val (newRName, newRBody) =
if(prohib(rName)) {
(newName,
g.substitute(
innerVarSpec.makeExp(innerVarSpec.Var(newName)).asInstanceOf[g.Exp],
rName,
rBody))
} else {
(rName, rBody)
}
Case(t, newLName, newLBody, newRName, newRBody)
}
def substitute(sub: g.Exp, name: String, target: E): g.Exp = {
def doSub(x: g.Exp) = g.substitute(sub, name, x)
target match {
case Inl(t, rType) => makeExp(Inl(doSub(t), rType))
case Inr(t, lType) => makeExp(Inr(doSub(t), lType))
case c@Case(_, _, _, _, _) =>
val Case(t, lName, lBody, rName, rBody) =
alpha(g.freeVars(sub) ++ freeVars(c), c)
makeExp(Case(t, lName, doSub(lBody), rName, doSub(rBody)))
}
}
def typeWithEnv(env: g.Environment, t: E): g.TypeCheck = t match {
case Inl(t, rType) => for {
lType <- g.typeWithEnv(env, t).right
} yield makeType(TCoprod(lType, rType))
case Inr(t, lType) => for {
rType <- g.typeWithEnv(env, t).right
} yield makeType(TCoprod(lType, rType))
case Case(t, lName, lBody, rName, rBody) => for {
termType <- g.typeWithEnv(env, t).right
typePair <- (termType.typ match {
case TCoprod(lType, rType) => Right((lType, rType))
case x => Left(TypeError(s"cannot take case of type $x"))
}).right
lBodyType <- g.typeWithEnv(env + (lName -> typePair._1), lBody).right
rBodyType <- g.typeWithEnv(env + (rName -> typePair._2), rBody).right
_ <- (if(lBodyType == rBodyType) Right(lBodyType)
else Left(TypeError(
s"cases (${lBodyType.typ} and ${rBodyType.typ}) do not match"))).right
} yield lBodyType
}
def step(e: E): g.Exp = e match {
case Inl(t, rType) if !g.isValue(t) => makeExp(Inl(g.step(t), rType))
case Inr(t, lType) if !g.isValue(t) => makeExp(Inr(g.step(t), lType))
case Case(t, lName, lBody, rName, rBody) if !g.isValue(t) =>
makeExp(Case(g.step(t), lName, lBody, rName, rBody))
case Case(v, lName, lBody, rName, rBody) => v.exp match {
case Inl(t, _) => g.substitute(t, lName, lBody)
case Inr(t, _) => g.substitute(t, rName, rBody)
}
}
def toStringExp(e: E): String = e match {
case Inl(t, rType) => s"inl $t: (_ + $rType)"
case Inr(t, lType) => s"inr $t: ($lType + _)"
case Case(t, lName, lBody, rName, rBody) =>
s"case $t of (inl $lName => $lBody) (inr $rName => $rBody)"
}
def toStringType(t: T): String = t match {
      case TCoprod(t1, t2) => s"$t1 + $t2"
}
val expParser: CFGParsable[E] = makeCoprodExpParser(this)
val typeParser: Option[CFGParsable[T]] = Some(makeCoprodTypeParser(this))
}
case class IntSpec(override val g: GlobalExpSpec) extends ExpSpec(g) {
sealed trait IntExp
case class IntLiteral(n: Int) extends IntExp
case class Plus(a: g.Exp, b: g.Exp) extends IntExp
case class Minus(a: g.Exp, b: g.Exp) extends IntExp
case class Times(a: g.Exp, b: g.Exp) extends IntExp
case class Div(a: g.Exp, b: g.Exp) extends IntExp
case object TInt
type E = IntExp
type T = TInt.type
def isValue(e: E): Boolean = e match {
case IntLiteral(_) => true
case _ => false
}
def freeVars(e: E): Set[String] = e match {
case Plus(a, b) => g.freeVars(a) ++ g.freeVars(b)
case Minus(a, b) => g.freeVars(a) ++ g.freeVars(b)
case Times(a, b) => g.freeVars(a) ++ g.freeVars(b)
case Div(a, b) => g.freeVars(a) ++ g.freeVars(b)
case _ => Set.empty[String]
}
def substitute(sub: g.Exp, name: String, target: E): g.Exp = {
def doSub(x: g.Exp) = g.substitute(sub, name, x)
target match {
case Plus(a, b) => makeExp(Plus(doSub(a), doSub(b)))
case Minus(a, b) => makeExp(Minus(doSub(a), doSub(b)))
case Times(a, b) => makeExp(Times(doSub(a), doSub(b)))
case Div(a, b) => makeExp(Div(doSub(a), doSub(b)))
case i@IntLiteral(_) => makeExp(i)
}
}
def typeWithEnv(env: g.Environment, t: E): g.TypeCheck = {
def opType(a: g.Exp, b: g.Exp, s: String): g.TypeCheck = for {
aType <- g.typeWithEnv(env, a).right
bType <- g.typeWithEnv(env, b).right
result <- (if(this == aType.expSpec && this == bType.expSpec) {
Right(makeType(TInt))
} else {
Left(TypeError(s"cannot $s ${aType.typ} and ${bType.typ}"))
}).right
} yield result
t match {
case IntLiteral(_) => Right(makeType(TInt))
case Plus(a, b) => opType(a, b, "add")
case Minus(a, b) => opType(a, b, "subtract")
case Times(a, b) => opType(a, b, "multiply")
case Div(a, b) => opType(a, b, "divide")
}
}
def step(t: E): g.Exp = t match {
case Plus(t1, t2) if !g.isValue(t1) => makeExp(Plus(g.step(t1), t2))
case Plus(v1, t2) if !g.isValue(t2) => makeExp(Plus(v1, g.step(t2)))
case Plus(v1, v2) => (v1.exp, v2.exp) match {
case (IntLiteral(i), IntLiteral(j)) => makeExp(IntLiteral(i + j))
}
case Minus(t1, t2) if !g.isValue(t1) => makeExp(Minus(g.step(t1), t2))
case Minus(v1, t2) if !g.isValue(t2) => makeExp(Minus(v1, g.step(t2)))
case Minus(v1, v2) => (v1.exp, v2.exp) match {
case (IntLiteral(i), IntLiteral(j)) => makeExp(IntLiteral(i - j))
}
case Times(t1, t2) if !g.isValue(t1) => makeExp(Times(g.step(t1), t2))
case Times(v1, t2) if !g.isValue(t2) => makeExp(Times(v1, g.step(t2)))
case Times(v1, v2) => (v1.exp, v2.exp) match {
case (IntLiteral(i), IntLiteral(j)) => makeExp(IntLiteral(i * j))
}
case Div(t1, t2) if !g.isValue(t1) => makeExp(Div(g.step(t1), t2))
case Div(v1, t2) if !g.isValue(t2) => makeExp(Div(v1, g.step(t2)))
case Div(v1, v2) => (v1.exp, v2.exp) match {
case (IntLiteral(i), IntLiteral(j)) => makeExp(IntLiteral(i / j))
}
}
def toStringExp(e: E): String = e match {
case IntLiteral(i) => s"$i"
case Plus(a, b) => s"$a + $b"
case Minus(a, b) => s"$a - $b"
case Times(a, b) => s"$a * $b"
case Div(a, b) => s"$a / $b"
}
def toStringType(t: T): String = "Int"
val expParser: CFGParsable[E] = makeIntExpParser(this)
val typeParser: Option[CFGParsable[T]] = Some(makeIntTypeParser(this))
}
case class FixSpec(override val g: GlobalExpSpec) extends ExpSpec(g) {
val innerFuncSpec = FuncSpec(g)
override val dependencies: List[ExpSpec] = List(innerFuncSpec)
case class Fix(e: g.Exp)
type E = Fix
type T = g.Exp
def isValue(e: E): Boolean = false
def freeVars(e: E): Set[String] = e match {
case Fix(t) => g.freeVars(t)
}
def substitute(sub: g.Exp, name: String, target: E): g.Exp = target match {
case Fix(t) => makeExp(Fix(g.substitute(sub, name, t)))
}
def typeWithEnv(env: g.Environment, t: E): g.TypeCheck = t match {
case Fix(e) => for {
typ <- g.typeWithEnv(env, e).right
innerType <- (typ.typ match {
case innerFuncSpec.TFunc(t1, t2) if t1 == t2 => Right(t1)
case x => Left(TypeError(
"can only get fixed point of functions T1 -> T1; found type $x"))
}).right
} yield innerType.asInstanceOf[g.Type]
}
def step(t: E): g.Exp = t match {
case Fix(t1) if !g.isValue(t1) => makeExp(Fix(g.step(t1)))
case Fix(v1) => v1.exp match {
case innerFuncSpec.Lam(x, _, body) =>
g.substitute(makeExp(t), x, body.asInstanceOf[g.Exp])
}
}
def toStringExp(e: E): String = s"Fix $e"
def toStringType(t: T): String = t.toString
val expParser: CFGParsable[E] = ???
val typeParser: Option[CFGParsable[T]] = ???
}
}
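// ----------------------------------------------------------------------------------
// Editor's note: the point of GlobalExpSpec above is that language fragments are
// composed simply by listing their constructors, and transitive dependencies (e.g.
// FuncSpec pulling in VarSpec, CondSpec pulling in BoolSpec) are collected by the
// worklist loop in `expSpecs`. A minimal composition might look like the following;
// this is a sketch of the intended usage, not a tested program.
object ExtensibleLambdaSketch {
  import ExtensibleLambda._

  // a small language: variables, functions, booleans and conditionals
  val g: GlobalExpSpec = GlobalExpSpec(List(VarSpec(_), FuncSpec(_), BoolSpec(_), CondSpec(_)))

  // `expSpecs` also contains the specs pulled in as dependencies of the listed ones
  def specCount: Int = g.expSpecs.size
}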
|
julianmichael/5langs
|
src/main/scala/fivelangs/lambda/ExtensibleLambda.scala
|
Scala
|
mit
| 25,084
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.eagle.datastream.storm
import java.util
import org.apache.eagle.datastream.Collector
import org.apache.eagle.datastream.StormStreamExecutor
import org.apache.eagle.datastream.StormStreamExecutor3
import org.apache.eagle.datastream.Tuple2
import org.apache.eagle.datastream.Tuple3
import com.typesafe.config.Config
case class StormExecutorForAlertWrapper(delegate: StormStreamExecutor[Tuple2[String, util.SortedMap[AnyRef, AnyRef]]], streamName: String)
extends StormStreamExecutor3[String, String, util.SortedMap[Object, Object]]{
override def prepareConfig(config: Config): Unit = {
delegate.prepareConfig(config)
}
override def init: Unit = {
delegate.init
}
override def flatMap(input: Seq[AnyRef], collector: Collector[Tuple3[String, String, util.SortedMap[Object, Object]]]): Unit = {
delegate.flatMap(input, new Collector[Tuple2[String, util.SortedMap[AnyRef, AnyRef]]] {
override def collect(r: Tuple2[String, util.SortedMap[AnyRef, AnyRef]]): Unit = {
collector.collect(Tuple3(r.f0, streamName, r.f1))
}
})
}
}
|
qinzhaokun/incubator-eagle
|
eagle-core/eagle-data-process/eagle-stream-process-api/src/main/scala/org/apache/eagle/datastream/storm/StormExecutorForAlertWrapper.scala
|
Scala
|
apache-2.0
| 1,892
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming.sources
import javax.annotation.concurrent.GuardedBy
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.util.control.NonFatal
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization
import org.apache.spark.internal.Logging
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.encoders.RowEncoder
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.catalyst.plans.logical.{LeafNode, Statistics}
import org.apache.spark.sql.catalyst.plans.logical.statsEstimation.EstimationUtils
import org.apache.spark.sql.catalyst.streaming.InternalOutputModes.{Append, Complete, Update}
import org.apache.spark.sql.execution.streaming.{MemorySinkBase, Sink}
import org.apache.spark.sql.sources.v2.{CustomMetrics, DataSourceOptions, DataSourceV2, StreamingWriteSupportProvider}
import org.apache.spark.sql.sources.v2.writer._
import org.apache.spark.sql.sources.v2.writer.streaming.{StreamingDataWriterFactory, StreamingWriteSupport, SupportsCustomWriterMetrics}
import org.apache.spark.sql.streaming.OutputMode
import org.apache.spark.sql.types.StructType
/**
* A sink that stores the results in memory. This [[Sink]] is primarily intended for use in unit
* tests and does not provide durability.
*/
class MemorySinkV2 extends DataSourceV2 with StreamingWriteSupportProvider
with MemorySinkBase with Logging {
override def createStreamingWriteSupport(
queryId: String,
schema: StructType,
mode: OutputMode,
options: DataSourceOptions): StreamingWriteSupport = {
new MemoryStreamingWriteSupport(this, mode, schema)
}
private case class AddedData(batchId: Long, data: Array[Row])
  /** An ordered list of batches that have been written to this [[Sink]]. */
@GuardedBy("this")
private val batches = new ArrayBuffer[AddedData]()
/** Returns all rows that are stored in this [[Sink]]. */
def allData: Seq[Row] = synchronized {
batches.flatMap(_.data)
}
def latestBatchId: Option[Long] = synchronized {
batches.lastOption.map(_.batchId)
}
def latestBatchData: Seq[Row] = synchronized {
batches.lastOption.toSeq.flatten(_.data)
}
def dataSinceBatch(sinceBatchId: Long): Seq[Row] = synchronized {
batches.filter(_.batchId > sinceBatchId).flatMap(_.data)
}
def toDebugString: String = synchronized {
batches.map { case AddedData(batchId, data) =>
val dataStr = try data.mkString(" ") catch {
case NonFatal(e) => "[Error converting to string]"
}
s"$batchId: $dataStr"
    }.mkString("\n")
}
def write(batchId: Long, outputMode: OutputMode, newRows: Array[Row]): Unit = {
val notCommitted = synchronized {
latestBatchId.isEmpty || batchId > latestBatchId.get
}
if (notCommitted) {
logDebug(s"Committing batch $batchId to $this")
outputMode match {
case Append | Update =>
val rows = AddedData(batchId, newRows)
synchronized { batches += rows }
case Complete =>
val rows = AddedData(batchId, newRows)
synchronized {
batches.clear()
batches += rows
}
case _ =>
throw new IllegalArgumentException(
s"Output mode $outputMode is not supported by MemorySinkV2")
}
} else {
logDebug(s"Skipping already committed batch: $batchId")
}
}
def clear(): Unit = synchronized {
batches.clear()
}
def numRows: Int = synchronized {
batches.foldLeft(0)(_ + _.data.length)
}
override def toString(): String = "MemorySinkV2"
}
case class MemoryWriterCommitMessage(partition: Int, data: Seq[Row])
extends WriterCommitMessage {}
class MemoryV2CustomMetrics(sink: MemorySinkV2) extends CustomMetrics {
private implicit val formats = Serialization.formats(NoTypeHints)
override def json(): String = Serialization.write(Map("numRows" -> sink.numRows))
}
class MemoryStreamingWriteSupport(
val sink: MemorySinkV2, outputMode: OutputMode, schema: StructType)
extends StreamingWriteSupport with SupportsCustomWriterMetrics {
private val customMemoryV2Metrics = new MemoryV2CustomMetrics(sink)
override def createStreamingWriterFactory: MemoryWriterFactory = {
MemoryWriterFactory(outputMode, schema)
}
override def commit(epochId: Long, messages: Array[WriterCommitMessage]): Unit = {
val newRows = messages.flatMap {
case message: MemoryWriterCommitMessage => message.data
}
sink.write(epochId, outputMode, newRows)
}
override def abort(epochId: Long, messages: Array[WriterCommitMessage]): Unit = {
// Don't accept any of the new input.
}
override def getCustomMetrics: CustomMetrics = customMemoryV2Metrics
}
case class MemoryWriterFactory(outputMode: OutputMode, schema: StructType)
extends DataWriterFactory with StreamingDataWriterFactory {
override def createWriter(
partitionId: Int,
taskId: Long): DataWriter[InternalRow] = {
new MemoryDataWriter(partitionId, outputMode, schema)
}
override def createWriter(
partitionId: Int,
taskId: Long,
epochId: Long): DataWriter[InternalRow] = {
createWriter(partitionId, taskId)
}
}
class MemoryDataWriter(partition: Int, outputMode: OutputMode, schema: StructType)
extends DataWriter[InternalRow] with Logging {
private val data = mutable.Buffer[Row]()
private val encoder = RowEncoder(schema).resolveAndBind()
override def write(row: InternalRow): Unit = {
data.append(encoder.fromRow(row))
}
override def commit(): MemoryWriterCommitMessage = {
val msg = MemoryWriterCommitMessage(partition, data.clone())
data.clear()
msg
}
override def abort(): Unit = {}
}
/**
* Used to query the data that has been written into a [[MemorySinkV2]].
*/
case class MemoryPlanV2(sink: MemorySinkV2, override val output: Seq[Attribute]) extends LeafNode {
private val sizePerRow = EstimationUtils.getSizePerRow(output)
override def computeStats(): Statistics = Statistics(sizePerRow * sink.allData.size)
}
|
sahilTakiar/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/sources/memoryV2.scala
|
Scala
|
apache-2.0
| 6,968
|
import sigmastate.Values.Value
import sigmastate.lang.CheckingSigmaBuilder
package object sigmastate {
import CheckingSigmaBuilder._
  /** Represents a cost estimation computed by the JITC interpreter.
    * JITC costs use a 10x more accurate scale compared to block cost values.
* @see toBlockCost
*/
case class JitCost private[sigmastate] (private[sigmastate] val value: Int) extends AnyVal {
/** Adds two cost values. */
def + (y: JitCost): JitCost =
new JitCost(java7.compat.Math.addExact(value, y.value))
/** Multiplies this cost to the given integer. */
def * (n: Int): JitCost =
new JitCost(java7.compat.Math.multiplyExact(value, n))
/** Divides this cost by the given integer. */
def / (n: Int): JitCost =
new JitCost(value / n)
/** Return true if this value > y.value in the normal Int ordering. */
def > (y: JitCost): Boolean = value > y.value
/** Return true if this value >= y.value in the normal Int ordering. */
def >= (y: JitCost): Boolean = value >= y.value
/** Scales JitCost back to block cost value. This is inverse to JitCost.fromBlockCost. */
def toBlockCost: Int = value / 10
}
object JitCost {
/** Scales the given block cost to the JitCost scale. This is inverse to toBlockCost*/
def fromBlockCost(blockCost: Int): JitCost =
new JitCost(java7.compat.Math.multiplyExact(blockCost, 10))
}
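  /** Hedged usage sketch (editor addition, not part of the original file): demonstrates the 10x
    * scale relationship documented above; the values are illustrative. */
  object JitCostScaleSketch {
    def demo(): Unit = {
      val a = JitCost.fromBlockCost(2)   // internal value 20
      val b = JitCost.fromBlockCost(3)   // internal value 30
      assert((a + b).toBlockCost == 5)   // scaling is preserved under addition
      assert((a * 3).toBlockCost == 6)   // and under multiplication by an Int
      assert(b > a && b >= a)            // comparisons use the underlying Int ordering
    }
  }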
/**
* SInt addition
*/
def Plus[T <: SNumericType](left: Value[T], right: Value[T]): Value[T] =
mkPlus(left, right)
/**
* SInt subtraction
*/
def Minus[T <: SNumericType](left: Value[T], right: Value[T]): Value[T] =
mkMinus(left, right)
/**
* SInt multiplication
*/
def Multiply[T <: SNumericType](left: Value[T], right: Value[T]): Value[T] =
mkMultiply(left, right)
/**
* SInt division
*/
def Divide[T <: SNumericType](left: Value[T], right: Value[T]): Value[T] =
mkDivide(left, right)
/**
* SInt modulo
*/
def Modulo[T <: SNumericType](left: Value[T], right: Value[T]): Value[T] =
mkModulo(left, right)
def Min[T <: SNumericType](left: Value[T], right: Value[T]): Value[T] =
mkMin(left, right)
def Max[T <: SNumericType](left: Value[T], right: Value[T]): Value[T] =
mkMax(left, right)
def PlusModQ(left: Value[SBigInt.type], right: Value[SBigInt.type]): Value[SBigInt.type] =
mkPlusModQ(left, right)
def MinusModQ(left: Value[SBigInt.type], right: Value[SBigInt.type]): Value[SBigInt.type] =
mkMinusModQ(left, right)
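  /** Hedged sketch (editor addition, not part of the original file): the builders above construct
    * ErgoTree AST nodes rather than evaluating them; `Values.IntConstant` from this package is
    * assumed to be available here. */
  def arithmeticSketch(): Value[SInt.type] =
    // Builds the node Plus(IntConstant(1), IntConstant(2)); evaluation happens later in the interpreter.
    Plus(Values.IntConstant(1), Values.IntConstant(2))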
}
|
ScorexFoundation/sigmastate-interpreter
|
sigmastate/src/main/scala/sigmastate/sigmastate.scala
|
Scala
|
mit
| 2,547
|
package ohnosequences.stuff.test
import ohnosequences.stuff._
import scala.{Int}
import scala.Predef.String
import org.scalatest.FunSuite
class ScalaCategoryTests extends FunSuite {
val l: String -> Int = { x: String =>
x.length
}
val toStr: Int -> String = { x: Int =>
x.toString
}
// val idInt : Int -> Int = Scala.identity
val uh: (Int -> String) -> (String -> Int) =
Category
.homFunctor(Scala)
.at[Int × String, String × Int](l and l) // no good inference here
// Category.hom(Scala)(l and l)
test("Hom functor") {
assert { uh(toStr)("hola") === 1 }
}
test("composition and identity") {
assert {
Scala.composition(Scala.identity[String] and Scala.identity[String])(
"hola") === "hola"
}
assert {
Scala.composition(Scala.identity[Int] and toStr)(234243) === toStr(234243)
}
}
test("syntax") {
Product(ohnosequences.stuff.tuples) ⊢ {
val z0: String >=> String =
id[String] >=> id[String]
val z1: String >=> String =
id[String]
val u: (String × Int) >=> (Int × String) =
l × toStr
val _2and2: Int × String =
u("ab" and 2)
assert { z0("hola") === "hola" }
assert { z1("hola") === "hola" }
assert { left(_2and2) === 2 }
assert { right(_2and2) === "2" }
}
}
test("Distributive category") {
DistributiveCategory(ScalaDist) ⊢ {
val u0 =
l × (l + toStr)
val u1 =
u0 >=> expand >=> pack
val x: String × (String + Int) =
"hola" and inRight(2)
assert { (u0 at x) == (u1 at x) }
}
}
}
|
ohnosequences/stuff
|
src/test/scala/ScalaCategory.scala
|
Scala
|
agpl-3.0
| 1,650
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.containerpool.logging
import java.time.Instant
import akka.actor.ActorSystem
import org.apache.openwhisk.common.TransactionId
import org.apache.openwhisk.core.containerpool.Container
import org.apache.openwhisk.core.entity.{ActivationId, ActivationLogs, ExecutableWhiskAction, Identity, WhiskActivation}
import org.apache.openwhisk.core.database.UserContext
import scala.concurrent.Future
/**
* Docker log driver based LogStore impl. Uses docker log driver to emit container logs to an external store.
* Fetching logs from that external store is not provided in this trait. This SPI requires the
* ContainerArgs.extraArgs to be used to indicate where the logs are shipped.
* see https://docs.docker.com/config/containers/logging/configure/#configure-the-logging-driver-for-a-container
*
 * Fetching logs here is a NOOP, but extended versions can customize fetching, e.g. from ELK or Splunk.
*/
class LogDriverLogStore(actorSystem: ActorSystem) extends LogStore {
/** Indicate --log-driver and --log-opt flags via ContainerArgsConfig.extraArgs */
override def containerParameters = Map.empty
override val logCollectionOutOfBand: Boolean = true
def collectLogs(transid: TransactionId,
user: Identity,
activation: WhiskActivation,
container: Container,
action: ExecutableWhiskAction): Future[ActivationLogs] =
Future.successful(ActivationLogs()) //no logs collected when using docker log drivers (see DockerLogStore for json-file exception)
  /** No logs are exposed to the API/CLI when using only the LogDriverLogStore; use an extended version,
   * e.g. the SplunkLogStore, to expose logs from an external source. */
def fetchLogs(namespace: String,
activationId: ActivationId,
start: Option[Instant],
end: Option[Instant],
activationLogs: Option[ActivationLogs],
context: UserContext): Future[ActivationLogs] =
Future.successful(ActivationLogs(Vector("Logs are not available.")))
}
object LogDriverLogStoreProvider extends LogStoreProvider {
override def instance(actorSystem: ActorSystem) = new LogDriverLogStore(actorSystem)
}
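// Hedged sketch (editor addition, not part of the original file): a minimal extension of
// LogDriverLogStore that overrides fetchLogs, in the spirit of the ELK/Splunk note above. The body
// is a stub; a real store would query the external system that the docker log driver ships logs to.
class ExternalLogDriverLogStoreSketch(actorSystem: ActorSystem) extends LogDriverLogStore(actorSystem) {
  override def fetchLogs(namespace: String,
                         activationId: ActivationId,
                         start: Option[Instant],
                         end: Option[Instant],
                         activationLogs: Option[ActivationLogs],
                         context: UserContext): Future[ActivationLogs] =
    // Stub result; replace with a query against the external log store.
    Future.successful(ActivationLogs(Vector(s"stub: logs for $activationId would be fetched externally")))
}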
|
jeremiaswerner/openwhisk
|
common/scala/src/main/scala/org/apache/openwhisk/core/containerpool/logging/LogDriverLogStore.scala
|
Scala
|
apache-2.0
| 3,044
|
package scala.meta.testkit
import java.util.concurrent.CopyOnWriteArrayList
import java.util.concurrent.atomic.AtomicInteger
import scala.collection.GenIterable
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.meta._
import scala.util.control.NonFatal
object SyntaxAnalysis {
/** Run syntactic analysis on a corpus of [[CorpusFile]].
*
* @param corpus The corpus to run analysis on. Has type GenIterable
    *               to support both parallel and sequential collections.
* @param f Callback to analyse a single [[CorpusFile]].
* @tparam T The kind of analysis we want to collect.
* @return The aggregate sum of all analysis results.
*/
def run[T](corpus: GenIterable[CorpusFile])(
f: CorpusFile => List[T]
): mutable.Buffer[(CorpusFile, T)] = Phase.run("syntax analysis") {
val results = new CopyOnWriteArrayList[(CorpusFile, T)]
val counter = new AtomicInteger()
val errors = new AtomicInteger()
def analyze(file: CorpusFile): Unit = {
val n = counter.incrementAndGet()
if (n % 1000 == 0) {
println(n)
}
try {
f(file).foreach(t => results.add(file -> t))
} catch {
// TODO(olafur) investigate these scala.meta errors.
case _: org.scalameta.UnreachableError => // scala.meta error
case _: org.scalameta.invariants.InvariantFailedException => // scala.meta error
case _: java.nio.charset.MalformedInputException => // scala.meta error
case _: java.util.NoSuchElementException => // scala.meta error
case NonFatal(e) =>
// unexpected errors are printed in the console.
println(s"Unexpected error analysing file: $file")
println(s"Error: ${e.getClass.getName} $e")
val stack = e.getStackTrace.take(10) // print small stacktrace
stack.foreach(println)
val i = errors.incrementAndGet()
if (i > 10) {
throw new IllegalStateException(
"Too many unexpected errors (printed to console), fix your analysis.")
}
}
}
corpus.foreach(analyze)
results.asScala
}
def onParsed[A](corpus: GenIterable[CorpusFile])(
f: Source => List[A]): mutable.Buffer[(CorpusFile, A)] =
SyntaxAnalysis.run[A](corpus)(_.jFile.parse[Source] match {
case parsers.Parsed.Success(ast: Source) => f(ast)
case _ => Nil
})
}
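// Hedged usage sketch (editor addition, not part of the original file): a tiny analysis built on
// `onParsed` that collects every `var` definition in the corpus. `Defn.Var` and `collect` come from
// scala.meta, which is already imported above; the object name is illustrative.
object SyntaxAnalysisUsageSketch {
  def collectVars(corpus: GenIterable[CorpusFile]): mutable.Buffer[(CorpusFile, Defn.Var)] =
    SyntaxAnalysis.onParsed(corpus) { source =>
      // Each match becomes one (file, result) pair in the aggregated buffer.
      source.collect { case v: Defn.Var => v }
    }
}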
|
MasseGuillaume/scalameta
|
scalameta/testkit/src/main/scala/scala/meta/testkit/SyntaxAnalysis.scala
|
Scala
|
bsd-3-clause
| 2,558
|
package com.github.takezoe.solr.scala.async
import java.io.{ByteArrayOutputStream, InputStream}
/**
* @author steven
*
*/
class UpdatableInputStream extends InputStream {
private var finished = false
private val baos = new ByteArrayOutputStream()
def appendBytes(bytes: Array[Byte]) = this.synchronized {
baos.write(bytes)
this.notifyAll()
}
def finishedAppending() = this.synchronized {
finished = true
this.notifyAll()
}
private def dequeue(max: Int): Option[Array[Byte]] = this.synchronized {
while (baos.size() == 0 && !finished) this.wait()
if (baos.size() == 0 && finished)
None
else {
val bytes = baos.toByteArray
baos.reset()
if (bytes.length <= max)
Some(bytes)
else {
val ret = new Array[Byte](max)
java.lang.System.arraycopy(bytes, 0, ret, 0, max)
baos.write(bytes, max, bytes.length - max)
Some(ret)
}
}
}
    override def read(): Int = {
        val arr = new Array[Byte](1)
        // Propagate end-of-stream and return the byte as an unsigned value, per the InputStream contract.
        if (read(arr) == -1) -1 else arr(0) & 0xff
    }
override def read(b: Array[Byte], off: Int, len: Int): Int = {
if (b == null)
throw new NullPointerException
else if (off < 0 || len < 0 || len > b.length - off)
throw new IndexOutOfBoundsException
else if (len == 0)
0
else {
dequeue(len) match {
case None => -1
case Some(bytes) =>
java.lang.System.arraycopy(bytes, 0, b, off, bytes.length)
bytes.length
}
}
}
}
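// Hedged usage sketch (editor addition, not part of the original file): one thread appends bytes
// and then signals completion, while the reader blocks in read until data arrives and drains the
// stream until read returns -1.
object UpdatableInputStreamSketch {
    def demo(): String = {
        val in = new UpdatableInputStream
        new Thread(new Runnable {
            override def run(): Unit = {
                in.appendBytes("hello".getBytes("UTF-8"))
                in.finishedAppending()
            }
        }).start()
        val buf = new Array[Byte](16)
        val sb = new StringBuilder
        var n = in.read(buf)
        while (n != -1) {
            sb.append(new String(buf, 0, n, "UTF-8"))
            n = in.read(buf)
        }
        sb.toString // "hello"
    }
}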
|
takezoe/solr-scala-client
|
src/main/scala/com/github/takezoe/solr/scala/async/UpdatableInputStream.scala
|
Scala
|
apache-2.0
| 1,731
|
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.redis.data
import java.util.{Collections, Date}
import org.geotools.data.{DataStoreFinder, DataUtilities, Query, Transaction}
import org.geotools.filter.text.ecql.ECQL
import org.geotools.geometry.jts.ReferencedEnvelope
import org.junit.runner.RunWith
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.utils.collection.SelfClosingIterator
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes.Configs
import org.locationtech.geomesa.utils.geotools.{CRS_EPSG_4326, FeatureUtils, SimpleFeatureTypes}
import org.locationtech.geomesa.utils.io.WithClose
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class RedisDataStoreIntegrationTest extends Specification {
import scala.collection.JavaConverters._
sequential
val url = "redis://localhost:6379"
val sft = SimpleFeatureTypes.createImmutableType("test", "name:String:index=true,dtg:Date,*geom:Point:srid=4326")
val features = Seq.tabulate(10) { i =>
ScalaSimpleFeature.create(sft, i.toString, s"name$i", s"2019-01-03T0$i:00:00.000Z", s"POINT (-4$i 55)")
}
val filters = Seq(
"bbox(geom, -39, 54, -51, 56)",
"bbox(geom, -45, 54, -49, 56)",
"bbox(geom, -39, 54, -51, 56) AND dtg >= '2019-01-03T00:00:00.000Z' AND dtg < '2019-01-03T12:00:00.000Z'",
"bbox(geom, -45, 54, -49, 56) AND dtg >= '2019-01-03T00:00:00.000Z' AND dtg < '2019-01-03T12:00:00.000Z'",
"bbox(geom, -39, 54, -51, 56) AND dtg during 2019-01-03T04:30:00.000Z/2019-01-03T08:30:00.000Z",
s"name IN('${features.map(_.getAttribute("name")).mkString("', '")}')",
"name IN('name0', 'name2') AND dtg >= '2019-01-03T00:00:00.000Z' AND dtg < '2019-01-03T01:00:00.000Z'",
features.map(_.getID).mkString("IN('", "', '", "')")
).map(ECQL.toFilter)
val transforms = Seq(null, Array("dtg", "geom"), Array("name", "geom"))
val params = Map(
RedisDataStoreParams.RedisUrlParam.key -> url,
RedisDataStoreParams.RedisCatalogParam.key -> "gm-test",
RedisDataStoreParams.PipelineParam.key -> "false" // "true"
)
"RedisDataStore" should {
"read and write features" in {
skipped("Integration tests")
val ds = DataStoreFinder.getDataStore(params.asJava).asInstanceOf[RedisDataStore]
ds must not(beNull)
try {
ds.getSchema(sft.getTypeName) must beNull
ds.createSchema(sft)
ds.getSchema(sft.getTypeName) mustEqual sft
WithClose(ds.getFeatureWriterAppend(sft.getTypeName, Transaction.AUTO_COMMIT)) { writer =>
features.foreach(FeatureUtils.write(writer, _, useProvidedFid = true))
}
foreach(filters) { filter =>
val filtered = features.filter(filter.evaluate)
foreach(transforms) { transform =>
val query = new Query(sft.getTypeName, filter, transform)
val result = SelfClosingIterator(ds.getFeatureReader(query, Transaction.AUTO_COMMIT)).toList
val expected = if (transform == null) { filtered } else {
val tsft = DataUtilities.createSubType(sft, transform)
filtered.map(DataUtilities.reType(tsft, _)).map(ScalaSimpleFeature.copy)
}
result must containTheSameElementsAs(expected)
}
}
ds.stats.getCount(sft) must beSome(10L)
ds.stats.getBounds(sft) mustEqual new ReferencedEnvelope(-49, -40, 55, 55, CRS_EPSG_4326)
} finally {
ds.removeSchema(sft.getTypeName)
ds.dispose()
}
}
"expire features based on ingest time" in {
skipped("Integration tests")
RedisSystemProperties.AgeOffInterval.set("5 seconds")
val sft = SimpleFeatureTypes.immutable(this.sft, Collections.singletonMap(Configs.FeatureExpiration, "10 seconds"))
val ds = DataStoreFinder.getDataStore(params.asJava).asInstanceOf[RedisDataStore]
ds must not(beNull)
try {
ds.getSchema(sft.getTypeName) must beNull
ds.createSchema(sft)
ds.getSchema(sft.getTypeName) mustEqual sft
WithClose(ds.getFeatureWriterAppend(sft.getTypeName, Transaction.AUTO_COMMIT)) { writer =>
features.foreach(FeatureUtils.write(writer, _, useProvidedFid = true))
}
foreach(filters) { filter =>
val expected = features.filter(filter.evaluate)
val query = new Query(sft.getTypeName, filter)
val result = SelfClosingIterator(ds.getFeatureReader(query, Transaction.AUTO_COMMIT)).toList
result must containTheSameElementsAs(expected)
}
ds.stats.getCount(sft) must beSome(10L)
ds.stats.getBounds(sft) mustEqual new ReferencedEnvelope(-49, -40, 55, 55, CRS_EPSG_4326)
Thread.sleep(1000 * 20)
foreach(filters) { filter =>
val query = new Query(sft.getTypeName, filter)
val result = SelfClosingIterator(ds.getFeatureReader(query, Transaction.AUTO_COMMIT)).toList
result must beEmpty
}
ds.stats.getCount(sft) must beSome(0L)
} finally {
RedisSystemProperties.AgeOffInterval.clear()
ds.removeSchema(sft.getTypeName)
ds.dispose()
}
}
"expire features based on attribute time" in {
skipped("Integration tests")
RedisSystemProperties.AgeOffInterval.set("5 seconds")
// age off the first feature, since they are one hour apart
val time = System.currentTimeMillis() + 10000L - features.head.getAttribute("dtg").asInstanceOf[Date].getTime
val sft = SimpleFeatureTypes.immutable(this.sft, Collections.singletonMap(Configs.FeatureExpiration, s"dtg($time ms)"))
val ds = DataStoreFinder.getDataStore(params.asJava).asInstanceOf[RedisDataStore]
ds must not(beNull)
try {
ds.getSchema(sft.getTypeName) must beNull
ds.createSchema(sft)
ds.getSchema(sft.getTypeName) mustEqual sft
WithClose(ds.getFeatureWriterAppend(sft.getTypeName, Transaction.AUTO_COMMIT)) { writer =>
features.foreach(FeatureUtils.write(writer, _, useProvidedFid = true))
}
foreach(filters) { filter =>
val expected = features.filter(filter.evaluate)
val query = new Query(sft.getTypeName, filter)
val result = SelfClosingIterator(ds.getFeatureReader(query, Transaction.AUTO_COMMIT)).toList
result must containTheSameElementsAs(expected)
}
ds.stats.getCount(sft) must beSome(10L)
ds.stats.getBounds(sft) mustEqual new ReferencedEnvelope(-49, -40, 55, 55, CRS_EPSG_4326)
Thread.sleep(1000 * 20)
foreach(filters) { filter =>
val expected = features.drop(1).filter(filter.evaluate)
val query = new Query(sft.getTypeName, filter)
val result = SelfClosingIterator(ds.getFeatureReader(query, Transaction.AUTO_COMMIT)).toList
result must containTheSameElementsAs(expected)
}
ds.stats.getCount(sft) must beSome(9L)
} finally {
RedisSystemProperties.AgeOffInterval.clear()
ds.removeSchema(sft.getTypeName)
ds.dispose()
}
}
}
}
|
elahrvivaz/geomesa
|
geomesa-redis/geomesa-redis-datastore/src/test/scala/org/locationtech/geomesa/redis/data/RedisDataStoreIntegrationTest.scala
|
Scala
|
apache-2.0
| 7,621
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.nio.ByteBuffer
import java.util.Optional
import java.util.concurrent.atomic.AtomicInteger
import kafka.cluster.BrokerEndPoint
import kafka.log.LogAppendInfo
import kafka.message.NoCompressionCodec
import kafka.metrics.KafkaYammerMetrics
import kafka.server.AbstractFetcherThread.ReplicaFetch
import kafka.server.AbstractFetcherThread.ResultWithPartitions
import kafka.utils.Implicits.MapExtensionMethods
import kafka.utils.TestUtils
import org.apache.kafka.common.KafkaException
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.errors.{FencedLeaderEpochException, UnknownLeaderEpochException}
import org.apache.kafka.common.message.FetchResponseData
import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset
import org.apache.kafka.common.protocol.{ApiKeys, Errors}
import org.apache.kafka.common.record._
import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.{UNDEFINED_EPOCH, UNDEFINED_EPOCH_OFFSET}
import org.apache.kafka.common.requests.{FetchRequest, FetchResponse}
import org.apache.kafka.common.utils.Time
import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.{BeforeEach, Test}
import scala.jdk.CollectionConverters._
import scala.collection.{Map, Set, mutable}
import scala.util.Random
import scala.collection.mutable.ArrayBuffer
import scala.compat.java8.OptionConverters._
class AbstractFetcherThreadTest {
val truncateOnFetch = true
private val partition1 = new TopicPartition("topic1", 0)
private val partition2 = new TopicPartition("topic2", 0)
private val failedPartitions = new FailedPartitions
@BeforeEach
def cleanMetricRegistry(): Unit = {
TestUtils.clearYammerMetrics()
}
private def allMetricsNames: Set[String] = KafkaYammerMetrics.defaultRegistry().allMetrics().asScala.keySet.map(_.getName)
private def mkBatch(baseOffset: Long, leaderEpoch: Int, records: SimpleRecord*): RecordBatch = {
MemoryRecords.withRecords(baseOffset, CompressionType.NONE, leaderEpoch, records: _*)
.batches.asScala.head
}
private def initialFetchState(fetchOffset: Long, leaderEpoch: Int): InitialFetchState = {
InitialFetchState(leader = new BrokerEndPoint(0, "localhost", 9092),
initOffset = fetchOffset, currentLeaderEpoch = leaderEpoch)
}
@Test
def testMetricsRemovedOnShutdown(): Unit = {
val partition = new TopicPartition("topic", 0)
val fetcher = new MockFetcherThread
// add one partition to create the consumer lag metric
fetcher.setReplicaState(partition, MockFetcherThread.PartitionState(leaderEpoch = 0))
fetcher.addPartitions(Map(partition -> initialFetchState(0L, leaderEpoch = 0)))
fetcher.setLeaderState(partition, MockFetcherThread.PartitionState(leaderEpoch = 0))
fetcher.start()
val brokerTopicStatsMetrics = fetcher.brokerTopicStats.allTopicsStats.metricMap.keySet
val fetcherMetrics = Set(FetcherMetrics.BytesPerSec, FetcherMetrics.RequestsPerSec, FetcherMetrics.ConsumerLag)
// wait until all fetcher metrics are present
TestUtils.waitUntilTrue(() => allMetricsNames == brokerTopicStatsMetrics ++ fetcherMetrics,
"Failed waiting for all fetcher metrics to be registered")
fetcher.shutdown()
// verify that all the fetcher metrics are removed and only brokerTopicStats left
val metricNames = KafkaYammerMetrics.defaultRegistry().allMetrics().asScala.keySet.map(_.getName).toSet
assertTrue(metricNames.intersect(fetcherMetrics).isEmpty)
assertEquals(brokerTopicStatsMetrics, metricNames.intersect(brokerTopicStatsMetrics))
}
@Test
def testConsumerLagRemovedWithPartition(): Unit = {
val partition = new TopicPartition("topic", 0)
val fetcher = new MockFetcherThread
// add one partition to create the consumer lag metric
fetcher.setReplicaState(partition, MockFetcherThread.PartitionState(leaderEpoch = 0))
fetcher.addPartitions(Map(partition -> initialFetchState(0L, leaderEpoch = 0)))
fetcher.setLeaderState(partition, MockFetcherThread.PartitionState(leaderEpoch = 0))
fetcher.doWork()
assertTrue(allMetricsNames(FetcherMetrics.ConsumerLag),
"Failed waiting for consumer lag metric")
// remove the partition to simulate leader migration
fetcher.removePartitions(Set(partition))
// the lag metric should now be gone
assertFalse(allMetricsNames(FetcherMetrics.ConsumerLag))
}
@Test
def testSimpleFetch(): Unit = {
val partition = new TopicPartition("topic", 0)
val fetcher = new MockFetcherThread
fetcher.setReplicaState(partition, MockFetcherThread.PartitionState(leaderEpoch = 0))
fetcher.addPartitions(Map(partition -> initialFetchState(0L, leaderEpoch = 0)))
val batch = mkBatch(baseOffset = 0L, leaderEpoch = 0,
new SimpleRecord("a".getBytes), new SimpleRecord("b".getBytes))
val leaderState = MockFetcherThread.PartitionState(Seq(batch), leaderEpoch = 0, highWatermark = 2L)
fetcher.setLeaderState(partition, leaderState)
fetcher.doWork()
val replicaState = fetcher.replicaPartitionState(partition)
assertEquals(2L, replicaState.logEndOffset)
assertEquals(2L, replicaState.highWatermark)
}
@Test
def testFencedTruncation(): Unit = {
val partition = new TopicPartition("topic", 0)
val fetcher = new MockFetcherThread
fetcher.setReplicaState(partition, MockFetcherThread.PartitionState(leaderEpoch = 0))
fetcher.addPartitions(Map(partition -> initialFetchState(0L, leaderEpoch = 0)))
val batch = mkBatch(baseOffset = 0L, leaderEpoch = 1,
new SimpleRecord("a".getBytes), new SimpleRecord("b".getBytes))
val leaderState = MockFetcherThread.PartitionState(Seq(batch), leaderEpoch = 1, highWatermark = 2L)
fetcher.setLeaderState(partition, leaderState)
fetcher.doWork()
// No progress should be made
val replicaState = fetcher.replicaPartitionState(partition)
assertEquals(0L, replicaState.logEndOffset)
assertEquals(0L, replicaState.highWatermark)
// After fencing, the fetcher should remove the partition from tracking and mark as failed
assertTrue(fetcher.fetchState(partition).isEmpty)
assertTrue(failedPartitions.contains(partition))
}
@Test
def testFencedFetch(): Unit = {
val partition = new TopicPartition("topic", 0)
val fetcher = new MockFetcherThread
val replicaState = MockFetcherThread.PartitionState(leaderEpoch = 0)
fetcher.setReplicaState(partition, replicaState)
fetcher.addPartitions(Map(partition -> initialFetchState(0L, leaderEpoch = 0)))
val batch = mkBatch(baseOffset = 0L, leaderEpoch = 0,
new SimpleRecord("a".getBytes),
new SimpleRecord("b".getBytes))
val leaderState = MockFetcherThread.PartitionState(Seq(batch), leaderEpoch = 0, highWatermark = 2L)
fetcher.setLeaderState(partition, leaderState)
fetcher.doWork()
// Verify we have caught up
assertEquals(2, replicaState.logEndOffset)
// Bump the epoch on the leader
fetcher.leaderPartitionState(partition).leaderEpoch += 1
fetcher.doWork()
// After fencing, the fetcher should remove the partition from tracking and mark as failed
assertTrue(fetcher.fetchState(partition).isEmpty)
assertTrue(failedPartitions.contains(partition))
}
@Test
def testUnknownLeaderEpochInTruncation(): Unit = {
val partition = new TopicPartition("topic", 0)
val fetcher = new MockFetcherThread
// The replica's leader epoch is ahead of the leader
val replicaState = MockFetcherThread.PartitionState(leaderEpoch = 1)
fetcher.setReplicaState(partition, replicaState)
fetcher.addPartitions(Map(partition -> initialFetchState(0L, leaderEpoch = 1)), forceTruncation = true)
val batch = mkBatch(baseOffset = 0L, leaderEpoch = 0, new SimpleRecord("a".getBytes))
val leaderState = MockFetcherThread.PartitionState(Seq(batch), leaderEpoch = 0, highWatermark = 2L)
fetcher.setLeaderState(partition, leaderState)
fetcher.doWork()
    // No data has been fetched and the follower is still truncating
assertEquals(0, replicaState.logEndOffset)
assertEquals(Some(Truncating), fetcher.fetchState(partition).map(_.state))
// Bump the epoch on the leader
fetcher.leaderPartitionState(partition).leaderEpoch += 1
// Now we can make progress
fetcher.doWork()
assertEquals(1, replicaState.logEndOffset)
assertEquals(Some(Fetching), fetcher.fetchState(partition).map(_.state))
}
@Test
def testUnknownLeaderEpochWhileFetching(): Unit = {
val partition = new TopicPartition("topic", 0)
val fetcher = new MockFetcherThread
    // This test is contrived because it shouldn't be possible to see an unknown leader epoch
// in the Fetching state as the leader must validate the follower's epoch when it checks
// the truncation offset.
val replicaState = MockFetcherThread.PartitionState(leaderEpoch = 1)
fetcher.setReplicaState(partition, replicaState)
fetcher.addPartitions(Map(partition -> initialFetchState(0L, leaderEpoch = 1)))
val leaderState = MockFetcherThread.PartitionState(Seq(
mkBatch(baseOffset = 0L, leaderEpoch = 0, new SimpleRecord("a".getBytes)),
mkBatch(baseOffset = 1L, leaderEpoch = 0, new SimpleRecord("b".getBytes))
), leaderEpoch = 1, highWatermark = 2L)
fetcher.setLeaderState(partition, leaderState)
fetcher.doWork()
// We have fetched one batch and gotten out of the truncation phase
assertEquals(1, replicaState.logEndOffset)
assertEquals(Some(Fetching), fetcher.fetchState(partition).map(_.state))
// Somehow the leader epoch rewinds
fetcher.leaderPartitionState(partition).leaderEpoch = 0
// We are stuck at the current offset
fetcher.doWork()
assertEquals(1, replicaState.logEndOffset)
assertEquals(Some(Fetching), fetcher.fetchState(partition).map(_.state))
// After returning to the right epoch, we can continue fetching
fetcher.leaderPartitionState(partition).leaderEpoch = 1
fetcher.doWork()
assertEquals(2, replicaState.logEndOffset)
assertEquals(Some(Fetching), fetcher.fetchState(partition).map(_.state))
}
@Test
def testTruncation(): Unit = {
val partition = new TopicPartition("topic", 0)
val fetcher = new MockFetcherThread
val replicaLog = Seq(
mkBatch(baseOffset = 0, leaderEpoch = 0, new SimpleRecord("a".getBytes)),
mkBatch(baseOffset = 1, leaderEpoch = 2, new SimpleRecord("b".getBytes)),
mkBatch(baseOffset = 2, leaderEpoch = 4, new SimpleRecord("c".getBytes)))
val replicaState = MockFetcherThread.PartitionState(replicaLog, leaderEpoch = 5, highWatermark = 0L)
fetcher.setReplicaState(partition, replicaState)
fetcher.addPartitions(Map(partition -> initialFetchState(3L, leaderEpoch = 5)))
val leaderLog = Seq(
mkBatch(baseOffset = 0, leaderEpoch = 1, new SimpleRecord("a".getBytes)),
mkBatch(baseOffset = 1, leaderEpoch = 3, new SimpleRecord("b".getBytes)),
mkBatch(baseOffset = 2, leaderEpoch = 5, new SimpleRecord("c".getBytes)))
val leaderState = MockFetcherThread.PartitionState(leaderLog, leaderEpoch = 5, highWatermark = 2L)
fetcher.setLeaderState(partition, leaderState)
TestUtils.waitUntilTrue(() => {
fetcher.doWork()
fetcher.replicaPartitionState(partition).log == fetcher.leaderPartitionState(partition).log
}, "Failed to reconcile leader and follower logs")
assertEquals(leaderState.logStartOffset, replicaState.logStartOffset)
assertEquals(leaderState.logEndOffset, replicaState.logEndOffset)
assertEquals(leaderState.highWatermark, replicaState.highWatermark)
}
@Test
def testTruncateToHighWatermarkIfLeaderEpochRequestNotSupported(): Unit = {
val highWatermark = 2L
val partition = new TopicPartition("topic", 0)
val fetcher = new MockFetcherThread {
override def truncate(topicPartition: TopicPartition, truncationState: OffsetTruncationState): Unit = {
assertEquals(highWatermark, truncationState.offset)
assertTrue(truncationState.truncationCompleted)
super.truncate(topicPartition, truncationState)
}
override def fetchEpochEndOffsets(partitions: Map[TopicPartition, EpochData]): Map[TopicPartition, EpochEndOffset] =
throw new UnsupportedOperationException
override protected val isOffsetForLeaderEpochSupported: Boolean = false
override protected val isTruncationOnFetchSupported: Boolean = false
}
val replicaLog = Seq(
mkBatch(baseOffset = 0, leaderEpoch = 0, new SimpleRecord("a".getBytes)),
mkBatch(baseOffset = 1, leaderEpoch = 2, new SimpleRecord("b".getBytes)),
mkBatch(baseOffset = 2, leaderEpoch = 4, new SimpleRecord("c".getBytes)))
val replicaState = MockFetcherThread.PartitionState(replicaLog, leaderEpoch = 5, highWatermark)
fetcher.setReplicaState(partition, replicaState)
fetcher.addPartitions(Map(partition -> initialFetchState(highWatermark, leaderEpoch = 5)))
fetcher.doWork()
assertEquals(highWatermark, replicaState.logEndOffset)
assertEquals(highWatermark, fetcher.fetchState(partition).get.fetchOffset)
assertTrue(fetcher.fetchState(partition).get.isReadyForFetch)
}
@Test
def testTruncateToHighWatermarkIfLeaderEpochInfoNotAvailable(): Unit = {
val highWatermark = 2L
val partition = new TopicPartition("topic", 0)
val fetcher = new MockFetcherThread {
override def truncate(topicPartition: TopicPartition, truncationState: OffsetTruncationState): Unit = {
assertEquals(highWatermark, truncationState.offset)
assertTrue(truncationState.truncationCompleted)
super.truncate(topicPartition, truncationState)
}
override def fetchEpochEndOffsets(partitions: Map[TopicPartition, EpochData]): Map[TopicPartition, EpochEndOffset] =
throw new UnsupportedOperationException
override def latestEpoch(topicPartition: TopicPartition): Option[Int] = None
}
val replicaLog = Seq(
mkBatch(baseOffset = 0, leaderEpoch = 0, new SimpleRecord("a".getBytes)),
mkBatch(baseOffset = 1, leaderEpoch = 2, new SimpleRecord("b".getBytes)),
mkBatch(baseOffset = 2, leaderEpoch = 4, new SimpleRecord("c".getBytes)))
val replicaState = MockFetcherThread.PartitionState(replicaLog, leaderEpoch = 5, highWatermark)
fetcher.setReplicaState(partition, replicaState)
fetcher.addPartitions(Map(partition -> initialFetchState(highWatermark, leaderEpoch = 5)))
fetcher.doWork()
assertEquals(highWatermark, replicaState.logEndOffset)
assertEquals(highWatermark, fetcher.fetchState(partition).get.fetchOffset)
assertTrue(fetcher.fetchState(partition).get.isReadyForFetch)
}
@Test
def testTruncateToHighWatermarkDuringRemovePartitions(): Unit = {
val highWatermark = 2L
val partition = new TopicPartition("topic", 0)
val fetcher = new MockFetcherThread {
override def truncateToHighWatermark(partitions: Set[TopicPartition]): Unit = {
removePartitions(Set(partition))
super.truncateToHighWatermark(partitions)
}
override def latestEpoch(topicPartition: TopicPartition): Option[Int] = None
}
val replicaLog = Seq(
mkBatch(baseOffset = 0, leaderEpoch = 0, new SimpleRecord("a".getBytes)),
mkBatch(baseOffset = 1, leaderEpoch = 2, new SimpleRecord("b".getBytes)),
mkBatch(baseOffset = 2, leaderEpoch = 4, new SimpleRecord("c".getBytes)))
val replicaState = MockFetcherThread.PartitionState(replicaLog, leaderEpoch = 5, highWatermark)
fetcher.setReplicaState(partition, replicaState)
fetcher.addPartitions(Map(partition -> initialFetchState(highWatermark, leaderEpoch = 5)))
fetcher.doWork()
assertEquals(replicaLog.last.nextOffset(), replicaState.logEndOffset)
assertTrue(fetcher.fetchState(partition).isEmpty)
}
@Test
def testTruncationSkippedIfNoEpochChange(): Unit = {
val partition = new TopicPartition("topic", 0)
var truncations = 0
val fetcher = new MockFetcherThread {
override def truncate(topicPartition: TopicPartition, truncationState: OffsetTruncationState): Unit = {
truncations += 1
super.truncate(topicPartition, truncationState)
}
}
val replicaState = MockFetcherThread.PartitionState(leaderEpoch = 5)
fetcher.setReplicaState(partition, replicaState)
fetcher.addPartitions(Map(partition -> initialFetchState(0L, leaderEpoch = 5)), forceTruncation = true)
val leaderLog = Seq(
mkBatch(baseOffset = 0, leaderEpoch = 1, new SimpleRecord("a".getBytes)),
mkBatch(baseOffset = 1, leaderEpoch = 3, new SimpleRecord("b".getBytes)),
mkBatch(baseOffset = 2, leaderEpoch = 5, new SimpleRecord("c".getBytes)))
val leaderState = MockFetcherThread.PartitionState(leaderLog, leaderEpoch = 5, highWatermark = 2L)
fetcher.setLeaderState(partition, leaderState)
// Do one round of truncation
fetcher.doWork()
// We only fetch one record at a time with mock fetcher
assertEquals(1, replicaState.logEndOffset)
assertEquals(1, truncations)
// Add partitions again with the same epoch
fetcher.addPartitions(Map(partition -> initialFetchState(3L, leaderEpoch = 5)))
// Verify we did not truncate
fetcher.doWork()
// No truncations occurred and we have fetched another record
assertEquals(1, truncations)
assertEquals(2, replicaState.logEndOffset)
}
@Test
def testFollowerFetchOutOfRangeHigh(): Unit = {
val partition = new TopicPartition("topic", 0)
val fetcher = new MockFetcherThread()
val replicaLog = Seq(
mkBatch(baseOffset = 0, leaderEpoch = 0, new SimpleRecord("a".getBytes)),
mkBatch(baseOffset = 1, leaderEpoch = 2, new SimpleRecord("b".getBytes)),
mkBatch(baseOffset = 2, leaderEpoch = 4, new SimpleRecord("c".getBytes)))
val replicaState = MockFetcherThread.PartitionState(replicaLog, leaderEpoch = 4, highWatermark = 0L)
fetcher.setReplicaState(partition, replicaState)
fetcher.addPartitions(Map(partition -> initialFetchState(3L, leaderEpoch = 4)))
val leaderLog = Seq(
mkBatch(baseOffset = 0, leaderEpoch = 0, new SimpleRecord("a".getBytes)),
mkBatch(baseOffset = 1, leaderEpoch = 2, new SimpleRecord("b".getBytes)),
mkBatch(baseOffset = 2, leaderEpoch = 4, new SimpleRecord("c".getBytes)))
val leaderState = MockFetcherThread.PartitionState(leaderLog, leaderEpoch = 4, highWatermark = 2L)
fetcher.setLeaderState(partition, leaderState)
// initial truncation and verify that the log end offset is updated
fetcher.doWork()
assertEquals(3L, replicaState.logEndOffset)
assertEquals(Option(Fetching), fetcher.fetchState(partition).map(_.state))
// To hit this case, we have to change the leader log without going through the truncation phase
leaderState.log.clear()
leaderState.logEndOffset = 0L
leaderState.logStartOffset = 0L
leaderState.highWatermark = 0L
fetcher.doWork()
assertEquals(0L, replicaState.logEndOffset)
assertEquals(0L, replicaState.logStartOffset)
assertEquals(0L, replicaState.highWatermark)
}
@Test
def testFencedOffsetResetAfterOutOfRange(): Unit = {
val partition = new TopicPartition("topic", 0)
var fetchedEarliestOffset = false
val fetcher = new MockFetcherThread() {
override protected def fetchEarliestOffsetFromLeader(topicPartition: TopicPartition, leaderEpoch: Int): Long = {
fetchedEarliestOffset = true
throw new FencedLeaderEpochException(s"Epoch $leaderEpoch is fenced")
}
}
val replicaLog = Seq()
val replicaState = MockFetcherThread.PartitionState(replicaLog, leaderEpoch = 4, highWatermark = 0L)
fetcher.setReplicaState(partition, replicaState)
fetcher.addPartitions(Map(partition -> initialFetchState(0L, leaderEpoch = 4)))
val leaderLog = Seq(
mkBatch(baseOffset = 1, leaderEpoch = 2, new SimpleRecord("b".getBytes)),
mkBatch(baseOffset = 2, leaderEpoch = 4, new SimpleRecord("c".getBytes)))
val leaderState = MockFetcherThread.PartitionState(leaderLog, leaderEpoch = 4, highWatermark = 2L)
fetcher.setLeaderState(partition, leaderState)
// After the out of range error, we get a fenced error and remove the partition and mark as failed
fetcher.doWork()
assertEquals(0, replicaState.logEndOffset)
assertTrue(fetchedEarliestOffset)
assertTrue(fetcher.fetchState(partition).isEmpty)
assertTrue(failedPartitions.contains(partition))
}
@Test
def testFollowerFetchOutOfRangeLow(): Unit = {
val partition = new TopicPartition("topic", 0)
val fetcher = new MockFetcherThread
// The follower begins from an offset which is behind the leader's log start offset
val replicaLog = Seq(
mkBatch(baseOffset = 0, leaderEpoch = 0, new SimpleRecord("a".getBytes)))
val replicaState = MockFetcherThread.PartitionState(replicaLog, leaderEpoch = 0, highWatermark = 0L)
fetcher.setReplicaState(partition, replicaState)
fetcher.addPartitions(Map(partition -> initialFetchState(3L, leaderEpoch = 0)))
val leaderLog = Seq(
mkBatch(baseOffset = 2, leaderEpoch = 4, new SimpleRecord("c".getBytes)))
val leaderState = MockFetcherThread.PartitionState(leaderLog, leaderEpoch = 0, highWatermark = 2L)
fetcher.setLeaderState(partition, leaderState)
// initial truncation and verify that the log start offset is updated
fetcher.doWork()
if (truncateOnFetch) {
      // A second iteration is required here because the first iteration only performs the
      // initial truncation based on the diverging epoch.
fetcher.doWork()
}
assertEquals(Option(Fetching), fetcher.fetchState(partition).map(_.state))
assertEquals(2, replicaState.logStartOffset)
assertEquals(List(), replicaState.log.toList)
TestUtils.waitUntilTrue(() => {
fetcher.doWork()
fetcher.replicaPartitionState(partition).log == fetcher.leaderPartitionState(partition).log
}, "Failed to reconcile leader and follower logs")
assertEquals(leaderState.logStartOffset, replicaState.logStartOffset)
assertEquals(leaderState.logEndOffset, replicaState.logEndOffset)
assertEquals(leaderState.highWatermark, replicaState.highWatermark)
}
@Test
def testRetryAfterUnknownLeaderEpochInLatestOffsetFetch(): Unit = {
val partition = new TopicPartition("topic", 0)
val fetcher: MockFetcherThread = new MockFetcherThread {
val tries = new AtomicInteger(0)
override protected def fetchLatestOffsetFromLeader(topicPartition: TopicPartition, leaderEpoch: Int): Long = {
if (tries.getAndIncrement() == 0)
throw new UnknownLeaderEpochException("Unexpected leader epoch")
super.fetchLatestOffsetFromLeader(topicPartition, leaderEpoch)
}
}
// The follower begins from an offset which is behind the leader's log start offset
val replicaLog = Seq(
mkBatch(baseOffset = 0, leaderEpoch = 0, new SimpleRecord("a".getBytes)))
val replicaState = MockFetcherThread.PartitionState(replicaLog, leaderEpoch = 0, highWatermark = 0L)
fetcher.setReplicaState(partition, replicaState)
fetcher.addPartitions(Map(partition -> initialFetchState(3L, leaderEpoch = 0)))
val leaderLog = Seq(
mkBatch(baseOffset = 2, leaderEpoch = 4, new SimpleRecord("c".getBytes)))
val leaderState = MockFetcherThread.PartitionState(leaderLog, leaderEpoch = 0, highWatermark = 2L)
fetcher.setLeaderState(partition, leaderState)
// initial truncation and initial error response handling
fetcher.doWork()
assertEquals(Option(Fetching), fetcher.fetchState(partition).map(_.state))
TestUtils.waitUntilTrue(() => {
fetcher.doWork()
fetcher.replicaPartitionState(partition).log == fetcher.leaderPartitionState(partition).log
}, "Failed to reconcile leader and follower logs")
assertEquals(leaderState.logStartOffset, replicaState.logStartOffset)
assertEquals(leaderState.logEndOffset, replicaState.logEndOffset)
assertEquals(leaderState.highWatermark, replicaState.highWatermark)
}
@Test
def testCorruptMessage(): Unit = {
val partition = new TopicPartition("topic", 0)
val fetcher = new MockFetcherThread {
var fetchedOnce = false
override def fetchFromLeader(fetchRequest: FetchRequest.Builder): Map[TopicPartition, FetchData] = {
val fetchedData = super.fetchFromLeader(fetchRequest)
if (!fetchedOnce) {
val records = fetchedData.head._2.records.asInstanceOf[MemoryRecords]
val buffer = records.buffer()
buffer.putInt(15, buffer.getInt(15) ^ 23422)
buffer.putInt(30, buffer.getInt(30) ^ 93242)
fetchedOnce = true
}
fetchedData
}
}
fetcher.setReplicaState(partition, MockFetcherThread.PartitionState(leaderEpoch = 0))
fetcher.addPartitions(Map(partition -> initialFetchState(0L, leaderEpoch = 0)))
val batch = mkBatch(baseOffset = 0L, leaderEpoch = 0,
new SimpleRecord("a".getBytes), new SimpleRecord("b".getBytes))
val leaderState = MockFetcherThread.PartitionState(Seq(batch), leaderEpoch = 0, highWatermark = 2L)
fetcher.setLeaderState(partition, leaderState)
fetcher.doWork() // fails with corrupt record
fetcher.doWork() // should succeed
val replicaState = fetcher.replicaPartitionState(partition)
assertEquals(2L, replicaState.logEndOffset)
}
@Test
def testLeaderEpochChangeDuringFencedFetchEpochsFromLeader(): Unit = {
    // The leader is on the new epoch when the OffsetsForLeaderEpoch request with the old epoch is sent,
    // so it returns a fenced-epoch error. Validate that the response is ignored if the leader epoch
    // changes on the follower while the OffsetsForLeaderEpoch request is in flight, but that the fetcher
    // is still able to truncate and fetch in the next round of "doWork"
testLeaderEpochChangeDuringFetchEpochsFromLeader(leaderEpochOnLeader = 1)
}
@Test
def testLeaderEpochChangeDuringSuccessfulFetchEpochsFromLeader(): Unit = {
    // The leader is on the old epoch when the OffsetsForLeaderEpoch request with the old epoch is sent
    // and returns a valid response. Validate that the response is ignored if the leader epoch changes
    // on the follower while the OffsetsForLeaderEpoch request is in flight, but that the fetcher can
    // truncate and fetch once the leader is on the newer epoch (same as the follower)
testLeaderEpochChangeDuringFetchEpochsFromLeader(leaderEpochOnLeader = 0)
}
private def testLeaderEpochChangeDuringFetchEpochsFromLeader(leaderEpochOnLeader: Int): Unit = {
val partition = new TopicPartition("topic", 1)
val initialLeaderEpochOnFollower = 0
val nextLeaderEpochOnFollower = initialLeaderEpochOnFollower + 1
val fetcher = new MockFetcherThread {
var fetchEpochsFromLeaderOnce = false
override def fetchEpochEndOffsets(partitions: Map[TopicPartition, EpochData]): Map[TopicPartition, EpochEndOffset] = {
val fetchedEpochs = super.fetchEpochEndOffsets(partitions)
if (!fetchEpochsFromLeaderOnce) {
// leader epoch changes while fetching epochs from leader
removePartitions(Set(partition))
setReplicaState(partition, MockFetcherThread.PartitionState(leaderEpoch = nextLeaderEpochOnFollower))
addPartitions(Map(partition -> initialFetchState(0L, leaderEpoch = nextLeaderEpochOnFollower)), forceTruncation = true)
fetchEpochsFromLeaderOnce = true
}
fetchedEpochs
}
}
fetcher.setReplicaState(partition, MockFetcherThread.PartitionState(leaderEpoch = initialLeaderEpochOnFollower))
fetcher.addPartitions(Map(partition -> initialFetchState(0L, leaderEpoch = initialLeaderEpochOnFollower)), forceTruncation = true)
val leaderLog = Seq(
mkBatch(baseOffset = 0, leaderEpoch = initialLeaderEpochOnFollower, new SimpleRecord("c".getBytes)))
val leaderState = MockFetcherThread.PartitionState(leaderLog, leaderEpochOnLeader, highWatermark = 0L)
fetcher.setLeaderState(partition, leaderState)
// first round of truncation
fetcher.doWork()
    // Since the leader epoch changed, the fetch-epochs response is ignored because the partition is in
    // the Truncating state with the updated leader epoch
assertEquals(Option(Truncating), fetcher.fetchState(partition).map(_.state))
assertEquals(Option(nextLeaderEpochOnFollower), fetcher.fetchState(partition).map(_.currentLeaderEpoch))
if (leaderEpochOnLeader < nextLeaderEpochOnFollower) {
fetcher.setLeaderState(
partition, MockFetcherThread.PartitionState(leaderLog, nextLeaderEpochOnFollower, highWatermark = 0L))
}
// make sure the fetcher is now able to truncate and fetch
fetcher.doWork()
assertEquals(fetcher.leaderPartitionState(partition).log, fetcher.replicaPartitionState(partition).log)
}
@Test
def testTruncateToEpochEndOffsetsDuringRemovePartitions(): Unit = {
val partition = new TopicPartition("topic", 0)
val leaderEpochOnLeader = 0
val initialLeaderEpochOnFollower = 0
val nextLeaderEpochOnFollower = initialLeaderEpochOnFollower + 1
val fetcher = new MockFetcherThread {
override def fetchEpochEndOffsets(partitions: Map[TopicPartition, EpochData]): Map[TopicPartition, EpochEndOffset] = {
val fetchedEpochs = super.fetchEpochEndOffsets(partitions)
// leader epoch changes while fetching epochs from leader
// at the same time, the replica fetcher manager removes the partition
removePartitions(Set(partition))
setReplicaState(partition, MockFetcherThread.PartitionState(leaderEpoch = nextLeaderEpochOnFollower))
fetchedEpochs
}
}
fetcher.setReplicaState(partition, MockFetcherThread.PartitionState(leaderEpoch = initialLeaderEpochOnFollower))
fetcher.addPartitions(Map(partition -> initialFetchState(0L, leaderEpoch = initialLeaderEpochOnFollower)))
val leaderLog = Seq(
mkBatch(baseOffset = 0, leaderEpoch = initialLeaderEpochOnFollower, new SimpleRecord("c".getBytes)))
val leaderState = MockFetcherThread.PartitionState(leaderLog, leaderEpochOnLeader, highWatermark = 0L)
fetcher.setLeaderState(partition, leaderState)
// first round of work
fetcher.doWork()
// since the partition was removed before the fetched endOffsets were filtered against the leader epoch,
// we do not expect the partition to be in Truncating state
assertEquals(None, fetcher.fetchState(partition).map(_.state))
assertEquals(None, fetcher.fetchState(partition).map(_.currentLeaderEpoch))
fetcher.setLeaderState(
partition, MockFetcherThread.PartitionState(leaderLog, nextLeaderEpochOnFollower, highWatermark = 0L))
// make sure the fetcher is able to continue work
fetcher.doWork()
assertEquals(ArrayBuffer.empty, fetcher.replicaPartitionState(partition).log)
}
@Test
def testTruncationThrowsExceptionIfLeaderReturnsPartitionsNotRequestedInFetchEpochs(): Unit = {
val partition = new TopicPartition("topic", 0)
val fetcher = new MockFetcherThread {
override def fetchEpochEndOffsets(partitions: Map[TopicPartition, EpochData]): Map[TopicPartition, EpochEndOffset] = {
val unrequestedTp = new TopicPartition("topic2", 0)
super.fetchEpochEndOffsets(partitions).toMap + (unrequestedTp -> new EpochEndOffset()
.setPartition(unrequestedTp.partition)
.setErrorCode(Errors.NONE.code)
.setLeaderEpoch(0)
.setEndOffset(0))
}
}
fetcher.setReplicaState(partition, MockFetcherThread.PartitionState(leaderEpoch = 0))
fetcher.addPartitions(Map(partition -> initialFetchState(0L, leaderEpoch = 0)), forceTruncation = true)
fetcher.setLeaderState(partition, MockFetcherThread.PartitionState(leaderEpoch = 0))
// first round of truncation should throw an exception
assertThrows(classOf[IllegalStateException], () => fetcher.doWork())
}
@Test
def testFetcherThreadHandlingPartitionFailureDuringAppending(): Unit = {
val fetcherForAppend = new MockFetcherThread {
override def processPartitionData(topicPartition: TopicPartition, fetchOffset: Long, partitionData: FetchData): Option[LogAppendInfo] = {
if (topicPartition == partition1) {
throw new KafkaException()
} else {
super.processPartitionData(topicPartition, fetchOffset, partitionData)
}
}
}
verifyFetcherThreadHandlingPartitionFailure(fetcherForAppend)
}
@Test
def testFetcherThreadHandlingPartitionFailureDuringTruncation(): Unit = {
val fetcherForTruncation = new MockFetcherThread {
override def truncate(topicPartition: TopicPartition, truncationState: OffsetTruncationState): Unit = {
if(topicPartition == partition1)
throw new Exception()
else {
super.truncate(topicPartition: TopicPartition, truncationState: OffsetTruncationState)
}
}
}
verifyFetcherThreadHandlingPartitionFailure(fetcherForTruncation)
}
private def verifyFetcherThreadHandlingPartitionFailure(fetcher: MockFetcherThread): Unit = {
fetcher.setReplicaState(partition1, MockFetcherThread.PartitionState(leaderEpoch = 0))
fetcher.addPartitions(Map(partition1 -> initialFetchState(0L, leaderEpoch = 0)), forceTruncation = true)
fetcher.setLeaderState(partition1, MockFetcherThread.PartitionState(leaderEpoch = 0))
fetcher.setReplicaState(partition2, MockFetcherThread.PartitionState(leaderEpoch = 0))
fetcher.addPartitions(Map(partition2 -> initialFetchState(0L, leaderEpoch = 0)), forceTruncation = true)
fetcher.setLeaderState(partition2, MockFetcherThread.PartitionState(leaderEpoch = 0))
// processing data fails for partition1
fetcher.doWork()
// partition1 marked as failed
assertTrue(failedPartitions.contains(partition1))
assertEquals(None, fetcher.fetchState(partition1))
// make sure the fetcher continues to work with rest of the partitions
fetcher.doWork()
assertEquals(Some(Fetching), fetcher.fetchState(partition2).map(_.state))
assertFalse(failedPartitions.contains(partition2))
// simulate a leader change
fetcher.removePartitions(Set(partition1))
failedPartitions.removeAll(Set(partition1))
fetcher.addPartitions(Map(partition1 -> initialFetchState(0L, leaderEpoch = 1)), forceTruncation = true)
// partition1 added back
assertEquals(Some(Truncating), fetcher.fetchState(partition1).map(_.state))
assertFalse(failedPartitions.contains(partition1))
}
@Test
def testDivergingEpochs(): Unit = {
val partition = new TopicPartition("topic", 0)
val fetcher = new MockFetcherThread
val replicaLog = Seq(
mkBatch(baseOffset = 0, leaderEpoch = 0, new SimpleRecord("a".getBytes)),
mkBatch(baseOffset = 1, leaderEpoch = 2, new SimpleRecord("b".getBytes)),
mkBatch(baseOffset = 2, leaderEpoch = 4, new SimpleRecord("c".getBytes)))
val replicaState = MockFetcherThread.PartitionState(replicaLog, leaderEpoch = 5, highWatermark = 0L)
fetcher.setReplicaState(partition, replicaState)
fetcher.addPartitions(Map(partition -> initialFetchState(3L, leaderEpoch = 5)))
assertEquals(3L, replicaState.logEndOffset)
fetcher.verifyLastFetchedEpoch(partition, expectedEpoch = Some(4))
val leaderLog = Seq(
mkBatch(baseOffset = 0, leaderEpoch = 0, new SimpleRecord("a".getBytes)),
mkBatch(baseOffset = 1, leaderEpoch = 2, new SimpleRecord("b".getBytes)),
mkBatch(baseOffset = 2, leaderEpoch = 5, new SimpleRecord("d".getBytes)))
val leaderState = MockFetcherThread.PartitionState(leaderLog, leaderEpoch = 5, highWatermark = 2L)
fetcher.setLeaderState(partition, leaderState)
fetcher.doWork()
fetcher.verifyLastFetchedEpoch(partition, Some(2))
TestUtils.waitUntilTrue(() => {
fetcher.doWork()
fetcher.replicaPartitionState(partition).log == fetcher.leaderPartitionState(partition).log
}, "Failed to reconcile leader and follower logs")
fetcher.verifyLastFetchedEpoch(partition, Some(5))
}
object MockFetcherThread {
class PartitionState(var log: mutable.Buffer[RecordBatch],
var leaderEpoch: Int,
var logStartOffset: Long,
var logEndOffset: Long,
var highWatermark: Long)
object PartitionState {
def apply(log: Seq[RecordBatch], leaderEpoch: Int, highWatermark: Long): PartitionState = {
val logStartOffset = log.headOption.map(_.baseOffset).getOrElse(0L)
val logEndOffset = log.lastOption.map(_.nextOffset).getOrElse(0L)
new PartitionState(log.toBuffer, leaderEpoch, logStartOffset, logEndOffset, highWatermark)
}
def apply(leaderEpoch: Int): PartitionState = {
apply(Seq(), leaderEpoch = leaderEpoch, highWatermark = 0L)
}
}
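    // Hedged sketch (editor addition, not part of the original test): PartitionState.apply derives
    // its offsets from the supplied batches, so a two-record batch at base offset 5 yields
    // logStartOffset = 5 and logEndOffset = 7 (the offset after the last record).
    def offsetDerivationSketch(): PartitionState = {
      val batch = mkBatch(baseOffset = 5L, leaderEpoch = 0,
        new SimpleRecord("a".getBytes), new SimpleRecord("b".getBytes))
      val state = PartitionState(Seq(batch), leaderEpoch = 0, highWatermark = 5L)
      assert(state.logStartOffset == 5L && state.logEndOffset == 7L)
      state
    }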
}
class MockFetcherThread(val replicaId: Int = 0, val leaderId: Int = 1)
extends AbstractFetcherThread("mock-fetcher",
clientId = "mock-fetcher",
sourceBroker = new BrokerEndPoint(leaderId, host = "localhost", port = Random.nextInt()),
failedPartitions,
brokerTopicStats = new BrokerTopicStats) {
import MockFetcherThread.PartitionState
private val replicaPartitionStates = mutable.Map[TopicPartition, PartitionState]()
private val leaderPartitionStates = mutable.Map[TopicPartition, PartitionState]()
private var latestEpochDefault: Option[Int] = Some(0)
def setLeaderState(topicPartition: TopicPartition, state: PartitionState): Unit = {
leaderPartitionStates.put(topicPartition, state)
}
def setReplicaState(topicPartition: TopicPartition, state: PartitionState): Unit = {
replicaPartitionStates.put(topicPartition, state)
}
def replicaPartitionState(topicPartition: TopicPartition): PartitionState = {
replicaPartitionStates.getOrElse(topicPartition,
throw new IllegalArgumentException(s"Unknown partition $topicPartition"))
}
def leaderPartitionState(topicPartition: TopicPartition): PartitionState = {
leaderPartitionStates.getOrElse(topicPartition,
throw new IllegalArgumentException(s"Unknown partition $topicPartition"))
}
def addPartitions(initialFetchStates: Map[TopicPartition, InitialFetchState], forceTruncation: Boolean): Set[TopicPartition] = {
latestEpochDefault = if (forceTruncation) None else Some(0)
val partitions = super.addPartitions(initialFetchStates)
latestEpochDefault = Some(0)
partitions
}
override def processPartitionData(topicPartition: TopicPartition,
fetchOffset: Long,
partitionData: FetchData): Option[LogAppendInfo] = {
val state = replicaPartitionState(topicPartition)
if (isTruncationOnFetchSupported && FetchResponse.isDivergingEpoch(partitionData)) {
val divergingEpoch = partitionData.divergingEpoch
truncateOnFetchResponse(Map(topicPartition -> new EpochEndOffset()
.setPartition(topicPartition.partition)
.setErrorCode(Errors.NONE.code)
.setLeaderEpoch(divergingEpoch.epoch)
.setEndOffset(divergingEpoch.endOffset)))
return None
}
// Throw exception if the fetchOffset does not match the fetcherThread partition state
if (fetchOffset != state.logEndOffset)
throw new RuntimeException(s"Offset mismatch for partition $topicPartition: " +
s"fetched offset = $fetchOffset, log end offset = ${state.logEndOffset}.")
// Now check message's crc
val batches = FetchResponse.recordsOrFail(partitionData).batches.asScala
var maxTimestamp = RecordBatch.NO_TIMESTAMP
var offsetOfMaxTimestamp = -1L
var lastOffset = state.logEndOffset
var lastEpoch: Option[Int] = None
for (batch <- batches) {
batch.ensureValid()
if (batch.maxTimestamp > maxTimestamp) {
maxTimestamp = batch.maxTimestamp
offsetOfMaxTimestamp = batch.baseOffset
}
state.log.append(batch)
state.logEndOffset = batch.nextOffset
lastOffset = batch.lastOffset
lastEpoch = Some(batch.partitionLeaderEpoch)
}
state.logStartOffset = partitionData.logStartOffset
state.highWatermark = partitionData.highWatermark
Some(LogAppendInfo(firstOffset = Some(LogOffsetMetadata(fetchOffset)),
lastOffset = lastOffset,
lastLeaderEpoch = lastEpoch,
maxTimestamp = maxTimestamp,
offsetOfMaxTimestamp = offsetOfMaxTimestamp,
logAppendTime = Time.SYSTEM.milliseconds(),
logStartOffset = state.logStartOffset,
recordConversionStats = RecordConversionStats.EMPTY,
sourceCodec = NoCompressionCodec,
targetCodec = NoCompressionCodec,
shallowCount = batches.size,
validBytes = FetchResponse.recordsSize(partitionData),
offsetsMonotonic = true,
lastOffsetOfFirstBatch = batches.headOption.map(_.lastOffset).getOrElse(-1)))
}
override def truncate(topicPartition: TopicPartition, truncationState: OffsetTruncationState): Unit = {
val state = replicaPartitionState(topicPartition)
state.log = state.log.takeWhile { batch =>
batch.lastOffset < truncationState.offset
}
state.logEndOffset = state.log.lastOption.map(_.lastOffset + 1).getOrElse(state.logStartOffset)
state.highWatermark = math.min(state.highWatermark, state.logEndOffset)
}
override def truncateFullyAndStartAt(topicPartition: TopicPartition, offset: Long): Unit = {
val state = replicaPartitionState(topicPartition)
state.log.clear()
state.logStartOffset = offset
state.logEndOffset = offset
state.highWatermark = offset
}
override def buildFetch(partitionMap: Map[TopicPartition, PartitionFetchState]): ResultWithPartitions[Option[ReplicaFetch]] = {
val fetchData = mutable.Map.empty[TopicPartition, FetchRequest.PartitionData]
partitionMap.foreach { case (partition, state) =>
if (state.isReadyForFetch) {
val replicaState = replicaPartitionState(partition)
val lastFetchedEpoch = if (isTruncationOnFetchSupported)
state.lastFetchedEpoch.map(_.asInstanceOf[Integer]).asJava
else
Optional.empty[Integer]
fetchData.put(partition, new FetchRequest.PartitionData(state.fetchOffset, replicaState.logStartOffset,
1024 * 1024, Optional.of[Integer](state.currentLeaderEpoch), lastFetchedEpoch))
}
}
val fetchRequest = FetchRequest.Builder.forReplica(ApiKeys.FETCH.latestVersion, replicaId, 0, 1, fetchData.asJava)
ResultWithPartitions(Some(ReplicaFetch(fetchData.asJava, fetchRequest)), Set.empty)
}
override def latestEpoch(topicPartition: TopicPartition): Option[Int] = {
val state = replicaPartitionState(topicPartition)
state.log.lastOption.map(_.partitionLeaderEpoch).orElse(latestEpochDefault)
}
override def logStartOffset(topicPartition: TopicPartition): Long = replicaPartitionState(topicPartition).logStartOffset
override def logEndOffset(topicPartition: TopicPartition): Long = replicaPartitionState(topicPartition).logEndOffset
override def endOffsetForEpoch(topicPartition: TopicPartition, epoch: Int): Option[OffsetAndEpoch] = {
val epochData = new EpochData()
.setPartition(topicPartition.partition)
.setLeaderEpoch(epoch)
val result = lookupEndOffsetForEpoch(topicPartition, epochData, replicaPartitionState(topicPartition))
if (result.endOffset == UNDEFINED_EPOCH_OFFSET)
None
else
Some(OffsetAndEpoch(result.endOffset, result.leaderEpoch))
}
private def checkExpectedLeaderEpoch(expectedEpochOpt: Optional[Integer],
partitionState: PartitionState): Option[Errors] = {
if (expectedEpochOpt.isPresent) {
checkExpectedLeaderEpoch(expectedEpochOpt.get, partitionState)
} else {
None
}
}
private def checkExpectedLeaderEpoch(expectedEpoch: Int,
partitionState: PartitionState): Option[Errors] = {
if (expectedEpoch != RecordBatch.NO_PARTITION_LEADER_EPOCH) {
if (expectedEpoch < partitionState.leaderEpoch)
Some(Errors.FENCED_LEADER_EPOCH)
else if (expectedEpoch > partitionState.leaderEpoch)
Some(Errors.UNKNOWN_LEADER_EPOCH)
else
None
} else {
None
}
}
def verifyLastFetchedEpoch(partition: TopicPartition, expectedEpoch: Option[Int]): Unit = {
if (isTruncationOnFetchSupported) {
assertEquals(Some(Fetching), fetchState(partition).map(_.state))
assertEquals(expectedEpoch, fetchState(partition).flatMap(_.lastFetchedEpoch))
}
}
private def divergingEpochAndOffset(topicPartition: TopicPartition,
lastFetchedEpoch: Optional[Integer],
fetchOffset: Long,
partitionState: PartitionState): Option[FetchResponseData.EpochEndOffset] = {
lastFetchedEpoch.asScala.flatMap { fetchEpoch =>
val epochEndOffset = fetchEpochEndOffsets(
Map(topicPartition -> new EpochData()
.setPartition(topicPartition.partition)
.setLeaderEpoch(fetchEpoch)))(topicPartition)
if (partitionState.log.isEmpty
|| epochEndOffset.endOffset == UNDEFINED_EPOCH_OFFSET
|| epochEndOffset.leaderEpoch == UNDEFINED_EPOCH)
None
else if (epochEndOffset.leaderEpoch < fetchEpoch || epochEndOffset.endOffset < fetchOffset) {
Some(new FetchResponseData.EpochEndOffset()
.setEpoch(epochEndOffset.leaderEpoch)
.setEndOffset(epochEndOffset.endOffset))
} else
None
}
}
private def lookupEndOffsetForEpoch(topicPartition: TopicPartition,
epochData: EpochData,
partitionState: PartitionState): EpochEndOffset = {
checkExpectedLeaderEpoch(epochData.currentLeaderEpoch, partitionState).foreach { error =>
return new EpochEndOffset()
.setPartition(topicPartition.partition)
.setErrorCode(error.code)
}
var epochLowerBound = UNDEFINED_EPOCH
for (batch <- partitionState.log) {
if (batch.partitionLeaderEpoch > epochData.leaderEpoch) {
// If we don't have the requested epoch, return the next higher entry
if (epochLowerBound == UNDEFINED_EPOCH)
return new EpochEndOffset()
.setPartition(topicPartition.partition)
.setErrorCode(Errors.NONE.code)
.setLeaderEpoch(batch.partitionLeaderEpoch)
.setEndOffset(batch.baseOffset)
else
return new EpochEndOffset()
.setPartition(topicPartition.partition)
.setErrorCode(Errors.NONE.code)
.setLeaderEpoch(epochLowerBound)
.setEndOffset(batch.baseOffset)
}
epochLowerBound = batch.partitionLeaderEpoch
}
new EpochEndOffset()
.setPartition(topicPartition.partition)
.setErrorCode(Errors.NONE.code)
}
override def fetchEpochEndOffsets(partitions: Map[TopicPartition, EpochData]): Map[TopicPartition, EpochEndOffset] = {
val endOffsets = mutable.Map[TopicPartition, EpochEndOffset]()
partitions.forKeyValue { (partition, epochData) =>
assert(partition.partition == epochData.partition,
"Partition must be consistent between TopicPartition and EpochData")
val leaderState = leaderPartitionState(partition)
val epochEndOffset = lookupEndOffsetForEpoch(partition, epochData, leaderState)
endOffsets.put(partition, epochEndOffset)
}
endOffsets
}
override protected val isOffsetForLeaderEpochSupported: Boolean = true
override protected val isTruncationOnFetchSupported: Boolean = truncateOnFetch
override def fetchFromLeader(fetchRequest: FetchRequest.Builder): Map[TopicPartition, FetchData] = {
fetchRequest.fetchData.asScala.map { case (partition, fetchData) =>
val leaderState = leaderPartitionState(partition)
val epochCheckError = checkExpectedLeaderEpoch(fetchData.currentLeaderEpoch, leaderState)
val divergingEpoch = divergingEpochAndOffset(partition, fetchData.lastFetchedEpoch, fetchData.fetchOffset, leaderState)
val (error, records) = if (epochCheckError.isDefined) {
(epochCheckError.get, MemoryRecords.EMPTY)
} else if (fetchData.fetchOffset > leaderState.logEndOffset || fetchData.fetchOffset < leaderState.logStartOffset) {
(Errors.OFFSET_OUT_OF_RANGE, MemoryRecords.EMPTY)
} else if (divergingEpoch.nonEmpty) {
(Errors.NONE, MemoryRecords.EMPTY)
} else {
// for simplicity, we fetch only one batch at a time
val records = leaderState.log.find(_.baseOffset >= fetchData.fetchOffset) match {
case Some(batch) =>
val buffer = ByteBuffer.allocate(batch.sizeInBytes)
batch.writeTo(buffer)
buffer.flip()
MemoryRecords.readableRecords(buffer)
case None =>
MemoryRecords.EMPTY
}
(Errors.NONE, records)
}
val partitionData = new FetchData()
.setPartitionIndex(partition.partition)
.setErrorCode(error.code)
.setHighWatermark(leaderState.highWatermark)
.setLastStableOffset(leaderState.highWatermark)
.setLogStartOffset(leaderState.logStartOffset)
.setRecords(records)
divergingEpoch.foreach(partitionData.setDivergingEpoch)
(partition, partitionData)
}.toMap
}
private def checkLeaderEpochAndThrow(expectedEpoch: Int, partitionState: PartitionState): Unit = {
checkExpectedLeaderEpoch(expectedEpoch, partitionState).foreach { error =>
throw error.exception()
}
}
override protected def fetchEarliestOffsetFromLeader(topicPartition: TopicPartition, leaderEpoch: Int): Long = {
val leaderState = leaderPartitionState(topicPartition)
checkLeaderEpochAndThrow(leaderEpoch, leaderState)
leaderState.logStartOffset
}
override protected def fetchLatestOffsetFromLeader(topicPartition: TopicPartition, leaderEpoch: Int): Long = {
val leaderState = leaderPartitionState(topicPartition)
checkLeaderEpochAndThrow(leaderEpoch, leaderState)
leaderState.logEndOffset
}
}
}
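/* A standalone sketch (not Kafka code, names illustrative) of the scan that
 * lookupEndOffsetForEpoch above performs: walk the log in order and return the
 * base offset of the first batch whose epoch exceeds the requested one,
 * together with the epoch actually found below it. */
object EpochLookupSketch {
  final case class Batch(epoch: Int, baseOffset: Long)
  /** Returns (foundEpoch, endOffset), or None when the log gives no answer. */
  def endOffsetForEpoch(log: Seq[Batch], requestedEpoch: Int): Option[(Int, Long)] = {
    var lowerBound: Option[Int] = None
    for (batch <- log) {
      if (batch.epoch > requestedEpoch)
        // The requested epoch ends where the next higher epoch starts.
        return Some((lowerBound.getOrElse(batch.epoch), batch.baseOffset))
      lowerBound = Some(batch.epoch)
    }
    None
  }
  def main(args: Array[String]): Unit = {
    val log = Seq(Batch(0, 0L), Batch(0, 10L), Batch(2, 20L), Batch(3, 35L))
    println(endOffsetForEpoch(log, requestedEpoch = 0)) // Some((0,20))
    println(endOffsetForEpoch(log, requestedEpoch = 2)) // Some((2,35))
    println(endOffsetForEpoch(log, requestedEpoch = 5)) // None
  }
}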
|
Chasego/kafka
|
core/src/test/scala/unit/kafka/server/AbstractFetcherThreadTest.scala
|
Scala
|
apache-2.0
| 51,676
|
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn.ops
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.nn.Sigmoid
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.Table
import scala.reflect.ClassTag
class SigmoidGrad[T: ClassTag, D: ClassTag]
(implicit ev: TensorNumeric[T], ev2: TensorNumeric[D])
extends Operation[Table, Tensor[D], T]{
private val module = Sigmoid[D]()
override def updateOutput(input: Table): Tensor[D] = {
val (y, grads) = (input[Tensor[D]](1), input[Tensor[D]](2))
output = module.updateGradInputInternal(y, grads).toTensor[D]
output
}
}
object SigmoidGrad {
def apply[T: ClassTag, D: ClassTag]()
(implicit ev: TensorNumeric[T], ev2: TensorNumeric[D]): SigmoidGrad[T, D] =
new SigmoidGrad[T, D]()
}
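/* A minimal standalone sketch (not BigDL code, names illustrative) of the
 * identity the SigmoidGrad op above delegates to the Sigmoid module:
 * for y = sigmoid(x), dL/dx = dL/dy * y * (1 - y). */
object SigmoidGradSketch {
  def sigmoid(x: Double): Double = 1.0 / (1.0 + math.exp(-x))
  /** Gradient w.r.t. the sigmoid input, given its output y and the incoming gradient. */
  def sigmoidGrad(y: Double, gradOut: Double): Double = gradOut * y * (1.0 - y)
  def main(args: Array[String]): Unit = {
    val x = 0.5
    val y = sigmoid(x)
    // Sanity check against a central finite-difference estimate.
    val eps = 1e-6
    val numeric = (sigmoid(x + eps) - sigmoid(x - eps)) / (2 * eps)
    println(f"analytic=${sigmoidGrad(y, 1.0)}%.8f numeric=$numeric%.8f")
  }
}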
|
jenniew/BigDL
|
spark/dl/src/main/scala/com/intel/analytics/bigdl/nn/ops/SigmoidGrad.scala
|
Scala
|
apache-2.0
| 1,442
|
package strd.util
import scala.util.Random
import org.slf4j.LoggerFactory
/**
* $Id$
* $URL$
* User: bulay
* Date: 7/31/13
* Time: 4:15 PM
*/
class MonotonicallyIncreaseGenerator(val workerId : Int) {
import GeneratorConsts._
val twepoch = 1349035200000L
// 1288834974657L
val log = LoggerFactory.getLogger(getClass)
var lastTimestamp = -1L
var sequence: Long = 0L
  if (workerId > maxWorkerId || workerId < 0) {
    throw new IllegalStateException("workerId out of range (0.." + maxWorkerId + "): " + workerId)
  }
def nextId(): Long = synchronized {
nextId(timeGen())
}
def nextId(time: Long): Long = synchronized {
var timestamp = time
if (timestamp < lastTimestamp) {
log.error("clock is moving backwards. Rejecting requests until %d.", lastTimestamp)
throw new IllegalStateException("Clock moved backwards. Refusing to generate id for %d milliseconds".format(
lastTimestamp - timestamp))
}
if (lastTimestamp == timestamp) {
sequence = (sequence + 1) & sequenceMask
if (sequence == 0) {
timestamp = tilNextMillis(lastTimestamp)
}
} else {
sequence = 0
}
lastTimestamp = timestamp
((timestamp - twepoch) << timestampLeftShift) |
(workerId << workerIdShift) |
sequence
}
def fromTimestamp(timestamp : Long) = {
((timestamp - twepoch) << timestampLeftShift) |
(workerId << workerIdShift) |
sequence
}
protected def tilNextMillis(lastTimestamp: Long): Long = {
var timestamp = timeGen()
while (timestamp <= lastTimestamp) {
timestamp = timeGen()
}
timestamp
}
def fetchDate(id : Long) = {
(id >> timestampLeftShift) + twepoch
}
def minIdForDate(date: Long) : Long = {
(date - twepoch) << timestampLeftShift
}
protected def timeGen(): Long = System.currentTimeMillis()
}
object GeneratorConsts {
val workerIdBits = 10L
val maxWorkerId = -1L ^ (-1L << workerIdBits)
val sequenceBits = 12L
val workerIdShift = sequenceBits
val timestampLeftShift = sequenceBits + workerIdBits
val sequenceMask = -1L ^ (-1L << sequenceBits)
val tsMask = -1L ^ (1L << timestampLeftShift)
}
object MonotonicallyIncreaseGenerator {
val instance = new MonotonicallyIncreaseGenerator( Random.nextInt(GeneratorConsts.maxWorkerId.toInt) )
def nextId(): Long = instance.nextId()
def nextId(time: Long): Long = instance.nextId(time)
def fetchDate(id : Long) = instance.fetchDate(id)
def minIdForDate(date: Long) = instance.minIdForDate(date)
}
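/* A standalone sketch (not part of strd, names illustrative) of the 64-bit id
 * layout produced above: with sequenceBits = 12 and workerIdBits = 10, an id is
 * (timestamp - twepoch) << 22 | workerId << 12 | sequence, so the components
 * can be recovered by masking and shifting. */
object IdLayoutSketch {
  val sequenceBits = 12L
  val workerIdBits = 10L
  val twepoch = 1349035200000L
  def pack(timestamp: Long, workerId: Long, sequence: Long): Long =
    ((timestamp - twepoch) << (sequenceBits + workerIdBits)) | (workerId << sequenceBits) | sequence
  def unpack(id: Long): (Long, Long, Long) = {
    val sequence = id & ((1L << sequenceBits) - 1)
    val workerId = (id >> sequenceBits) & ((1L << workerIdBits) - 1)
    val timestamp = (id >> (sequenceBits + workerIdBits)) + twepoch
    (timestamp, workerId, sequence)
  }
  def main(args: Array[String]): Unit = {
    val now = System.currentTimeMillis()
    val id = pack(now, workerId = 7, sequence = 42)
    println(unpack(id)) // (now, 7, 42)
  }
}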
|
onerinvestments/strd
|
strd-commons/src/main/scala/strd/util/MonotonicallyIncreaseGenerator.scala
|
Scala
|
apache-2.0
| 2,542
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spot.utilities
import org.apache.spark.broadcast.Broadcast
/**
* Routines and data for processing URLs for domains, subdomains, country code, top-level domains, etc.
*/
object DomainProcessor extends Serializable {
val CountryCodes = Set("ac", "ad", "ae", "af", "ag", "ai", "al", "am", "an", "ao", "aq", "ar", "as", "at", "au",
"aw", "ax", "az", "ba", "bb", "bd", "be", "bf", "bg", "bh", "bi", "bj", "bm", "bn", "bo", "bq", "br", "bs", "bt",
"bv", "bw", "by", "bz", "ca", "cc", "cd", "cf", "cg", "ch", "ci", "ck", "cl", "cm", "cn", "co", "cr", "cu", "cv",
"cw", "cx", "cy", "cz", "de", "dj", "dk", "dm", "do", "dz", "ec", "ee", "eg", "eh", "er", "es", "et", "eu", "fi",
"fj", "fk", "fm", "fo", "fr", "ga", "gb", "gd", "ge", "gf", "gg", "gh", "gi", "gl", "gm", "gn", "gp", "gq", "gr",
"gs", "gt", "gu", "gw", "gy", "hk", "hm", "hn", "hr", "ht", "hu", "id", "ie", "il", "im", "in", "io", "iq", "ir",
"is", "it", "je", "jm", "jo", "jp", "ke", "kg", "kh", "ki", "km", "kn", "kp", "kr", "krd", "kw", "ky", "kz", "la",
"lb", "lc", "li", "lk", "lr", "ls", "lt", "lu", "lv", "ly", "ma", "mc", "md", "me", "mg", "mh", "mk", "ml", "mm",
"mn", "mo", "mp", "mq", "mr", "ms", "mt", "mu", "mv", "mw", "mx", "my", "mz", "na", "nc", "ne", "nf", "ng", "ni",
"nl", "no", "np", "nr", "nu", "nz", "om", "pa", "pe", "pf", "pg", "ph", "pk", "pl", "pm", "pn", "pr", "ps", "pt",
"pw", "py", "qa", "re", "ro", "rs", "ru", "rw", "sa", "sb", "sc", "sd", "se", "sg", "sh", "si", "sj", "", "sk",
"sl", "sm", "sn", "so", "sr", "ss", "st", "su", "sv", "sx", "sy", "sz", "tc", "td", "tf", "tg", "th", "tj", "tk",
"tl", "tm", "tn", "to", "tp", "tr", "tt", "tv", "tw", "tz", "ua", "ug", "uk", "us", "uy", "uz", "va", "vc", "ve",
"vg", "vi", "vn", "vu", "wf", "ws", "ye", "yt", "za", "zm", "zw")
val TopLevelDomainNames = Set("com", "org", "net", "int", "edu", "gov", "mil")
val None = "None"
/**
* Commonly extracted domain features.
* @param domain Domain (if any) of a url.
   * @param topDomain Numerical class of domain: 2 for the user's own domain, 1 for Alexa top domains, 0 for others.
* @param subdomain Subdomain (if any) in the url.
* @param subdomainLength Length of the subdomain. 0 if there is none.
* @param subdomainEntropy Entropy of the subdomain viewed as a distribution on its character set.
* 0 if there is no subdomain.
* @param numPeriods Number of periods + 1 in the url. (Number of sub-strings where url is split by periods.)
*/
case class DomainInfo(domain: String,
topDomain: Int,
subdomain: String,
subdomainLength: Int,
subdomainEntropy: Double,
numPeriods: Int)
/**
* Extract domain info from a url.
* @param url Incoming url.
* @param topDomainsBC Broadcast variable containing the top domains set.
* @param userDomain Domain of the spot user (example:'intel').
* @return New [[DomainInfo]] object containing extracted domain information.
*/
def extractDomainInfo(url: String, topDomainsBC: Broadcast[Set[String]], userDomain: String): DomainInfo = {
val spliturl = url.split('.')
val numParts = spliturl.length
val (domain, subdomain) = extractDomainSubdomain(url)
val subdomainLength = if (subdomain != None) {
subdomain.length
} else {
0
}
val topDomainClass = if (userDomain != "" && domain == userDomain) {
2
} else if (topDomainsBC.value contains domain) {
1
} else {
0
}
val subdomainEntropy = if (subdomain != "None") Entropy.stringEntropy(subdomain) else 0d
DomainInfo(domain, topDomainClass, subdomain, subdomainLength, subdomainEntropy, numParts)
}
/**
*
* @param url Url from which to extract domain.
* @return Domain name or "None" if there is none.
*/
def extractDomain(url: String) : String = {
val (domain, _) = extractDomainSubdomain(url)
domain
}
/**
   * Extract the domain and subdomain from a URL.
* @param url URL to be parsed.
* @return Pair of (domain, subdomain). If there is no domain, both fields contain "None".
* If there is no subdomain then the subdomain field is "None"
*/
def extractDomainSubdomain(url: String) : (String, String) = {
val spliturl = url.split('.')
val numParts = spliturl.length
var domain = None
var subdomain = None
    // First check whether the query is a reverse-DNS IP lookup (e.g. 123.103.104.10.in-addr.arpa)
    // or a bare name with no recognized TLD. Such URLs receive a domain of "None".
if (numParts >= 2
&& !(numParts > 2 && spliturl(numParts - 1) == "arpa" && spliturl(numParts - 2) == "in-addr")
&& (CountryCodes.contains(spliturl.last) || TopLevelDomainNames.contains(spliturl.last))) {
val strippedSplitURL = removeTopLevelDomainName(removeCountryCode(spliturl))
if (strippedSplitURL.length > 0) {
domain = strippedSplitURL.last
if (strippedSplitURL.length > 1) {
subdomain = strippedSplitURL.slice(0, strippedSplitURL.length - 1).mkString(".")
}
}
}
(domain, subdomain)
}
/**
* Strip the country code from a split URL.
* @param urlComponents Array of the entries of a URL after splitting on periods.
* @return URL components with the country code stripped.
*/
def removeCountryCode(urlComponents: Array[String]): Array[String] = {
if (CountryCodes.contains(urlComponents.last)) {
urlComponents.dropRight(1)
} else {
urlComponents
}
}
/**
* Strip the top-level domain name from a split URL.
   * @param urlComponents Array of the entries of a URL after splitting on periods.
* @return URL components with the top-level domain name stripped.
*/
def removeTopLevelDomainName(urlComponents: Array[String]): Array[String] = {
if (TopLevelDomainNames.contains(urlComponents.last)) {
urlComponents.dropRight(1)
} else {
urlComponents
}
}
}
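/* A small usage sketch (not part of incubator-spot) for the object above,
 * assuming DomainProcessor is on the classpath; the expected results follow
 * from the split/strip logic in extractDomainSubdomain. */
object DomainProcessorSketch {
  import org.apache.spot.utilities.DomainProcessor.extractDomainSubdomain
  def main(args: Array[String]): Unit = {
    // The top-level domain name is stripped and the next label becomes the domain.
    println(extractDomainSubdomain("maps.google.com"))       // (google,maps)
    // Nothing left in front of the domain: the subdomain field stays "None".
    println(extractDomainSubdomain("example.org"))           // (example,None)
    // Reverse-DNS lookups are rejected outright.
    println(extractDomainSubdomain("1.0.0.10.in-addr.arpa")) // (None,None)
  }
}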
|
bryanmontesv/incubator-spot
|
spot-ml/src/main/scala/org/apache/spot/utilities/DomainProcessor.scala
|
Scala
|
apache-2.0
| 6,927
|
package org.bfn.ninetynineprobs
object P80 {
// TODO
}
|
bfontaine/99Scala
|
src/main/scala/P80.scala
|
Scala
|
mit
| 60
|
package monocle.std
import cats.data.NonEmptyVector
import monocle.MonocleSuite
import monocle.law.discipline.{IsoTests, PrismTests}
import monocle.law.discipline.function._
import scala.annotation.nowarn
class NonEmptyVectorSpec extends MonocleSuite {
import cats.laws.discipline.arbitrary._
checkAll("nevToAndOne", IsoTests(nevToOneAnd[Int]))
checkAll("optNevToVector", IsoTests(optNevToVector[Int]))
checkAll("vectorToNev", PrismTests(vectorToNev[Int]))
checkAll("each NonEmptyVector", EachTests[NonEmptyVector[Int], Int])
checkAll("index NonEmptyVector", IndexTests[NonEmptyVector[Int], Int, Int])
checkAll("filterIndex NonEmptyVector", FilterIndexTests[NonEmptyVector[Int], Int, Int])
checkAll("reverse NonEmptyVector", ReverseTests[NonEmptyVector[Int]]): @nowarn
checkAll("cons1 NonEmptyVector", Cons1Tests[NonEmptyVector[Int], Int, Vector[Int]]): @nowarn
checkAll("snoc1 NonEmptyVector", Snoc1Tests[NonEmptyVector[Int], Vector[Int], Int]): @nowarn
}
|
julien-truffaut/Monocle
|
test/shared/src/test/scala/monocle/std/NonEmptyVectorSpec.scala
|
Scala
|
mit
| 982
|
package com.github.before.uadetector
import scala.concurrent.duration.Duration
import scala.io.Codec
import com.github.before.uadetector.Time.measure
import com.github.before.uadetector.datasource.Data
import com.github.before.uadetector.datasource.Entry
import com.github.before.uadetector.datasource.IniFormat.Comment
import scalaz.stream.io
object ShowDataStatistics extends App {
private def printDuration(d: Duration): Unit = {
println(s"INI loaded in ${d.toMillis} milliseconds")
}
val version = "20140609-03"
val resource = getClass().getClassLoader().getResource(s"uas_${version}.ini")
var data: Data = null
for (i <- 0 to 100) {
val readLines = io.linesR(resource.getFile)(Codec.UTF8)
val load = task.loadData(readLines)
data = measure(load.runLastOr(Map()).run, printDuration)
}
// val data = measure(Data.load(readLines).run, printDuration)
def print(data: Data) {
def comments(e: Entry): Boolean = e.isInstanceOf[Comment]
val filtered = (for {
entrySet <- data
} yield (entrySet._1 -> entrySet._2.filterNot(comments(_))))
val entriesSizeOfGroups = (for {
entrySet <- filtered
} yield s" ${entrySet._1} : ${entrySet._2.size}").toSeq.sorted.mkString("\n")
println(entriesSizeOfGroups)
}
print(data)
}
|
before/uadetector-scala
|
src/test/scala/com/github/before/uadetector/ShowDataStatistics.scala
|
Scala
|
apache-2.0
| 1,301
|
package chana
import java.lang.ref.SoftReference
import java.text.DateFormat
import java.text.SimpleDateFormat
import java.util.Calendar
import java.util.TimeZone
/**
* @author Caoyuan Deng
*/
package object util {
private val defaultDfPattern = "yyyy-MM-dd HH:mm:ss"
private val dfTl = new ThreadLocal[SoftReference[DateFormat]]()
def dateFormatOf(tz: TimeZone = TimeZone.getDefault, pattern: String = defaultDfPattern): DateFormat = {
val ref = dfTl.get
if (ref != null) {
val instance = ref.get
if (instance != null) {
instance.setTimeZone(tz)
instance.asInstanceOf[SimpleDateFormat].applyPattern(pattern)
return instance
}
}
val instance = new SimpleDateFormat(pattern)
instance.setTimeZone(tz)
dfTl.set(new SoftReference[DateFormat](instance))
instance
}
private val calTl = new ThreadLocal[SoftReference[Calendar]]()
def calendarOf(tz: TimeZone = TimeZone.getDefault): Calendar = {
val ref = calTl.get
if (ref != null) {
val instance = ref.get
if (instance != null) {
instance.setTimeZone(tz)
return instance
}
}
val instance = Calendar.getInstance(tz)
calTl.set(new SoftReference[Calendar](instance))
instance
}
def formatTime(long: Long, tz: TimeZone = TimeZone.getDefault, pattern: String = defaultDfPattern): String = {
val cal = calendarOf(tz)
cal.setTimeInMillis(long)
dateFormatOf(tz, pattern).format(cal.getTime)
}
}
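/* A usage sketch (not part of chana) for the helpers above, assuming the
 * chana.util package object is on the classpath. Repeated calls on one thread
 * reuse the SoftReference-cached formatter and calendar instances. */
object TimeFormatSketch {
  import java.util.TimeZone
  import chana.util.{dateFormatOf, formatTime}
  def main(args: Array[String]): Unit = {
    val utc = TimeZone.getTimeZone("UTC")
    println(formatTime(0L, utc))               // 1970-01-01 00:00:00
    println(formatTime(0L, utc, "yyyy/MM/dd")) // 1970/01/01
    // Same thread, same cached instance (unless the SoftReference was cleared).
    println(dateFormatOf(utc) eq dateFormatOf(utc)) // usually true
  }
}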
|
matthewtt/chana
|
src/main/scala/chana/util/util.scala
|
Scala
|
apache-2.0
| 1,498
|
package utils
import java.time.LocalDateTime
/**
* Created by henrik on 2017-02-24.
*/
trait Timestamp {
def timeOfCreation: LocalDateTime
}
|
hnrklssn/game-check-match
|
app/utils/Timestamp.scala
|
Scala
|
apache-2.0
| 147
|
package spark.broadcast
import com.ning.compress.lzf.{LZFInputStream, LZFOutputStream}
import java.io._
import java.net._
import java.util.UUID
import it.unimi.dsi.fastutil.io.FastBufferedInputStream
import it.unimi.dsi.fastutil.io.FastBufferedOutputStream
import spark._
import spark.storage.StorageLevel
private[spark] class HttpBroadcast[T](@transient var value_ : T, isLocal: Boolean, id: Long)
extends Broadcast[T](id) with Logging with Serializable {
def value = value_
def blockId: String = "broadcast_" + id
HttpBroadcast.synchronized {
SparkEnv.get.blockManager.putSingle(blockId, value_, StorageLevel.MEMORY_AND_DISK, false)
}
if (!isLocal) {
HttpBroadcast.write(id, value_)
}
// Called by JVM when deserializing an object
private def readObject(in: ObjectInputStream) {
in.defaultReadObject()
HttpBroadcast.synchronized {
SparkEnv.get.blockManager.getSingle(blockId) match {
case Some(x) => value_ = x.asInstanceOf[T]
case None => {
logInfo("Started reading broadcast variable " + id)
val start = System.nanoTime
value_ = HttpBroadcast.read[T](id)
SparkEnv.get.blockManager.putSingle(blockId, value_, StorageLevel.MEMORY_AND_DISK, false)
val time = (System.nanoTime - start) / 1e9
logInfo("Reading broadcast variable " + id + " took " + time + " s")
}
}
}
}
}
private[spark] class HttpBroadcastFactory extends BroadcastFactory {
def initialize(isMaster: Boolean) { HttpBroadcast.initialize(isMaster) }
def newBroadcast[T](value_ : T, isLocal: Boolean, id: Long) =
new HttpBroadcast[T](value_, isLocal, id)
def stop() { HttpBroadcast.stop() }
}
private object HttpBroadcast extends Logging {
private var initialized = false
private var broadcastDir: File = null
private var compress: Boolean = false
private var bufferSize: Int = 65536
private var serverUri: String = null
private var server: HttpServer = null
def initialize(isMaster: Boolean) {
synchronized {
if (!initialized) {
bufferSize = System.getProperty("spark.buffer.size", "65536").toInt
compress = System.getProperty("spark.broadcast.compress", "true").toBoolean
if (isMaster) {
createServer()
}
serverUri = System.getProperty("spark.httpBroadcast.uri")
initialized = true
}
}
}
def stop() {
synchronized {
if (server != null) {
server.stop()
server = null
}
initialized = false
}
}
private def createServer() {
broadcastDir = Utils.createTempDir()
server = new HttpServer(broadcastDir)
server.start()
serverUri = server.uri
System.setProperty("spark.httpBroadcast.uri", serverUri)
logInfo("Broadcast server started at " + serverUri)
}
def write(id: Long, value: Any) {
val file = new File(broadcastDir, "broadcast-" + id)
val out: OutputStream = if (compress) {
new LZFOutputStream(new FileOutputStream(file)) // Does its own buffering
} else {
new FastBufferedOutputStream(new FileOutputStream(file), bufferSize)
}
val ser = SparkEnv.get.serializer.newInstance()
val serOut = ser.serializeStream(out)
serOut.writeObject(value)
serOut.close()
}
def read[T](id: Long): T = {
val url = serverUri + "/broadcast-" + id
var in = if (compress) {
new LZFInputStream(new URL(url).openStream()) // Does its own buffering
} else {
new FastBufferedInputStream(new URL(url).openStream(), bufferSize)
}
val ser = SparkEnv.get.serializer.newInstance()
val serIn = ser.deserializeStream(in)
val obj = serIn.readObject[T]()
serIn.close()
obj
}
}
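/* A standalone sketch (not Spark code, names illustrative) of the round trip
 * HttpBroadcast.write/read perform: serialize a value to a file, optionally
 * through a compressing stream, and read it back. GZIP stands in here for the
 * LZF streams used above. */
object BroadcastFileSketch {
  import java.io._
  import java.util.zip.{GZIPInputStream, GZIPOutputStream}
  def write(file: File, value: Any, compress: Boolean): Unit = {
    val raw = new FileOutputStream(file)
    val out = new ObjectOutputStream(if (compress) new GZIPOutputStream(raw) else new BufferedOutputStream(raw))
    try out.writeObject(value) finally out.close()
  }
  def read(file: File, compress: Boolean): Any = {
    val raw = new FileInputStream(file)
    val in = new ObjectInputStream(if (compress) new GZIPInputStream(raw) else new BufferedInputStream(raw))
    try in.readObject() finally in.close()
  }
  def main(args: Array[String]): Unit = {
    val f = File.createTempFile("broadcast-", ".bin")
    write(f, Seq(1, 2, 3), compress = true)
    println(read(f, compress = true)) // List(1, 2, 3)
    f.delete()
  }
}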
|
joeywen/spark_cpp_api
|
core/src/main/scala/spark/broadcast/HttpBroadcast.scala
|
Scala
|
bsd-3-clause
| 3,741
|
package lila.lobby
import akka.actor._
import com.typesafe.config.Config
import lila.common.PimpedConfig._
final class Env(
config: Config,
db: lila.db.Env,
hub: lila.hub.Env,
onStart: String => Unit,
blocking: String => Fu[Set[String]],
playban: String => Fu[Option[lila.playban.TempBan]],
gameCache: lila.game.Cached,
poolApi: lila.pool.PoolApi,
system: ActorSystem,
scheduler: lila.common.Scheduler) {
private val settings = new {
val NetDomain = config getString "net.domain"
val SocketName = config getString "socket.name"
val SocketUidTtl = config duration "socket.uid.ttl"
val ActorName = config getString "actor.name"
val BroomPeriod = config duration "broom_period"
val ResyncIdsPeriod = config duration "resync_ids_period"
val CollectionSeek = config getString "collection.seek"
val CollectionSeekArchive = config getString "collection.seek_archive"
val SeekMaxPerPage = config getInt "seek.max_per_page"
val SeekMaxPerUser = config getInt "seek.max_per_user"
val MaxPlaying = config getInt "max_playing"
}
import settings._
private val socket = system.actorOf(Props(new Socket(
uidTtl = SocketUidTtl)), name = SocketName)
lazy val seekApi = new SeekApi(
coll = db(CollectionSeek),
archiveColl = db(CollectionSeekArchive),
blocking = blocking,
maxPerPage = SeekMaxPerPage,
maxPerUser = SeekMaxPerUser)
val lobby = Lobby.start(system, ActorName,
broomPeriod = BroomPeriod,
resyncIdsPeriod = ResyncIdsPeriod) {
new Lobby(
socket = socket,
seekApi = seekApi,
gameCache = gameCache,
maxPlaying = MaxPlaying,
blocking = blocking,
playban = playban,
poolApi = poolApi,
onStart = onStart)
}
lazy val socketHandler = new SocketHandler(
hub = hub,
lobby = lobby,
socket = socket,
poolApi = poolApi,
blocking = blocking)
private val abortListener = new AbortListener(seekApi = seekApi)
system.lilaBus.subscribe(system.actorOf(Props(new Actor {
def receive = {
case lila.game.actorApi.AbortedBy(pov) => abortListener(pov)
}
})), 'abortGame)
}
object Env {
lazy val current = "lobby" boot new Env(
config = lila.common.PlayApp loadConfig "lobby",
db = lila.db.Env.current,
hub = lila.hub.Env.current,
onStart = lila.game.Env.current.onStart,
blocking = lila.relation.Env.current.api.fetchBlocking,
playban = lila.playban.Env.current.api.currentBan _,
gameCache = lila.game.Env.current.cached,
poolApi = lila.pool.Env.current.api,
system = lila.common.PlayApp.system,
scheduler = lila.common.PlayApp.scheduler)
}
|
clarkerubber/lila
|
modules/lobby/src/main/Env.scala
|
Scala
|
agpl-3.0
| 2,708
|
package com.typesafe.slick.testkit.util
import java.io._
import java.net.{URL, URLClassLoader}
import java.sql.Driver
import java.util.Properties
import java.util.zip.GZIPInputStream
import scala.collection.mutable
import scala.slick.jdbc.{StaticQuery => Q, ResultSetInvoker}
import scala.slick.jdbc.GetResult._
import scala.slick.driver._
import scala.slick.profile.{SqlDriver, RelationalDriver, BasicDriver, Capability}
import org.junit.Assert
object TestDB {
/** Marks a driver which is specially supported by the test kit for plain SQL queries */
val plainSql = new Capability("test.plainSql")
/** Marks a driver which is specially supported by the test kit for plain SQL wide result set queries */
val plainSqlWide = new Capability("test.plainSqlWide")
val testDBDir = dbProps.getProperty("testDir", "test-dbs")
def testDBPath = {
val f = new File(testDBDir)
val s = f.getPath().replace('\\', '/')
if(f.isAbsolute) s else "./" + s
}
private lazy val dbProps = {
val p = new Properties
val f = new File(sys.props.getOrElse("slick.testkit.dbprops", "test-dbs/databases.properties"))
if(f.isFile) {
val in = new FileInputStream(f)
try { p.load(in) } finally { in.close() }
}
p
}
private lazy val testDBs = Option(dbProps.getProperty("testDBs")).map(_.split(',').map(_.trim).toSet)
def isInternalEnabled(db: String) = testDBs.map(_.contains(db)).getOrElse(true)
def isExternalEnabled(db: String) = isInternalEnabled(db) && "true" == dbProps.getProperty(db+".enabled")
def get(db: String, o: String) = Option(dbProps.getProperty(db+"."+o))
def getMulti(db: String, key: String): Seq[String] = get(db, key) match {
case Some(s) => Seq(s)
case None =>
Iterator.from(1).map(i => get(db, key+"."+i)).takeWhile(_.isDefined).toSeq.flatten
}
/** Copy a file, expanding it if the source name ends with .gz */
def copy(src: File, dest: File) {
dest.createNewFile()
val out = new FileOutputStream(dest)
try {
var in: InputStream = new FileInputStream(src)
try {
if(src.getName.endsWith(".gz")) in = new GZIPInputStream(in)
val buf = new Array[Byte](4096)
var cont = true
while(cont) {
val len = in.read(buf)
if(len < 0) cont = false
else out.write(buf, 0, len)
}
} finally in.close()
} finally out.close()
}
/** Delete files in the testDB directory */
def deleteDBFiles(prefix: String) {
assert(!prefix.isEmpty, "prefix must not be empty")
def deleteRec(f: File): Boolean = {
if(f.isDirectory()) f.listFiles.forall(deleteRec _) && f.delete()
else f.delete()
}
val dir = new File(TestDB.testDBDir)
if(!dir.isDirectory) throw new IOException("Directory "+TestDB.testDBDir+" not found")
for(f <- dir.listFiles if f.getName startsWith prefix) {
val p = TestDB.testDBDir+"/"+f.getName
if(deleteRec(f)) println("[Deleted database file "+p+"]")
else throw new IOException("Couldn't delete database file "+p)
}
}
def mapToProps(m: Map[String, String]) = {
val p = new Properties
if(m ne null)
for((k,v) <- m) if(k.ne(null) && v.ne(null)) p.setProperty(k, v)
p
}
}
/**
* Describes a database against which you can run TestKit tests. It includes
* features such as reading the configuration file, setting up a DB connection,
* removing DB files left over by a test run, etc.
*/
trait TestDB {
type Driver <: BasicDriver
/** The test database name */
val confName: String
/** Check if this test database is enabled */
def isEnabled = TestDB.isInternalEnabled(confName)
/** This method is called to clean up before running all tests. */
def cleanUpBefore() {}
/** This method is called to clean up after running all tests. It
* defaults to cleanUpBefore(). */
def cleanUpAfter() = cleanUpBefore()
/** The Slick driver for the database */
val driver: Driver
  /** The Slick profile for the database */
lazy val profile: driver.profile.type = driver.asInstanceOf[driver.profile.type]
/** Indicates whether the database persists after closing the last connection */
def isPersistent = true
/** This method is called between individual test methods to remove all
* database artifacts that were created by the test. */
def dropUserArtifacts(implicit session: profile.Backend#Session): Unit
/** Create the Database object for this test database configuration */
def createDB(): profile.Backend#Database
/** Indicates whether the database's sessions have shared state. When a
* database is shared but not persistent, Testkit keeps a session open
* to make it persistent. */
def isShared = true
/** The capabilities of the Slick driver, possibly modified for this
* test configuration. */
def capabilities: Set[Capability] = profile.capabilities
}
trait RelationalTestDB extends TestDB {
type Driver <: RelationalDriver
def assertTablesExist(tables: String*)(implicit session: profile.Backend#Session): Unit
def assertNotTablesExist(tables: String*)(implicit session: profile.Backend#Session): Unit
}
trait SqlTestDB extends RelationalTestDB { type Driver <: SqlDriver }
abstract class JdbcTestDB(val confName: String) extends SqlTestDB {
type Driver <: JdbcDriver
lazy val database = profile.backend.Database
val url: String
override def toString = url
val jdbcDriver: String
def createDB(): profile.Backend#Database = database.forURL(url, driver = jdbcDriver)
def getLocalTables(implicit session: profile.Backend#Session) = {
val tables = ResultSetInvoker[(String,String,String, String)](_.conn.getMetaData().getTables("", "", null, null))
tables.list.filter(_._4.toUpperCase == "TABLE").map(_._3).sorted
}
def getLocalSequences(implicit session: profile.Backend#Session) = {
val tables = ResultSetInvoker[(String,String,String, String)](_.conn.getMetaData().getTables("", "", null, null))
tables.list.filter(_._4.toUpperCase == "SEQUENCE").map(_._3).sorted
}
def dropUserArtifacts(implicit session: profile.Backend#Session) = {
for(t <- getLocalTables)
(Q.u+"drop table if exists "+driver.quoteIdentifier(t)+" cascade").execute
for(t <- getLocalSequences)
(Q.u+"drop sequence if exists "+driver.quoteIdentifier(t)+" cascade").execute
}
def assertTablesExist(tables: String*)(implicit session: profile.Backend#Session) {
for(t <- tables) {
try ((Q[Int]+"select 1 from "+driver.quoteIdentifier(t)+" where 1 < 0").list) catch { case _: Exception =>
Assert.fail("Table "+t+" should exist")
}
}
}
def assertNotTablesExist(tables: String*)(implicit session: profile.Backend#Session) {
for(t <- tables) {
try {
(Q[Int]+"select 1 from "+driver.quoteIdentifier(t)+" where 1 < 0").list
Assert.fail("Table "+t+" should not exist")
} catch { case _: Exception => }
}
}
def canGetLocalTables = true
}
abstract class ExternalJdbcTestDB(confName: String) extends JdbcTestDB(confName) {
val jdbcDriver = TestDB.get(confName, "driver").orNull
val urlTemplate = TestDB.get(confName, "url").getOrElse("")
val dbPath = TestDB.get(confName, "dir").getOrElse(new File(TestDB.testDBDir).getAbsolutePath)
val dbName = TestDB.get(confName, "testDB").getOrElse("").replace("[DBPATH]", dbPath)
val password = TestDB.get(confName, "password").orNull
val user = TestDB.get(confName, "user").orNull
val adminUser = TestDB.get(confName, "adminUser").getOrElse(user)
val adminPassword = TestDB.get(confName, "adminPassword").getOrElse(password)
lazy val url = replaceVars(urlTemplate)
lazy val adminDB = TestDB.get(confName, "adminDB").getOrElse("").replace("[DBPATH]", dbPath)
lazy val adminDBURL = replaceVars(urlTemplate.replace("[DB]", adminDB))
lazy val create = TestDB.getMulti(confName, "create").map(replaceVars)
lazy val postCreate = TestDB.getMulti(confName, "postCreate").map(replaceVars)
lazy val drop = TestDB.getMulti(confName, "drop").map(replaceVars)
def replaceVars(s: String): String =
s.replace("[DB]", dbName).replace("[DBPATH]", dbPath).
replace("[USER]", user).replace("[PASSWORD]", password)
override def isEnabled = TestDB.isExternalEnabled(confName)
def databaseFor(url: String, user: String, password: String, prop: Map[String, String] = null) = loadCustomDriver() match {
case Some(dr) => database.forDriver(dr, url, user = user, password = password, prop = TestDB.mapToProps(prop))
case None => database.forURL(url, user = user, password = password, driver = jdbcDriver, prop = TestDB.mapToProps(prop))
}
override def createDB() = databaseFor(url, user, password)
override def cleanUpBefore() {
if(!drop.isEmpty || !create.isEmpty) {
println("[Creating test database "+this+"]")
databaseFor(adminDBURL, adminUser, adminPassword) withSession { implicit session =>
for(s <- drop) (Q.u + s).execute
for(s <- create) (Q.u + s).execute
}
}
if(!postCreate.isEmpty) {
createDB() withSession { implicit session =>
for(s <- postCreate) (Q.u + s).execute
}
}
}
override def cleanUpAfter() {
if(!drop.isEmpty) {
println("[Dropping test database "+this+"]")
databaseFor(adminDBURL, adminUser, adminPassword) withSession { implicit session =>
for(s <- drop) (Q.u + s).execute
}
}
}
def loadCustomDriver() = TestDB.get(confName, "driverJar").map { jar =>
ExternalTestDB.getCustomDriver(jar, jdbcDriver)
}
}
object ExternalTestDB {
// A cache for custom drivers to avoid excessive reloading and memory leaks
private[this] val driverCache = new mutable.HashMap[(String, String), Driver]()
def getCustomDriver(url: String, driverClass: String): Driver = synchronized {
driverCache.getOrElseUpdate((url, driverClass),
new URLClassLoader(Array(new URL(url)), getClass.getClassLoader).loadClass(driverClass).newInstance.asInstanceOf[Driver]
)
}
}
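/* A standalone sketch (not part of Slick, property names illustrative) of the
 * numbered-key convention TestDB.getMulti above reads from databases.properties:
 * either a single "db.key" entry or "db.key.1", "db.key.2", ... in order. */
object NumberedKeysSketch {
  import java.util.Properties
  def getMulti(p: Properties, db: String, key: String): Seq[String] =
    Option(p.getProperty(s"$db.$key")) match {
      case Some(s) => Seq(s)
      case None =>
        Iterator.from(1).map(i => Option(p.getProperty(s"$db.$key.$i"))).takeWhile(_.isDefined).toSeq.flatten
    }
  def main(args: Array[String]): Unit = {
    val p = new Properties
    p.setProperty("postgres.create.1", "CREATE DATABASE [DB]")
    p.setProperty("postgres.create.2", "GRANT ALL ON DATABASE [DB] TO [USER]")
    println(getMulti(p, "postgres", "create")) // both statements, in order
  }
}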
|
dvinokurov/slick
|
slick-testkit/src/main/scala/com/typesafe/slick/testkit/util/TestDB.scala
|
Scala
|
bsd-2-clause
| 10,020
|
import java.io._
import scala.io._
import scala.util._
/** The SuccessChecker checks the success of students solving their
* generated problems. Running this application will produce output
* that looks like this:
* {{{
* X 12345678 MIN 443868 238719 238719 0 0
* X 87654321 FAC 8 40320 0 0
* X 19283746 IDX 891006 5 6 0 0
* G 11111111 MAX 187398 238719 238719 0 238719
* X 22222222 REV 788759 957887 0 0
* X 33333333 MAX 560634 238719 560634 0 0
* X 44444444 SUM 489994 238719 728713 0 0
* X 55555555 REV 820812 218028 0 0
* X 66666666 MUL 86 42 3612 0 0
* X 77777777 MAX 58607 238719 238719 0 0
* X 88888888 IDX 247635 1 4 0 0
* X 99999999 MUL 76 42 3192 0 0
* }}}
*
* The format is:
*
* STATUS SPIREID PROBLEM... FAILURES SOLUTION
*
* STATUS : Will be one of X (not successful) or G (success)
* SPIREID : Student's 8-digit spire ID
* PROBLEM...: The original problem to solve
* FAILURES : The number of failed attempts
* SOLUTION : The submitted solution from the student
*/
object SuccessChecker {
/** readStudents reads the `file` of 8-digit student IDs and returns
* the list of student IDs as an Option.
*/
def readStudents(file: String): Option[List[String]] = {
try {
val ids =
Source.fromFile(file)
.getLines
Some(ids.toList)
}
catch {
case _: Throwable => None
}
}
/** Read a single line from the file given the filename.
*/
def getLine(file: String): Option[String] = {
val f = new File(file)
if (f.exists)
Some(Source.fromFile(file).getLines().next())
else
Some("0")
}
/** Main function for running the checker. Easy enough to do from sbt:
*
* > run-main SuccessChecker students.csv
*
*/
def main(args: Array[String]) = {
if (args.length != 1) {
println("usage: scala SuccessChecker file")
sys.exit(1)
}
// Read the file containing the student IDs:
readStudents(args(0)) match {
case Some(ids) =>
// Iterate over the IDs reading the first line in each of the
// files associated with that student ID:
for (id <- ids) {
val recv = getLine(s"assets/$id.recv")
val send = getLine(s"assets/$id.send")
val fail = getLine(s"assets/$id.fail")
val succ = getLine(s"assets/$id.succ")
(send, recv, fail, succ) match {
case (Some(a), Some(b), Some(c), Some(d)) =>
// d will be 0 if the file did not exist (problem not solved)
if (d == "0")
print("X ")
else
print("G ")
println(s"$id $a $b $c $d")
case _ =>
println(s"Problem for $id.")
}
}
case None => println(s"could not read ids from ${args(0)}")
}
}
}
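/* A standalone sketch (not part of the course code, names illustrative) of how
 * a result line in the format documented above can be split back into fields,
 * given that PROBLEM... spans a variable number of tokens. */
object ResultLineSketch {
  final case class Result(status: String, spireId: String, problem: Seq[String],
                          failures: String, solution: String)
  def parse(line: String): Result = {
    val t = line.trim.split("\\s+").toSeq
    // The first two and last two tokens are fixed; the middle is the problem.
    Result(t.head, t(1), t.slice(2, t.length - 2), t(t.length - 2), t.last)
  }
  def main(args: Array[String]): Unit = {
    println(parse("G 11111111 MAX 187398 238719 238719 0 238719"))
    // Result(G,11111111,List(MAX, 187398, 238719, 238719),0,238719)
  }
}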
|
umass-cs-220/week-11-parallel
|
code/server/src/main/scala/SuccessChecker.scala
|
Scala
|
apache-2.0
| 2,897
|
package ch.uzh.ifi.pdeboer.pplib.process.recombination
import ch.uzh.ifi.pdeboer.pplib.process.entities._
import ch.uzh.ifi.pdeboer.pplib.util.{ProcessPrinter, TestUtils}
import org.junit.{Assert, Test}
import scala.reflect.io.File
/**
* Created by pdeboer on 04/05/15.
*/
class TextShorteningRecombinationTest {
@Test
def generateTextShorteningRecombinations: Unit = {
TestUtils.ensureThereIsAtLeast1Portal()
val toStore = <Data>
{candidates.map(c => {
new ProcessPrinter(c, Some(Nil)).lines
})}
</Data>
File("test.xml").writeAll(toStore + "")
Assert.assertEquals("We should have 31 recombinations", 31, candidates.size)
}
lazy val candidates = {
val r = new TypeRecombinator(RecombinationHints.create(TypeRecombinatorTest.DEFAULT_TESTING_HINTS))
r.materialize[CreateProcess[_ <: List[Patch], _ <: List[Patch]]]
}
}
|
uzh/PPLib
|
src/test/scala/ch/uzh/ifi/pdeboer/pplib/process/recombination/TextShorteningRecombinationTest.scala
|
Scala
|
mit
| 854
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util
import java.util.Properties
import scala.collection.JavaConverters._
import scala.collection.Map
import org.json4s.JsonAST.{JArray, JInt, JString, JValue}
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import org.scalatest.Assertions
import org.scalatest.exceptions.TestFailedException
import org.apache.spark._
import org.apache.spark.executor._
import org.apache.spark.metrics.ExecutorMetricType
import org.apache.spark.rdd.RDDOperationScope
import org.apache.spark.resource._
import org.apache.spark.scheduler._
import org.apache.spark.scheduler.cluster.ExecutorInfo
import org.apache.spark.shuffle.MetadataFetchFailedException
import org.apache.spark.storage._
class JsonProtocolSuite extends SparkFunSuite {
import JsonProtocolSuite._
test("SparkListenerEvent") {
val stageSubmitted =
SparkListenerStageSubmitted(makeStageInfo(100, 200, 300, 400L, 500L), properties)
val stageCompleted = SparkListenerStageCompleted(makeStageInfo(101, 201, 301, 401L, 501L))
val taskStart = SparkListenerTaskStart(111, 0, makeTaskInfo(222L, 333, 1, 444L, false))
val taskGettingResult =
SparkListenerTaskGettingResult(makeTaskInfo(1000L, 2000, 5, 3000L, true))
val taskEnd = SparkListenerTaskEnd(1, 0, "ShuffleMapTask", Success,
makeTaskInfo(123L, 234, 67, 345L, false),
new ExecutorMetrics(Array(543L, 123456L, 12345L, 1234L, 123L, 12L, 432L,
321L, 654L, 765L, 256912L, 123456L, 123456L, 61728L, 30364L, 15182L)),
makeTaskMetrics(300L, 400L, 500L, 600L, 700, 800, hasHadoopInput = false, hasOutput = false))
val taskEndWithHadoopInput = SparkListenerTaskEnd(1, 0, "ShuffleMapTask", Success,
makeTaskInfo(123L, 234, 67, 345L, false),
new ExecutorMetrics(Array(543L, 123456L, 12345L, 1234L, 123L, 12L, 432L,
321L, 654L, 765L, 256912L, 123456L, 123456L, 61728L, 30364L, 15182L)),
makeTaskMetrics(300L, 400L, 500L, 600L, 700, 800, hasHadoopInput = true, hasOutput = false))
val taskEndWithOutput = SparkListenerTaskEnd(1, 0, "ResultTask", Success,
makeTaskInfo(123L, 234, 67, 345L, false),
new ExecutorMetrics(Array(543L, 123456L, 12345L, 1234L, 123L, 12L, 432L,
321L, 654L, 765L, 256912L, 123456L, 123456L, 61728L, 30364L, 15182L)),
makeTaskMetrics(300L, 400L, 500L, 600L, 700, 800, hasHadoopInput = true, hasOutput = true))
val jobStart = {
val stageIds = Seq[Int](1, 2, 3, 4)
val stageInfos = stageIds.map(x =>
makeStageInfo(x, x * 200, x * 300, x * 400L, x * 500L))
SparkListenerJobStart(10, jobSubmissionTime, stageInfos, properties)
}
val jobEnd = SparkListenerJobEnd(20, jobCompletionTime, JobSucceeded)
val environmentUpdate = SparkListenerEnvironmentUpdate(Map[String, Seq[(String, String)]](
"JVM Information" -> Seq(("GC speed", "9999 objects/s"), ("Java home", "Land of coffee")),
"Spark Properties" -> Seq(("Job throughput", "80000 jobs/s, regardless of job type")),
"Hadoop Properties" -> Seq(("hadoop.tmp.dir", "/usr/local/hadoop/tmp")),
"System Properties" -> Seq(("Username", "guest"), ("Password", "guest")),
"Classpath Entries" -> Seq(("Super library", "/tmp/super_library"))
))
val blockManagerAdded = SparkListenerBlockManagerAdded(1L,
BlockManagerId("Stars", "In your multitude...", 300), 500)
val blockManagerRemoved = SparkListenerBlockManagerRemoved(2L,
BlockManagerId("Scarce", "to be counted...", 100))
val unpersistRdd = SparkListenerUnpersistRDD(12345)
val logUrlMap = Map("stderr" -> "mystderr", "stdout" -> "mystdout").toMap
val attributes = Map("ContainerId" -> "ct1", "User" -> "spark").toMap
val resources = Map(ResourceUtils.GPU ->
new ResourceInformation(ResourceUtils.GPU, Array("0", "1")))
val applicationStart = SparkListenerApplicationStart("The winner of all", Some("appId"),
42L, "Garfield", Some("appAttempt"))
val applicationStartWithLogs = SparkListenerApplicationStart("The winner of all", Some("appId"),
42L, "Garfield", Some("appAttempt"), Some(logUrlMap))
val applicationEnd = SparkListenerApplicationEnd(42L)
val executorAdded = SparkListenerExecutorAdded(executorAddedTime, "exec1",
new ExecutorInfo("Hostee.awesome.com", 11, logUrlMap, attributes, resources.toMap, 4))
val executorRemoved = SparkListenerExecutorRemoved(executorRemovedTime, "exec2", "test reason")
val executorBlacklisted = SparkListenerExecutorBlacklisted(executorBlacklistedTime, "exec1", 22)
val executorUnblacklisted =
SparkListenerExecutorUnblacklisted(executorUnblacklistedTime, "exec1")
val nodeBlacklisted = SparkListenerNodeBlacklisted(nodeBlacklistedTime, "node1", 33)
val nodeUnblacklisted =
SparkListenerNodeUnblacklisted(nodeUnblacklistedTime, "node1")
val executorMetricsUpdate = {
// Use custom accum ID for determinism
val accumUpdates =
makeTaskMetrics(300L, 400L, 500L, 600L, 700, 800, hasHadoopInput = true, hasOutput = true)
.accumulators().map(AccumulatorSuite.makeInfo)
.zipWithIndex.map { case (a, i) => a.copy(id = i) }
val executorUpdates = new ExecutorMetrics(
Array(543L, 123456L, 12345L, 1234L, 123L, 12L, 432L,
321L, 654L, 765L, 256912L, 123456L, 123456L, 61728L, 30364L, 15182L, 10L, 90L, 2L, 20L))
SparkListenerExecutorMetricsUpdate("exec3", Seq((1L, 2, 3, accumUpdates)),
Map((0, 0) -> executorUpdates))
}
val blockUpdated =
SparkListenerBlockUpdated(BlockUpdatedInfo(BlockManagerId("Stars",
"In your multitude...", 300), RDDBlockId(0, 0), StorageLevel.MEMORY_ONLY, 100L, 0L))
val stageExecutorMetrics =
SparkListenerStageExecutorMetrics("1", 2, 3,
new ExecutorMetrics(Array(543L, 123456L, 12345L, 1234L, 123L, 12L, 432L,
321L, 654L, 765L, 256912L, 123456L, 123456L, 61728L, 30364L, 15182L, 10L, 90L, 2L, 20L)))
val rprofBuilder = new ResourceProfileBuilder()
val taskReq = new TaskResourceRequests().cpus(1).resource("gpu", 1)
val execReq =
new ExecutorResourceRequests().cores(2).resource("gpu", 2, "myscript")
rprofBuilder.require(taskReq).require(execReq)
val resourceProfile = rprofBuilder.build
resourceProfile.setResourceProfileId(21)
val resourceProfileAdded = SparkListenerResourceProfileAdded(resourceProfile)
testEvent(stageSubmitted, stageSubmittedJsonString)
testEvent(stageCompleted, stageCompletedJsonString)
testEvent(taskStart, taskStartJsonString)
testEvent(taskGettingResult, taskGettingResultJsonString)
testEvent(taskEnd, taskEndJsonString)
testEvent(taskEndWithHadoopInput, taskEndWithHadoopInputJsonString)
testEvent(taskEndWithOutput, taskEndWithOutputJsonString)
testEvent(jobStart, jobStartJsonString)
testEvent(jobEnd, jobEndJsonString)
testEvent(environmentUpdate, environmentUpdateJsonString)
testEvent(blockManagerAdded, blockManagerAddedJsonString)
testEvent(blockManagerRemoved, blockManagerRemovedJsonString)
testEvent(unpersistRdd, unpersistRDDJsonString)
testEvent(applicationStart, applicationStartJsonString)
testEvent(applicationStartWithLogs, applicationStartJsonWithLogUrlsString)
testEvent(applicationEnd, applicationEndJsonString)
testEvent(executorAdded, executorAddedJsonString)
testEvent(executorRemoved, executorRemovedJsonString)
testEvent(executorBlacklisted, executorBlacklistedJsonString)
testEvent(executorUnblacklisted, executorUnblacklistedJsonString)
testEvent(nodeBlacklisted, nodeBlacklistedJsonString)
testEvent(nodeUnblacklisted, nodeUnblacklistedJsonString)
testEvent(executorMetricsUpdate, executorMetricsUpdateJsonString)
testEvent(blockUpdated, blockUpdatedJsonString)
testEvent(stageExecutorMetrics, stageExecutorMetricsJsonString)
testEvent(resourceProfileAdded, resourceProfileJsonString)
}
test("Dependent Classes") {
val logUrlMap = Map("stderr" -> "mystderr", "stdout" -> "mystdout").toMap
val attributes = Map("ContainerId" -> "ct1", "User" -> "spark").toMap
testRDDInfo(makeRddInfo(2, 3, 4, 5L, 6L))
testStageInfo(makeStageInfo(10, 20, 30, 40L, 50L))
testTaskInfo(makeTaskInfo(999L, 888, 55, 777L, false))
testTaskMetrics(makeTaskMetrics(
33333L, 44444L, 55555L, 66666L, 7, 8, hasHadoopInput = false, hasOutput = false))
testBlockManagerId(BlockManagerId("Hong", "Kong", 500))
testExecutorInfo(new ExecutorInfo("host", 43, logUrlMap, attributes))
// StorageLevel
testStorageLevel(StorageLevel.NONE)
testStorageLevel(StorageLevel.DISK_ONLY)
testStorageLevel(StorageLevel.DISK_ONLY_2)
testStorageLevel(StorageLevel.DISK_ONLY_3)
testStorageLevel(StorageLevel.MEMORY_ONLY)
testStorageLevel(StorageLevel.MEMORY_ONLY_2)
testStorageLevel(StorageLevel.MEMORY_ONLY_SER)
testStorageLevel(StorageLevel.MEMORY_ONLY_SER_2)
testStorageLevel(StorageLevel.MEMORY_AND_DISK)
testStorageLevel(StorageLevel.MEMORY_AND_DISK_2)
testStorageLevel(StorageLevel.MEMORY_AND_DISK_SER)
testStorageLevel(StorageLevel.MEMORY_AND_DISK_SER_2)
// JobResult
val exception = new Exception("Out of Memory! Please restock film.")
exception.setStackTrace(stackTrace)
val jobFailed = JobFailed(exception)
testJobResult(JobSucceeded)
testJobResult(jobFailed)
// TaskEndReason
val fetchFailed = FetchFailed(BlockManagerId("With or", "without you", 15), 17, 16L, 18, 19,
"Some exception")
val fetchMetadataFailed = new MetadataFetchFailedException(17,
19, "metadata Fetch failed exception").toTaskFailedReason
val exceptionFailure = new ExceptionFailure(exception, Seq.empty[AccumulableInfo])
testTaskEndReason(Success)
testTaskEndReason(Resubmitted)
testTaskEndReason(fetchFailed)
testTaskEndReason(fetchMetadataFailed)
testTaskEndReason(exceptionFailure)
testTaskEndReason(TaskResultLost)
testTaskEndReason(TaskKilled("test"))
testTaskEndReason(TaskCommitDenied(2, 3, 4))
testTaskEndReason(ExecutorLostFailure("100", true, Some("Induced failure")))
testTaskEndReason(UnknownReason)
// BlockId
testBlockId(RDDBlockId(1, 2))
testBlockId(ShuffleBlockId(1, 2, 3))
testBlockId(BroadcastBlockId(1L, "insert_words_of_wisdom_here"))
testBlockId(TaskResultBlockId(1L))
testBlockId(StreamBlockId(1, 2L))
}
/* ============================== *
| Backward compatibility tests |
* ============================== */
test("ExceptionFailure backward compatibility: full stack trace") {
val exceptionFailure = ExceptionFailure("To be", "or not to be", stackTrace, null, None)
val oldEvent = JsonProtocol.taskEndReasonToJson(exceptionFailure)
.removeField({ _._1 == "Full Stack Trace" })
assertEquals(exceptionFailure, JsonProtocol.taskEndReasonFromJson(oldEvent))
}
test("StageInfo backward compatibility (details, accumulables)") {
val info = makeStageInfo(1, 2, 3, 4L, 5L)
val newJson = JsonProtocol.stageInfoToJson(info)
// Fields added after 1.0.0.
assert(info.details.nonEmpty)
assert(info.accumulables.nonEmpty)
val oldJson = newJson
.removeField { case (field, _) => field == "Details" }
.removeField { case (field, _) => field == "Accumulables" }
val newInfo = JsonProtocol.stageInfoFromJson(oldJson)
assert(info.name === newInfo.name)
assert("" === newInfo.details)
assert(0 === newInfo.accumulables.size)
}
test("StageInfo resourceProfileId") {
val info = makeStageInfo(1, 2, 3, 4L, 5L, 5)
val json = JsonProtocol.stageInfoToJson(info)
// Fields added after 1.0.0.
assert(info.details.nonEmpty)
assert(info.resourceProfileId === 5)
val newInfo = JsonProtocol.stageInfoFromJson(json)
assert(info.name === newInfo.name)
assert(5 === newInfo.resourceProfileId)
}
test("InputMetrics backward compatibility") {
// InputMetrics were added after 1.0.1.
val metrics = makeTaskMetrics(1L, 2L, 3L, 4L, 5, 6, hasHadoopInput = true, hasOutput = false)
val newJson = JsonProtocol.taskMetricsToJson(metrics)
val oldJson = newJson.removeField { case (field, _) => field == "Input Metrics" }
val newMetrics = JsonProtocol.taskMetricsFromJson(oldJson)
}
test("Input/Output records backwards compatibility") {
// records read were added after 1.2
val metrics = makeTaskMetrics(1L, 2L, 3L, 4L, 5, 6,
hasHadoopInput = true, hasOutput = true, hasRecords = false)
val newJson = JsonProtocol.taskMetricsToJson(metrics)
val oldJson = newJson.removeField { case (field, _) => field == "Records Read" }
.removeField { case (field, _) => field == "Records Written" }
val newMetrics = JsonProtocol.taskMetricsFromJson(oldJson)
assert(newMetrics.inputMetrics.recordsRead == 0)
assert(newMetrics.outputMetrics.recordsWritten == 0)
}
test("Shuffle Read/Write records backwards compatibility") {
// records read were added after 1.2
val metrics = makeTaskMetrics(1L, 2L, 3L, 4L, 5, 6,
hasHadoopInput = false, hasOutput = false, hasRecords = false)
val newJson = JsonProtocol.taskMetricsToJson(metrics)
val oldJson = newJson.removeField { case (field, _) => field == "Total Records Read" }
.removeField { case (field, _) => field == "Shuffle Records Written" }
val newMetrics = JsonProtocol.taskMetricsFromJson(oldJson)
assert(newMetrics.shuffleReadMetrics.recordsRead == 0)
assert(newMetrics.shuffleWriteMetrics.recordsWritten == 0)
}
test("OutputMetrics backward compatibility") {
// OutputMetrics were added after 1.1
val metrics = makeTaskMetrics(1L, 2L, 3L, 4L, 5, 6, hasHadoopInput = false, hasOutput = true)
val newJson = JsonProtocol.taskMetricsToJson(metrics)
val oldJson = newJson.removeField { case (field, _) => field == "Output Metrics" }
val newMetrics = JsonProtocol.taskMetricsFromJson(oldJson)
}
test("BlockManager events backward compatibility") {
// SparkListenerBlockManagerAdded/Removed in Spark 1.0.0 do not have a "time" property.
val blockManagerAdded = SparkListenerBlockManagerAdded(1L,
BlockManagerId("Stars", "In your multitude...", 300), 500)
val blockManagerRemoved = SparkListenerBlockManagerRemoved(2L,
BlockManagerId("Scarce", "to be counted...", 100))
val oldBmAdded = JsonProtocol.blockManagerAddedToJson(blockManagerAdded)
.removeField({ _._1 == "Timestamp" })
val deserializedBmAdded = JsonProtocol.blockManagerAddedFromJson(oldBmAdded)
assert(SparkListenerBlockManagerAdded(-1L, blockManagerAdded.blockManagerId,
blockManagerAdded.maxMem) === deserializedBmAdded)
val oldBmRemoved = JsonProtocol.blockManagerRemovedToJson(blockManagerRemoved)
.removeField({ _._1 == "Timestamp" })
val deserializedBmRemoved = JsonProtocol.blockManagerRemovedFromJson(oldBmRemoved)
assert(SparkListenerBlockManagerRemoved(-1L, blockManagerRemoved.blockManagerId) ===
deserializedBmRemoved)
}
test("FetchFailed backwards compatibility") {
// FetchFailed in Spark 1.1.0 does not have a "Message" property.
val fetchFailed = FetchFailed(BlockManagerId("With or", "without you", 15), 17, 16L, 18, 19,
"ignored")
val oldEvent = JsonProtocol.taskEndReasonToJson(fetchFailed)
.removeField({ _._1 == "Message" })
val expectedFetchFailed = FetchFailed(BlockManagerId("With or", "without you", 15), 17, 16L,
18, 19, "Unknown reason")
assert(expectedFetchFailed === JsonProtocol.taskEndReasonFromJson(oldEvent))
}
test("SPARK-32124: FetchFailed Map Index backwards compatibility") {
// FetchFailed in Spark 2.4.0 does not have "Map Index" property.
val fetchFailed = FetchFailed(BlockManagerId("With or", "without you", 15), 17, 16L, 18, 19,
"ignored")
val oldEvent = JsonProtocol.taskEndReasonToJson(fetchFailed)
.removeField({ _._1 == "Map Index" })
val expectedFetchFailed = FetchFailed(BlockManagerId("With or", "without you", 15), 17, 16L,
Int.MinValue, 19, "ignored")
assert(expectedFetchFailed === JsonProtocol.taskEndReasonFromJson(oldEvent))
}
test("ShuffleReadMetrics: Local bytes read backwards compatibility") {
// Metrics about local shuffle bytes read were added in 1.3.1.
val metrics = makeTaskMetrics(1L, 2L, 3L, 4L, 5, 6,
hasHadoopInput = false, hasOutput = false, hasRecords = false)
val newJson = JsonProtocol.taskMetricsToJson(metrics)
val oldJson = newJson.removeField { case (field, _) => field == "Local Bytes Read" }
val newMetrics = JsonProtocol.taskMetricsFromJson(oldJson)
assert(newMetrics.shuffleReadMetrics.localBytesRead == 0)
}
test("SparkListenerApplicationStart backwards compatibility") {
    // SparkListenerApplicationStart in Spark 1.0.0 does not have an "appId" property.
    // SparkListenerApplicationStart pre-Spark 1.4 does not have "appAttemptId".
    // SparkListenerApplicationStart pre-Spark 1.5 does not have "driverLogs".
val applicationStart = SparkListenerApplicationStart("test", None, 1L, "user", None, None)
val oldEvent = JsonProtocol.applicationStartToJson(applicationStart)
.removeField({ _._1 == "App ID" })
.removeField({ _._1 == "App Attempt ID" })
.removeField({ _._1 == "Driver Logs"})
assert(applicationStart === JsonProtocol.applicationStartFromJson(oldEvent))
}
test("ExecutorLostFailure backward compatibility") {
// ExecutorLostFailure in Spark 1.1.0 does not have an "Executor ID" property.
val executorLostFailure = ExecutorLostFailure("100", true, Some("Induced failure"))
val oldEvent = JsonProtocol.taskEndReasonToJson(executorLostFailure)
.removeField({ _._1 == "Executor ID" })
val expectedExecutorLostFailure = ExecutorLostFailure("Unknown", true, Some("Induced failure"))
assert(expectedExecutorLostFailure === JsonProtocol.taskEndReasonFromJson(oldEvent))
}
test("SparkListenerJobStart backward compatibility") {
// Prior to Spark 1.2.0, SparkListenerJobStart did not have a "Stage Infos" property.
val stageIds = Seq[Int](1, 2, 3, 4)
val stageInfos = stageIds.map(x => makeStageInfo(x, x * 200, x * 300, x * 400L, x * 500L))
val dummyStageInfos =
stageIds.map(id => new StageInfo(id, 0, "unknown", 0, Seq.empty, Seq.empty, "unknown",
resourceProfileId = ResourceProfile.DEFAULT_RESOURCE_PROFILE_ID))
val jobStart = SparkListenerJobStart(10, jobSubmissionTime, stageInfos, properties)
val oldEvent = JsonProtocol.jobStartToJson(jobStart).removeField({_._1 == "Stage Infos"})
val expectedJobStart =
SparkListenerJobStart(10, jobSubmissionTime, dummyStageInfos, properties)
assertEquals(expectedJobStart, JsonProtocol.jobStartFromJson(oldEvent))
}
test("SparkListenerJobStart and SparkListenerJobEnd backward compatibility") {
// Prior to Spark 1.3.0, SparkListenerJobStart did not have a "Submission Time" property.
// Also, SparkListenerJobEnd did not have a "Completion Time" property.
val stageIds = Seq[Int](1, 2, 3, 4)
val stageInfos = stageIds.map(x => makeStageInfo(x * 10, x * 20, x * 30, x * 40L, x * 50L))
val jobStart = SparkListenerJobStart(11, jobSubmissionTime, stageInfos, properties)
val oldStartEvent = JsonProtocol.jobStartToJson(jobStart)
.removeField({ _._1 == "Submission Time"})
val expectedJobStart = SparkListenerJobStart(11, -1, stageInfos, properties)
assertEquals(expectedJobStart, JsonProtocol.jobStartFromJson(oldStartEvent))
val jobEnd = SparkListenerJobEnd(11, jobCompletionTime, JobSucceeded)
val oldEndEvent = JsonProtocol.jobEndToJson(jobEnd)
.removeField({ _._1 == "Completion Time"})
val expectedJobEnd = SparkListenerJobEnd(11, -1, JobSucceeded)
assertEquals(expectedJobEnd, JsonProtocol.jobEndFromJson(oldEndEvent))
}
test("RDDInfo backward compatibility (scope, parent IDs, callsite)") {
// "Scope" and "Parent IDs" were introduced in Spark 1.4.0
// "Callsite" was introduced in Spark 1.6.0
val rddInfo = new RDDInfo(1, "one", 100, StorageLevel.NONE, false, Seq(1, 6, 8),
"callsite", Some(new RDDOperationScope("fable")))
val oldRddInfoJson = JsonProtocol.rddInfoToJson(rddInfo)
.removeField({ _._1 == "Parent IDs"})
.removeField({ _._1 == "Scope"})
.removeField({ _._1 == "Callsite"})
val expectedRddInfo = new RDDInfo(
1, "one", 100, StorageLevel.NONE, false, Seq.empty, "", scope = None)
assertEquals(expectedRddInfo, JsonProtocol.rddInfoFromJson(oldRddInfoJson))
}
test("StageInfo backward compatibility (parent IDs)") {
// Prior to Spark 1.4.0, StageInfo did not have the "Parent IDs" property
val stageInfo = new StageInfo(1, 1, "me-stage", 1, Seq.empty, Seq(1, 2, 3), "details",
resourceProfileId = ResourceProfile.DEFAULT_RESOURCE_PROFILE_ID)
val oldStageInfo = JsonProtocol.stageInfoToJson(stageInfo).removeField({ _._1 == "Parent IDs"})
val expectedStageInfo = new StageInfo(1, 1, "me-stage", 1, Seq.empty, Seq.empty, "details",
resourceProfileId = ResourceProfile.DEFAULT_RESOURCE_PROFILE_ID)
assertEquals(expectedStageInfo, JsonProtocol.stageInfoFromJson(oldStageInfo))
}
// `TaskCommitDenied` was added in 1.3.0 but JSON de/serialization logic was added in 1.5.1
test("TaskCommitDenied backward compatibility") {
val denied = TaskCommitDenied(1, 2, 3)
val oldDenied = JsonProtocol.taskEndReasonToJson(denied)
.removeField({ _._1 == "Job ID" })
.removeField({ _._1 == "Partition ID" })
.removeField({ _._1 == "Attempt Number" })
val expectedDenied = TaskCommitDenied(-1, -1, -1)
assertEquals(expectedDenied, JsonProtocol.taskEndReasonFromJson(oldDenied))
}
test("AccumulableInfo backward compatibility") {
// "Internal" property of AccumulableInfo was added in 1.5.1
val accumulableInfo = makeAccumulableInfo(1, internal = true, countFailedValues = true)
val accumulableInfoJson = JsonProtocol.accumulableInfoToJson(accumulableInfo)
val oldJson = accumulableInfoJson.removeField({ _._1 == "Internal" })
val oldInfo = JsonProtocol.accumulableInfoFromJson(oldJson)
assert(!oldInfo.internal)
// "Count Failed Values" property of AccumulableInfo was added in 2.0.0
val oldJson2 = accumulableInfoJson.removeField({ _._1 == "Count Failed Values" })
val oldInfo2 = JsonProtocol.accumulableInfoFromJson(oldJson2)
assert(!oldInfo2.countFailedValues)
// "Metadata" property of AccumulableInfo was added in 2.0.0
val oldJson3 = accumulableInfoJson.removeField({ _._1 == "Metadata" })
val oldInfo3 = JsonProtocol.accumulableInfoFromJson(oldJson3)
assert(oldInfo3.metadata.isEmpty)
}
test("ExceptionFailure backward compatibility: accumulator updates") {
// "Task Metrics" was replaced with "Accumulator Updates" in 2.0.0. For older event logs,
// we should still be able to fallback to constructing the accumulator updates from the
// "Task Metrics" field, if it exists.
val tm = makeTaskMetrics(1L, 2L, 3L, 4L, 5, 6, hasHadoopInput = true, hasOutput = true)
val tmJson = JsonProtocol.taskMetricsToJson(tm)
val accumUpdates = tm.accumulators().map(AccumulatorSuite.makeInfo)
val exception = new SparkException("sentimental")
val exceptionFailure = new ExceptionFailure(exception, accumUpdates)
val exceptionFailureJson = JsonProtocol.taskEndReasonToJson(exceptionFailure)
val tmFieldJson: JValue = "Task Metrics" -> tmJson
val oldExceptionFailureJson: JValue =
exceptionFailureJson.removeField { _._1 == "Accumulator Updates" }.merge(tmFieldJson)
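    // merge() grafts the legacy "Task Metrics" field onto the JSON after "Accumulator Updates"
    // has been removed, approximating an event written by a pre-2.0 version of Spark.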
val oldExceptionFailure =
JsonProtocol.taskEndReasonFromJson(oldExceptionFailureJson).asInstanceOf[ExceptionFailure]
assert(exceptionFailure.className === oldExceptionFailure.className)
assert(exceptionFailure.description === oldExceptionFailure.description)
assertSeqEquals[StackTraceElement](
exceptionFailure.stackTrace, oldExceptionFailure.stackTrace, assertStackTraceElementEquals)
assert(exceptionFailure.fullStackTrace === oldExceptionFailure.fullStackTrace)
assertSeqEquals[AccumulableInfo](
exceptionFailure.accumUpdates, oldExceptionFailure.accumUpdates, (x, y) => x == y)
}
test("ExecutorMetricsUpdate backward compatibility: executor metrics update") {
// executorMetricsUpdate was added in 2.4.0.
val executorMetricsUpdate = makeExecutorMetricsUpdate("1", true, true)
val oldExecutorMetricsUpdateJson =
JsonProtocol.executorMetricsUpdateToJson(executorMetricsUpdate)
.removeField( _._1 == "Executor Metrics Updated")
val expectedExecutorMetricsUpdate = makeExecutorMetricsUpdate("1", true, false)
assertEquals(expectedExecutorMetricsUpdate,
JsonProtocol.executorMetricsUpdateFromJson(oldExecutorMetricsUpdateJson))
}
test("executorMetricsFromJson backward compatibility: handle missing metrics") {
// any missing metrics should be set to 0
val executorMetrics = new ExecutorMetrics(Array(12L, 23L, 45L, 67L, 78L, 89L,
90L, 123L, 456L, 789L, 40L, 20L, 20L, 10L, 20L, 10L))
val oldExecutorMetricsJson =
JsonProtocol.executorMetricsToJson(executorMetrics)
.removeField( _._1 == "MappedPoolMemory")
    val expectedExecutorMetrics = new ExecutorMetrics(Array(12L, 23L, 45L, 67L,
      78L, 89L, 90L, 123L, 456L, 0L, 40L, 20L, 20L, 10L, 20L, 10L))
    assertEquals(expectedExecutorMetrics,
      JsonProtocol.executorMetricsFromJson(oldExecutorMetricsJson))
}
test("AccumulableInfo value de/serialization") {
import InternalAccumulator._
val blocks = Seq[(BlockId, BlockStatus)](
(TestBlockId("meebo"), BlockStatus(StorageLevel.MEMORY_ONLY, 1L, 2L)),
(TestBlockId("feebo"), BlockStatus(StorageLevel.DISK_ONLY, 3L, 4L)))
val blocksJson = JArray(blocks.toList.map { case (id, status) =>
("Block ID" -> id.toString) ~
("Status" -> JsonProtocol.blockStatusToJson(status))
})
testAccumValue(Some(RESULT_SIZE), 3L, JInt(3))
testAccumValue(Some(shuffleRead.REMOTE_BLOCKS_FETCHED), 2, JInt(2))
testAccumValue(Some(UPDATED_BLOCK_STATUSES), blocks.asJava, blocksJson)
// For anything else, we just cast the value to a string
testAccumValue(Some("anything"), blocks, JString(blocks.toString))
testAccumValue(Some("anything"), 123, JString("123"))
}
/** Create an AccumulableInfo and verify we can serialize and deserialize it. */
private def testAccumulableInfo(
name: String,
value: Option[Any],
expectedValue: Option[Any]): Unit = {
val isInternal = name.startsWith(InternalAccumulator.METRICS_PREFIX)
val accum = AccumulableInfo(
123L,
Some(name),
update = value,
value = value,
internal = isInternal,
countFailedValues = false)
val json = JsonProtocol.accumulableInfoToJson(accum)
val newAccum = JsonProtocol.accumulableInfoFromJson(json)
assert(newAccum == accum.copy(update = expectedValue, value = expectedValue))
}
test("SPARK-31923: unexpected value type of internal accumulator") {
// Because a user may use `METRICS_PREFIX` in an accumulator name, we should test unexpected
// types to make sure we don't crash.
import InternalAccumulator.METRICS_PREFIX
testAccumulableInfo(
METRICS_PREFIX + "fooString",
value = Some("foo"),
expectedValue = None)
testAccumulableInfo(
METRICS_PREFIX + "fooList",
value = Some(java.util.Arrays.asList("string")),
expectedValue = Some(java.util.Collections.emptyList())
)
val blocks = Seq(
(TestBlockId("block1"), BlockStatus(StorageLevel.MEMORY_ONLY, 1L, 2L)),
(TestBlockId("block2"), BlockStatus(StorageLevel.DISK_ONLY, 3L, 4L)))
testAccumulableInfo(
METRICS_PREFIX + "fooList",
value = Some(java.util.Arrays.asList(
"string",
blocks(0),
blocks(1))),
expectedValue = Some(blocks.asJava)
)
testAccumulableInfo(
METRICS_PREFIX + "fooSet",
value = Some(Set("foo")),
expectedValue = None)
}
test("SPARK-30936: forwards compatibility - ignore unknown fields") {
val expected = TestListenerEvent("foo", 123)
val unknownFieldsJson =
"""{
| "Event" : "org.apache.spark.util.TestListenerEvent",
| "foo" : "foo",
| "bar" : 123,
| "unknown" : "unknown"
|}""".stripMargin
assert(JsonProtocol.sparkEventFromJson(parse(unknownFieldsJson)) === expected)
}
test("SPARK-30936: backwards compatibility - set default values for missing fields") {
val expected = TestListenerEvent("foo", 0)
val unknownFieldsJson =
"""{
| "Event" : "org.apache.spark.util.TestListenerEvent",
| "foo" : "foo"
|}""".stripMargin
assert(JsonProtocol.sparkEventFromJson(parse(unknownFieldsJson)) === expected)
}
}
private[spark] object JsonProtocolSuite extends Assertions {
import InternalAccumulator._
private val jobSubmissionTime = 1421191042750L
private val jobCompletionTime = 1421191296660L
private val executorAddedTime = 1421458410000L
private val executorRemovedTime = 1421458922000L
private val executorBlacklistedTime = 1421458932000L
private val executorUnblacklistedTime = 1421458942000L
private val nodeBlacklistedTime = 1421458952000L
private val nodeUnblacklistedTime = 1421458962000L
private def testEvent(event: SparkListenerEvent, jsonString: String): Unit = {
val actualJsonString = compact(render(JsonProtocol.sparkEventToJson(event)))
val newEvent = JsonProtocol.sparkEventFromJson(parse(actualJsonString))
assertJsonStringEquals(jsonString, actualJsonString, event.getClass.getSimpleName)
assertEquals(event, newEvent)
}
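  // A minimal sketch of the backward-compatibility pattern used throughout this suite
  // (hypothetical helper, not referenced by the tests above): serialize an event, drop a field
  // to mimic an older event log, then deserialize what remains.
  private def roundTripWithoutField(event: SparkListenerEvent, field: String): SparkListenerEvent =
    JsonProtocol.sparkEventFromJson(
      JsonProtocol.sparkEventToJson(event).removeField { case (name, _) => name == field })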
private def testRDDInfo(info: RDDInfo): Unit = {
val newInfo = JsonProtocol.rddInfoFromJson(JsonProtocol.rddInfoToJson(info))
assertEquals(info, newInfo)
}
private def testStageInfo(info: StageInfo): Unit = {
val newInfo = JsonProtocol.stageInfoFromJson(JsonProtocol.stageInfoToJson(info))
assertEquals(info, newInfo)
}
private def testStorageLevel(level: StorageLevel): Unit = {
val newLevel = JsonProtocol.storageLevelFromJson(JsonProtocol.storageLevelToJson(level))
assertEquals(level, newLevel)
}
private def testTaskMetrics(metrics: TaskMetrics): Unit = {
val newMetrics = JsonProtocol.taskMetricsFromJson(JsonProtocol.taskMetricsToJson(metrics))
assertEquals(metrics, newMetrics)
}
private def testBlockManagerId(id: BlockManagerId): Unit = {
val newId = JsonProtocol.blockManagerIdFromJson(JsonProtocol.blockManagerIdToJson(id))
assert(id === newId)
}
private def testTaskInfo(info: TaskInfo): Unit = {
val newInfo = JsonProtocol.taskInfoFromJson(JsonProtocol.taskInfoToJson(info))
assertEquals(info, newInfo)
}
private def testJobResult(result: JobResult): Unit = {
val newResult = JsonProtocol.jobResultFromJson(JsonProtocol.jobResultToJson(result))
assertEquals(result, newResult)
}
private def testTaskEndReason(reason: TaskEndReason): Unit = {
val newReason = JsonProtocol.taskEndReasonFromJson(JsonProtocol.taskEndReasonToJson(reason))
assertEquals(reason, newReason)
}
private def testBlockId(blockId: BlockId): Unit = {
val newBlockId = BlockId(blockId.toString)
assert(blockId === newBlockId)
}
private def testExecutorInfo(info: ExecutorInfo): Unit = {
val newInfo = JsonProtocol.executorInfoFromJson(JsonProtocol.executorInfoToJson(info))
assertEquals(info, newInfo)
}
private def testAccumValue(name: Option[String], value: Any, expectedJson: JValue): Unit = {
val json = JsonProtocol.accumValueToJson(name, value)
assert(json === expectedJson)
val newValue = JsonProtocol.accumValueFromJson(name, json)
val expectedValue = if (name.exists(_.startsWith(METRICS_PREFIX))) value else value.toString
assert(newValue === expectedValue)
}
/** -------------------------------- *
| Util methods for comparing events |
* --------------------------------- */
private[spark] def assertEquals(event1: SparkListenerEvent, event2: SparkListenerEvent): Unit = {
(event1, event2) match {
case (e1: SparkListenerStageSubmitted, e2: SparkListenerStageSubmitted) =>
assert(e1.properties === e2.properties)
assertEquals(e1.stageInfo, e2.stageInfo)
case (e1: SparkListenerStageCompleted, e2: SparkListenerStageCompleted) =>
assertEquals(e1.stageInfo, e2.stageInfo)
case (e1: SparkListenerTaskStart, e2: SparkListenerTaskStart) =>
assert(e1.stageId === e2.stageId)
assertEquals(e1.taskInfo, e2.taskInfo)
case (e1: SparkListenerTaskGettingResult, e2: SparkListenerTaskGettingResult) =>
assertEquals(e1.taskInfo, e2.taskInfo)
case (e1: SparkListenerTaskEnd, e2: SparkListenerTaskEnd) =>
assert(e1.stageId === e2.stageId)
assert(e1.stageAttemptId === e2.stageAttemptId)
assert(e1.taskType === e2.taskType)
assertEquals(e1.reason, e2.reason)
assertEquals(e1.taskInfo, e2.taskInfo)
assertEquals(e1.taskExecutorMetrics, e2.taskExecutorMetrics)
assertEquals(e1.taskMetrics, e2.taskMetrics)
case (e1: SparkListenerJobStart, e2: SparkListenerJobStart) =>
assert(e1.jobId === e2.jobId)
assert(e1.properties === e2.properties)
assert(e1.stageIds === e2.stageIds)
case (e1: SparkListenerJobEnd, e2: SparkListenerJobEnd) =>
assert(e1.jobId === e2.jobId)
assertEquals(e1.jobResult, e2.jobResult)
case (e1: SparkListenerEnvironmentUpdate, e2: SparkListenerEnvironmentUpdate) =>
assertEquals(e1.environmentDetails, e2.environmentDetails)
case (e1: SparkListenerExecutorAdded, e2: SparkListenerExecutorAdded) =>
        assert(e1.executorId === e2.executorId)
assertEquals(e1.executorInfo, e2.executorInfo)
case (e1: SparkListenerExecutorRemoved, e2: SparkListenerExecutorRemoved) =>
        assert(e1.executorId === e2.executorId)
case (e1: SparkListenerExecutorMetricsUpdate, e2: SparkListenerExecutorMetricsUpdate) =>
assert(e1.execId === e2.execId)
assertSeqEquals[(Long, Int, Int, Seq[AccumulableInfo])](
e1.accumUpdates,
e2.accumUpdates,
(a, b) => {
val (taskId1, stageId1, stageAttemptId1, updates1) = a
val (taskId2, stageId2, stageAttemptId2, updates2) = b
assert(taskId1 === taskId2)
assert(stageId1 === stageId2)
assert(stageAttemptId1 === stageAttemptId2)
assertSeqEquals[AccumulableInfo](updates1, updates2, (a, b) => a.equals(b))
})
assertSeqEquals[((Int, Int), ExecutorMetrics)](
e1.executorUpdates.toSeq.sortBy(_._1),
e2.executorUpdates.toSeq.sortBy(_._1),
(a, b) => {
val (k1, v1) = a
val (k2, v2) = b
assert(k1 === k2)
assertEquals(v1, v2)
}
)
case (e1: SparkListenerStageExecutorMetrics, e2: SparkListenerStageExecutorMetrics) =>
assert(e1.execId === e2.execId)
assert(e1.stageId === e2.stageId)
assert(e1.stageAttemptId === e2.stageAttemptId)
assertEquals(e1.executorMetrics, e2.executorMetrics)
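      // Events with no nested structure fall through to the plain equality check below;
      // the trailing wildcard case is unreachable because that catch-all matches every pair.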
case (e1, e2) =>
assert(e1 === e2)
case _ => fail("Events don't match in types!")
}
}
private def assertEquals(info1: StageInfo, info2: StageInfo): Unit = {
assert(info1.stageId === info2.stageId)
assert(info1.name === info2.name)
assert(info1.numTasks === info2.numTasks)
assert(info1.submissionTime === info2.submissionTime)
assert(info1.completionTime === info2.completionTime)
assert(info1.rddInfos.size === info2.rddInfos.size)
(0 until info1.rddInfos.size).foreach { i =>
assertEquals(info1.rddInfos(i), info2.rddInfos(i))
}
assert(info1.accumulables === info2.accumulables)
assert(info1.details === info2.details)
}
private def assertEquals(info1: RDDInfo, info2: RDDInfo): Unit = {
assert(info1.id === info2.id)
assert(info1.name === info2.name)
assert(info1.numPartitions === info2.numPartitions)
assert(info1.numCachedPartitions === info2.numCachedPartitions)
assert(info1.memSize === info2.memSize)
assert(info1.diskSize === info2.diskSize)
assertEquals(info1.storageLevel, info2.storageLevel)
}
private def assertEquals(level1: StorageLevel, level2: StorageLevel): Unit = {
assert(level1.useDisk === level2.useDisk)
assert(level1.useMemory === level2.useMemory)
assert(level1.deserialized === level2.deserialized)
assert(level1.replication === level2.replication)
}
private def assertEquals(info1: TaskInfo, info2: TaskInfo): Unit = {
assert(info1.taskId === info2.taskId)
assert(info1.index === info2.index)
assert(info1.attemptNumber === info2.attemptNumber)
assert(info1.launchTime === info2.launchTime)
assert(info1.executorId === info2.executorId)
assert(info1.host === info2.host)
assert(info1.taskLocality === info2.taskLocality)
assert(info1.speculative === info2.speculative)
assert(info1.gettingResultTime === info2.gettingResultTime)
assert(info1.finishTime === info2.finishTime)
assert(info1.failed === info2.failed)
assert(info1.accumulables === info2.accumulables)
}
private def assertEquals(info1: ExecutorInfo, info2: ExecutorInfo): Unit = {
assert(info1.executorHost == info2.executorHost)
assert(info1.totalCores == info2.totalCores)
}
private def assertEquals(metrics1: TaskMetrics, metrics2: TaskMetrics): Unit = {
assert(metrics1.executorDeserializeTime === metrics2.executorDeserializeTime)
assert(metrics1.executorDeserializeCpuTime === metrics2.executorDeserializeCpuTime)
assert(metrics1.executorRunTime === metrics2.executorRunTime)
assert(metrics1.executorCpuTime === metrics2.executorCpuTime)
assert(metrics1.resultSize === metrics2.resultSize)
assert(metrics1.jvmGCTime === metrics2.jvmGCTime)
assert(metrics1.resultSerializationTime === metrics2.resultSerializationTime)
assert(metrics1.memoryBytesSpilled === metrics2.memoryBytesSpilled)
assert(metrics1.diskBytesSpilled === metrics2.diskBytesSpilled)
assertEquals(metrics1.shuffleReadMetrics, metrics2.shuffleReadMetrics)
assertEquals(metrics1.shuffleWriteMetrics, metrics2.shuffleWriteMetrics)
assertEquals(metrics1.inputMetrics, metrics2.inputMetrics)
assertBlocksEquals(metrics1.updatedBlockStatuses, metrics2.updatedBlockStatuses)
}
private def assertEquals(metrics1: ShuffleReadMetrics, metrics2: ShuffleReadMetrics): Unit = {
assert(metrics1.remoteBlocksFetched === metrics2.remoteBlocksFetched)
assert(metrics1.localBlocksFetched === metrics2.localBlocksFetched)
assert(metrics1.fetchWaitTime === metrics2.fetchWaitTime)
assert(metrics1.remoteBytesRead === metrics2.remoteBytesRead)
}
private def assertEquals(metrics1: ShuffleWriteMetrics, metrics2: ShuffleWriteMetrics): Unit = {
assert(metrics1.bytesWritten === metrics2.bytesWritten)
assert(metrics1.writeTime === metrics2.writeTime)
}
private def assertEquals(metrics1: InputMetrics, metrics2: InputMetrics): Unit = {
assert(metrics1.bytesRead === metrics2.bytesRead)
}
private def assertEquals(result1: JobResult, result2: JobResult): Unit = {
(result1, result2) match {
case (JobSucceeded, JobSucceeded) =>
case (r1: JobFailed, r2: JobFailed) =>
assertEquals(r1.exception, r2.exception)
case _ => fail("Job results don't match in types!")
}
}
private def assertEquals(reason1: TaskEndReason, reason2: TaskEndReason): Unit = {
(reason1, reason2) match {
case (Success, Success) =>
case (Resubmitted, Resubmitted) =>
case (r1: FetchFailed, r2: FetchFailed) =>
assert(r1.shuffleId === r2.shuffleId)
assert(r1.mapId === r2.mapId)
assert(r1.mapIndex === r2.mapIndex)
assert(r1.reduceId === r2.reduceId)
assert(r1.bmAddress === r2.bmAddress)
assert(r1.message === r2.message)
case (r1: ExceptionFailure, r2: ExceptionFailure) =>
assert(r1.className === r2.className)
assert(r1.description === r2.description)
assertSeqEquals(r1.stackTrace, r2.stackTrace, assertStackTraceElementEquals)
assert(r1.fullStackTrace === r2.fullStackTrace)
assertSeqEquals[AccumulableInfo](r1.accumUpdates, r2.accumUpdates, (a, b) => a.equals(b))
case (TaskResultLost, TaskResultLost) =>
case (r1: TaskKilled, r2: TaskKilled) =>
assert(r1.reason == r2.reason)
case (TaskCommitDenied(jobId1, partitionId1, attemptNumber1),
TaskCommitDenied(jobId2, partitionId2, attemptNumber2)) =>
assert(jobId1 === jobId2)
assert(partitionId1 === partitionId2)
assert(attemptNumber1 === attemptNumber2)
case (ExecutorLostFailure(execId1, exit1CausedByApp, reason1),
ExecutorLostFailure(execId2, exit2CausedByApp, reason2)) =>
assert(execId1 === execId2)
assert(exit1CausedByApp === exit2CausedByApp)
assert(reason1 === reason2)
case (UnknownReason, UnknownReason) =>
case _ => fail("Task end reasons don't match in types!")
}
}
private def assertEquals(
details1: Map[String, Seq[(String, String)]],
details2: Map[String, Seq[(String, String)]]): Unit = {
details1.zip(details2).foreach {
case ((key1, values1: Seq[(String, String)]), (key2, values2: Seq[(String, String)])) =>
assert(key1 === key2)
values1.zip(values2).foreach { case (v1, v2) => assert(v1 === v2) }
}
}
private def assertEquals(exception1: Exception, exception2: Exception): Unit = {
assert(exception1.getMessage === exception2.getMessage)
assertSeqEquals(
exception1.getStackTrace,
exception2.getStackTrace,
assertStackTraceElementEquals)
}
private def assertEquals(metrics1: ExecutorMetrics, metrics2: ExecutorMetrics): Unit = {
ExecutorMetricType.metricToOffset.foreach { metric =>
assert(metrics1.getMetricValue(metric._1) === metrics2.getMetricValue(metric._1))
}
}
private def assertJsonStringEquals(expected: String, actual: String, metadata: String): Unit = {
val expectedJson = parse(expected)
val actualJson = parse(actual)
if (expectedJson != actualJson) {
// scalastyle:off
// This prints something useful if the JSON strings don't match
println(s"=== EXPECTED ===\\n${pretty(expectedJson)}\\n")
println(s"=== ACTUAL ===\\n${pretty(actualJson)}\\n")
// scalastyle:on
throw new TestFailedException(s"$metadata JSON did not equal", 1)
}
}
private def assertSeqEquals[T](seq1: Seq[T], seq2: Seq[T], assertEquals: (T, T) => Unit): Unit = {
assert(seq1.length === seq2.length)
seq1.zip(seq2).foreach { case (t1, t2) =>
assertEquals(t1, t2)
}
}
private def assertOptionEquals[T](
opt1: Option[T],
opt2: Option[T],
assertEquals: (T, T) => Unit): Unit = {
if (opt1.isDefined) {
assert(opt2.isDefined)
assertEquals(opt1.get, opt2.get)
} else {
assert(!opt2.isDefined)
}
}
/**
* Use different names for methods we pass in to assertSeqEquals or assertOptionEquals
*/
private def assertBlocksEquals(
blocks1: Seq[(BlockId, BlockStatus)],
blocks2: Seq[(BlockId, BlockStatus)]) = {
assertSeqEquals(blocks1, blocks2, assertBlockEquals)
}
private def assertBlockEquals(b1: (BlockId, BlockStatus), b2: (BlockId, BlockStatus)): Unit = {
assert(b1 === b2)
}
private def assertStackTraceElementEquals(ste1: StackTraceElement,
ste2: StackTraceElement): Unit = {
    // This mimics the equals() method from Java 8 and earlier. Java 9 adds checks for the
    // class loader and module, which would make otherwise-identical elements compare as
    // unequal even though we don't care about those fields.
assert(ste1.getClassName === ste2.getClassName)
assert(ste1.getMethodName === ste2.getMethodName)
assert(ste1.getLineNumber === ste2.getLineNumber)
assert(ste1.getFileName === ste2.getFileName)
}
private def assertEquals(rp1: ResourceProfile, rp2: ResourceProfile): Unit = {
assert(rp1 === rp2)
}
/** ----------------------------------- *
| Util methods for constructing events |
* ------------------------------------ */
private val properties = {
val p = new Properties
p.setProperty("Ukraine", "Kiev")
p.setProperty("Russia", "Moscow")
p.setProperty("France", "Paris")
p.setProperty("Germany", "Berlin")
p
}
private val stackTrace = {
Array[StackTraceElement](
new StackTraceElement("Apollo", "Venus", "Mercury", 42),
new StackTraceElement("Afollo", "Vemus", "Mercurry", 420),
new StackTraceElement("Ayollo", "Vesus", "Blackberry", 4200)
)
}
private def makeRddInfo(a: Int, b: Int, c: Int, d: Long, e: Long) = {
val r =
new RDDInfo(a, "mayor", b, StorageLevel.MEMORY_AND_DISK, false, Seq(1, 4, 7), a.toString)
r.numCachedPartitions = c
r.memSize = d
r.diskSize = e
r
}
private def makeStageInfo(
a: Int,
b: Int,
c: Int,
d: Long,
e: Long,
rpId: Int = ResourceProfile.DEFAULT_RESOURCE_PROFILE_ID) = {
val rddInfos = (0 until a % 5).map { i => makeRddInfo(a + i, b + i, c + i, d + i, e + i) }
val stageInfo = new StageInfo(a, 0, "greetings", b, rddInfos, Seq(100, 200, 300), "details",
resourceProfileId = rpId)
val (acc1, acc2) = (makeAccumulableInfo(1), makeAccumulableInfo(2))
stageInfo.accumulables(acc1.id) = acc1
stageInfo.accumulables(acc2.id) = acc2
stageInfo
}
private def makeTaskInfo(a: Long, b: Int, c: Int, d: Long, speculative: Boolean) = {
val taskInfo = new TaskInfo(a, b, c, d, "executor", "your kind sir", TaskLocality.NODE_LOCAL,
speculative)
taskInfo.setAccumulables(
List(makeAccumulableInfo(1), makeAccumulableInfo(2), makeAccumulableInfo(3, internal = true)))
taskInfo
}
private def makeAccumulableInfo(
id: Int,
internal: Boolean = false,
countFailedValues: Boolean = false,
metadata: Option[String] = None): AccumulableInfo =
new AccumulableInfo(id, Some(s"Accumulable$id"), Some(s"delta$id"), Some(s"val$id"),
internal, countFailedValues, metadata)
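  // For reference, makeAccumulableInfo(1) produces the name "Accumulable1" with update "delta1"
  // and value "val1", which is exactly what the "Accumulables" arrays in the fixtures below use.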
  /** Creates a SparkListenerExecutorMetricsUpdate event. */
private def makeExecutorMetricsUpdate(
execId: String,
includeTaskMetrics: Boolean,
includeExecutorMetrics: Boolean): SparkListenerExecutorMetricsUpdate = {
val taskMetrics =
if (includeTaskMetrics) {
Seq((1L, 1, 1, Seq(makeAccumulableInfo(1, false, false, None),
makeAccumulableInfo(2, false, false, None))))
} else {
Seq()
}
val executorMetricsUpdate: Map[(Int, Int), ExecutorMetrics] =
if (includeExecutorMetrics) {
Map((0, 0) -> new ExecutorMetrics(Array(123456L, 543L, 0L, 0L, 0L, 0L, 0L,
0L, 0L, 0L, 256912L, 123456L, 123456L, 61728L, 30364L, 15182L, 10L, 90L, 2L, 20L)))
} else {
Map.empty
}
SparkListenerExecutorMetricsUpdate(execId, taskMetrics, executorMetricsUpdate)
}
/**
* Creates a TaskMetrics object describing a task that read data from Hadoop (if hasHadoopInput is
* set to true) or read data from a shuffle otherwise.
*/
private def makeTaskMetrics(
a: Long,
b: Long,
c: Long,
d: Long,
e: Int,
f: Int,
hasHadoopInput: Boolean,
hasOutput: Boolean,
hasRecords: Boolean = true) = {
val t = TaskMetrics.registered
    // Set CPU times to the same values as the wall times for testing purposes
t.setExecutorDeserializeTime(a)
t.setExecutorDeserializeCpuTime(a)
t.setExecutorRunTime(b)
t.setExecutorCpuTime(b)
t.setPeakExecutionMemory(c)
t.setResultSize(c)
t.setJvmGCTime(d)
t.setResultSerializationTime(a + b)
t.incMemoryBytesSpilled(a + c)
if (hasHadoopInput) {
val inputMetrics = t.inputMetrics
inputMetrics.setBytesRead(d + e + f)
inputMetrics.incRecordsRead(if (hasRecords) (d + e + f) / 100 else -1)
} else {
val sr = t.createTempShuffleReadMetrics()
sr.incRemoteBytesRead(b + d)
sr.incRemoteBytesReadToDisk(b)
sr.incLocalBlocksFetched(e)
sr.incFetchWaitTime(a + d)
sr.incRemoteBlocksFetched(f)
sr.incRecordsRead(if (hasRecords) (b + d) / 100 else -1)
sr.incLocalBytesRead(a + f)
t.mergeShuffleReadMetrics()
}
if (hasOutput) {
t.outputMetrics.setBytesWritten(a + b + c)
t.outputMetrics.setRecordsWritten(if (hasRecords) (a + b + c) / 100 else -1)
} else {
val sw = t.shuffleWriteMetrics
sw.incBytesWritten(a + b + c)
sw.incWriteTime(b + c + d)
sw.incRecordsWritten(if (hasRecords) (a + b + c) / 100 else -1)
}
    // Make between 1 and 5 blocks
t.setUpdatedBlockStatuses((1 to (e % 5 + 1)).map { i =>
(RDDBlockId(e % i, f % i), BlockStatus(StorageLevel.MEMORY_AND_DISK_SER_2, a % i, b % i))
}.toSeq)
t
}
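  // The per-task values in the JSON fixtures below are consistent with
  // makeTaskMetrics(300L, 400L, 500L, 600L, 700, 800, ...): for example,
  // Result Serialization Time = a + b = 700 and Memory Bytes Spilled = a + c = 800.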
/** --------------------------------------- *
| JSON string representation of each event |
* ---------------------------------------- */
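  // These fixtures are compared as parsed JSON (see assertJsonStringEquals), so formatting and
  // whitespace don't matter, but every field name and value must match what JsonProtocol emits
  // for the corresponding event.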
private val stageSubmittedJsonString =
"""
|{
| "Event": "SparkListenerStageSubmitted",
| "Stage Info": {
| "Stage ID": 100,
| "Stage Attempt ID": 0,
| "Stage Name": "greetings",
| "Number of Tasks": 200,
| "RDD Info": [],
| "Parent IDs" : [100, 200, 300],
| "Details": "details",
| "Accumulables": [
| {
| "ID": 1,
| "Name": "Accumulable1",
| "Update": "delta1",
| "Value": "val1",
| "Internal": false,
| "Count Failed Values": false
| },
| {
| "ID": 2,
| "Name": "Accumulable2",
| "Update": "delta2",
| "Value": "val2",
| "Internal": false,
| "Count Failed Values": false
| }
| ],
| "Resource Profile Id" : 0
| },
| "Properties": {
| "France": "Paris",
| "Germany": "Berlin",
| "Russia": "Moscow",
| "Ukraine": "Kiev"
| }
|}
""".stripMargin
private val stageCompletedJsonString =
"""
|{
| "Event": "SparkListenerStageCompleted",
| "Stage Info": {
| "Stage ID": 101,
| "Stage Attempt ID": 0,
| "Stage Name": "greetings",
| "Number of Tasks": 201,
| "RDD Info": [
| {
| "RDD ID": 101,
| "Name": "mayor",
| "Callsite": "101",
| "Parent IDs": [1, 4, 7],
| "Storage Level": {
| "Use Disk": true,
| "Use Memory": true,
| "Deserialized": true,
| "Replication": 1
| },
| "Barrier" : false,
| "Number of Partitions": 201,
| "Number of Cached Partitions": 301,
| "Memory Size": 401,
| "Disk Size": 501
| }
| ],
| "Parent IDs" : [100, 200, 300],
| "Details": "details",
| "Accumulables": [
| {
| "ID": 1,
| "Name": "Accumulable1",
| "Update": "delta1",
| "Value": "val1",
| "Internal": false,
| "Count Failed Values": false
| },
| {
| "ID": 2,
| "Name": "Accumulable2",
| "Update": "delta2",
| "Value": "val2",
| "Internal": false,
| "Count Failed Values": false
| }
| ],
| "Resource Profile Id" : 0
| }
|}
""".stripMargin
private val taskStartJsonString =
"""
|{
| "Event": "SparkListenerTaskStart",
| "Stage ID": 111,
| "Stage Attempt ID": 0,
| "Task Info": {
| "Task ID": 222,
| "Index": 333,
| "Attempt": 1,
| "Launch Time": 444,
| "Executor ID": "executor",
| "Host": "your kind sir",
| "Locality": "NODE_LOCAL",
| "Speculative": false,
| "Getting Result Time": 0,
| "Finish Time": 0,
| "Failed": false,
| "Killed": false,
| "Accumulables": [
| {
| "ID": 1,
| "Name": "Accumulable1",
| "Update": "delta1",
| "Value": "val1",
| "Internal": false,
| "Count Failed Values": false
| },
| {
| "ID": 2,
| "Name": "Accumulable2",
| "Update": "delta2",
| "Value": "val2",
| "Internal": false,
| "Count Failed Values": false
| },
| {
| "ID": 3,
| "Name": "Accumulable3",
| "Update": "delta3",
| "Value": "val3",
| "Internal": true,
| "Count Failed Values": false
| }
| ]
| }
|}
""".stripMargin
private val taskGettingResultJsonString =
"""
|{
| "Event": "SparkListenerTaskGettingResult",
| "Task Info": {
| "Task ID": 1000,
| "Index": 2000,
| "Attempt": 5,
| "Launch Time": 3000,
| "Executor ID": "executor",
| "Host": "your kind sir",
| "Locality": "NODE_LOCAL",
| "Speculative": true,
| "Getting Result Time": 0,
| "Finish Time": 0,
| "Failed": false,
| "Killed": false,
| "Accumulables": [
| {
| "ID": 1,
| "Name": "Accumulable1",
| "Update": "delta1",
| "Value": "val1",
| "Internal": false,
| "Count Failed Values": false
| },
| {
| "ID": 2,
| "Name": "Accumulable2",
| "Update": "delta2",
| "Value": "val2",
| "Internal": false,
| "Count Failed Values": false
| },
| {
| "ID": 3,
| "Name": "Accumulable3",
| "Update": "delta3",
| "Value": "val3",
| "Internal": true,
| "Count Failed Values": false
| }
| ]
| }
|}
""".stripMargin
private val taskEndJsonString =
"""
|{
| "Event": "SparkListenerTaskEnd",
| "Stage ID": 1,
| "Stage Attempt ID": 0,
| "Task Type": "ShuffleMapTask",
| "Task End Reason": {
| "Reason": "Success"
| },
| "Task Info": {
| "Task ID": 123,
| "Index": 234,
| "Attempt": 67,
| "Launch Time": 345,
| "Executor ID": "executor",
| "Host": "your kind sir",
| "Locality": "NODE_LOCAL",
| "Speculative": false,
| "Getting Result Time": 0,
| "Finish Time": 0,
| "Failed": false,
| "Killed": false,
| "Accumulables": [
| {
| "ID": 1,
| "Name": "Accumulable1",
| "Update": "delta1",
| "Value": "val1",
| "Internal": false,
| "Count Failed Values": false
| },
| {
| "ID": 2,
| "Name": "Accumulable2",
| "Update": "delta2",
| "Value": "val2",
| "Internal": false,
| "Count Failed Values": false
| },
| {
| "ID": 3,
| "Name": "Accumulable3",
| "Update": "delta3",
| "Value": "val3",
| "Internal": true,
| "Count Failed Values": false
| }
| ]
| },
| "Task Executor Metrics" : {
| "JVMHeapMemory" : 543,
| "JVMOffHeapMemory" : 123456,
| "OnHeapExecutionMemory" : 12345,
| "OffHeapExecutionMemory" : 1234,
| "OnHeapStorageMemory" : 123,
| "OffHeapStorageMemory" : 12,
| "OnHeapUnifiedMemory" : 432,
| "OffHeapUnifiedMemory" : 321,
| "DirectPoolMemory" : 654,
| "MappedPoolMemory" : 765,
| "ProcessTreeJVMVMemory": 256912,
| "ProcessTreeJVMRSSMemory": 123456,
| "ProcessTreePythonVMemory": 123456,
| "ProcessTreePythonRSSMemory": 61728,
| "ProcessTreeOtherVMemory": 30364,
| "ProcessTreeOtherRSSMemory": 15182,
| "MinorGCCount" : 0,
| "MinorGCTime" : 0,
| "MajorGCCount" : 0,
| "MajorGCTime" : 0
| },
| "Task Metrics": {
| "Executor Deserialize Time": 300,
| "Executor Deserialize CPU Time": 300,
| "Executor Run Time": 400,
| "Executor CPU Time": 400,
| "Peak Execution Memory": 500,
| "Result Size": 500,
| "JVM GC Time": 600,
| "Result Serialization Time": 700,
| "Memory Bytes Spilled": 800,
| "Disk Bytes Spilled": 0,
| "Shuffle Read Metrics": {
| "Remote Blocks Fetched": 800,
| "Local Blocks Fetched": 700,
| "Fetch Wait Time": 900,
| "Remote Bytes Read": 1000,
| "Remote Bytes Read To Disk": 400,
| "Local Bytes Read": 1100,
| "Total Records Read": 10
| },
| "Shuffle Write Metrics": {
| "Shuffle Bytes Written": 1200,
| "Shuffle Write Time": 1500,
| "Shuffle Records Written": 12
| },
| "Input Metrics" : {
| "Bytes Read" : 0,
| "Records Read" : 0
| },
| "Output Metrics" : {
| "Bytes Written" : 0,
| "Records Written" : 0
| },
| "Updated Blocks": [
| {
| "Block ID": "rdd_0_0",
| "Status": {
| "Storage Level": {
| "Use Disk": true,
| "Use Memory": true,
| "Deserialized": false,
| "Replication": 2
| },
| "Memory Size": 0,
| "Disk Size": 0
| }
| }
| ]
| }
|}
""".stripMargin
private val taskEndWithHadoopInputJsonString =
"""
|{
| "Event": "SparkListenerTaskEnd",
| "Stage ID": 1,
| "Stage Attempt ID": 0,
| "Task Type": "ShuffleMapTask",
| "Task End Reason": {
| "Reason": "Success"
| },
| "Task Info": {
| "Task ID": 123,
| "Index": 234,
| "Attempt": 67,
| "Launch Time": 345,
| "Executor ID": "executor",
| "Host": "your kind sir",
| "Locality": "NODE_LOCAL",
| "Speculative": false,
| "Getting Result Time": 0,
| "Finish Time": 0,
| "Failed": false,
| "Killed": false,
| "Accumulables": [
| {
| "ID": 1,
| "Name": "Accumulable1",
| "Update": "delta1",
| "Value": "val1",
| "Internal": false,
| "Count Failed Values": false
| },
| {
| "ID": 2,
| "Name": "Accumulable2",
| "Update": "delta2",
| "Value": "val2",
| "Internal": false,
| "Count Failed Values": false
| },
| {
| "ID": 3,
| "Name": "Accumulable3",
| "Update": "delta3",
| "Value": "val3",
| "Internal": true,
| "Count Failed Values": false
| }
| ]
| },
| "Task Executor Metrics" : {
| "JVMHeapMemory" : 543,
| "JVMOffHeapMemory" : 123456,
| "OnHeapExecutionMemory" : 12345,
| "OffHeapExecutionMemory" : 1234,
| "OnHeapStorageMemory" : 123,
| "OffHeapStorageMemory" : 12,
| "OnHeapUnifiedMemory" : 432,
| "OffHeapUnifiedMemory" : 321,
| "DirectPoolMemory" : 654,
| "MappedPoolMemory" : 765,
| "ProcessTreeJVMVMemory": 256912,
| "ProcessTreeJVMRSSMemory": 123456,
| "ProcessTreePythonVMemory": 123456,
| "ProcessTreePythonRSSMemory": 61728,
| "ProcessTreeOtherVMemory": 30364,
| "ProcessTreeOtherRSSMemory": 15182,
| "MinorGCCount" : 0,
| "MinorGCTime" : 0,
| "MajorGCCount" : 0,
| "MajorGCTime" : 0
| },
| "Task Metrics": {
| "Executor Deserialize Time": 300,
| "Executor Deserialize CPU Time": 300,
| "Executor Run Time": 400,
| "Executor CPU Time": 400,
| "Peak Execution Memory": 500,
| "Result Size": 500,
| "JVM GC Time": 600,
| "Result Serialization Time": 700,
| "Memory Bytes Spilled": 800,
| "Disk Bytes Spilled": 0,
| "Shuffle Read Metrics" : {
| "Remote Blocks Fetched" : 0,
| "Local Blocks Fetched" : 0,
| "Fetch Wait Time" : 0,
| "Remote Bytes Read" : 0,
| "Remote Bytes Read To Disk" : 0,
| "Local Bytes Read" : 0,
| "Total Records Read" : 0
| },
| "Shuffle Write Metrics": {
| "Shuffle Bytes Written": 1200,
| "Shuffle Write Time": 1500,
| "Shuffle Records Written": 12
| },
| "Input Metrics": {
| "Bytes Read": 2100,
| "Records Read": 21
| },
| "Output Metrics" : {
| "Bytes Written" : 0,
| "Records Written" : 0
| },
| "Updated Blocks": [
| {
| "Block ID": "rdd_0_0",
| "Status": {
| "Storage Level": {
| "Use Disk": true,
| "Use Memory": true,
| "Deserialized": false,
| "Replication": 2
| },
| "Memory Size": 0,
| "Disk Size": 0
| }
| }
| ]
| }
|}
""".stripMargin
private val taskEndWithOutputJsonString =
"""
|{
| "Event": "SparkListenerTaskEnd",
| "Stage ID": 1,
| "Stage Attempt ID": 0,
| "Task Type": "ResultTask",
| "Task End Reason": {
| "Reason": "Success"
| },
| "Task Info": {
| "Task ID": 123,
| "Index": 234,
| "Attempt": 67,
| "Launch Time": 345,
| "Executor ID": "executor",
| "Host": "your kind sir",
| "Locality": "NODE_LOCAL",
| "Speculative": false,
| "Getting Result Time": 0,
| "Finish Time": 0,
| "Failed": false,
| "Killed": false,
| "Accumulables": [
| {
| "ID": 1,
| "Name": "Accumulable1",
| "Update": "delta1",
| "Value": "val1",
| "Internal": false,
| "Count Failed Values": false
| },
| {
| "ID": 2,
| "Name": "Accumulable2",
| "Update": "delta2",
| "Value": "val2",
| "Internal": false,
| "Count Failed Values": false
| },
| {
| "ID": 3,
| "Name": "Accumulable3",
| "Update": "delta3",
| "Value": "val3",
| "Internal": true,
| "Count Failed Values": false
| }
| ]
| },
| "Task Executor Metrics" : {
| "JVMHeapMemory" : 543,
| "JVMOffHeapMemory" : 123456,
| "OnHeapExecutionMemory" : 12345,
| "OffHeapExecutionMemory" : 1234,
| "OnHeapStorageMemory" : 123,
| "OffHeapStorageMemory" : 12,
| "OnHeapUnifiedMemory" : 432,
| "OffHeapUnifiedMemory" : 321,
| "DirectPoolMemory" : 654,
| "MappedPoolMemory" : 765,
| "ProcessTreeJVMVMemory": 256912,
| "ProcessTreeJVMRSSMemory": 123456,
| "ProcessTreePythonVMemory": 123456,
| "ProcessTreePythonRSSMemory": 61728,
| "ProcessTreeOtherVMemory": 30364,
| "ProcessTreeOtherRSSMemory": 15182,
| "MinorGCCount" : 0,
| "MinorGCTime" : 0,
| "MajorGCCount" : 0,
| "MajorGCTime" : 0
| },
| "Task Metrics": {
| "Executor Deserialize Time": 300,
| "Executor Deserialize CPU Time": 300,
| "Executor Run Time": 400,
| "Executor CPU Time": 400,
| "Peak Execution Memory": 500,
| "Result Size": 500,
| "JVM GC Time": 600,
| "Result Serialization Time": 700,
| "Memory Bytes Spilled": 800,
| "Disk Bytes Spilled": 0,
| "Shuffle Read Metrics" : {
| "Remote Blocks Fetched" : 0,
| "Local Blocks Fetched" : 0,
| "Fetch Wait Time" : 0,
| "Remote Bytes Read" : 0,
| "Remote Bytes Read To Disk" : 0,
| "Local Bytes Read" : 0,
| "Total Records Read" : 0
| },
| "Shuffle Write Metrics": {
| "Shuffle Bytes Written" : 0,
| "Shuffle Write Time" : 0,
| "Shuffle Records Written" : 0
| },
| "Input Metrics": {
| "Bytes Read": 2100,
| "Records Read": 21
| },
| "Output Metrics": {
| "Bytes Written": 1200,
| "Records Written": 12
| },
| "Updated Blocks": [
| {
| "Block ID": "rdd_0_0",
| "Status": {
| "Storage Level": {
| "Use Disk": true,
| "Use Memory": true,
| "Deserialized": false,
| "Replication": 2
| },
| "Memory Size": 0,
| "Disk Size": 0
| }
| }
| ]
| }
|}
""".stripMargin
private val jobStartJsonString =
"""
|{
| "Event": "SparkListenerJobStart",
| "Job ID": 10,
| "Submission Time": 1421191042750,
| "Stage Infos": [
| {
| "Stage ID": 1,
| "Stage Attempt ID": 0,
| "Stage Name": "greetings",
| "Number of Tasks": 200,
| "RDD Info": [
| {
| "RDD ID": 1,
| "Name": "mayor",
| "Callsite": "1",
| "Parent IDs": [1, 4, 7],
| "Storage Level": {
| "Use Disk": true,
| "Use Memory": true,
| "Deserialized": true,
| "Replication": 1
| },
| "Barrier" : false,
| "Number of Partitions": 200,
| "Number of Cached Partitions": 300,
| "Memory Size": 400,
| "Disk Size": 500
| }
| ],
| "Parent IDs" : [100, 200, 300],
| "Details": "details",
| "Accumulables": [
| {
| "ID": 1,
| "Name": "Accumulable1",
| "Update": "delta1",
| "Value": "val1",
| "Internal": false,
| "Count Failed Values": false
| },
| {
| "ID": 2,
| "Name": "Accumulable2",
| "Update": "delta2",
| "Value": "val2",
| "Internal": false,
| "Count Failed Values": false
| }
| ],
| "Resource Profile Id" : 0
| },
| {
| "Stage ID": 2,
| "Stage Attempt ID": 0,
| "Stage Name": "greetings",
| "Number of Tasks": 400,
| "RDD Info": [
| {
| "RDD ID": 2,
| "Name": "mayor",
| "Callsite": "2",
| "Parent IDs": [1, 4, 7],
| "Storage Level": {
| "Use Disk": true,
| "Use Memory": true,
| "Deserialized": true,
| "Replication": 1
| },
| "Barrier" : false,
| "Number of Partitions": 400,
| "Number of Cached Partitions": 600,
| "Memory Size": 800,
| "Disk Size": 1000
| },
| {
| "RDD ID": 3,
| "Name": "mayor",
| "Callsite": "3",
| "Parent IDs": [1, 4, 7],
| "Storage Level": {
| "Use Disk": true,
| "Use Memory": true,
| "Deserialized": true,
| "Replication": 1
| },
| "Barrier" : false,
| "Number of Partitions": 401,
| "Number of Cached Partitions": 601,
| "Memory Size": 801,
| "Disk Size": 1001
| }
| ],
| "Parent IDs" : [100, 200, 300],
| "Details": "details",
| "Accumulables": [
| {
| "ID": 1,
| "Name": "Accumulable1",
| "Update": "delta1",
| "Value": "val1",
| "Internal": false,
| "Count Failed Values": false
| },
| {
| "ID": 2,
| "Name": "Accumulable2",
| "Update": "delta2",
| "Value": "val2",
| "Internal": false,
| "Count Failed Values": false
| }
| ],
| "Resource Profile Id" : 0
| },
| {
| "Stage ID": 3,
| "Stage Attempt ID": 0,
| "Stage Name": "greetings",
| "Number of Tasks": 600,
| "RDD Info": [
| {
| "RDD ID": 3,
| "Name": "mayor",
| "Callsite": "3",
| "Parent IDs": [1, 4, 7],
| "Storage Level": {
| "Use Disk": true,
| "Use Memory": true,
| "Deserialized": true,
| "Replication": 1
| },
| "Barrier" : false,
| "Number of Partitions": 600,
| "Number of Cached Partitions": 900,
| "Memory Size": 1200,
| "Disk Size": 1500
| },
| {
| "RDD ID": 4,
| "Name": "mayor",
| "Callsite": "4",
| "Parent IDs": [1, 4, 7],
| "Storage Level": {
| "Use Disk": true,
| "Use Memory": true,
| "Deserialized": true,
| "Replication": 1
| },
| "Barrier" : false,
| "Number of Partitions": 601,
| "Number of Cached Partitions": 901,
| "Memory Size": 1201,
| "Disk Size": 1501
| },
| {
| "RDD ID": 5,
| "Name": "mayor",
| "Callsite": "5",
| "Parent IDs": [1, 4, 7],
| "Storage Level": {
| "Use Disk": true,
| "Use Memory": true,
| "Deserialized": true,
| "Replication": 1
| },
| "Barrier" : false,
| "Number of Partitions": 602,
| "Number of Cached Partitions": 902,
| "Memory Size": 1202,
| "Disk Size": 1502
| }
| ],
| "Parent IDs" : [100, 200, 300],
| "Details": "details",
| "Accumulables": [
| {
| "ID": 1,
| "Name": "Accumulable1",
| "Update": "delta1",
| "Value": "val1",
| "Internal": false,
| "Count Failed Values": false
| },
| {
| "ID": 2,
| "Name": "Accumulable2",
| "Update": "delta2",
| "Value": "val2",
| "Internal": false,
| "Count Failed Values": false
| }
| ],
| "Resource Profile Id" : 0
| },
| {
| "Stage ID": 4,
| "Stage Attempt ID": 0,
| "Stage Name": "greetings",
| "Number of Tasks": 800,
| "RDD Info": [
| {
| "RDD ID": 4,
| "Name": "mayor",
| "Callsite": "4",
| "Parent IDs": [1, 4, 7],
| "Storage Level": {
| "Use Disk": true,
| "Use Memory": true,
| "Deserialized": true,
| "Replication": 1
| },
| "Barrier" : false,
| "Number of Partitions": 800,
| "Number of Cached Partitions": 1200,
| "Memory Size": 1600,
| "Disk Size": 2000
| },
| {
| "RDD ID": 5,
| "Name": "mayor",
| "Callsite": "5",
| "Parent IDs": [1, 4, 7],
| "Storage Level": {
| "Use Disk": true,
| "Use Memory": true,
| "Deserialized": true,
| "Replication": 1
| },
| "Barrier" : false,
| "Number of Partitions": 801,
| "Number of Cached Partitions": 1201,
| "Memory Size": 1601,
| "Disk Size": 2001
| },
| {
| "RDD ID": 6,
| "Name": "mayor",
| "Callsite": "6",
| "Parent IDs": [1, 4, 7],
| "Storage Level": {
| "Use Disk": true,
| "Use Memory": true,
| "Deserialized": true,
| "Replication": 1
| },
| "Barrier" : false,
| "Number of Partitions": 802,
| "Number of Cached Partitions": 1202,
| "Memory Size": 1602,
| "Disk Size": 2002
| },
| {
| "RDD ID": 7,
| "Name": "mayor",
| "Callsite": "7",
| "Parent IDs": [1, 4, 7],
| "Storage Level": {
| "Use Disk": true,
| "Use Memory": true,
| "Deserialized": true,
| "Replication": 1
| },
| "Barrier" : false,
| "Number of Partitions": 803,
| "Number of Cached Partitions": 1203,
| "Memory Size": 1603,
| "Disk Size": 2003
| }
| ],
| "Parent IDs" : [100, 200, 300],
| "Details": "details",
| "Accumulables": [
| {
| "ID": 1,
| "Name": "Accumulable1",
| "Update": "delta1",
| "Value": "val1",
| "Internal": false,
| "Count Failed Values": false
| },
| {
| "ID": 2,
| "Name": "Accumulable2",
| "Update": "delta2",
| "Value": "val2",
| "Internal": false,
| "Count Failed Values": false
| }
| ],
| "Resource Profile Id" : 0
| }
| ],
| "Stage IDs": [
| 1,
| 2,
| 3,
| 4
| ],
| "Properties": {
| "France": "Paris",
| "Germany": "Berlin",
| "Russia": "Moscow",
| "Ukraine": "Kiev"
| }
|}
""".stripMargin
private val jobEndJsonString =
"""
|{
| "Event": "SparkListenerJobEnd",
| "Job ID": 20,
| "Completion Time": 1421191296660,
| "Job Result": {
| "Result": "JobSucceeded"
| }
|}
""".stripMargin
private val environmentUpdateJsonString =
"""
|{
| "Event": "SparkListenerEnvironmentUpdate",
| "JVM Information": {
| "GC speed": "9999 objects/s",
| "Java home": "Land of coffee"
| },
| "Spark Properties": {
| "Job throughput": "80000 jobs/s, regardless of job type"
| },
| "Hadoop Properties": {
| "hadoop.tmp.dir": "/usr/local/hadoop/tmp"
| },
| "System Properties": {
| "Username": "guest",
| "Password": "guest"
| },
| "Classpath Entries": {
| "Super library": "/tmp/super_library"
| }
|}
""".stripMargin
private val blockManagerAddedJsonString =
"""
|{
| "Event": "SparkListenerBlockManagerAdded",
| "Block Manager ID": {
| "Executor ID": "Stars",
| "Host": "In your multitude...",
| "Port": 300
| },
| "Maximum Memory": 500,
| "Timestamp": 1
|}
""".stripMargin
private val blockManagerRemovedJsonString =
"""
|{
| "Event": "SparkListenerBlockManagerRemoved",
| "Block Manager ID": {
| "Executor ID": "Scarce",
| "Host": "to be counted...",
| "Port": 100
| },
| "Timestamp": 2
|}
""".stripMargin
private val unpersistRDDJsonString =
"""
|{
| "Event": "SparkListenerUnpersistRDD",
| "RDD ID": 12345
|}
""".stripMargin
private val applicationStartJsonString =
"""
|{
| "Event": "SparkListenerApplicationStart",
| "App Name": "The winner of all",
| "App ID": "appId",
| "Timestamp": 42,
| "User": "Garfield",
| "App Attempt ID": "appAttempt"
|}
""".stripMargin
private val applicationStartJsonWithLogUrlsString =
"""
|{
| "Event": "SparkListenerApplicationStart",
| "App Name": "The winner of all",
| "App ID": "appId",
| "Timestamp": 42,
| "User": "Garfield",
| "App Attempt ID": "appAttempt",
| "Driver Logs" : {
| "stderr" : "mystderr",
| "stdout" : "mystdout"
| }
|}
""".stripMargin
private val applicationEndJsonString =
"""
|{
| "Event": "SparkListenerApplicationEnd",
| "Timestamp": 42
|}
""".stripMargin
private val executorAddedJsonString =
s"""
|{
| "Event": "SparkListenerExecutorAdded",
| "Timestamp": ${executorAddedTime},
| "Executor ID": "exec1",
| "Executor Info": {
| "Host": "Hostee.awesome.com",
| "Total Cores": 11,
| "Log Urls" : {
| "stderr" : "mystderr",
| "stdout" : "mystdout"
| },
| "Attributes" : {
| "ContainerId" : "ct1",
| "User" : "spark"
| },
| "Resources" : {
| "gpu" : {
| "name" : "gpu",
| "addresses" : [ "0", "1" ]
| }
| },
| "Resource Profile Id": 4
| }
|}
""".stripMargin
private val executorRemovedJsonString =
s"""
|{
| "Event": "SparkListenerExecutorRemoved",
| "Timestamp": ${executorRemovedTime},
| "Executor ID": "exec2",
| "Removed Reason": "test reason"
|}
""".stripMargin
private val executorMetricsUpdateJsonString =
s"""
|{
| "Event": "SparkListenerExecutorMetricsUpdate",
| "Executor ID": "exec3",
| "Metrics Updated": [
| {
| "Task ID": 1,
| "Stage ID": 2,
| "Stage Attempt ID": 3,
| "Accumulator Updates": [
| {
| "ID": 0,
| "Name": "$EXECUTOR_DESERIALIZE_TIME",
| "Update": 300,
| "Internal": true,
| "Count Failed Values": true
| },
| {
| "ID": 1,
| "Name": "$EXECUTOR_DESERIALIZE_CPU_TIME",
| "Update": 300,
| "Internal": true,
| "Count Failed Values": true
| },
|
| {
| "ID": 2,
| "Name": "$EXECUTOR_RUN_TIME",
| "Update": 400,
| "Internal": true,
| "Count Failed Values": true
| },
| {
| "ID": 3,
| "Name": "$EXECUTOR_CPU_TIME",
| "Update": 400,
| "Internal": true,
| "Count Failed Values": true
| },
| {
| "ID": 4,
| "Name": "$RESULT_SIZE",
| "Update": 500,
| "Internal": true,
| "Count Failed Values": true
| },
| {
| "ID": 5,
| "Name": "$JVM_GC_TIME",
| "Update": 600,
| "Internal": true,
| "Count Failed Values": true
| },
| {
| "ID": 6,
| "Name": "$RESULT_SERIALIZATION_TIME",
| "Update": 700,
| "Internal": true,
| "Count Failed Values": true
| },
| {
| "ID": 7,
| "Name": "$MEMORY_BYTES_SPILLED",
| "Update": 800,
| "Internal": true,
| "Count Failed Values": true
| },
| {
| "ID": 8,
| "Name": "$DISK_BYTES_SPILLED",
| "Update": 0,
| "Internal": true,
| "Count Failed Values": true
| },
| {
| "ID": 9,
| "Name": "$PEAK_EXECUTION_MEMORY",
| "Update": 500,
| "Internal": true,
| "Count Failed Values": true
| },
| {
| "ID": 10,
| "Name": "$UPDATED_BLOCK_STATUSES",
| "Update": [
| {
| "Block ID": "rdd_0_0",
| "Status": {
| "Storage Level": {
| "Use Disk": true,
| "Use Memory": true,
| "Deserialized": false,
| "Replication": 2
| },
| "Memory Size": 0,
| "Disk Size": 0
| }
| }
| ],
| "Internal": true,
| "Count Failed Values": true
| },
| {
| "ID": 11,
| "Name": "${shuffleRead.REMOTE_BLOCKS_FETCHED}",
| "Update": 0,
| "Internal": true,
| "Count Failed Values": true
| },
| {
| "ID": 12,
| "Name": "${shuffleRead.LOCAL_BLOCKS_FETCHED}",
| "Update": 0,
| "Internal": true,
| "Count Failed Values": true
| },
| {
| "ID": 13,
| "Name": "${shuffleRead.REMOTE_BYTES_READ}",
| "Update": 0,
| "Internal": true,
| "Count Failed Values": true
| },
| {
| "ID": 14,
| "Name": "${shuffleRead.REMOTE_BYTES_READ_TO_DISK}",
| "Update": 0,
| "Internal": true,
| "Count Failed Values": true
| },
| {
| "ID": 15,
| "Name": "${shuffleRead.LOCAL_BYTES_READ}",
| "Update": 0,
| "Internal": true,
| "Count Failed Values": true
| },
| {
| "ID": 16,
| "Name": "${shuffleRead.FETCH_WAIT_TIME}",
| "Update": 0,
| "Internal": true,
| "Count Failed Values": true
| },
| {
| "ID": 17,
| "Name": "${shuffleRead.RECORDS_READ}",
| "Update": 0,
| "Internal": true,
| "Count Failed Values": true
| },
| {
| "ID": 18,
| "Name": "${shuffleWrite.BYTES_WRITTEN}",
| "Update": 0,
| "Internal": true,
| "Count Failed Values": true
| },
| {
| "ID": 19,
| "Name": "${shuffleWrite.RECORDS_WRITTEN}",
| "Update": 0,
| "Internal": true,
| "Count Failed Values": true
| },
| {
| "ID": 20,
| "Name": "${shuffleWrite.WRITE_TIME}",
| "Update": 0,
| "Internal": true,
| "Count Failed Values": true
| },
| {
| "ID": 21,
| "Name": "${input.BYTES_READ}",
| "Update": 2100,
| "Internal": true,
| "Count Failed Values": true
| },
| {
| "ID": 22,
| "Name": "${input.RECORDS_READ}",
| "Update": 21,
| "Internal": true,
| "Count Failed Values": true
| },
| {
| "ID": 23,
| "Name": "${output.BYTES_WRITTEN}",
| "Update": 1200,
| "Internal": true,
| "Count Failed Values": true
| },
| {
| "ID": 24,
| "Name": "${output.RECORDS_WRITTEN}",
| "Update": 12,
| "Internal": true,
| "Count Failed Values": true
| },
| {
| "ID": 25,
| "Name": "$TEST_ACCUM",
| "Update": 0,
| "Internal": true,
| "Count Failed Values": true
| }
| ]
| }
| ],
| "Executor Metrics Updated" : [
| {
| "Stage ID" : 0,
| "Stage Attempt ID" : 0,
| "Executor Metrics" : {
| "JVMHeapMemory" : 543,
| "JVMOffHeapMemory" : 123456,
| "OnHeapExecutionMemory" : 12345,
| "OffHeapExecutionMemory" : 1234,
| "OnHeapStorageMemory" : 123,
| "OffHeapStorageMemory" : 12,
| "OnHeapUnifiedMemory" : 432,
| "OffHeapUnifiedMemory" : 321,
| "DirectPoolMemory" : 654,
| "MappedPoolMemory" : 765,
| "ProcessTreeJVMVMemory": 256912,
| "ProcessTreeJVMRSSMemory": 123456,
| "ProcessTreePythonVMemory": 123456,
| "ProcessTreePythonRSSMemory": 61728,
| "ProcessTreeOtherVMemory": 30364,
| "ProcessTreeOtherRSSMemory": 15182,
| "MinorGCCount": 10,
| "MinorGCTime": 90,
| "MajorGCCount": 2,
| "MajorGCTime": 20
| }
| }
| ]
|}
""".stripMargin
private val stageExecutorMetricsJsonString =
"""
|{
| "Event": "SparkListenerStageExecutorMetrics",
| "Executor ID": "1",
| "Stage ID": 2,
| "Stage Attempt ID": 3,
| "Executor Metrics" : {
| "JVMHeapMemory" : 543,
| "JVMOffHeapMemory" : 123456,
| "OnHeapExecutionMemory" : 12345,
| "OffHeapExecutionMemory" : 1234,
| "OnHeapStorageMemory" : 123,
| "OffHeapStorageMemory" : 12,
| "OnHeapUnifiedMemory" : 432,
| "OffHeapUnifiedMemory" : 321,
| "DirectPoolMemory" : 654,
| "MappedPoolMemory" : 765,
| "ProcessTreeJVMVMemory": 256912,
| "ProcessTreeJVMRSSMemory": 123456,
| "ProcessTreePythonVMemory": 123456,
| "ProcessTreePythonRSSMemory": 61728,
| "ProcessTreeOtherVMemory": 30364,
| "ProcessTreeOtherRSSMemory": 15182,
| "MinorGCCount": 10,
| "MinorGCTime": 90,
| "MajorGCCount": 2,
| "MajorGCTime": 20
| }
|}
""".stripMargin
private val blockUpdatedJsonString =
"""
|{
| "Event": "SparkListenerBlockUpdated",
| "Block Updated Info": {
| "Block Manager ID": {
| "Executor ID": "Stars",
| "Host": "In your multitude...",
| "Port": 300
| },
| "Block ID": "rdd_0_0",
| "Storage Level": {
| "Use Disk": false,
| "Use Memory": true,
| "Deserialized": true,
| "Replication": 1
| },
| "Memory Size": 100,
| "Disk Size": 0
| }
|}
""".stripMargin
private val executorBlacklistedJsonString =
s"""
|{
| "Event" : "org.apache.spark.scheduler.SparkListenerExecutorBlacklisted",
| "time" : ${executorBlacklistedTime},
| "executorId" : "exec1",
| "taskFailures" : 22
|}
""".stripMargin
private val executorUnblacklistedJsonString =
s"""
|{
| "Event" : "org.apache.spark.scheduler.SparkListenerExecutorUnblacklisted",
| "time" : ${executorUnblacklistedTime},
| "executorId" : "exec1"
|}
""".stripMargin
private val nodeBlacklistedJsonString =
s"""
|{
| "Event" : "org.apache.spark.scheduler.SparkListenerNodeBlacklisted",
| "time" : ${nodeBlacklistedTime},
| "hostId" : "node1",
| "executorFailures" : 33
|}
""".stripMargin
private val nodeUnblacklistedJsonString =
s"""
|{
| "Event" : "org.apache.spark.scheduler.SparkListenerNodeUnblacklisted",
| "time" : ${nodeUnblacklistedTime},
| "hostId" : "node1"
|}
""".stripMargin
private val resourceProfileJsonString =
"""
|{
| "Event":"SparkListenerResourceProfileAdded",
| "Resource Profile Id":21,
| "Executor Resource Requests":{
| "cores" : {
| "Resource Name":"cores",
| "Amount":2,
| "Discovery Script":"",
| "Vendor":""
| },
| "gpu":{
| "Resource Name":"gpu",
| "Amount":2,
| "Discovery Script":"myscript",
| "Vendor":""
| }
| },
| "Task Resource Requests":{
| "cpus":{
| "Resource Name":"cpus",
| "Amount":1.0
| },
| "gpu":{
| "Resource Name":"gpu",
| "Amount":1.0
| }
| }
|}
""".stripMargin
}
case class TestListenerEvent(foo: String, bar: Int) extends SparkListenerEvent
|
rednaxelafx/apache-spark
|
core/src/test/scala/org/apache/spark/util/JsonProtocolSuite.scala
|
Scala
|
apache-2.0
| 92,266
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api.batch.sql
import org.apache.flink.api.java.typeutils.{GenericTypeInfo, RowTypeInfo}
import org.apache.flink.api.scala._
import org.apache.flink.table.api.Types
import org.apache.flink.table.api.scala._
import org.apache.flink.table.runtime.utils.CommonTestData.NonPojo
import org.apache.flink.table.utils.TableTestUtil._
import org.apache.flink.table.utils.TableTestBase
import org.junit.{Ignore, Test}
class SetOperatorsTest extends TableTestBase {
@Test
def testMinusWithNestedTypes(): Unit = {
val util = batchTestUtil()
val t = util.addTable[(Long, (Int, String), Array[Boolean])]("MyTable", 'a, 'b, 'c)
val expected = binaryNode(
"DataSetMinus",
batchTableNode(0),
batchTableNode(0),
term("minus", "a", "b", "c")
)
val result = t.minus(t)
util.verifyTable(result, expected)
}
@Test
def testExists(): Unit = {
val util = batchTestUtil()
util.addTable[(Long, Int, String)]("A", 'a_long, 'a_int, 'a_string)
util.addTable[(Long, Int, String)]("B", 'b_long, 'b_int, 'b_string)
val expected = unaryNode(
"DataSetCalc",
binaryNode(
"DataSetJoin",
batchTableNode(0),
unaryNode(
"DataSetCalc",
unaryNode(
"DataSetAggregate",
unaryNode(
"DataSetCalc",
batchTableNode(1),
term("select", "b_long AS b_long3", "true AS $f0"),
term("where", "IS NOT NULL(b_long)")
),
term("groupBy", "b_long3"),
term("select", "b_long3", "MIN($f0) AS $f1")
),
term("select", "b_long3")
),
term("where", "=(a_long, b_long3)"),
term("join", "a_long", "a_int", "a_string", "b_long3"),
term("joinType", "InnerJoin")
),
term("select", "a_int", "a_string")
)
util.verifySql(
"SELECT a_int, a_string FROM A WHERE EXISTS(SELECT * FROM B WHERE a_long = b_long)",
expected
)
}
@Test
def testNotIn(): Unit = {
val util = batchTestUtil()
util.addTable[(Int, Long, String)]("A", 'a, 'b, 'c)
val expected = unaryNode(
"DataSetCalc",
binaryNode(
"DataSetJoin",
unaryNode(
"DataSetCalc",
binaryNode(
"DataSetSingleRowJoin",
batchTableNode(0),
unaryNode(
"DataSetAggregate",
binaryNode(
"DataSetUnion",
values(
"DataSetValues",
term("tuples", "[{ null }]"),
term("values", "b")
),
unaryNode(
"DataSetCalc",
batchTableNode(0),
term("select", "b"),
term("where", "OR(=(b, 6), =(b, 1))")
),
term("union", "b")
),
term("select", "COUNT(*) AS $f0", "COUNT(b) AS $f1")
),
term("where", "true"),
term("join", "a", "b", "c", "$f0", "$f1"),
term("joinType", "NestedLoopInnerJoin")
),
term("select", "a AS $f0", "b AS $f1", "c AS $f2", "$f0 AS $f3", "$f1 AS $f4", "b AS $f5")
),
unaryNode(
"DataSetAggregate",
unaryNode(
"DataSetCalc",
batchTableNode(0),
term("select", "b AS $f0", "true AS $f1"),
term("where", "OR(=(b, 6), =(b, 1))")
),
term("groupBy", "$f0"),
term("select", "$f0", "MIN($f1) AS $f1")
),
term("where", "=($f5, $f00)"),
term("join", "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f00", "$f10"),
term("joinType", "LeftOuterJoin")
),
term("select", "$f0 AS a", "$f2 AS c"),
term("where", "OR(=($f3, 0), AND(IS NULL($f10), >=($f4, $f3), IS NOT NULL($f5)))")
)
util.verifySql(
"SELECT a, c FROM A WHERE b NOT IN (SELECT b FROM A WHERE b = 6 OR b = 1)",
expected
)
}
@Test
def testInWithFields(): Unit = {
val util = batchTestUtil()
util.addTable[(Int, Long, Int, String, Long)]("A", 'a, 'b, 'c, 'd, 'e)
val expected = unaryNode(
"DataSetCalc",
batchTableNode(0),
term("select", "a", "b", "c", "d", "e"),
term("where", "OR(=(a, c), =(a, CAST(b)), =(a, 5))")
)
util.verifySql(
"SELECT a, b, c, d, e FROM A WHERE a IN (c, b, 5)",
expected
)
}
@Test
@Ignore // Calcite bug
def testNotInWithFilter(): Unit = {
val util = batchTestUtil()
util.addTable[(Int, Long, String)]("A", 'a, 'b, 'c)
util.addTable[(Int, Long, Int, String, Long)]("B", 'a, 'b, 'c, 'd, 'e)
val expected = "FAIL"
util.verifySql(
"SELECT d FROM B WHERE d NOT IN (SELECT a FROM A) AND d < 5",
expected
)
}
@Test
def testUnionNullableTypes(): Unit = {
val util = batchTestUtil()
util.addTable[((Int, String), (Int, String), Int)]("A", 'a, 'b, 'c)
val expected = binaryNode(
"DataSetUnion",
unaryNode(
"DataSetCalc",
batchTableNode(0),
term("select", "a")
),
unaryNode(
"DataSetCalc",
batchTableNode(0),
term("select", "CASE(>(c, 0), b, null) AS EXPR$0")
),
term("union", "a")
)
util.verifySql(
"SELECT a FROM A UNION ALL SELECT CASE WHEN c > 0 THEN b ELSE NULL END FROM A",
expected
)
}
@Test
def testUnionAnyType(): Unit = {
val util = batchTestUtil()
val typeInfo = Types.ROW(
new GenericTypeInfo(classOf[NonPojo]),
new GenericTypeInfo(classOf[NonPojo]))
util.addJavaTable(typeInfo, "A", "a, b")
val expected = binaryNode(
"DataSetUnion",
unaryNode(
"DataSetCalc",
batchTableNode(0),
term("select", "a")
),
unaryNode(
"DataSetCalc",
batchTableNode(0),
term("select", "b")
),
term("union", "a")
)
util.verifyJavaSql("SELECT a FROM A UNION ALL SELECT b FROM A", expected)
}
}
|
zimmermatt/flink
|
flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/batch/sql/SetOperatorsTest.scala
|
Scala
|
apache-2.0
| 6,895
|
package ir.ast
import ir.interpreter.Interpreter.ValueMap
import ir._
/**
* asScalar pattern. (a.k.a., joinVec).
* Code for this pattern can be generated.
*
* The asScalar pattern has the following high-level semantics:
* `asScalar()( [ <x,,1,,, ..., x,,n,,>, ..., <x,,m-n+1,,, ..., x,,m,,> ] ) =
* [x,,1,,, ..., x,,m,,]`
*
* The asScalar pattern has the following type:
* `asScalar(): [ < a >,,i,, ],,j,, -> [ a ],,i x j,,`
*
* We know the following algorithmic rewrite rules for the asScalar pattern
* (so far):
* - `asScalar() o asVector(n) | asVector(n) o asScalar() => id`
*/
case class asScalar() extends Pattern(arity = 1) {
override def checkType(argType: Type,
setType: Boolean): Type = {
argType match {
case at@ArrayTypeWSWC(VectorType(_, _),_,_) => Type.asScalarType(at)
case _ =>
throw new TypeException(argType, "ArrayType(VectorType(_, _), _)", this)
}
}
override def eval(valueMap: ValueMap, args: Any*): Any =
Join().eval(valueMap, args:_*)
}
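// Hedged illustration, not part of the Lift source above: the high-level
// semantics in the scaladoc can be modelled with plain Scala collections,
// treating a "vector of width n" as a group of n scalars. The helper names
// asVectorLike and asScalarLike are hypothetical and exist only to show the
// `asScalar() o asVector(n) => id` rewrite rule.
object AsScalarSemanticsSketch extends App {
  def asVectorLike(n: Int)(xs: Seq[Float]): Seq[Seq[Float]] = xs.grouped(n).toSeq
  def asScalarLike(xss: Seq[Seq[Float]]): Seq[Float] = xss.flatten

  val scalars = Seq(1.0f, 2.0f, 3.0f, 4.0f)
  // Flattening the grouped elements yields the original flat sequence.
  assert(asScalarLike(asVectorLike(2)(scalars)) == scalars)
}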
|
lift-project/lift
|
src/main/ir/ast/asScalar.scala
|
Scala
|
mit
| 1,049
|
/*
* Copyright (c) 2012-2014 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.enrich.common
package utils
// Java
import java.lang.reflect.Method
// Scalaz
import scalaz._
import Scalaz._
/**
* The problem we're trying to solve: converting maps to classes in Scala
* is not very easy to do in a functional way, and it gets even harder
* if you have a class with >22 fields (so can't use case classes).
*
* For a discussion about this on Stack Overflow, see:
* http://stackoverflow.com/questions/4290955/instantiating-a-case-class-from-a-list-of-parameters
*
* The idea is to use Java Reflection with a big ol' TransformMap:
*
* ("key in map" -> Tuple2(transformFunc, "field in class"),
* "another key" -> Tuple2(transformFunc, "field in class"),
* "a third key" -> Tuple2(transformFunc, "field in class"))
*
* And then there would be an iteration through the source map which
* looks up the key in the TransformMap and then applies the
* transformFunc using Java Reflection, storing the answer in the
* class's specified field.
*
* If I can get this working, then the next step is to generate
* a "results" map which contains the results of each
* transformFunc. Each transformFunc should either return a
* ValidationNel[String, Unit], or should be promoted (via an
* implicit) to the same.
*
* Having this should allow me to do something like:
* resultsMap.foldLeft(Unit.success, |@|) to roll up any validation
* errors into one final ValidationNel.
*
* If I can get all that working, then the final step is to
* support transformFuncs which set multiple fields. To avoid the
* complexity spiralling, it would probably be simplest if any
* transformFunc that wanted to return multiple values returned
* a TupleN, and then we use the same TupleN for the target fields.
* Maybe there will be an implicit to convert "raw" target fields
* into Tuple1s.
*
* Okay, let's start...
*/
object MapTransformer {
// Clarificatory aliases
type Key = String
type Value = String
type Field = String
// A transformation takes a Key and Value and returns a Scalaz Validation with String for Failure and anything for Success
type TransformFunc = Function2[Key, Value, Validation[String, _]]
// Our source map
type SourceMap = Map[Key, Value]
// Our map for transforming data
type TransformMap = Map[Key, Tuple2[TransformFunc, _]]
// All of the setter methods on this object
type SettersMap = Map[Key, Method]
/**
* A factory to generate a new object using
* a TransformMap.
*
* @param sourceMap Contains the source data to
* apply to the obj
* @param transformMap Determines how the source
* data should be transformed
* before storing in the obj
* @return a ValidationNel containing either a Nel
* of error Strings, or the new object
*/
def generate[T <: AnyRef](sourceMap: SourceMap, transformMap: TransformMap)(implicit m: Manifest[T]): Validated[T] = {
val newInst = m.erasure.newInstance()
val result = _transform(newInst, sourceMap, transformMap, getSetters(m.erasure))
result.flatMap(s => newInst.asInstanceOf[T].success) // On success, replace the field count with the new instance
}
/**
* An implicit conversion to take any Object and make it
* Transformable.
*
* @param obj Any Object
* @return the new Transformable class, with manifest attached
*/
implicit def makeTransformable[T <: AnyRef](obj: T)(implicit m : Manifest[T]) = new TransformableClass[T](obj)
/**
* A pimped object, now transformable by
* using the transform method.
*/
class TransformableClass[T](obj: T)(implicit m: Manifest[T]) {
// Do all the reflection for the setters we need:
// This needs to be lazy because Method is not serializable
private lazy val setters = getSetters(m.erasure)
/**
* Update the object by applying the contents
* of a SourceMap to the object using a TransformMap.
*
* @param sourceMap Contains the source data to
* apply to the obj
* @param transformMap Determines how the source
* data should be transformed
* before storing in the obj
* @return a ValidationNel containing either a Nel
* of error Strings, or the count of
* updated fields
*/
def transform(sourceMap: SourceMap, transformMap: TransformMap): ValidationNel[String, Int] =
_transform[T](obj, sourceMap, transformMap, setters)
}
/**
* General-purpose method to update any object
* by applying the contents of a SourceMap to
* the object using a TransformMap. We use the
* SettersMap to update the object.
*
* @param obj Any Object
* @param sourceMap Contains the source data to
* apply to the obj
* @param transformMap Determines how the source
* data should be transformed
* before storing in the obj
* @param setters Provides access to the obj's
* setX() methods
* @return a ValidationNel containing either a Nel
* of error Strings, or the count of
* updated fields
*/
private def _transform[T](obj: T, sourceMap: SourceMap, transformMap: TransformMap, setters: SettersMap): ValidationNel[String, Int] = {
val results: List[Validation[String, Int]] = sourceMap.map { case (key, in) =>
if (transformMap.contains(key)) {
val (func, field) = transformMap(key)
val out = func(key, in)
out match {
case Success(s) =>
field match {
case f: String =>
val result = s.asInstanceOf[AnyRef]
setters(f).invoke(obj, result)
1.success[String] // +1 to the count of fields successfully set
case Tuple2(f1: String, f2: String) =>
val result = s.asInstanceOf[Tuple2[AnyRef, AnyRef]]
setters(f1).invoke(obj, result._1)
setters(f2).invoke(obj, result._2)
2.success[String] // +2 to the count of fields successfully set
case Tuple3(f1: String, f2: String, f3: String) =>
val result = s.asInstanceOf[Tuple3[AnyRef, AnyRef, AnyRef]]
setters(f1).invoke(obj, result._1)
setters(f2).invoke(obj, result._2)
setters(f3).invoke(obj, result._3)
3.success[String] // +3 to the count of fields successfully set
case Tuple4(f1: String, f2: String, f3: String, f4: String) =>
val result = s.asInstanceOf[Tuple4[AnyRef, AnyRef, AnyRef, AnyRef]]
setters(f1).invoke(obj, result._1)
setters(f2).invoke(obj, result._2)
setters(f3).invoke(obj, result._3)
setters(f4).invoke(obj, result._4)
4.success[String] // +4 to the count of fields successfully set
}
case Failure(e) =>
e.fail[Int]
}
} else {
0.success[String] // Key not found: zero fields updated
}
}.toList
results.foldLeft(0.successNel[String])(_ +++ _.toValidationNel)
}
/**
* Lowercases the first character in
* a String.
*
* @param s The String to lowercase the
* first letter of
* @return s with the first character
* in lowercase
*/
private def lowerFirst(s: String): String =
s.substring(0,1).toLowerCase + s.substring(1)
/**
* Gets the field name from a setter Method,
* by cutting out "set" and lowercasing the
* first character after in the setter's name.
*
* @param setter The Method from which we will
* reverse-engineer the field name
* @return the field name extracted from the setter
*/
private def setterToFieldName(setter: Method): String =
lowerFirst(setter.getName.substring(3))
/**
* Gets all of the setter Methods
* from a manifest.
*
* @param c The manifest containing the
* setter methods to return
* @return the Map of setter Methods
*/
private def getSetters[T](c: Class[T]): SettersMap = c
.getDeclaredMethods
.filter { _.getName.startsWith("set") }
.groupBy { setterToFieldName(_) }
.mapValues { _.head }
}
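// Hedged sketch, not part of the Snowplow source above: the TransformMap idea
// described in the long comment at the top of this file, re-expressed with
// plain Scala (2.12+ right-biased) Either instead of scalaz Validation and
// with explicit setter functions instead of Java reflection. Every name below
// is hypothetical.
object MapTransformerSketch extends App {
  final class Target { var name: String = _; var age: Int = 0 }

  // key in source map -> (transform function, setter on the target object)
  val transformMap: Map[String, (String => Either[String, Any], (Target, Any) => Unit)] = Map(
    ("name", ((v: String) => Right(v), (t: Target, x: Any) => t.name = x.asInstanceOf[String])),
    ("age", ((v: String) => scala.util.Try(v.toInt).toOption.toRight(s"not an int: $v"),
             (t: Target, x: Any) => t.age = x.asInstanceOf[Int]))
  )

  val sourceMap = Map("name" -> "Alice", "age" -> "30", "ignored" -> "x")
  val target = new Target

  // Apply the matching transform for each key, set the field on success, and
  // collect either an error or a count of fields set, mirroring _transform.
  val results: List[Either[String, Int]] = sourceMap.toList.flatMap { case (key, in) =>
    transformMap.get(key).map { case (func, set) =>
      func(in).map { out => set(target, out); 1 }
    }
  }
  val errors = results.collect { case Left(e) => e }
  val fieldsSet = results.collect { case Right(n) => n }.sum
  println(s"fields set: $fieldsSet, errors: $errors, target: ${target.name}/${target.age}")
}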
|
mdavid/lessig-bigdata
|
lib/snowplow/3-enrich/scala-common-enrich/src/main/scala/com.snowplowanalytics.snowplow.enrich/common/utils/MapTransformer.scala
|
Scala
|
mit
| 9,038
|
/* *\
** Squants **
** **
** Scala Quantities and Units of Measure Library and DSL **
** (c) 2013-2015, Gary Keorkunian **
** **
\* */
package squants.motion
import squants._
import squants.mass.Kilograms
import squants.time.{ SecondTimeIntegral, Seconds, TimeIntegral, TimeSquared }
/**
* @author garyKeorkunian
* @since 0.1
*
* @param value Double
*/
final class Momentum private (val value: Double, val unit: MomentumUnit)
extends Quantity[Momentum]
with TimeIntegral[Force]
with SecondTimeIntegral[Yank] {
def dimension = Momentum
protected def timeDerived = Newtons(toNewtonSeconds)
protected def time = Seconds(1)
def /(that: Velocity): Mass = Kilograms(this.toNewtonSeconds / that.toMetersPerSecond)
def /(that: Mass): Velocity = MetersPerSecond(this.toNewtonSeconds / that.toKilograms)
def /(that: TimeSquared): Yank = this / that.time1 / that.time2
def /(that: Yank): TimeSquared = (this / that.timeIntegrated) * time
def toNewtonSeconds = to(NewtonSeconds)
}
object Momentum extends Dimension[Momentum] {
private[motion] def apply[A](n: A, unit: MomentumUnit)(implicit num: Numeric[A]) = new Momentum(num.toDouble(n), unit)
def apply(m: Mass, v: Velocity): Momentum = NewtonSeconds(m.toKilograms * v.toMetersPerSecond)
def apply = parse _
def name = "Momentum"
def primaryUnit = NewtonSeconds
def siUnit = NewtonSeconds
def units = Set(NewtonSeconds)
}
trait MomentumUnit extends UnitOfMeasure[Momentum] {
def apply[A](n: A)(implicit num: Numeric[A]) = Momentum(n, this)
}
object NewtonSeconds extends MomentumUnit with PrimaryUnit with SiUnit {
val symbol = "Ns"
}
object MomentumConversions {
lazy val newtonSecond = NewtonSeconds(1)
implicit class MomentumConversions[A](n: A)(implicit num: Numeric[A]) {
def newtonSeconds = NewtonSeconds(n)
}
implicit object MomentumNumeric extends AbstractQuantityNumeric[Momentum](Momentum.primaryUnit)
}
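// Hedged usage sketch (assumes the squants definitions above are on the
// classpath): Momentum is built from Mass and Velocity via the companion
// apply shown above, and the division operators recover the other factor.
object MomentumUsageSketch extends App {
  import squants.mass.Kilograms
  import squants.motion.{MetersPerSecond, Momentum, NewtonSeconds}

  val p = Momentum(Kilograms(2), MetersPerSecond(3))
  println(p.toNewtonSeconds)               // expected: 6.0
  println(p / MetersPerSecond(3))          // expected: 2.0 kg
  println(NewtonSeconds(6) / Kilograms(2)) // expected: 3.0 m/s
}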
|
underscorenico/squants
|
shared/src/main/scala/squants/motion/Momentum.scala
|
Scala
|
apache-2.0
| 2,324
|
package org.jetbrains.plugins.cbt.project
import java.net.URL
import java.util
import com.intellij.execution.configurations.SimpleJavaParameters
import com.intellij.openapi.application.PathManager
import com.intellij.openapi.externalSystem.ExternalSystemManager
import com.intellij.openapi.externalSystem.model.ProjectSystemId
import com.intellij.openapi.externalSystem.util.ExternalSystemConstants
import com.intellij.openapi.fileChooser.FileChooserDescriptor
import com.intellij.openapi.project.Project
import com.intellij.openapi.util.Pair
import com.intellij.util.Function
import org.jetbrains.plugins.cbt.project.settings._
import org.jetbrains.sbt.jarWith
import scala.collection.JavaConverters._
import org.jetbrains.plugins.cbt._
class CbtExternalSystemManager
extends ExternalSystemManager[CbtProjectSettings, CbtProjectSettingsListener,
CbtSystemSettings, CbtLocalSettings, CbtExecutionSettings] {
override def getTaskManagerClass: Class[CbtTaskManager] = classOf[CbtTaskManager]
override def getSystemId: ProjectSystemId = CbtProjectSystem.Id
override def getExternalProjectDescriptor: FileChooserDescriptor = new CbtOpenProjectDescriptor
override def getExecutionSettingsProvider: Function[Pair[Project, String], CbtExecutionSettings] =
(project: Project, path: String) => {
val projectSettings = CbtProjectSettings.getInstance(project, path)
new CbtExecutionSettings(path,
projectSettings.isCbt,
projectSettings.useCbtForInternalTasks,
projectSettings.useDirect,
projectSettings.extraModules.asScala)
}
override def getProjectResolverClass: Class[CbtProjectResolver] = classOf[CbtProjectResolver]
override val getLocalSettingsProvider: Function[Project, CbtLocalSettings] =
(project: Project) => CbtLocalSettings.getInstance(project)
override def getSettingsProvider: Function[Project, CbtSystemSettings] =
(project: Project) => CbtSystemSettings.instance(project)
override def enhanceLocalProcessing(urls: util.List[URL]): Unit = {
urls.add(jarWith[scala.App].toURI.toURL)
}
override def enhanceRemoteProcessing(parameters: SimpleJavaParameters): Unit = {
val classpath = parameters.getClassPath
classpath.add(jarWith[this.type])
classpath.add(jarWith[org.jetbrains.sbt.structure.XmlSerializer[_]])
classpath.add(jarWith[scala.App])
classpath.add(jarWith[scala.xml.Node])
parameters.getVMParametersList.addProperty(
ExternalSystemConstants.EXTERNAL_SYSTEM_ID_KEY, CbtProjectSystem.Id.getId)
parameters.getVMParametersList.addProperty(
PathManager.PROPERTY_LOG_PATH, PathManager.getLogPath)
}
}
|
triplequote/intellij-scala
|
cbt/src/org/jetbrains/plugins/cbt/project/CbtExternalSystemManager.scala
|
Scala
|
apache-2.0
| 2,669
|
package org.scurator
import org.apache.zookeeper.Watcher.Event.{EventType, KeeperState}
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scurator.components._
import scala.concurrent.ExecutionContext.Implicits.global
/**
*
*/
@RunWith(classOf[JUnitRunner])
class TestGetChildren extends BaseSCuratorTest with SCuratorTestClient {
"A SCuratorClient" should "support getting child nodes" in {
val fooPath: String = "/foo"
val fooChild1: String = s"$fooPath/1"
val fooChild2: String = s"$fooPath/2"
val fooChild3: String = s"$fooPath/3"
// Create a few nodes
client.create(CreateRequest(path = fooPath)).futureValue
client.create(CreateRequest(path = fooChild1)).futureValue
client.create(CreateRequest(path = fooChild2)).futureValue
client.create(CreateRequest(path = fooChild3)).futureValue
// Verify the nodes exists
val fooExistsResult = client.exists(ExistsRequest(path = fooPath)).futureValue
fooExistsResult shouldBe 'exists
val fooChild1ExistsResult = client.exists(ExistsRequest(path = fooChild1)).futureValue
fooChild1ExistsResult shouldBe 'exists
val fooChild2ExistsResult = client.exists(ExistsRequest(path = fooChild2)).futureValue
fooChild2ExistsResult shouldBe 'exists
val fooChild3ExistsResult = client.exists(ExistsRequest(path = fooChild3)).futureValue
fooChild3ExistsResult shouldBe 'exists
// Get the children nodes
val result = client.getChildren(GetChildrenRequest(path = fooPath))
whenReady(result) { result =>
// scalastyle:off magic.number
result.children.size shouldBe 3
val sortedChildren = result.children.sorted
sortedChildren.head shouldBe "1"
sortedChildren(1) shouldBe "2"
sortedChildren(2) shouldBe "3"
// scalastyle:on magic.number
}
}
it should "support getting child nodes with a watcher" in {
val fooPath: String = "/foo"
val fooChild1: String = s"$fooPath/1"
val fooChild2: String = s"$fooPath/2"
// Create a few nodes
client.create(CreateRequest(path = fooPath)).futureValue
client.create(CreateRequest(path = fooChild1)).futureValue
// Verify the nodes exists
val fooExistsResult = client.exists(ExistsRequest(path = fooPath)).futureValue
fooExistsResult shouldBe 'exists
val fooChild1ExistsResult = client.exists(ExistsRequest(path = fooChild1)).futureValue
fooChild1ExistsResult shouldBe 'exists
// Get the children nodes with a watcher
val resultWithWatcher = client.getChildren(GetChildrenRequest(path = fooPath, watch = true)).futureValue
// Verify the results
resultWithWatcher.children.size shouldBe 1
resultWithWatcher.children.head shouldBe "1"
resultWithWatcher.watch shouldBe 'isDefined
val watchEvent = resultWithWatcher.watch.get.event
// Add a child to trigger the watcher
client.create(CreateRequest(path = fooChild2)).futureValue
// Verify the watch event results
whenReady(watchEvent) { result =>
result.getPath shouldBe fooPath
result.getType shouldBe EventType.NodeChildrenChanged
}
}
}
|
granthenke/scurator
|
src/test/scala/org/scurator/TestGetChildren.scala
|
Scala
|
apache-2.0
| 3,135
|
package scalapb
import java.util.Base64
import com.google.protobuf.ByteString
import com.google.protobuf.struct.{ListValue, Struct, Value}
import com.google.protobuf.test.unittest_import.{ImportEnum, ImportMessage}
import org.scalatest.EitherValues
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.must.Matchers
import protobuf_unittest.fromstruct_two_level_nesting.{
ContainerMessage,
DeepEnum,
DeepMessage,
ImportMessage => ContainerMessageImportMessage
}
import protobuf_unittest.unittest.TestAllTypes.NestedMessage
import protobuf_unittest.unittest.{ForeignEnum, ForeignMessage, TestAllTypes}
import scalapb.StructUtils.StructParsingError
import scala.util.Random
import scala.annotation.nowarn
@nowarn("cat=deprecation")
class StructUtilsSpec extends AnyFlatSpec with Matchers with EitherValues {
/** Helper to construct a ByteString from a sequence of ints: each int is
* truncated to a byte and copied directly, *not* encoded using UTF-8.
*/
def bytes(bytesAsInts: Int*): ByteString =
ByteString.copyFrom(bytesAsInts.map(_.toByte).toArray)
"toStruct of empty message" should "pass" in {
StructUtils.toStruct(TestAllTypes()) must be(
Struct(Map.empty)
)
}
"toStruct with int field" should "pass" in {
val someIntValue = Random.nextInt()
StructUtils.toStruct(TestAllTypes().withOptionalInt32(someIntValue)) must be(
Struct(Map("optional_int32" -> Value(Value.Kind.NumberValue(someIntValue.toDouble))))
)
}
"toStruct with long field" should "pass" in {
// Using StringValue to not lose precision like json encoding does
val someLongValue = Random.nextLong()
StructUtils.toStruct(TestAllTypes().withOptionalInt64(someLongValue)) must be(
Struct(Map("optional_int64" -> Value(Value.Kind.StringValue(someLongValue.toString))))
)
}
"toStruct with double field" should "pass" in {
val someDoubleValue = Random.nextDouble()
StructUtils.toStruct(TestAllTypes().withOptionalDouble(someDoubleValue)) must be(
Struct(Map("optional_double" -> Value(Value.Kind.NumberValue(someDoubleValue.toDouble))))
)
}
"toStruct with float field" should "pass" in {
val someFloatValue = Random.nextFloat()
StructUtils.toStruct(TestAllTypes().withOptionalFloat(someFloatValue)) must be(
Struct(Map("optional_float" -> Value(Value.Kind.NumberValue(someFloatValue.toDouble))))
)
}
"toStruct with string field" should "pass" in {
val someStringValue = Random.alphanumeric.take(Random.nextInt(500)).mkString
StructUtils.toStruct(TestAllTypes().withOptionalString(someStringValue)) must be(
Struct(Map("optional_string" -> Value(Value.Kind.StringValue(someStringValue))))
)
}
"toStruct with byte string field" should "pass" in {
val someBytesValue = bytes(0xe3, 0x81, 0x82)
StructUtils.toStruct(TestAllTypes().withOptionalBytes(someBytesValue)) must be(
Struct(
Map(
"optional_bytes" -> Value(
Value.Kind.StringValue(new String(Base64.getEncoder.encode(someBytesValue.toByteArray)))
)
)
)
)
}
"toStruct with boolean field" should "pass" in {
val someBooleanValue = Random.nextBoolean()
StructUtils.toStruct(TestAllTypes().withOptionalBool(someBooleanValue)) must be(
Struct(Map("optional_bool" -> Value(Value.Kind.BoolValue(someBooleanValue))))
)
}
"toStruct with enum field" should "pass" in {
// using name and not full name to mimic json which is the closest counterpart to Struct
StructUtils.toStruct(TestAllTypes().withOptionalForeignEnum(ForeignEnum.FOREIGN_BAR)) must be(
Struct(
Map("optional_foreign_enum" -> Value(Value.Kind.StringValue(ForeignEnum.FOREIGN_BAR.name)))
)
)
}
"toStruct with repeated field" should "pass" in {
val someRepeatedIntValue = Seq(Random.nextInt(), Random.nextInt())
StructUtils.toStruct(TestAllTypes().withRepeatedInt32(someRepeatedIntValue)) must be(
Struct(
Map(
"repeated_int32" -> Value(
Value.Kind.ListValue(
ListValue(someRepeatedIntValue.map(i => Value(Value.Kind.NumberValue(i.toDouble))))
)
)
)
)
)
}
"toStruct with empty nested message field" should "pass" in {
StructUtils.toStruct(TestAllTypes().withOptionalNestedMessage(NestedMessage())) must be(
Struct(Map("optional_nested_message" -> Value(Value.Kind.StructValue(Struct(Map.empty)))))
)
}
"toStruct with non-empty nested message field" should "pass" in {
val someIntValue = Random.nextInt()
StructUtils.toStruct(
TestAllTypes().withOptionalNestedMessage(NestedMessage().withBb(someIntValue))
) must be(
Struct(
Map(
"optional_nested_message" -> Value(
Value.Kind.StructValue(
Struct(Map("bb" -> Value(Value.Kind.NumberValue(someIntValue.toDouble))))
)
)
)
)
)
}
implicit val testAllTypesCompanion: GeneratedMessageCompanion[TestAllTypes] =
TestAllTypes.messageCompanion
"fromStruct of empty message" should "pass" in {
StructUtils.fromStruct(Struct(Map.empty)).right.value must be(
TestAllTypes()
)
}
"fromStruct with int field" should "pass" in {
val someIntValue = Random.nextInt()
StructUtils
.fromStruct(
Struct(Map("optional_int32" -> Value(Value.Kind.NumberValue(someIntValue.toDouble))))
)
.right
.value must be(
TestAllTypes().withOptionalInt32(someIntValue)
)
}
"fromStruct with long field" should "pass" in {
val someLongValue = Random.nextLong()
StructUtils
.fromStruct(
Struct(Map("optional_int64" -> Value(Value.Kind.StringValue(someLongValue.toString))))
)
.right
.value must be(
TestAllTypes().withOptionalInt64(someLongValue)
)
}
"fromStruct with double field" should "pass" in {
val someDoubleValue = Random.nextDouble()
StructUtils
.fromStruct(
Struct(Map("optional_double" -> Value(Value.Kind.NumberValue(someDoubleValue.toDouble))))
)
.right
.value must be(
TestAllTypes().withOptionalDouble(someDoubleValue)
)
}
"fromStruct with float field" should "pass" in {
val someFloatValue = Random.nextFloat()
StructUtils
.fromStruct(
Struct(Map("optional_float" -> Value(Value.Kind.NumberValue(someFloatValue.toDouble))))
)
.right
.value must be(
TestAllTypes().withOptionalFloat(someFloatValue)
)
}
"fromStruct with string field" should "pass" in {
val someStringValue = Random.alphanumeric.take(Random.nextInt(500)).mkString
StructUtils
.fromStruct(Struct(Map("optional_string" -> Value(Value.Kind.StringValue(someStringValue)))))
.right
.value must be(
TestAllTypes().withOptionalString(someStringValue)
)
}
"fromStruct with byte string field" should "pass" in {
val someBytesValue = bytes(0xe3, 0x81, 0x82)
StructUtils
.fromStruct(
Struct(
Map(
"optional_bytes" -> Value(
Value.Kind.StringValue(
new String(Base64.getEncoder.encode(someBytesValue.toByteArray))
)
)
)
)
)
.right
.value must be(
TestAllTypes().withOptionalBytes(someBytesValue)
)
}
"fromStruct with boolean field" should "pass" in {
val someBooleanValue = Random.nextBoolean()
StructUtils
.fromStruct(Struct(Map("optional_bool" -> Value(Value.Kind.BoolValue(someBooleanValue)))))
.right
.value must be(
TestAllTypes().withOptionalBool(someBooleanValue)
)
}
"fromStruct with enum field" should "pass" in {
StructUtils
.fromStruct(
Struct(
Map(
"optional_foreign_enum" -> Value(Value.Kind.StringValue(ForeignEnum.FOREIGN_BAR.name))
)
)
)
.right
.value must be(
TestAllTypes().withOptionalForeignEnum(ForeignEnum.FOREIGN_BAR)
)
}
"fromStruct with repeated field" should "pass" in {
val someRepeatedIntValue = Seq(Random.nextInt(), Random.nextInt())
StructUtils
.fromStruct(
Struct(
Map(
"repeated_int32" -> Value(
Value.Kind.ListValue(
ListValue(someRepeatedIntValue.map(i => Value(Value.Kind.NumberValue(i.toDouble))))
)
)
)
)
)
.right
.value must be(
TestAllTypes().withRepeatedInt32(someRepeatedIntValue)
)
}
"fromStruct with empty nested message field" should "pass" in {
StructUtils
.fromStruct(
Struct(Map("optional_nested_message" -> Value(Value.Kind.StructValue(Struct(Map.empty)))))
)
.right
.value must be(
TestAllTypes().withOptionalNestedMessage(NestedMessage())
)
}
"fromStruct with non-empty nested message field" should "pass" in {
val someIntValue = Random.nextInt()
StructUtils
.fromStruct(
Struct(
Map(
"optional_nested_message" -> Value(
Value.Kind.StructValue(
Struct(Map("bb" -> Value(Value.Kind.NumberValue(someIntValue.toDouble))))
)
)
)
)
)
.right
.value must be(
TestAllTypes().withOptionalNestedMessage(NestedMessage().withBb(someIntValue))
)
}
"fromStruct with empty import message field" should "pass" in {
StructUtils
.fromStruct(
Struct(Map("optional_import_message" -> Value(Value.Kind.StructValue(Struct(Map.empty)))))
)
.right
.value must be(
TestAllTypes().withOptionalImportMessage(ImportMessage())
)
}
"fromStruct with empty foreign message field" should "pass" in {
StructUtils
.fromStruct(
Struct(Map("optional_foreign_message" -> Value(Value.Kind.StructValue(Struct(Map.empty)))))
)
.right
.value must be(
TestAllTypes().withOptionalForeignMessage(ForeignMessage())
)
}
"ser-deser of empty message" should "pass" in {
val types = TestAllTypes()
StructUtils.fromStruct(StructUtils.toStruct(types)).right.value must be(types)
}
"ser-deser with int field" should "pass" in {
val types = TestAllTypes().withOptionalInt32(Random.nextInt())
StructUtils.fromStruct(StructUtils.toStruct(types)).right.value must be(types)
}
"ser-deser with long field" should "pass" in {
val types = TestAllTypes().withOptionalInt64(Random.nextLong())
StructUtils.fromStruct(StructUtils.toStruct(types)).right.value must be(types)
}
"ser-deser with double field" should "pass" in {
val types = TestAllTypes().withOptionalDouble(Random.nextDouble())
StructUtils.fromStruct(StructUtils.toStruct(types)).right.value must be(types)
}
"ser-deser with float field" should "pass" in {
val types = TestAllTypes().withOptionalFloat(Random.nextFloat())
StructUtils.fromStruct(StructUtils.toStruct(types)).right.value must be(types)
}
"ser-deser with string field" should "pass" in {
val types =
TestAllTypes().withOptionalString(Random.alphanumeric.take(Random.nextInt(500)).mkString)
StructUtils.fromStruct(StructUtils.toStruct(types)).right.value must be(types)
}
"ser-deser with byte string field" should "pass" in {
val types = TestAllTypes().withOptionalBytes(bytes(0xe3, 0x81, 0x82))
StructUtils.fromStruct(StructUtils.toStruct(types)).right.value must be(types)
}
"ser-deser with boolean field" should "pass" in {
val types = TestAllTypes().withOptionalBool(Random.nextBoolean())
StructUtils.fromStruct(StructUtils.toStruct(types)).right.value must be(types)
}
"ser-deser with enum field" should "pass" in {
val types = TestAllTypes().withOptionalForeignEnum(ForeignEnum.FOREIGN_BAR)
StructUtils.fromStruct(StructUtils.toStruct(types)).right.value must be(types)
}
"ser-deser with repeated field" should "pass" in {
val types = TestAllTypes().withRepeatedInt32(Seq(Random.nextInt(), Random.nextInt()))
StructUtils.fromStruct(StructUtils.toStruct(types)).right.value must be(types)
}
"ser-deser with empty nested message field" should "pass" in {
val types = TestAllTypes().withOptionalNestedMessage(NestedMessage())
StructUtils.fromStruct(StructUtils.toStruct(types)).right.value must be(types)
}
"ser-deser with non-empty nested message field" should "pass" in {
val types = TestAllTypes().withOptionalNestedMessage(NestedMessage().withBb(Random.nextInt()))
StructUtils.fromStruct(StructUtils.toStruct(types)).right.value must be(types)
}
// Tests added as a precaution to verify that using `fd` in the recursion is correct for repeated fields
"fromStruct with repeated field of messages" should "pass" in {
val someIntValueA = Random.nextInt()
val someIntValueB = Random.nextInt()
val types = TestAllTypes().withRepeatedForeignMessage(
Seq(ForeignMessage().withC(someIntValueA), ForeignMessage().withC(someIntValueB))
)
StructUtils.fromStruct(StructUtils.toStruct(types)).right.value must be(types)
}
"fromStruct with repeated field of enum" should "pass" in {
val types =
TestAllTypes().withRepeatedImportEnum(Seq(ImportEnum.IMPORT_BAR, ImportEnum.IMPORT_BAZ))
StructUtils.fromStruct(StructUtils.toStruct(types)).right.value must be(types)
}
// Tests added as a precaution to verify that the recursion and companion resolution are correct
"fromStruct with two level deep message" should "pass" in {
implicit val containerMessageCompanion = ContainerMessage.messageCompanion
val types = ContainerMessage().withImportMessage(
ContainerMessageImportMessage().withB(DeepMessage().withA(Random.nextInt()))
)
StructUtils.fromStruct[ContainerMessage](StructUtils.toStruct(types)).right.value must be(types)
}
"fromStruct with two level deep enum" should "pass" in {
implicit val containerMessageCompanion = ContainerMessage.messageCompanion
val types =
ContainerMessage().withImportMessage(ContainerMessageImportMessage().withC(DeepEnum.DEEP_BAZ))
StructUtils.fromStruct[ContainerMessage](StructUtils.toStruct(types)).right.value must be(types)
}
// failures
"fromStruct with missing enum field" should "pass" in {
StructUtils
.fromStruct(
Struct(Map("optional_foreign_enum" -> Value(Value.Kind.StringValue("non_existent"))))
)
.left
.value must be(
StructParsingError(
"""Field "protobuf_unittest.TestAllTypes.optional_foreign_enum" is of type enum "protobuf_unittest.ForeignEnum" but received invalid enum value "non_existent""""
)
)
}
"fromStruct with faulty int field" should "pass" in {
val someFaultyInt = Random.nextFloat().toDouble
StructUtils
.fromStruct(Struct(Map("optional_int32" -> Value(Value.Kind.NumberValue(someFaultyInt)))))
.left
.value must be(
StructParsingError(
s"""Field "protobuf_unittest.TestAllTypes.optional_int32" is of type "Int" but received "NumberValue($someFaultyInt)""""
)
)
}
"fromStruct with faulty long field" should "pass" in {
val someFaultyLong = "Hi"
StructUtils
.fromStruct(Struct(Map("optional_int64" -> Value(Value.Kind.StringValue(someFaultyLong)))))
.left
.value must be(
StructParsingError(
"""Field "protobuf_unittest.TestAllTypes.optional_int64" is of type long but received invalid long value "Hi""""
)
)
}
"fromStruct with int field value for string field key" should "pass" in {
val someFaultyString = Random.nextInt().toDouble
StructUtils
.fromStruct(Struct(Map("optional_string" -> Value(Value.Kind.NumberValue(someFaultyString)))))
.left
.value must be(
StructParsingError(
s"""Field "protobuf_unittest.TestAllTypes.optional_string" is of type "String" but received "NumberValue($someFaultyString)""""
)
)
}
"fromStruct with boolean field value for string field key" should "pass" in {
val someBooleanValue = Random.nextBoolean()
StructUtils
.fromStruct(Struct(Map("optional_string" -> Value(Value.Kind.BoolValue(someBooleanValue)))))
.left
.value must be(
StructParsingError(
s"""Field "protobuf_unittest.TestAllTypes.optional_string" is of type "String" but received "BoolValue($someBooleanValue)""""
)
)
}
"fromStruct with message field value for string field key" should "pass" in {
StructUtils
.fromStruct(
Struct(Map("optional_string" -> Value(Value.Kind.StructValue(Struct(Map.empty)))))
)
.left
.value must be(
StructParsingError(
s"""Field "protobuf_unittest.TestAllTypes.optional_string" is of type "String" but received "StructValue(Struct(Map(),UnknownFieldSet(Map())))""""
)
)
}
"fromStruct with repeated field value for single field key" should "pass" in {
StructUtils
.fromStruct(
Struct(
Map(
"optional_string" -> Value(
Value.Kind.ListValue(ListValue(Seq(Value(Value.Kind.StringValue("Hi")))))
)
)
)
)
.left
.value must be(
StructParsingError(
"""Field "protobuf_unittest.TestAllTypes.optional_string" is of type "String" but received "ListValue(ListValue(List(Value(StringValue(Hi),UnknownFieldSet(Map()))),UnknownFieldSet(Map())))""""
)
)
}
"fromStruct with string field value for int field key" should "pass" in {
StructUtils
.fromStruct(Struct(Map("optional_int32" -> Value(Value.Kind.StringValue("Hi")))))
.left
.value must be(
StructParsingError(
s"""Field "protobuf_unittest.TestAllTypes.optional_int32" is of type "Int" but received "StringValue(Hi)""""
)
)
}
"fromStruct with empty optional int field" should "pass" in {
StructUtils
.fromStruct(Struct(Map("optional_int32" -> Value(Value.Kind.Empty))))
.right
.value must be(
TestAllTypes()
)
}
}
|
scalapb/ScalaPB
|
e2e/src/test/scala/scalapb/StructUtilsSpec.scala
|
Scala
|
apache-2.0
| 18,200
|
package com.mec.scala
object Messages{
object Exit
object Finished
case class Response(message: String)
}
import akka.actor.Actor
class ShapesDrawingActor extends Actor{
import Messages._
def receive = {
case s: Shape =>
s.draw { str => println(s"ShapeDrawingActor: $str")}
sender ! Response(s"ShapesDr awingActor: $s drawn")
case Exit =>
println(s"ShapesDrawingActor: exiting...")
sender ! Finished
case unexpected => //default. Equivalent to "unexpected: Any"
val response = Response(s"Error: Unknown message:$unexpected")
println(s"ShapesDrawingActor: $response")
sender ! response
}
}
|
mectest1/HelloScala
|
HelloWorld/src/com/mec/scala/ShapesDrawingActor.scala
|
Scala
|
gpl-3.0
| 661
|
/**
* Copyright 2015 Yahoo Inc. Licensed under the Apache License, Version 2.0
* See accompanying LICENSE file.
*/
package kafka.manager.utils
import kafka.manager.CuratorConfig
import org.apache.curator.framework.{CuratorFrameworkFactory, CuratorFramework}
import org.apache.curator.retry.BoundedExponentialBackoffRetry
import org.scalatest.{BeforeAndAfterAll, FunSuite}
/**
* @author hiral
*/
trait KafkaServerInTest extends FunSuite with BeforeAndAfterAll {
val kafkaServerZkPath : String
lazy val sharedCurator: CuratorFramework = {
val config = CuratorConfig(kafkaServerZkPath)
val curator: CuratorFramework = CuratorFrameworkFactory.newClient(
config.zkConnect,
new BoundedExponentialBackoffRetry(config.baseSleepTimeMs, config.maxSleepTimeMs, config.zkMaxRetry))
curator
}
override protected def beforeAll(): Unit = {
super.beforeAll()
sharedCurator.start()
}
override protected def afterAll(): Unit = {
sharedCurator.close()
super.afterAll()
}
}
|
Flipkart/kafka-manager
|
test/kafka/manager/utils/KafkaServerInTest.scala
|
Scala
|
apache-2.0
| 1,018
|
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package org.scalajs.linker.backend
import scala.concurrent._
import java.io._
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import org.scalajs.linker.interface.{OutputDirectory, Report}
import org.scalajs.linker.interface.unstable.{OutputDirectoryImpl, OutputPatternsImpl, ReportImpl}
import org.scalajs.linker.standard.{ModuleSet, IOThrottler}
import org.scalajs.linker.standard.ModuleSet.ModuleID
private[backend] abstract class OutputWriter(output: OutputDirectory,
config: LinkerBackendImpl.Config) {
import OutputWriter.ByteArrayWriter
private val outputImpl = OutputDirectoryImpl.fromOutputDirectory(output)
private val moduleKind = config.commonConfig.coreSpec.moduleKind
protected def writeModule(moduleID: ModuleID, jsFileWriter: Writer): Unit
protected def writeModule(moduleID: ModuleID, jsFileWriter: Writer,
sourceMapWriter: Writer): Unit
def write(moduleSet: ModuleSet)(implicit ec: ExecutionContext): Future[Report] = {
val ioThrottler = new IOThrottler(config.maxConcurrentWrites)
def filesToRemove(seen: Iterable[String], reports: List[Report.Module]): Set[String] =
seen.toSet -- reports.flatMap(r => r.jsFileName :: r.sourceMapName.toList)
for {
currentFiles <- outputImpl.listFiles()
reports <- Future.traverse(moduleSet.modules) { m =>
ioThrottler.throttle(writeModule(m.id))
}
_ <- Future.traverse(filesToRemove(currentFiles, reports)) { f =>
ioThrottler.throttle(outputImpl.delete(f))
}
} yield {
val publicModules = for {
(module, report) <- moduleSet.modules.zip(reports)
if module.public
} yield {
report
}
new ReportImpl(publicModules)
}
}
private def writeModule(moduleID: ModuleID)(
implicit ec: ExecutionContext): Future[Report.Module] = {
val jsFileName = OutputPatternsImpl.jsFile(config.outputPatterns, moduleID.id)
if (config.sourceMap) {
val sourceMapFileName = OutputPatternsImpl.sourceMapFile(config.outputPatterns, moduleID.id)
val codeWriter = new ByteArrayWriter
val smWriter = new ByteArrayWriter
writeModule(moduleID, codeWriter.writer, smWriter.writer)
val code = codeWriter.result()
val sourceMap = smWriter.result()
for {
_ <- outputImpl.writeFull(jsFileName, code)
_ <- outputImpl.writeFull(sourceMapFileName, sourceMap)
} yield {
new ReportImpl.ModuleImpl(moduleID.id, jsFileName, Some(sourceMapFileName), moduleKind)
}
} else {
val codeWriter = new ByteArrayWriter
writeModule(moduleID, codeWriter.writer)
val code = codeWriter.result()
for {
_ <- outputImpl.writeFull(jsFileName, code)
} yield {
new ReportImpl.ModuleImpl(moduleID.id, jsFileName, None, moduleKind)
}
}
}
}
private object OutputWriter {
private class ByteArrayWriter {
private val byteStream = new ByteArrayOutputStream
val writer: Writer = new OutputStreamWriter(byteStream, StandardCharsets.UTF_8)
def result(): ByteBuffer = {
writer.close()
ByteBuffer.wrap(byteStream.toByteArray())
}
}
}
|
scala-js/scala-js
|
linker/shared/src/main/scala/org/scalajs/linker/backend/OutputWriter.scala
|
Scala
|
apache-2.0
| 3,458
|
package de.tototec.cmvn.configfile
// FIXME: make a normal class
case class KeyValue(val key: String,
val value: String,
var file: String = null,
var line: java.lang.Integer = null)
|
ToToTec/cmvn
|
de.tototec.cmvn/src/main/scala/de/tototec/cmvn/configfile/KeyValue.scala
|
Scala
|
apache-2.0
| 243
|