code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package com.shocktrade.client.social
import com.shocktrade.common.models.FacebookAppInfo
import io.scalajs.npm.angularjs.Service
import io.scalajs.npm.angularjs.http.Http
import scala.scalajs.js.annotation.JSExportAll
/**
 * Social Networking Setup Services
 *
 * Angular service wrapping the social-network REST endpoints.
 * @author Lawrence Daniels <lawrence.daniels@gmail.com>
 */
@JSExportAll
class SocialServices($http: Http) extends Service {

  /** Fetches the Facebook application configuration:
    * issues GET /api/social/facebook, decoding the response as [[FacebookAppInfo]]. */
  def getFacebookAppInfo = $http.get[FacebookAppInfo]("/api/social/facebook")
}
| ldaniels528/shocktrade.js | app/client/angularjs/src/main/scala/com/shocktrade/client/social/SocialServices.scala | Scala | apache-2.0 | 472 |
package it.mighe.ssbi.instruction
import org.scalatest.{Matchers, FlatSpec}
import it.mighe.ssbi.Tape
import it.mighe.ssbi.instructions.WriteInstruction
import java.io.ByteArrayOutputStream
class WriteInstructionSpec extends FlatSpec with Matchers {

  /** Test fixture: a fresh tape, capture buffer, and instruction per test.
    *
    * A named class is used instead of the previous anonymous structural type
    * (`new { ... }`): structural member access goes through runtime
    * reflection, which is slower and needs the `reflectiveCalls` language
    * feature. Behavior of the tests is unchanged.
    */
  private class Fixture {
    val tape = new Tape
    val output = new ByteArrayOutputStream()
    val instruction = new WriteInstruction(output)
  }

  private def fixture = new Fixture

  it should "return next instruction" in {
    val f = fixture
    val next = new WriteInstruction(f.output)
    f.instruction.next = next
    f.instruction.execute(f.tape) should be(next)
  }

  it should "write current value" in {
    val f = fixture
    f.tape.current = 80
    f.instruction.execute(f.tape)
    f.output.toByteArray should equal(Array[Byte](80))
  }
}
| mighe/ssbi | src/test/scala/it/mighe/ssbi/instruction/WriteInstructionSpec.scala | Scala | mit | 776 |
package com.rasterfoundry.api.organization
import com.rasterfoundry.datamodel._
import com.rasterfoundry.api.utils.queryparams._
/** Mixes in the combined query-parameter directive used by the organization endpoints. */
trait OrganizationQueryParameterDirective extends QueryParametersCommon {

  /** Combines the shared timestamp / search / activation / platform-id
    * query-parameter directives and maps the extracted values onto an
    * [[OrganizationQueryParameters]] instance. */
  def organizationQueryParameters =
    (
      timestampQueryParameters &
        searchParams &
        activationParams &
        platformIdParams
    ).as(OrganizationQueryParameters.apply _)
}
| azavea/raster-foundry | app-backend/api/src/main/scala/organization/QueryParameters.scala | Scala | apache-2.0 | 405 |
// Compiler regression fixture (pos test): a user-defined `==[T]` taking an
// implicit parameter must not break the synthetic equals generated for the
// case class below. Do not "clean up" — the shape of this code is the test.
trait A
trait B {
  // Shadows the universal `==`; requires an implicit A at every call site.
  def ==[T](o: T)(implicit a: A): Boolean = ???
}
case class C(b: B)
// cf test/files/pos/t10536.scala
| lrytz/scala | test/files/pos/t12248.scala | Scala | apache-2.0 | 124 |
package edu.cmu.cs.oak.analysis
import scala.io.Source
import java.io.PrintWriter
import java.nio.file.Paths
import scala.util.Random
import edu.cmu.cs.oak.value.StringValue
import java.nio.file.Path
import java.io.File
import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.DurationInt
import scala.concurrent.{ future, Future }
import java.io.FileOutputStream
import edu.cmu.cs.oak.value.StringLiteralContext
import java.io.ObjectOutputStream
import java.io.FileInputStream
import java.io.ObjectInputStream
import scala.collection.mutable.ListBuffer
/**
 * Selection of useful methods used during analysis of PHP projects:
 * literal extraction, result caching (binary and XML), and include-expression
 * classification.
 */
object OakUtility {

  // Lazily-populated cache of PHP standard-library function names, read once
  // from the bundled phpStandardFunctions.txt resource.
  var phpStandardFunctions: Option[List[String]] = None

  /** Returns true if `s` names a PHP standard function (per the bundled list). */
  def is_php_function(s: String): Boolean = {
    // runtime caching
    if (phpStandardFunctions.isEmpty) {
      val functions = new ListBuffer[String]()
      Source.fromURL(url("phpStandardFunctions.txt").toFile().toURL()).getLines().foreach {
        line => functions += line.toString()
      }
      phpStandardFunctions = Some(functions.toList)
    }
    phpStandardFunctions.get.contains(s)
  }

  /**
   * Utility method: resolves a classpath resource name to a filesystem path.
   */
  def url(fileName: String): Path = {
    println(fileName)
    Paths.get(getClass.getResource("/" + fileName).getPath)
  }

  /** Writes a random sample of `n` string literals, drawn from all `projects`,
    * to `path` as a pipe-separated table (one literal per row). */
  def getSample(n: Int, projects: Seq[File], path: Path) {
    val allLiterals = projects.map(p => getProjectLiterals(p)._1).fold(Set[StringValue]())(_ union _).toIterator
    val sample = Random.shuffle(allLiterals).take(n).toList
    val pw = new PrintWriter(path.toFile())
    // Double quotes inside literal values are replaced with apostrophes so
    // they do not break the quoted table cells.
    val p = sample.zipWithIndex.map { case (sv, i) => i + "|\\"" + sv.value.replaceAll("\\"", "'") + "\\"|" + sv.file + "|" + sv.lineNr }.mkString("\\n")
    pw.write("#|String Value|Location|Line Number|\\n" + p)
    pw.close
  }

  /**
   * Retrieves <strong>all</strong> parsable string literals that are used in a project
   * and returns a set of StringValues (string literals plus location information).
   * Results are cached on disk (one ".cache" file per project, named by the
   * hash of the project's absolute path), so repeated runs skip the parse.
   *
   * @param file java.io.File Root location of the system
   * @return Set of all parsable string literals
   *
   */
  def getProjectLiterals(file: File): (Set[StringValue], Set[(String, Int, String)]) = {
    val filename = file.toPath().toAbsolutePath().toString().hashCode() + ".cache"
    val path = Paths.get(OakUtility.url("").toString + "/" + filename)
    return if (path.toFile().exists()) { // results were already computed before
      val ois = new ObjectInputStream(new FileInputStream(path.toFile))
      val r = ois.readObject.asInstanceOf[(Set[StringValue], Set[(String, Int, String)])]
      ois.close
      r
    } else { // parse the project and write the cache
      // Parallel parsing tasks
      val files = getPHPFiles(file)
      val tasks = for (phpFile <- files.zipWithIndex) yield future {
        // println(s"${phpFile._2}/${files.size} ${phpFile._1.toPath().toString()}")
        (new ASTVisitor(Paths.get(phpFile._1.getAbsolutePath))).retrieveStringLiterals()
      }
      // Wait for all per-file parses; the deadline bounds total parsing time.
      val aggregated = Future.sequence(tasks)
      var parsed = Await.result(aggregated, 45.minutes)
      val string_literals = parsed.map(p => p._1).foldLeft(Set[StringValue]())(_ union _)
      val include_expressions = parsed.map(p => p._2).foldLeft(Set[(String, Int, String)]())(_ union _)
      val oos = new ObjectOutputStream(new FileOutputStream(path.toFile()))
      oos.writeObject((string_literals, include_expressions))
      oos.close
      (string_literals, include_expressions)
    }
  }

  /** Reads previously serialized parse results (literals + includes) from the
    * per-project XML cache file written by [[serializeParseResults]]. */
  def deserializeParseResults(file: File): (Set[StringValue], Set[(String, Int)]) = {
    val filename = file.toPath().toAbsolutePath().toString().hashCode() + ".xml"
    val path = Paths.get(OakUtility.url("").toString + "/" + filename)
    // Predicate: keep XML elements only, skip text nodes (label "#PCDATA").
    def p(label: String): Boolean = !(label equals "#PCDATA")
    // Splits the <Content> root into its <Literals> and <Includes> children.
    def parseContent(node: scala.xml.Node): (Set[StringValue], Set[(String, Int)]) = {
      val nodes = node.child.filter {
        n => p(n.label)
      }
      (parseLiterals(nodes.head), parseIncludes(nodes.last))
    }
    def parseIncludes(node: scala.xml.Node): Set[(String, Int)] = {
      val nodes = node.child.filter {
        n => p(n.label)
      }
      nodes.map(node => parseInclude(node)).toSet
    }
    def parseInclude(node: scala.xml.Node): (String, Int) = {
      val file = node.attribute("file").head.text.trim
      val line = node.attribute("line").head.text.trim.toInt
      (file, line)
    }
    def parseLiterals(node: scala.xml.Node): Set[StringValue] = {
      val nodes = node.child.filter {
        n => p(n.label)
      }
      nodes.map(node => parseLiteral(node)).toSet
    }
    def parseLiteral(node: scala.xml.Node): StringValue = {
      val value = node.text
      // NOTE(review): `file` reads the "line" attribute, while the serializer
      // writes the file name into the "file" attribute — looks like a
      // copy/paste bug; confirm against serializeParseResults.
      val file = node.attribute("line").head.text.trim
      val line = node.attribute("line").head.text.trim.toInt
      val context = node.attribute("context").head.text.trim match {
        case "MISC" => StringLiteralContext.MISC
        case "FDEFINITION" => StringLiteralContext.FDEFINITION
        case "TEMPLATE" => StringLiteralContext.TEMPLATE
      }
      val fdef = {
        val name = node.attribute("fdef_name").head.text.trim
        val file = node.attribute("fdef_file").head.text.trim
        val line = node.attribute("fdef_line").head.text.trim.toInt
        (name, (file, line))
      }
      val sv = StringValue(value, file, line)
      //#ifdef CoverageAnalysis
      sv.context = context
      sv.fdef = fdef
      //#endif
      return sv
    }
    parseContent(scala.xml.XML.loadFile(path.toFile))
  }

  //#ifdef CoverageAnalysis
  /** Writes the parse results for `file` to a pretty-printed per-project XML
    * cache file (counterpart of [[deserializeParseResults]]). */
  def serializeParseResults(file: File, literals: Set[StringValue], includes: Set[(String, Int)]) {
    val filename = file.toPath().toAbsolutePath().toString().hashCode() + ".xml"
    val literals_xml = {
      <Literals>
        {
          // NOTE(review): fdef_file is filled with literal.fdef._1 (the
          // function name) rather than literal.fdef._2._1 (the file) —
          // verify the intended value.
          for (literal <- literals) yield <Literal file={ literal.file } line={ literal.lineNr.toString } context={ literal.context.toString } fdef_name={ literal.fdef._1 } fdef_file={ literal.fdef._1 } fdef_line={ literal.fdef._2._2.toString }>
            { literal.value }
          </Literal>
        }
      </Literals>
    }
    val includes_xml = {
      <Includes>
        {
          for (include <- includes) yield <Include file={ include._1 } line={ include._2.toString }/>
        }
      </Includes>
    }
    val node = {
      <Content>
        { literals_xml }
        { includes_xml }
      </Content>
    }
    val path = Paths.get(OakUtility.url("").toString + "/" + filename)
    // Truncate any pre-existing cache file before writing the new content.
    new FileOutputStream(path.toAbsolutePath().toString(), false).close();
    val pw = new PrintWriter(path.toFile())
    pw.write((new scala.xml.PrettyPrinter(80, 2)).format(node))
    pw.close
  }
  //#endif

  /**
   * Returns a stream of all files having the file extension ".php", ".inc",
   * ".module" or ".bit" for a given system location (the system root folder).
   *
   * @param file java.io.File Root location of the system
   * @return Stream of PHP/INC files
   */
  def getPHPFiles(file: File): Stream[File] = {
    // Get file tree (the root itself followed by all descendants, lazily)
    def getFileTree(f: File): Stream[File] = {
      f #:: (if (f.isDirectory) f.listFiles().toStream.flatMap(getFileTree)
      else Stream.empty)
    }
    // Look for file names with PHP-related extensions
    getFileTree(file).filter(p => p.getName.endsWith(".php") || p.getName.endsWith(".inc") || p.getName.endsWith(".module") || p.getName.endsWith(".bit"))
  }

  /**
   * Determine how many include expressions in the system are dynamic and static.
   * Static include = Include expression is a string constant
   * Dynamic Include = Include expression is any other expression
   *
   * @return (static count, dynamic count)
   */
  def analyzeIncludeExpressions(file: File): (Int, Int) = {
    // List of files in the system
    val files = getPHPFiles(file)
    // counter for static and dynamic includes
    var static = 0
    var dynamic = 0
    var i = 0
    files.foreach {
      f => {
        // for each file perform AST analysis
        println(i + " of " + files.size)
        i += 1
        val fpath = f.toPath()
        // Classify include expressions for this file's AST
        val static_dynamic = (new ASTVisitor(fpath)).analyseIncludeExpressions()
        // Add results to counters
        static += static_dynamic._1
        dynamic += static_dynamic._2
      }
    }
    return (static, dynamic)
  }
}
| smba/oak | edu.cmu.cs.oak/src/main/scala/edu/cmu/cs/oak/analysis/OakUtility.scala | Scala | lgpl-3.0 | 8,443 |
package controllers.helpers
/**
* GraPHPizer source code analytics engine
* Copyright (C) 2015 Martin Helmich <kontakt@martin-helmich.de>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
import domain.model.{ClassLike, DataType}
import org.neo4j.graphdb.Node
import play.api.libs.json.{Json, JsValue}
import persistence.NodeWrappers._
import play.api.mvc.{AnyContent, Request}
/** JSON rendering helpers shared by the REST controllers. */
object ViewHelpers {

  /** Renders a type reference from a raw Neo4j node: name, detail URL, node id. */
  def writeTypeRef(p: String, t: Node)(implicit request: Request[AnyContent]): JsValue = {
    Json.obj(
      "name" -> t.property[String]("name"),
      "__href" -> controllers.routes.Types.show(p, t.property[String]("slug").get).absoluteURL(),
      "__id" -> t.id
    )
  }

  /** Renders a type reference from the domain model (no node id available here). */
  def writeTypeRef(p: String, t: DataType)(implicit request: Request[AnyContent]): JsValue = {
    Json.obj(
      "name" -> t.name,
      "__href" -> controllers.routes.Types.show(p, t.slug).absoluteURL()
    )
  }

  /** Renders a class reference from a raw Neo4j node: FQCN, detail URL, node id. */
  def writeClassRef(p: String, c: Node)(implicit request: Request[AnyContent]): JsValue = {
    Json.obj(
      "fqcn" -> c.property[String]("fqcn"),
      "__href" -> controllers.routes.Classes.show(p, c ! "slug").absoluteURL(),
      "__id" -> c.id
    )
  }

  /** Renders a class reference from the domain model (no node id available here). */
  def writeClassRef(p: String, c: ClassLike)(implicit request: Request[AnyContent]): JsValue = {
    Json.obj(
      "fqcn" -> c.fqcn,
      "__href" -> controllers.routes.Classes.show(p, c.slug).absoluteURL()
    )
  }
}
| martin-helmich/graphpizer-server | app/controllers/helpers/ViewHelpers.scala | Scala | gpl-3.0 | 1,990 |
/*
* Copyright 2015 data Artisans GmbH, 2019 Ververica GmbH
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ververica.flinktraining.exercises.datastream_scala.basics
import com.ververica.flinktraining.exercises.datastream_java.sources.TaxiRideSource
import com.ververica.flinktraining.exercises.datastream_java.utils.ExerciseBase._
import com.ververica.flinktraining.exercises.datastream_java.utils.{ExerciseBase, GeoUtils, MissingSolutionException}
import org.apache.flink.api.java.utils.ParameterTool
import org.apache.flink.streaming.api.scala._
/**
* The "Ride Cleansing" exercise of the Flink training
* (http://training.ververica.com).
*
* The task of the exercise is to filter a data stream of taxi ride records to keep only rides that
* start and end within New York City. The resulting stream should be printed to the
* standard out.
*
* Parameters:
* -input path-to-input-file
*
*/
object RideCleansingExercise extends ExerciseBase {

  def main(args: Array[String]) {
    // parse parameters
    val params = ParameterTool.fromArgs(args)
    val input = params.get("input", ExerciseBase.pathToRideData)

    val maxDelay = 60 // events are out of order by max 60 seconds
    val speed = 600 // events of 10 minutes are served in 1 second

    // set up the execution environment
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(parallelism)

    // get the taxi ride data stream
    val rides = env.addSource(rideSourceOrTest(new TaxiRideSource(input, maxDelay, speed)))

    val filteredRides = rides
      // filter out rides that do not start and end in NYC
      // (exercise placeholder: throws until the participant supplies the predicate)
      .filter(ride => throw new MissingSolutionException)

    // print the filtered stream
    printOrTest(filteredRides)

    // run the cleansing pipeline
    env.execute("Taxi Ride Cleansing")
  }
}
| dataArtisans/flink-training-exercises | src/main/scala/com/ververica/flinktraining/exercises/datastream_scala/basics/RideCleansingExercise.scala | Scala | apache-2.0 | 2,354 |
package top.myetl.lucenerdd.rdd
import java.util.concurrent.atomic.AtomicBoolean
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.lucene.index.{DirectoryReader, IndexReader}
import org.apache.spark._
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.rdd.RDD
import top.myetl.lucenerdd.store.HdfsDirectory
import top.myetl.lucenerdd.util.FsUtils
import org.apache.lucene.search.IndexSearcher
/** One RDD partition per Lucene index directory found under `path`.
  * The IndexReader is opened lazily on first use and cached for the
  * lifetime of the partition instance. */
class LuceneSearcherRDDPartition( idx: Int,
  private val path: String) extends Partition{

  // Opens a DirectoryReader over the HDFS-backed Lucene directory.
  // NOTE(review): the reader is never closed here — confirm its lifecycle
  // is managed elsewhere.
  private def getReader(): IndexReader = {
    val configuration = new Configuration()
    println(index+"------- open directory ... "+path+" --- "+this)
    val directory = HdfsDirectory(new Path(this.path), configuration)
    val reader = DirectoryReader.open(directory)
    reader
  }

  // Cached reader; opened at most once per partition instance.
  private lazy val reader = getReader()

  /** Returns a fresh IndexSearcher over the (shared) cached reader. */
  def getSearcher(): IndexSearcher = synchronized{
    new IndexSearcher(reader)
  }

  override def index: Int = idx
}
/**
 * An RDD that yields one [[IndexSearcher]] per Lucene index directory found
 * under the table's HDFS base directory.
 *
 * @param sc        the Spark context
 * @param tableName name of the table whose index directories are listed
 * @param depts     optional RDD dependencies (cleared on construction)
 */
class LuceneSearcherRDD( sc: SparkContext,
  val tableName: String,
  depts: Seq[Dependency[_]] = Nil) extends RDD[IndexSearcher](sc, depts){

  clearDependencies()

  /** Produces exactly one IndexSearcher for the partition's index directory. */
  @DeveloperApi
  override def compute(split: Partition, context: TaskContext): Iterator[IndexSearcher] = {
    val p = split.asInstanceOf[LuceneSearcherRDDPartition]
    // Iterator.single replaces the previous hand-rolled AtomicBoolean-based
    // one-element iterator (and its unused nanoTime measurements);
    // behavior is identical.
    Iterator.single(p.getSearcher())
  }

  // Lists the table's Lucene directories and creates one partition for each.
  private def buildPartitions: Array[Partition] = {
    val baseDir = FsUtils.getHdfsBaseDir(sparkContext.getConf)
    val tableDir = FsUtils.dirName(baseDir, tableName)
    val configuration = SparkHadoopUtil.get.conf
    val fs: FileSystem = FileSystem.get(configuration)
    val paths = FsUtils.listLuceneDir(fs, new Path(tableDir))
    paths.indices.map(i =>
      new LuceneSearcherRDDPartition(i, FsUtils.dirName(tableDir, paths(i)))
    ).toArray
  }

  override protected def getPartitions: Array[Partition] = {
    buildPartitions
  }

  override def checkpoint() {
    // Do nothing. Lucene RDD should not be checkpointed.
  }
}
package eldis.react.mdl.components
import scalajs.js
import js.JSConverters._
import js.annotation.JSImport
import eldis.react._
import eldis.react.util.ElementBuilder
/** Scala.js facade for the react-mdl-extra `Option` component (a dropdown entry). */
private object Option {

  /** React props accepted by the component. */
  @js.native
  trait Props extends js.Any {
    val key: js.Any = js.native
    val value: js.Any = js.native
    val onClick: js.UndefOr[js.Any] = js.native
    val className: js.UndefOr[String] = js.native
  }

  object Props {
    /** Builds the props literal; the option's value doubles as its React key. */
    def apply(
      value: js.Any,
      className: Option[String] = None,
      onClick: js.UndefOr[js.Any] = js.undefined
    ) =
      js.Dynamic.literal(
        key = value, // it's not a typo. Yep, key = value
        value = value,
        onClick = onClick,
        className = className.orUndefined
      ).asInstanceOf[Props]
  }

  // Underlying JS component imported from the react-mdl-extra package.
  @JSImport("react-mdl-extra", "Option")
  @js.native
  object Component extends JSComponent[Props]

  /** Wraps the component and its children in an element builder. */
  def apply(props: Props)(ch: ReactNode) = ElementBuilder(Component, props)(ch)
}
| eldis/scalajs-react-mdl | src/main/scala/components/Option.scala | Scala | mit | 950 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn.keras
import com.intel.analytics.bigdl._
import com.intel.analytics.bigdl.nn.Graph._
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, Activity}
import com.intel.analytics.bigdl.nn.keras.{Sequential => KSequential}
import com.intel.analytics.bigdl.nn.{Container => TContainer}
import com.intel.analytics.bigdl.serialization.Bigdl.{AttrValue, BigDLModule}
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.serializer._
import com.intel.analytics.bigdl.utils.serializer.converters.DataConverter
import com.intel.analytics.bigdl.utils.{MultiShape, Shape, SingleShape}
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import scala.reflect.ClassTag
/** Helper mixed into serializers that tag serialized modules as Keras-style. */
private[bigdl] trait TKerasSerializerHelper {

  /** Adds the boolean attribute "is_keras_module" = true to the module being
    * serialized, so deserialization can recognize Keras-style layers. */
  def appendKerasLabel[T: ClassTag](context: SerializeContext[T],
    moduleBuilder : BigDLModule.Builder)(implicit ev: TensorNumeric[T]): Unit = {
    val serializerFlagBuilder = AttrValue.newBuilder
    DataConverter.setAttributeValue(context, serializerFlagBuilder, true,
      scala.reflect.runtime.universe.typeOf[Boolean])
    moduleBuilder.putAttr("is_keras_module", serializerFlagBuilder.build)
  }
}
// Default serializer instance for Keras-style layers.
object KerasLayerSerializer extends KerasLayerSerializable
/** (De)serialization support for Keras-style layers: the wrapped torch layer
  * is stored as a single sub-module, plus the "is_keras_module" marker. */
trait KerasLayerSerializable extends ContainerSerializable with TKerasSerializerHelper{

  /** Restores the sub-module(s) and installs each as the layer's `labor`
    * (in practice there is exactly one). */
  override def loadSubModules[T: ClassTag](context : DeserializeContext,
    module : AbstractModule[Activity, Activity, T])
    (implicit ev: TensorNumeric[T]) : Unit = {
    val klayer = module.asInstanceOf[KerasLayer[Activity, Activity, T]]
    val subModules = context.bigdlModule.getSubModulesList.asScala
    subModules.foreach(module => {
      val subModuleData = ModuleSerializer.load(DeserializeContext(module,
        context.storages, context.storageType, _copyWeightAndBias))
      klayer.labor = subModuleData.module
    })
  }

  /** Serializes the container normally, then appends the Keras marker flag. */
  override def doSerializeModule[T: ClassTag](context: SerializeContext[T],
    moduleBuilder : BigDLModule.Builder)
    (implicit ev: TensorNumeric[T]) : Unit = {
    super.doSerializeModule(context, moduleBuilder)
    appendKerasLabel(context, moduleBuilder)
  }
}
/**
 * Wrap a torch style layer to keras style layer.
 * This layer can be built multiple times.
 * We are supposing the inputShape and the outputShape stay the same in this layer.
 * @param layer a torch style layer
 * @return a keras compatible layer
 */
class KerasIdentityWrapper[T: ClassTag]
  (val layer: AbstractModule[Activity, Activity, T])(implicit ev: TensorNumeric[T])
  extends KerasLayer[Activity, Activity, T](null) {
  // Only torch-style layers may be wrapped; a Keras layer is already compatible.
  if (layer.isKerasStyle()) {
    throw new RuntimeException(s"We only accept torch layer here, but got: $layer")
  }
  // Identity wrapper: the shape passes through unchanged by assumption.
  override def computeOutputShape(inputShape: Shape): Shape = {
    inputShape
  }
  override def doBuild(inputShape: Shape): AbstractModule[Activity, Activity, T] = layer
}
/**
 * Wrap a torch style layer to keras style layer.
 * This layer can be built multiple times.
 * @param torchLayer a torch style layer
 * @param inputShape the expected input shape WITHOUT the batch dimension,
 * i.e. If the input data is (2, 3, 4) and 2 is the batch size, you should input: (3, 4) here.
 * @return a keras compatible layer
 */
class KerasLayerWrapper[T: ClassTag]
  (val torchLayer: AbstractModule[Activity, Activity, T],
  val inputShape: Shape = null)(implicit ev: TensorNumeric[T])
  extends KerasLayer[Activity, Activity, T](KerasLayer.addBatch(inputShape)) {

  require(!torchLayer.isKerasStyle(), s"We only accept torch layer here, but got: $torchLayer")

  // Derives the output shape by running a dummy forward pass (batch size 2)
  // through a clone of the wrapped layer, then re-attaching the batch dim.
  override def computeOutputShape(calcInputShape: Shape): Shape = {
    val dummyOutTensor =
      torchLayer.cloneModule().forward(Tensor[T](
        (List(2) ++ KerasLayer.removeBatch(calcInputShape).toSingle()).toArray).fill(ev.one))
    val outSize = dummyOutTensor.toTensor.size()
    KerasLayer.addBatch(Shape(outSize.slice(1, outSize.length)))
  }

  override def doBuild(inputShape: Shape): AbstractModule[Activity, Activity, T] = torchLayer
}
private[bigdl] object KerasLayer {

  /** Fuses a torch layer with an optional Keras activation into a single
    * Keras-compatible module. When no activation is given, the torch layer
    * is returned unchanged. */
  private[bigdl] def fuse[T: ClassTag](torchLayer: AbstractModule[Activity, Activity, T],
    kerasActivation: KerasLayer[Tensor[T], Tensor[T], T],
    batchInputShape: Shape)
    (implicit ev: TensorNumeric[T]): AbstractModule[Activity, Activity, T] = {
    if (kerasActivation == null) {
      torchLayer
    } else {
      val wrapper = KSequential[T]()
      wrapper.add(new KerasLayerWrapper[T](torchLayer,
        KerasLayer.removeBatch(batchInputShape)))
      wrapper.add(kerasActivation)
      wrapper.setName(torchLayer.getName())
      wrapper.build(batchInputShape)
      wrapper
    }
  }

  /** Prepends a -1 batch dimension to every (nested) single shape.
    * `null` (the "unset" sentinel) is passed through unchanged.
    * Rewritten as an exhaustive pattern match instead of
    * isInstanceOf checks with early returns; behavior is identical. */
  private[bigdl] def addBatch(shape: Shape): Shape = shape match {
    case null => null // null is the default value, simply pass it through
    case single: SingleShape => Shape((List(-1) ++ single.toSingle()).toArray)
    case multi => Shape(multi.toMulti().map(addBatch))
  }

  /** Drops the leading batch dimension from every (nested) single shape.
    * `null` (the "unset" sentinel) is passed through unchanged. */
  private[bigdl] def removeBatch(shape: Shape): Shape = shape match {
    case null => null // null is the default value, simply pass it through
    case single: SingleShape =>
      Shape(single.toSingle().slice(1, single.toSingle().length).toArray)
    case multi => Shape(multi.toMulti().map(removeBatch))
  }
}
/**
 * KerasModule is the basic component of all Keras-like Layers.
 * It forwards activities and backward gradients, and can be mixed with other
 * AbstractModules.
 *
 * @tparam A Input data type
 * @tparam B Output data type
 * @tparam T Numeric type of parameter(e.g. weight, bias). Only support float/double now
 * @param batchInputShape the first dim is batch
 */
abstract class KerasLayer[A <: Activity: ClassTag, B <: Activity: ClassTag, T: ClassTag]
  (batchInputShape: Shape = null)(implicit ev: TensorNumeric[T]) extends TContainer[A, B, T] {

  inputShapeValue = batchInputShape

  // The wrapped torch-style module that performs the actual computation.
  // It is the single element of `modules`, installed by `build`/`doBuild`.
  def labor: AbstractModule[A, B, T] = {
    if (this.modules.isEmpty) {
      throw new RuntimeException("This Layer hasn't been built")
    }
    require(modules.length == 1,
      s"modules should only contain 1 element instead of ${modules.length}")
    modules(0).asInstanceOf[AbstractModule[A, B, T]]
  }

  // scalastyle:off
  // Setter: replaces the wrapped module (keeps `modules` at exactly one element).
  def labor_=(value: AbstractModule[A, B, T]): Unit = {
    modules.clear()
    modules.append(value)
  }
  // scalastyle:on

  // Forward/backward simply delegate to the wrapped labor module.
  override def updateOutput(input: A): B = {
    output = labor.updateOutput(input)
    output
  }

  override def updateGradInput(input: A, gradOutput: B): A = {
    gradInput = labor.updateGradInput(input, gradOutput)
    gradInput
  }

  override def accGradParameters(input: A, gradOutput: B): Unit = {
    labor.accGradParameters(input, gradOutput)
  }

  // Built only once the labor module has been installed.
  override def isBuilt(): Boolean = {
    !this.modules.isEmpty && super.isBuilt()
  }

  override def isKerasStyle(): Boolean = true

  override def computeOutputShape(inputShape: Shape): Shape = {
    labor.computeOutputShape(inputShape)
  }

  // Sanity check: the (batch-stripped) constructor shape must agree with the
  // shape derived from the upstream graph.
  private[bigdl] def checkWithCurrentInputShape(calcInputShape: Shape): Unit = {
    if (getInputShape() != null) {
      val withoutBatchInputShape = KerasLayer.removeBatch(getInputShape())
      val withoutBatchCalcInputShape = KerasLayer.removeBatch(calcInputShape)
      require(withoutBatchInputShape == withoutBatchCalcInputShape,
        s"InputShape from constructor ${withoutBatchInputShape}" +
          s"should be the same with the calculated inputShape: ${withoutBatchCalcInputShape}")
    }
  }

  override def build(calcInputShape: Shape): Shape = {
    // Input would be reused multiple time in inputs for StaticGraph
    if (isBuilt() && !this.allowRebuilt()) {
      throw new RuntimeException(s"Should not build this module: $this multiple times")
    }
    labor = doBuild(calcInputShape)
    checkWithCurrentInputShape(calcInputShape)
    super.build(calcInputShape)
  }

  /**
   * The value returned by this method should be able to execute `forward` directly.
   */
  def doBuild(inputShape: Shape): AbstractModule[A, B, T]

  /**
   * Build graph: some other modules point to current module
   * @param nodes upstream module nodes
   * @return node containing current module
   */
  override def inputs(nodes : ModuleNode[T]*): ModuleNode[T] = {
    validateInput(nodes.map(_.element))
    if (!nodes.isEmpty) { // as there's Identity().inputs() within Graph
      val inputShape = Shape(nodes.map{_.element.getOutputShape()}.toList)
      this.build(inputShape)
    }
    processInputs(nodes)
  }

  /**
   * Build graph: some other modules point to current module
   * @param nodes upstream module nodes in an array
   * @return node containing current module
   */
  override def inputs(nodes : Array[ModuleNode[T]]): ModuleNode[T] = {
    validateInput(nodes.map(_.element))
    if (!nodes.isEmpty) {
      val inputShape = Shape(nodes.map{_.element.getOutputShape()}.toList)
      this.build(inputShape)
    }
    processInputs(nodes)
  }

  // Extracts the `index`-th shape (1-based) from a possibly-multi output shape.
  private def getShapeByIndex(shape: Shape, index: Int): Shape = {
    shape match {
      case s: SingleShape =>
        require(index == 1, s"Getting singleshape but with index: $index")
        s
      case m: MultiShape =>
        val multiShape = m.toMulti()
        require(index >= 1 && index <= multiShape.length)
        multiShape(index - 1)
    }
  }

  /**
   * Build graph: some other modules point to current module
   * @param first distinguish from another inputs when input parameter list is empty
   * @param nodesWithIndex upstream module nodes and the output tensor index. The start index is 1.
   * @return node containing current module
   */
  override def inputs(first: (ModuleNode[T], Int),
    nodesWithIndex : (ModuleNode[T], Int)*): ModuleNode[T] = {
    validateInput(List(first._1.element))
    val shapes = ArrayBuffer[Shape]()
    shapes += getShapeByIndex(first._1.element.getOutputShape(), first._2)
    if (!nodesWithIndex.isEmpty) {
      validateInput(nodesWithIndex.map(_._1.element))
      // NOTE(review): every element recomputes the shape from `first` instead
      // of the corresponding tuple `t` — looks like a bug; confirm upstream.
      shapes ++= nodesWithIndex.map{t =>
        getShapeByIndex(first._1.element.getOutputShape(), first._2)
      }
    }
    this.build(Shape(shapes.toList))
    processInputs(first, nodesWithIndex : _*)
  }
}
| yiheng/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/nn/keras/KerasLayer.scala | Scala | apache-2.0 | 10,941 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.java
import java.{util => ju}
import java.util.Map.Entry
import scala.collection.mutable
private[spark] object JavaUtils {
  /** Converts a Scala Option into Spark's Java-friendly Optional. */
  def optionToOptional[T](option: Option[T]): Optional[T] =
    if (option.isDefined) {
      Optional.of(option.get)
    } else {
      Optional.empty[T]
    }

  // Workaround for SPARK-3926 / SI-8911
  def mapAsSerializableJavaMap[A, B](underlying: collection.Map[A, B]): SerializableMapWrapper[A, B]
    = new SerializableMapWrapper(underlying)

  // Implementation is copied from scala.collection.convert.Wrappers.MapWrapper,
  // but implements java.io.Serializable. It can't just be subclassed to make it
  // Serializable since the MapWrapper class has no no-arg constructor. This class
  // doesn't need a no-arg constructor though.
  class SerializableMapWrapper[A, B](underlying: collection.Map[A, B])
    extends ju.AbstractMap[A, B] with java.io.Serializable { self =>

    override def size: Int = underlying.size

    // Returns null when the key is absent or has the wrong runtime type,
    // matching java.util.Map.get semantics.
    override def get(key: AnyRef): B = try {
      underlying.getOrElse(key.asInstanceOf[A], null.asInstanceOf[B])
    } catch {
      case ex: ClassCastException => null.asInstanceOf[B]
    }

    override def entrySet: ju.Set[ju.Map.Entry[A, B]] = new ju.AbstractSet[ju.Map.Entry[A, B]] {
      override def size: Int = self.size

      override def iterator: ju.Iterator[ju.Map.Entry[A, B]] = new ju.Iterator[ju.Map.Entry[A, B]] {
        val ui = underlying.iterator
        // Last key returned by next(), so remove() knows which entry to delete.
        var prev : Option[A] = None

        def hasNext: Boolean = ui.hasNext

        def next(): Entry[A, B] = {
          val (k, v) = ui.next()
          prev = Some(k)
          new ju.Map.Entry[A, B] {
            import scala.util.hashing.byteswap32
            override def getKey: A = k
            override def getValue: B = v
            override def setValue(v1 : B): B = self.put(k, v1)
            // Mixes key and value hashes so entries distribute well in hash sets.
            override def hashCode: Int = byteswap32(k.hashCode) + (byteswap32(v.hashCode) << 16)
            override def equals(other: Any): Boolean = other match {
              case e: ju.Map.Entry[_, _] => k == e.getKey && v == e.getValue
              case _ => false
            }
          }
        }

        // Only supported when the underlying Scala map is mutable.
        def remove() {
          prev match {
            case Some(k) =>
              underlying match {
                case mm: mutable.Map[A, _] =>
                  mm.remove(k)
                  prev = None
                case _ =>
                  throw new UnsupportedOperationException("remove")
              }
            case _ =>
              throw new IllegalStateException("next must be called at least once before remove")
          }
        }
      }
    }
  }
}
| ZxlAaron/mypros | core/src/main/scala/org/apache/spark/api/java/JavaUtils.scala | Scala | apache-2.0 | 3,453 |
package cc.ferreira.gcal2slack.adapters
import cc.ferreira.gcal2slack
import cc.ferreira.gcal2slack.Result
import cc.ferreira.gcal2slack.messaging.{MessagingClient, MessagingStatus}
import scalaj.http.{Http, HttpOptions, HttpResponse}
import scala.concurrent.duration._
/** Messaging client that updates the current user's Slack profile status
  * through the Slack Web API (`users.profile.set`). */
case class SlackClient(token: String) extends MessagingClient {

  private val endpoint = "https://slack.com/api/"
  private val timeoutSeconds = 10.seconds

  /** Sets the user's status; `None` clears it (via MessagingStatus.clear).
    * Returns Left when Slack responds with a non-success HTTP status. */
  def updateStatus(status: Option[MessagingStatus]): Result[Unit] = {
    val newStatus = status.getOrElse(MessagingStatus.clear)
    // NOTE(review): text/emoji are interpolated without JSON escaping — a
    // status containing `"` or `\` would produce malformed JSON. Confirm
    // inputs are sanitized upstream, or escape here.
    val json =
      s"""{
         | "profile": {
         | "status_text": "${newStatus.text}",
         | "status_emoji": "${newStatus.emoji}"
         | }
         |}""".stripMargin
    val result: HttpResponse[String] = Http(s"$endpoint/users.profile.set")
      .postData(json)
      .header("Content-Type", "application/json; charset=utf-8")
      .header("Authorization", s"Bearer $token")
      .option(HttpOptions.readTimeout(timeoutSeconds.toMillis.toInt)).asString

    Either.cond(result.isSuccess, (), gcal2slack.Error(s"Failed to set status => $result"))
  }
}
| hugocf/gcal-slack-update | src/main/scala/cc/ferreira/gcal2slack/adapters/SlackClient.scala | Scala | mit | 1,152 |
package com.azavea.opentransit.service
import com.azavea.opentransit._
import com.azavea.opentransit.JobStatus
import com.azavea.opentransit.JobStatus._
import com.azavea.opentransit.json._
import com.azavea.opentransit.indicators._
import com.azavea.opentransit.database.{IndicatorsTable, IndicatorJobsTable}
import com.azavea.gtfs._
import com.github.nscala_time.time.Imports._
import com.github.tototoshi.slick.PostgresJodaSupport
import geotrellis.proj4._
import geotrellis.slick._
import scala.slick.driver.PostgresDriver
import scala.slick.jdbc.{GetResult, StaticQuery => Q}
import scala.slick.jdbc.JdbcBackend.{Database, Session, DatabaseDef}
import scala.slick.jdbc.meta.MTable
import spray.http.MediaTypes
import spray.http.StatusCodes.{Accepted, InternalServerError}
import spray.routing.{ExceptionHandler, HttpService}
import spray.util.LoggingContext
import scala.concurrent._
// JSON support
import spray.json._
import spray.httpx.SprayJsonSupport
import SprayJsonSupport._
import DefaultJsonProtocol._
import scala.util.{Success, Failure}
import com.typesafe.config.{ConfigFactory, Config}
/** Calculation state of a single indicator job.
  *
  * @param id     database id of the job (matches `IndicatorCalculationRequest.id`)
  * @param status nested status map; NOTE(review): outer/inner key semantics
  *               (grouping vs. indicator name) are not visible here -- confirm
  *               against CalculateIndicators.
  */
case class IndicatorJob(
  id: Int,
  status: Map[String, Map[String, JobStatus]]
)
/** Spray route that accepts indicator-calculation requests and runs them in the background. */
trait IndicatorsRoute extends Route { self: DatabaseInstance with DjangoClientComponent =>
  val config = ConfigFactory.load
  // Name of the main application database, used for all status/result writes below.
  val mainDbName = config.getString("database.name")
  val indicatorJobs = new IndicatorJobsTable {}
  /** Runs the indicator calculation for `request`, persisting results and
   *  status updates through the supplied database-by-name lookup.
   */
  def handleIndicatorsRequest(
    request: IndicatorCalculationRequest,
    dbByName: String => Database
  ): Unit = {
    CalculateIndicators(request, dbByName, new CalculationStatusManager
      with IndicatorsTable {
      // Called when one indicator finishes: bulk-insert its result containers.
      def indicatorFinished(containerGenerators: Seq[ContainerGenerator]) = {
        try {
          val indicatorResultContainers = containerGenerators.map(_.toContainer(request.id))
          dbByName(mainDbName) withTransaction { implicit session =>
            import PostgresDriver.simple._
            indicatorsTable.forceInsertAll(indicatorResultContainers:_*)
          }
        } catch {
          // For JDBC batch failures the actual cause is chained via getNextException.
          case e: java.sql.SQLException => {
            println(e.getNextException())
          }
        }
      }
      // Persist every status change so clients can poll calculation progress.
      def statusChanged(status: Map[String, Map[String, JobStatus]]) = {
        dbByName(mainDbName) withTransaction { implicit session =>
          indicatorJobs.updateCalcStatus(IndicatorJob(request.id, status))
        }
      }
    })
  }
  // Endpoint for triggering indicator calculations
  //
  // TODO: Add queue management. Calculation request jobs will be stored
  // in a table, and calculations will be run one (or more) at a time
  // in the background via an Actor.
  def indicatorsRoute = {
    pathEnd {
      post {
        entity(as[IndicatorCalculationRequest]) { request =>
          complete {
            TaskQueue.execute { // async
              handleIndicatorsRequest(request, dbByName)
            }.onComplete { // TaskQueue callback for result handling
              case Success(_) =>
                println(s"TaskQueue successfully completed - indicator finished")
              case Failure(e) =>
                dbByName(mainDbName) withTransaction { implicit session =>
                  indicatorJobs.failJob(request.id, "calculation_error")
                }
                println("Error calculating indicators!")
                println(e.getMessage)
                println(e.getStackTrace.mkString("\\n"))
            }
            // Respond 202 immediately; the calculation keeps running in the background.
            Accepted -> JsObject(
              "success" -> JsBoolean(true),
              "message" -> JsString(s"Calculations started (id: ${request.id})")
            )
          }
        }
      }
    }
  }
}
| flibbertigibbet/open-transit-indicators | scala/opentransit/src/main/scala/com/azavea/opentransit/service/IndicatorsRoute.scala | Scala | gpl-3.0 | 3,651 |
package customscalafx.scene.control
import scala.language.implicitConversions
import customjavafx.scene.{control => jfxsc}
import scalafx.scene.control.Slider
object FxmlYieldingSlider {
  /** Implicitly unwraps the ScalaFX wrapper to its JavaFX delegate.
    * The result type is stated explicitly: implicit definitions need an
    * explicit type to be resolved reliably (and it is mandatory in newer
    * Scala versions). The type matches the `delegate` declared on the class.
    */
  implicit def sfxSlider2jfx(v: FxmlYieldingSlider): jfxsc.FxmlYieldingSlider = v.delegate
}
/** ScalaFX wrapper around the custom JavaFX `FxmlYieldingSlider` control,
  * following the standard ScalaFX delegate pattern.
  */
class FxmlYieldingSlider(override val delegate: jfxsc.FxmlYieldingSlider = new jfxsc.FxmlYieldingSlider) extends Slider {
  /** Constructs a Slider control with the specified slider min, max and current value values. */
  def this(min: Double, max: Double, value: Double) {
    this(new jfxsc.FxmlYieldingSlider(min, max, value))
  }
}
| guilgaly/scalafx-trials | custom-controls/src/main/scala/customscalafx/scene/control/FxmlYieldingSlider.scala | Scala | mit | 594 |
#set( $symbol_pound = '#' )
#set( $symbol_dollar = '$' )
#set( $symbol_escape = '\\' )
package config.fixtures
import com.typesafe.config.ConfigFactory
import org.peelframework.core.beans.data.{CopiedDataSet, DataSet, ExperimentOutput, GeneratedDataSet}
import org.peelframework.core.beans.experiment.ExperimentSequence.SimpleParameters
import org.peelframework.core.beans.experiment.{ExperimentSequence, ExperimentSuite}
import org.peelframework.flink.beans.experiment.FlinkExperiment
import org.peelframework.flink.beans.job.FlinkJob
import org.peelframework.flink.beans.system.Flink
import org.peelframework.hadoop.beans.system.HDFS2
import org.peelframework.spark.beans.experiment.SparkExperiment
import org.peelframework.spark.beans.system.Spark
import org.springframework.context.annotation.{Bean, Configuration}
import org.springframework.context.{ApplicationContext, ApplicationContextAware}
/** `WordCount` experiment fixtures for the '${parentArtifactId}' bundle.
  *
  * NOTE: this file is a Maven-archetype (Velocity) template -- the
  * `${symbol_dollar}`/`${symbol_escape}`/`${symbol_pound}` placeholders (see the
  * `#set` directives at the top of the file) and `${parentArtifactId}`/`${version}`
  * are substituted at archetype generation time, not at compile time.
  */
@Configuration
class wordcount extends ApplicationContextAware {
  /* The enclosing application context. */
  var ctx: ApplicationContext = null
  def setApplicationContext(ctx: ApplicationContext): Unit = {
    this.ctx = ctx
  }
  // ---------------------------------------------------
  // Data Generators
  // ---------------------------------------------------
  // Flink job that generates the synthetic word corpus into the HDFS input path.
  @Bean(name = Array("datagen.words"))
  def `datagen.words`: FlinkJob = new FlinkJob(
    runner = ctx.getBean("flink-0.9.0", classOf[Flink]),
    command =
      """
        |-v -c ${package}.datagen.flink.WordGenerator ${symbol_escape}
        |${symbol_dollar}{app.path.datagens}/${parentArtifactId}-datagens-${version}.jar ${symbol_escape}
        |${symbol_dollar}{system.default.config.parallelism.total} ${symbol_escape}
        |${symbol_dollar}{datagen.tuples.per.task} ${symbol_escape}
        |${symbol_dollar}{datagen.dictionary.dize} ${symbol_escape}
        |${symbol_dollar}{datagen.data-distribution} ${symbol_escape}
        |${symbol_dollar}{system.hadoop-2.path.input}/rubbish.txt
      """.stripMargin.trim
  )
  // ---------------------------------------------------
  // Data Sets
  // ---------------------------------------------------
  @Bean(name = Array("dataset.words.static"))
  def `dataset.words.static`: DataSet = new CopiedDataSet(
    src = "${symbol_dollar}{app.path.datasets}/rubbish.txt",
    dst = "${symbol_dollar}{system.hadoop-2.path.input}/rubbish.txt",
    fs = ctx.getBean("hdfs-2.7.1", classOf[HDFS2])
  )
  @Bean(name = Array("dataset.words.generated"))
  def `dataset.words.generated`: DataSet = new GeneratedDataSet(
    src = ctx.getBean("datagen.words", classOf[FlinkJob]),
    dst = "${symbol_dollar}{system.hadoop-2.path.input}/rubbish.txt",
    fs = ctx.getBean("hdfs-2.7.1", classOf[HDFS2])
  )
  @Bean(name = Array("wordcount.output"))
  def `wordcount.output`: ExperimentOutput = new ExperimentOutput(
    path = "${symbol_dollar}{system.hadoop-2.path.output}/wordcount",
    fs = ctx.getBean("hdfs-2.7.1", classOf[HDFS2])
  )
  // ---------------------------------------------------
  // Experiments
  // ---------------------------------------------------
  // Flink and Spark word-count runs (3 repetitions each) over the static dataset.
  @Bean(name = Array("wordcount.default"))
  def `wordcount.default`: ExperimentSuite = {
    val `wordcount.flink.default` = new FlinkExperiment(
      name = "wordcount.flink.default",
      command =
        """
          |-v -c ${package}.flink.FlinkWC ${symbol_escape}
          |${symbol_dollar}{app.path.apps}/${parentArtifactId}-flink-jobs-${version}.jar ${symbol_escape}
          |${symbol_dollar}{system.hadoop-2.path.input}/rubbish.txt ${symbol_escape}
          |${symbol_dollar}{system.hadoop-2.path.output}/wordcount
        """.stripMargin.trim,
      config = ConfigFactory.parseString(""),
      runs = 3,
      runner = ctx.getBean("flink-0.9.0", classOf[Flink]),
      inputs = Set(ctx.getBean("dataset.words.static", classOf[DataSet])),
      outputs = Set(ctx.getBean("wordcount.output", classOf[ExperimentOutput]))
    )
    val `wordcount.spark.default` = new SparkExperiment(
      name = "wordcount.spark.default",
      command =
        """
          |--class ${package}.spark.SparkWC ${symbol_escape}
          |${symbol_dollar}{app.path.apps}/${parentArtifactId}-spark-jobs-${version}.jar ${symbol_escape}
          |${symbol_dollar}{system.hadoop-2.path.input}/rubbish.txt ${symbol_escape}
          |${symbol_dollar}{system.hadoop-2.path.output}/wordcount
        """.stripMargin.trim,
      config = ConfigFactory.parseString(""),
      runs = 3,
      runner = ctx.getBean("spark-1.4.0", classOf[Spark]),
      inputs = Set(ctx.getBean("dataset.words.static", classOf[DataSet])),
      outputs = Set(ctx.getBean("wordcount.output", classOf[ExperimentOutput]))
    )
    new ExperimentSuite(Seq(
      `wordcount.flink.default`,
      `wordcount.spark.default`))
  }
  // Scale-out suite: the __topXXX__ placeholder below is expanded by the
  // ExperimentSequence for the top005/top010/top020 slave sets.
  @Bean(name = Array("wordcount.scale-out"))
  def `wordcount.scale-out`: ExperimentSuite = {
    val `wordcount.flink.prototype` = new FlinkExperiment(
      name = "wordcount.flink.__topXXX__",
      command =
        """
          |-v -c ${package}.flink.FlinkWC ${symbol_escape}
          |${symbol_dollar}{app.path.apps}/${parentArtifactId}-flink-jobs-${version}.jar ${symbol_escape}
          |${symbol_dollar}{system.hadoop-2.path.input}/rubbish.txt ${symbol_escape}
          |${symbol_dollar}{system.hadoop-2.path.output}/wordcount
        """.stripMargin.trim,
      config = ConfigFactory.parseString(
        """
          |system.default.config.slaves = ${symbol_dollar}{env.slaves.__topXXX__.hosts}
          |system.default.config.parallelism.total = ${symbol_dollar}{env.slaves.__topXXX__.total.parallelism}
          |datagen.dictionary.dize = 10000
          |datagen.tuples.per.task = 10000000 ${symbol_pound} ~ 100 MB
          |datagen.data-distribution = Uniform
        """.stripMargin.trim),
      runs = 3,
      runner = ctx.getBean("flink-0.9.0", classOf[Flink]),
      inputs = Set(ctx.getBean("dataset.words.generated", classOf[DataSet])),
      outputs = Set(ctx.getBean("wordcount.output", classOf[ExperimentOutput]))
    )
    val `wordcount.spark.prototype` = new SparkExperiment(
      name = "wordcount.spark.__topXXX__",
      command =
        """
          |--class ${package}.spark.SparkWC ${symbol_escape}
          |${symbol_dollar}{app.path.apps}/${parentArtifactId}-spark-jobs-${version}.jar ${symbol_escape}
          |${symbol_dollar}{system.hadoop-2.path.input}/rubbish.txt ${symbol_escape}
          |${symbol_dollar}{system.hadoop-2.path.output}/wordcount
        """.stripMargin.trim,
      config = ConfigFactory.parseString(
        """
          |system.default.config.slaves = ${symbol_dollar}{env.slaves.__topXXX__.hosts}
          |system.default.config.parallelism.total = ${symbol_dollar}{env.slaves.__topXXX__.total.parallelism}
          |datagen.dictionary.dize = 10000
          |datagen.tuples.per.task = 10000000 ${symbol_pound} ~ 100 MB
          |datagen.data-distribution = Uniform
        """.stripMargin.trim),
      runs = 3,
      runner = ctx.getBean("spark-1.4.0", classOf[Spark]),
      inputs = Set(ctx.getBean("dataset.words.generated", classOf[DataSet])),
      outputs = Set(ctx.getBean("wordcount.output", classOf[ExperimentOutput]))
    )
    new ExperimentSuite(
      new ExperimentSequence(
        parameters = new SimpleParameters(
          paramName = "topXXX",
          paramVals = Seq("top005", "top010", "top020")),
        prototypes = Seq(
          `wordcount.flink.prototype`,
          `wordcount.spark.prototype`)))
  }
} | carabolic/peel | peel-archetypes/peel-flinkspark-bundle/src/main/resources/archetype-resources/__rootArtifactId__-bundle/src/main/resources/config/fixtures/wordcount.scala | Scala | apache-2.0 | 8,133 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.utils
import com.intel.analytics.bigdl.nn.abstractnn.Activity
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import scala.collection.mutable
import scala.collection.mutable.Map
import scala.collection.Set
import scala.collection.immutable.{Map => ImmutableMap}
/**
 * Simulate the Table data structure in lua
 *
 * Entries live in a mutable map. Integer keys 1..topIndex form a contiguous
 * "array part" (as in lua tables); any other keys behave as a plain hash part.
 *
 * @param state    backing key -> value store
 * @param topIndex index of last element in the contiguous numeric number
 *                 indexed elements start from 1
 */
class Table private[bigdl](
  private val state: Map[Any, Any] = new mutable.HashMap[Any, Any](),
  // index of last element in the contiguous numeric number indexed elements start from 1
  private var topIndex: Int = 0
) extends Serializable with Activity {

  /** Builds a table from an array: element i is stored under key i + 1. */
  private[bigdl] def this(data: Array[Any]) = {
    this(new mutable.HashMap[Any, Any](), 0)
    while (topIndex < data.length) {
      state.put(topIndex + 1, data(topIndex))
      topIndex += 1
    }
  }

  /**
   * Return false because it's not a Tensor
   *
   * @return false
   */
  override def isTensor: Boolean = false

  /**
   * Return true because it's a Table implemented from [[Activity]]
   *
   * @return true
   */
  override def isTable: Boolean = true

  /** Immutable snapshot of the current contents. */
  private[bigdl] def getState(): ImmutableMap[Any, Any] = {
    state.toMap
  }

  /**
   * Empty the Table
   */
  def clear(): this.type = {
    state.clear()
    topIndex = 0
    this
  }

  /** All keys currently present (array part and hash part alike). */
  def keySet: Set[Any] = state.keySet

  /** Applies `f` to every (key, value) pair. */
  def foreach[U](f: ((Any, Any)) => U): Unit = state.foreach(f)

  /** Maps every (key, value) pair through `func`. */
  def map[U](func: ((Any, Any)) => U): Iterable[U] = state.map(func)

  /** Value under `key` cast to T, or None when the key is absent. */
  def get[T](key: Any): Option[T] = {
    state.get(key).map(_.asInstanceOf[T])
  }

  /** Value under `key` cast to T, or `default` when the key is absent. */
  def getOrElse[T](key: Any, default: T): T = {
    state.getOrElse(key, default).asInstanceOf[T]
  }

  def contains(key: Any): Boolean = {
    state.contains(key)
  }

  /** Value under `key` cast to T; throws NoSuchElementException when absent. */
  def apply[T](key: Any): T = {
    state(key).asInstanceOf[T]
  }

  /** Stores `value` under `key`. When `key` is exactly topIndex + 1 the
   *  contiguous array part grows, absorbing any following integer keys.
   */
  def update(key: Any, value: Any): this.type = {
    state(key) = value
    if (key.isInstanceOf[Int] && topIndex + 1 == key.asInstanceOf[Int]) {
      topIndex += 1
      while (state.contains(topIndex + 1)) {
        topIndex += 1
      }
    }
    this
  }

  /** Shallow copy: keys and values are shared with this table. */
  override def clone(): Table = {
    val result = new Table()
    for (k <- state.keys) {
      result(k) = state(k)
    }
    result
  }

  override def toString: String = {
    s" {\\n\\t${state.filter(_._2 != null).map{case (key: Any, value: Any) =>
      s"$key: " + s"$value".split("\\n").mkString(s"\\n\\t${key.toString.replaceAll(".", " ")} ")
    }.mkString("\\n\\t")}\\n }"
  }

  /** Structural equality: same key set and, for every key, equal values;
   *  array values are compared element-wise via `deep`.
   */
  override def equals(obj: Any): Boolean = {
    if (obj == null) {
      return false
    }
    if (!obj.isInstanceOf[Table]) {
      return false
    }
    val other = obj.asInstanceOf[Table]
    if (this.eq(other)) {
      return true
    }
    if (this.state.keys.size != other.state.keys.size) {
      return false
    }
    // Fix: the previous implementation returned after inspecting only the first
    // array-valued entry (in nondeterministic HashMap order, ignoring all other
    // keys) and threw NoSuchElementException when `other` lacked one of our keys.
    this.state.keys.forall { key =>
      other.state.get(key) match {
        case None => false
        case Some(otherValue) =>
          (this.state(key), otherValue) match {
            case (a: Array[_], b: Array[_]) => a.deep == b.deep
            case (a, b) => a == b
          }
      }
    }
  }

  /** Hash code accumulated over all entries.
   *
   *  NOTE(review): array values hash by reference and the accumulation is
   *  order-dependent, so tables that compare equal via `equals` are not
   *  guaranteed to share a hash code in every case -- confirm before using
   *  Table as a hash-map key.
   */
  override def hashCode() : Int = {
    val seed = 37
    var hash = 1
    this.state.keys.foreach(key => {
      hash = hash * seed + key.hashCode()
      hash = hash * seed + this.state(key).hashCode()
    })
    hash
  }

  /** Removes the value at `index`. Inside the array part the following
   *  elements shift down by one; outside it the key is simply dropped.
   */
  def remove[T](index: Int): Option[T] = {
    require(index > 0)
    if (topIndex >= index) {
      var i = index
      val result = state(index)
      while (i < topIndex) {
        state(i) = state(i + 1)
        i += 1
      }
      state.remove(topIndex)
      topIndex -= 1
      Some(result.asInstanceOf[T])
    } else if (state.contains(index)) {
      state.remove(index).asInstanceOf[Option[T]]
    } else {
      None
    }
  }

  /** Removes and returns the last element of the array part, if any. */
  def remove[T](): Option[T] = {
    if (topIndex != 0) {
      remove[T](topIndex)
    } else {
      None
    }
  }

  /** Drops `obj` as a key if present; the array part is NOT compacted. */
  def delete(obj: Any): this.type = {
    if (state.get(obj).isDefined) {
      state.remove(obj)
    }
    this
  }

  /** Appends `obj` at the end of the array part. */
  def insert[T](obj: T): this.type = update(topIndex + 1, obj)

  /** Inserts `obj` at `index`, shifting the array part up when needed. */
  def insert[T](index: Int, obj: T): this.type = {
    require(index > 0)
    if (topIndex >= index) {
      var i = topIndex + 1
      topIndex += 1
      while (i > index) {
        state(i) = state(i - 1)
        i -= 1
      }
      update(index, obj)
    } else {
      update(index, obj)
    }
    this
  }

  /** Copies all (String-keyed) entries of `other` into this table. */
  def add(other: Table): this.type = {
    for (s <- other.state.keys) {
      require(s.isInstanceOf[String])
      this.state(s) = other(s)
    }
    this
  }

  /** Number of entries (array part plus hash part). */
  def length(): Int = state.size

  /** Persists this table to `path` via the project's File helper. */
  def save(path : String, overWrite : Boolean): this.type = {
    File.save(this, path, overWrite)
    this
  }

  /**
   * Recursively flatten the table to a single table containing no nested table inside
   *
   * @return the flatten table
   */
  def flatten(): Table = {
    flatten(1)
  }

  // Assumes keys 1..state.size are all present (a pure array-part table).
  private def flatten(startIndex: Int): Table = {
    var resultIndex = startIndex
    var i = 1
    val newState = mutable.Map[Any, Any]()
    while (i <= state.size) {
      state(i) match {
        case table: Table =>
          val newTable = table.flatten(resultIndex)
          newState ++= newTable.state
          resultIndex += newTable.length()
        case other =>
          newState.put(resultIndex, other)
          resultIndex += 1
      }
      i += 1
    }
    new Table(newState)
  }

  /**
   * Recursively inverse flatten the flatten table to the same shape with target
   *
   * @param target the target shape to become
   * @return the inverse flatten the table with the same shape with target
   */
  def inverseFlatten(target: Table): Table = {
    inverseFlatten(target, 1)
  }

  /**
   * Recursively inverse flatten the flatten table to the same shape with target
   *
   * @param target the target shape to become
   * @param startIndex for each iteration the start index as an offset
   * @return the inverse flatten the table with the same shape with target
   */
  private def inverseFlatten(target: Table, startIndex: Int): Table = {
    var i = 1
    var resultIndex = startIndex
    val newState = mutable.Map[Any, Any]()
    while (i <= target.length()) {
      target.state(i) match {
        case table: Table =>
          val newTable = inverseFlatten(table, resultIndex)
          newState.put(i, new Table(newTable.state))
          resultIndex += newTable.length() - 1
        case _ =>
          newState.put(i, state(resultIndex))
      }
      i += 1
      resultIndex += 1
    }
    new Table(newState)
  }

  /**
   * Return the elements of this table as a Seq.
   * This method assumes the key of this table are all
   * the integers between 1 to this.length(),
   * the values are all D
   */
  def toSeq[D]: Seq[D] = {
    for (i <- 0 until this.length()) yield {
      try {
        this(i + 1).asInstanceOf[D]
      } catch {
        case e: NoSuchElementException =>
          throw new UnsupportedOperationException("toSeq requires the key of this table are" +
            " all the integers between 1 to this.length()", e)
      }
    }
  }

  /** Tables cannot be viewed as tensors; always throws. */
  override def toTensor[D]
  (implicit ev: TensorNumeric[D]): Tensor[D] =
    throw new IllegalArgumentException("Table cannot be cast to Tensor")

  override def toTable: Table = this
}
/** Factory helpers for constructing [[Table]] instances. */
object T {

  /** Creates an empty table. */
  def apply(): Table = new Table()

  /**
   * Construct a table from a sequence of value.
   *
   * The index + 1 will be used as the key
   */
  def apply(data1: Any, datas: Any*): Table =
    new Table((data1 +: datas).toArray)

  /**
   * Construct a table from an array
   *
   * The index + 1 will be used as the key
   *
   * @param data
   * @return
   */
  def array(data: Array[_]): Table =
    new Table(data.asInstanceOf[Array[Any]])

  /**
   * Construct a table from an array
   *
   * The index + 1 will be used as the key
   *
   * @param data
   * @return
   */
  def seq(data: Seq[_]): Table =
    new Table(data.toArray.asInstanceOf[Array[Any]])

  /**
   * Construct a table from a sequence of pair.
   */
  def apply(tuple: Tuple2[Any, Any], tuples: Tuple2[Any, Any]*): Table = {
    val result = new Table()
    (tuple +: tuples).foreach { case (k, v) => result(k) = v }
    result
  }

  /** Loads a previously saved table from `path`. */
  def load(path : String) : Table =
    File.load(path)
}
| wzhongyuan/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/utils/Table.scala | Scala | apache-2.0 | 9,088 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600e.v2
import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.ct600e.v2.retriever.CT600EBoxRetriever
/** CT600E (v2) box E10: income from other charities; optional integer input. */
case class E10(value: Option[Int]) extends CtBoxIdentifier("Income from other charities") with CtOptionalInteger with Input with ValidatableBox[CT600EBoxRetriever] {
  // The only constraint is that any supplied value must be zero or positive.
  override def validate(boxRetriever: CT600EBoxRetriever): Set[CtValidation] = {
    validateZeroOrPositiveInteger(this)
  }
}
| pncampbell/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600e/v2/E10.scala | Scala | apache-2.0 | 1,022 |
package com.twitter.concurrent
import java.util.concurrent.ConcurrentLinkedQueue
import java.util.concurrent.atomic.AtomicInteger
import com.twitter.util.{Future, Promise, Try}
/**
 * Efficient ordered ''serialization'' of operations.
 *
 * '''Note:''' This should not be used in place of Scala's
 * `synchronized`, but rather only when serialization semantics are
 * required.
 */
trait Serialized {
  /** A queued unit of work: the thunk to execute and the promise that
   *  receives its result (success or thrown exception).
   */
  protected case class Job[T](promise: Promise[T], doItToIt: () => T) {
    def apply(): Unit = {
      // Try captures any exception from the thunk and completes the promise with it.
      promise.update { Try { doItToIt() } }
    }
  }
  // Count of enqueued-but-unfinished jobs. The caller that bumps this 0 -> 1
  // becomes the single drainer of the queue, which serializes execution order.
  private[this] val nwaiters: AtomicInteger = new AtomicInteger(0)
  protected val serializedQueue: java.util.Queue[Job[_]] = new ConcurrentLinkedQueue[Job[_]]
  /** Enqueues `f` and returns a Future of its result; queued operations run
   *  one at a time, in submission order.
   */
  protected def serialized[A](f: => A): Future[A] = {
    val result = new Promise[A]
    serializedQueue add { Job(result, () => f) }
    if (nwaiters.getAndIncrement() == 0) {
      // We are the drainer: run jobs until the pending count drops back to zero.
      Try { serializedQueue.remove()() }
      while (nwaiters.decrementAndGet() > 0) {
        Try { serializedQueue.remove()() }
      }
    }
    result
  }
}
| twitter/util | util-core/src/main/scala/com/twitter/concurrent/Serialized.scala | Scala | apache-2.0 | 1,061 |
package scala.runtime
object Statics {
  /** Mixes a data value into an intermediate MurmurHash3 hash value. */
  def mix(hash: Int, data: Int): Int = {
    val h1 = mixLast(hash, data)
    val h2 = Integer.rotateLeft(h1, 13)
    h2 * 5 + 0xe6546b64
  }

  /** Like [[mix]] but without the final shuffle; only valid for the very
   *  last block of input (MurmurHash3 tail handling).
   */
  def mixLast(hash: Int, data: Int): Int = {
    val k1 = data
    val k2 = k1 * 0xcc9e2d51
    val k3 = Integer.rotateLeft(k2, 15)
    val k4 = k3 * 0x1b873593
    hash ^ k4
  }

  /** Finalizes a hash: folds in the element count and avalanches.
   *
   *  Fix: the length must be combined with XOR (`hash ^ length`), as in
   *  MurmurHash3 and the JVM's `scala.runtime.Statics`; the previous
   *  `hash & length` discarded nearly all of the accumulated hash bits.
   */
  def finalizeHash(hash: Int, length: Int): Int =
    avalanche(hash ^ length)

  /** Forces all bits of the hash to avalanche (final MurmurHash3 step). */
  def avalanche(h: Int): Int = {
    val h1 = h ^ (h >>> 16)
    val h2 = h1 * 0x85ebca6b
    val h3 = h2 ^ (h2 >>> 13)
    val h4 = h3 * 0xc2b2ae35
    val h5 = h4 ^ (h4 >>> 16)
    h5
  }

  /** Hash of a Long; equals the Int value whenever the Long fits in an Int. */
  def longHash(lv: Long): Int =
    if (lv.asInstanceOf[Int] == lv) lv.asInstanceOf[Int]
    else longHashShifted(lv)

  /** XOR-folds the upper 32 bits of a Long into the lower 32. */
  def longHashShifted(lv: Long): Int =
    (lv ^ (lv >>> 32)).asInstanceOf[Int]

  /** Hash of a Double, consistent with the hash of any Int, Float or Long
   *  representing the same numeric value (cooperative equality).
   */
  def doubleHash(dv: Double): Int = {
    val iv = dv.asInstanceOf[Int]
    if (iv == dv) iv
    else {
      val fv = dv.asInstanceOf[Float]
      if (fv == dv) java.lang.Float.floatToIntBits(fv)
      else {
        val lv = dv.asInstanceOf[Long]
        if (lv == dv) lv.asInstanceOf[Int]
        else longHashShifted(java.lang.Double.doubleToLongBits(dv))
      }
    }
  }

  /** Hash of a Float, consistent with equal Int and Long values. */
  def floatHash(fv: Float): Int = {
    val iv = fv.asInstanceOf[Int]
    if (iv == fv) iv
    else {
      val lv = fv.asInstanceOf[Long]
      if (lv == fv) longHashShifted(lv)
      else java.lang.Float.floatToIntBits(fv)
    }
  }

  /** Null-safe hash dispatch implementing Scala's cooperative equality:
   *  boxed numeric types hash through the methods above, everything else
   *  through its own hashCode; null hashes to 0.
   */
  def anyHash(x: Object): Int = x match {
    case null => 0
    case x: java.lang.Long => longHash(x.longValue)
    case x: java.lang.Double => doubleHash(x.doubleValue)
    case x: java.lang.Float => floatHash(x.floatValue)
    case _ => x.hashCode
  }
}
| cedricviaccoz/scala-native | nativelib/src/main/scala/scala/runtime/Statics.scala | Scala | bsd-3-clause | 1,750 |
package jsky.app.ot.tpe
import java.awt.geom.Point2D
import edu.gemini.spModel.core.{Coordinates, Declination, RightAscension}
import jsky.coords.CoordinateConverter
/**
 * Utility methods to convert coordinates for Java
 */
object CoordinatesUtilities {
  /**
   * Convert the given user coordinates location to world coordinates.
   */
  def userToWorldCoords(cc: CoordinateConverter, x: Double, y: Double): Coordinates = {
    val p = new Point2D.Double(x, y)
    // The converter mutates `p` in place; afterwards p.x / p.y hold the
    // converted values, read below as degrees (per fromDegrees).
    cc.userToWorldCoords(p, false)
    // An out-of-range declination falls back to zero rather than failing.
    Coordinates(RightAscension.fromDegrees(p.x), Declination.fromDegrees(p.y).getOrElse(Declination.zero))
  }
}
| spakzad/ocs | bundle/jsky.app.ot/src/main/scala/jsky/app/ot/tpe/CoordinatesUtilities.scala | Scala | bsd-3-clause | 618 |
package net.revenj.patterns
import java.io.Closeable
import scala.concurrent.Future
/** A [[DataContext]] with explicit, asynchronous transaction boundaries;
  * also [[java.io.Closeable]] so resources can be released when done.
  */
trait UnitOfWork extends DataContext with Closeable {
  /** Applies the changes made through this context. */
  def commit(): Future[Unit]
  /** Discards the changes made through this context. */
  def rollback(): Future[Unit]
}
| ngs-doo/revenj | scala/revenj-core/src/main/scala/net/revenj/patterns/UnitOfWork.scala | Scala | bsd-3-clause | 219 |
package com.github.rosmith.nlp.query.model
/** Mutable model of a typed dependency between a dependent and a governor word.
  * On construction it registers itself with its dependent word.
  */
class TypedDependencyModel(dependent: WordModel, governor: WordModel) extends Serializable {
  // Position of the sentence this dependency belongs to; -1 until set.
  private var _sentencePosition: Int = -1
  // Dependency label -- NOTE(review): presumably the relation name; null until set.
  private var _value: String = null
  // Register this dependency on the dependent word.
  dependent.typedDep(this)
  /** Setter: records the sentence position. */
  def sentencePosition(sP: Int) = _sentencePosition = sP
  /** Setter: records the dependency label. */
  def value(s: String) = _value = s
  def dependentIdentity = dependent.identity
  def governorIdentity = governor.identity
  /** Getter for the sentence position (-1 when unset). */
  def sentencePosition = _sentencePosition
  /** Getter for the dependency label (null when unset). */
  def value = _value
  /** Identity string built from both word positions and the sentence position. */
  def identity() = {
    Array("TypedDep", dependent.position, governor.position, sentencePosition).mkString("_")
  }
  /** Alias for [[equals]]. */
  def ===(tdm: TypedDependencyModel) = {
    this.equals(tdm)
  }
  // Equality compares the dependent/governor identities and sentence position;
  // NOTE(review): the label (_value) does not participate -- confirm intended.
  override def equals(obj: Any) = {
    if (obj != null && obj.isInstanceOf[TypedDependencyModel]) {
      var tdm = obj.asInstanceOf[TypedDependencyModel]
      dependentIdentity === tdm.dependentIdentity &&
        governorIdentity === tdm.governorIdentity &&
        sentencePosition == tdm.sentencePosition
    } else {
      false
    }
  }
  // Null-safe string equality used above: null compares unequal to everything.
  protected implicit class StringImplicit(str: String) {
    def ===(that: String): Boolean = {
      if (str == null || that == null) {
        false
      } else {
        str.equals(that)
      }
    }
  }
} | rosmith/giet | src/main/scala/com/github/rosmith/nlp/query/model/TypedDependencyModel.scala | Scala | mit | 1,241 |
package com.autodesk.tct.controllers.admin
import controllers.AssetsBuilder
import play.api.mvc.{Action, AnyContent}
/** Asset-serving endpoints delegating to Play's [[AssetsBuilder]]. */
object AssetsController extends AssetsBuilder {
  // Three-argument route variant: the extra `obj` argument is accepted to
  // satisfy the route signature but deliberately ignored.
  def at(path: String, file: String, obj: Any): Action[AnyContent] = {
    at(path, file)
  }
  // Serves `file` found under `path` (delegates to AssetsBuilder.at).
  def index(path: String, file: String): Action[AnyContent] = {
    at(path, file)
  }
}
| adsk-cp-tct/challenger-backend | app/com/autodesk/tct/controllers/admin/AssetsController.scala | Scala | gpl-3.0 | 352 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package wvlet
/** Package object for `wvlet.dataflow`; currently defines no shared members. */
package object dataflow {}
| wvlet/wvlet | wvlet-core/src/main/scala/wvlet/dataflow/package.scala | Scala | apache-2.0 | 603 |
// Generated by the Scala Plugin for the Protocol Buffer Compiler.
// Do not edit!
//
// Protofile syntax: PROTO3
package com.google.protobuf.wrappers
/** Wrapper message for `double`.
  *
  * The JSON representation for `DoubleValue` is JSON number.
  *
  * NOTE: ScalaPB-generated code (see the header at the top of this file);
  * regenerate from the .proto instead of editing by hand.
  *
  * @param value
  *   The double value.
  */
@SerialVersionUID(0L)
final case class DoubleValue(
    value: _root_.scala.Double = 0.0
    ) extends scalapb.GeneratedMessage with scalapb.Message[DoubleValue] with scalapb.lenses.Updatable[DoubleValue] {
    @transient
    // Cached wire size; 0 means "not yet computed" (0.0 encodes to size 0 anyway).
    private[this] var __serializedSizeCachedValue: _root_.scala.Int = 0
    private[this] def __computeSerializedValue(): _root_.scala.Int = {
      var __size = 0
      {
        val __value = value
        if (__value != 0.0) {
          __size += _root_.com.google.protobuf.CodedOutputStream.computeDoubleSize(1, __value)
        }
      };
      __size
    }
    final override def serializedSize: _root_.scala.Int = {
      var read = __serializedSizeCachedValue
      if (read == 0) {
        read = __computeSerializedValue()
        __serializedSizeCachedValue = read
      }
      read
    }
    // proto3 semantics: the default value (0.0) is not written to the wire.
    def writeTo(`_output__`: _root_.com.google.protobuf.CodedOutputStream): _root_.scala.Unit = {
      {
        val __v = value
        if (__v != 0.0) {
          _output__.writeDouble(1, __v)
        }
      };
    }
    def mergeFrom(`_input__`: _root_.com.google.protobuf.CodedInputStream): com.google.protobuf.wrappers.DoubleValue = {
      var __value = this.value
      var _done__ = false
      while (!_done__) {
        val _tag__ = _input__.readTag()
        _tag__ match {
          case 0 => _done__ = true
          case 9 =>
            __value = _input__.readDouble()
          case tag => _input__.skipField(tag)
        }
      }
      com.google.protobuf.wrappers.DoubleValue(
          value = __value
      )
    }
    def withValue(__v: _root_.scala.Double): DoubleValue = copy(value = __v)
    def getFieldByNumber(__fieldNumber: _root_.scala.Int): _root_.scala.Any = {
      (__fieldNumber: @_root_.scala.unchecked) match {
        case 1 => {
          val __t = value
          if (__t != 0.0) __t else null
        }
      }
    }
    def getField(__field: _root_.scalapb.descriptors.FieldDescriptor): _root_.scalapb.descriptors.PValue = {
      _root_.scala.Predef.require(__field.containingMessage eq companion.scalaDescriptor)
      (__field.number: @_root_.scala.unchecked) match {
        case 1 => _root_.scalapb.descriptors.PDouble(value)
      }
    }
    def toProtoString: _root_.scala.Predef.String = _root_.scalapb.TextFormat.printToUnicodeString(this)
    def companion = com.google.protobuf.wrappers.DoubleValue
}
/** Companion for [[DoubleValue]]: descriptors, parsing and lens support.
  * ScalaPB-generated code; regenerate rather than hand-edit.
  */
object DoubleValue extends scalapb.GeneratedMessageCompanion[com.google.protobuf.wrappers.DoubleValue] {
  implicit def messageCompanion: scalapb.GeneratedMessageCompanion[com.google.protobuf.wrappers.DoubleValue] = this
  def fromFieldsMap(__fieldsMap: scala.collection.immutable.Map[_root_.com.google.protobuf.Descriptors.FieldDescriptor, _root_.scala.Any]): com.google.protobuf.wrappers.DoubleValue = {
    _root_.scala.Predef.require(__fieldsMap.keys.forall(_.getContainingType() == javaDescriptor), "FieldDescriptor does not match message type.")
    val __fields = javaDescriptor.getFields
    com.google.protobuf.wrappers.DoubleValue(
      __fieldsMap.getOrElse(__fields.get(0), 0.0).asInstanceOf[_root_.scala.Double]
    )
  }
  implicit def messageReads: _root_.scalapb.descriptors.Reads[com.google.protobuf.wrappers.DoubleValue] = _root_.scalapb.descriptors.Reads{
    case _root_.scalapb.descriptors.PMessage(__fieldsMap) =>
      _root_.scala.Predef.require(__fieldsMap.keys.forall(_.containingMessage == scalaDescriptor), "FieldDescriptor does not match message type.")
      com.google.protobuf.wrappers.DoubleValue(
        __fieldsMap.get(scalaDescriptor.findFieldByNumber(1).get).map(_.as[_root_.scala.Double]).getOrElse(0.0)
      )
    case _ => throw new RuntimeException("Expected PMessage")
  }
  def javaDescriptor: _root_.com.google.protobuf.Descriptors.Descriptor = WrappersProto.javaDescriptor.getMessageTypes.get(0)
  def scalaDescriptor: _root_.scalapb.descriptors.Descriptor = WrappersProto.scalaDescriptor.messages(0)
  def messageCompanionForFieldNumber(__number: _root_.scala.Int): _root_.scalapb.GeneratedMessageCompanion[_] = throw new MatchError(__number)
  lazy val nestedMessagesCompanions: Seq[_root_.scalapb.GeneratedMessageCompanion[_ <: _root_.scalapb.GeneratedMessage]] = Seq.empty
  def enumCompanionForFieldNumber(__fieldNumber: _root_.scala.Int): _root_.scalapb.GeneratedEnumCompanion[_] = throw new MatchError(__fieldNumber)
  lazy val defaultInstance = com.google.protobuf.wrappers.DoubleValue(
  )
  implicit class DoubleValueLens[UpperPB](_l: _root_.scalapb.lenses.Lens[UpperPB, com.google.protobuf.wrappers.DoubleValue]) extends _root_.scalapb.lenses.ObjectLens[UpperPB, com.google.protobuf.wrappers.DoubleValue](_l) {
    def value: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Double] = field(_.value)((c_, f_) => c_.copy(value = f_))
  }
  final val VALUE_FIELD_NUMBER = 1
  def of(
    value: _root_.scala.Double
  ): _root_.com.google.protobuf.wrappers.DoubleValue = _root_.com.google.protobuf.wrappers.DoubleValue(
    value
  )
}
| dotty-staging/ScalaPB | scalapb-runtime/non-jvm/src/main/scala/com/google/protobuf/wrappers/DoubleValue.scala | Scala | apache-2.0 | 5,283 |
import com.rabbitmq.client._
/** RabbitMQ work-queue consumer: takes tasks off `task_queue`, "processes"
  * them (one second per '.' in the message) and acknowledges manually.
  */
object Worker {

  private val TASK_QUEUE_NAME = "task_queue"

  def main(argv: Array[String]): Unit = {
    val connectionFactory = new ConnectionFactory()
    connectionFactory.setHost("localhost")
    val channel = connectionFactory.newConnection().createChannel()

    // Declare a durable, non-exclusive, non-auto-deleted queue (no extra args).
    channel.queueDeclare(TASK_QUEUE_NAME, true, false, false, null)
    println(" [*] Waiting for messages. To exit press CTRL+C")

    // Hand this worker at most one unacknowledged message at a time.
    channel.basicQos(1)

    val onDelivery: DeliverCallback = (_, delivery) => {
      val body = new String(delivery.getBody, "UTF-8")
      println(" [x] Received '" + body + "'")
      try doWork(body)
      finally {
        println(" Done")
        // Ack only after processing so unfinished work is redelivered.
        channel.basicAck(delivery.getEnvelope.getDeliveryTag, false)
      }
    }

    // autoAck = false: acknowledgements are sent explicitly above.
    channel.basicConsume(TASK_QUEUE_NAME, false, onDelivery, _ => {})
  }

  /** Simulates work by sleeping one second for every '.' in the task text. */
  private def doWork(task: String): Unit = {
    print(" [x] Processing ")
    task.foreach {
      case '.' =>
        try {
          print(".")
          Thread.sleep(1000)
        } catch {
          case _: InterruptedException => Thread.currentThread().interrupt()
        }
      case _ => ()
    }
  }
}
| rabbitmq/rabbitmq-tutorials | scala/src/main/scala/Worker.scala | Scala | apache-2.0 | 1,152 |
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.hyperledger.scodec.interop
import _root_.akka.util.ByteString
import scodec.bits.ByteVector
package object akka {

  /** Pimps Akka's [[ByteString]] with a conversion into scodec's [[ByteVector]]. */
  implicit class EnrichedByteString(val value: ByteString) extends AnyVal {
    /** Wraps this [[ByteString]] in a [[ByteVector]] view — no bytes are copied. */
    def toByteVector: ByteVector = {
      val bs = value
      ByteVector.view(i => bs(i), bs.size)
    }
  }

  /** Pimps scodec's [[ByteVector]] with a conversion into Akka's [[ByteString]]. */
  implicit class EnrichedByteVector(val value: ByteVector) extends AnyVal {
    /** Copies this [[ByteVector]]'s bytes into an immutable [[ByteString]]. */
    def toByteString: ByteString = ByteString.fromByteBuffer(value.toByteBuffer)
  }
}
| DigitalAssetCom/hlp-candidate | server/network/src/main/scala/org/hyperledger/scodec/interop/akka/package.scala | Scala | apache-2.0 | 1,021 |
import com.bizo.mighty.csv.CSVReader
import java.net.URLEncoder
import org.apache.jena.riot.RDFFormat
import org.apache.jena.riot.RDFDataMgr
import java.io.FileOutputStream
import org.apache.jena.rdf.model.ResourceFactory
import org.apache.jena.rdf.model.Resource
import org.apache.jena.rdf.model.ModelFactory
import org.apache.jena.rdf.model.Model
import org.apache.jena.vocabulary.RDF
import org.apache.jena.vocabulary.OWL
import org.apache.jena.vocabulary.DC
import org.apache.jena.vocabulary.DC_11
import org.apache.jena.vocabulary.RDFS
import org.apache.jena.sparql.vocabulary.FOAF
import com.github.nscala_time.time.Imports._
import org.joda.time.format.ISODateTimeFormat
import org.apache.jena.shared.PrefixMapping
import org.apache.jena.datatypes.xsd.XSDDatatype
import com.bizo.mighty.csv.CSVReaderSettings
import com.bizo.mighty.csv.CSVDictReader
/**
 * One-off converter: reads the letter metadata rows in `ee.tsv` (tab separated)
 * and writes them out as CIDOC-CRM flavoured RDF in Turtle form to `ee.ttl`.
 * Place resources are shared with the PROCOPE dataset namespaces.
 */
object EECSV2RDF extends Anything2RDF {
  // Schema namespace and instance namespace for this dataset.
  val sns = "http://ldf.fi/ee-schema#"
  val ns = "http://ldf.fi/ee/"
  // A letter is modelled as a CIDOC-CRM Physical Man-Made Thing.
  val Letter = EC("Letter").addProperty(RDFS.subClassOf, CIDOC.Physical_ManMade_Thing)
  // Properties relating a letter to places, people and time-spans.
  val sourceLocation = EOP("source location")
  val via = EDP("via")
  val viaLocation = EOP("via location")
  val forwardedLocation = EOP("forwarded location")
  val destLocation = EOP("destination location")
  val possibleDate = EOP("has possible time-span")
  val authorP = EOP("author")
  val recipientP = EOP("recipient")
  possibleDate.addProperty(RDFS.subPropertyOf, CIDOC.has_timeSpan)
  def main(args: Array[String]): Unit = {
    // One row per letter; columns are addressed by header name.
    val wr = CSVDictReader("ee.tsv")(CSVReaderSettings.Standard.copy(separator='\t'))
    for (r <- wr) {
      // Human-readable label for the letter resource.
      val label = "Letter from " + r("Author names") + " to " + r("Recipient names") + (if (!r("letdoc_FullDate_WithDay").isEmpty()) " on "+ r("letdoc_FullDate_WithDay") else "")
      val letter = I(ns+"letter_"+encode(r("letdocID")),Map("en"->label),Letter)
      // Author person with optional name parts.
      val author = I(ns+"person_"+encode(r("Author perID")),Map("en"->r("Author names")),CIDOC.Person)
      if(!r("Author first name").isEmpty()) author.addProperty(FOAF.firstName,r("Author first name"))
      if(!r("Author last name").isEmpty()) author.addProperty(FOAF.family_name,r("Author last name"))
      letter.addProperty(authorP,author)
      // Source place hierarchy: city -> province -> country, each optional.
      // When no country is given, the "city" column is typed as a Country instead.
      val srcCountry = if (!r("src_country").isEmpty()) Some(I(PROCOPECSV2RDF.ns+"location_"+encode(r("src_country")),Map("en"->r("src_country")),PROCOPECSV2RDF.Country)) else None
      val srcProvince = if (!r("src_province").isEmpty()) Some(I(PROCOPECSV2RDF.ns+"location_"+encode(r("src_province")),Map("en"->r("src_province")),PROCOPECSV2RDF.Province)) else None
      srcProvince.foreach(p => srcCountry.foreach(c => p.addProperty(CIDOC.place_falls_within,c)))
      val srcCity = if (!r("src_city").isEmpty()) Some(
          if (!r("src_country").isEmpty()) I(PROCOPECSV2RDF.ns+"location_"+encode(r("src_city")),Map("en"->r("src_city")),PROCOPECSV2RDF.City) else I(PROCOPECSV2RDF.ns+"location_"+encode(r("src_city")),Map("en"->r("src_city")),PROCOPECSV2RDF.Country)
        ) else None
      srcCity.foreach(city => srcProvince.orElse(srcCountry).foreach(sp => city.addProperty(CIDOC.place_falls_within,sp)))
      // Link the letter to the most specific source place available.
      srcCity.orElse(srcProvince.orElse(srcCountry)).foreach(p => letter.addProperty(sourceLocation,p))
      // "Via" place: the city is recorded only as a literal (no resource minted).
      // NOTE(review): viaProvince is typed as PROCOPECSV2RDF.City while the
      // src/forwarded/dest provinces use Province — looks like a copy-paste
      // slip; confirm the intended typing before relying on it.
      val viaCountry = if (!r("via_country").isEmpty()) Some(I(PROCOPECSV2RDF.ns+"location_"+encode(r("via_country")),Map("en"->r("via_country")),PROCOPECSV2RDF.Country)) else None
      val viaProvince = if (!r("via_province").isEmpty()) Some(I(PROCOPECSV2RDF.ns+"location_"+encode(r("via_province")),Map("en"->r("via_province")),PROCOPECSV2RDF.City)) else None
      viaProvince.foreach(p => viaCountry.foreach(c => p.addProperty(CIDOC.place_falls_within,c)))
      if (!r("via_city").isEmpty()) letter.addProperty(via,r("via_city"))
      viaProvince.orElse(viaCountry).foreach(p => letter.addProperty(viaLocation,p))
      // Recipient person with optional name parts.
      val recipient = I(ns+"person_"+encode(r("Recipient perID")),Map("en"->r("Recipient names")),CIDOC.Person)
      if(!r("Recipient first name").isEmpty()) recipient.addProperty(FOAF.firstName,r("Recipient first name"))
      if(!r("Recipient last name").isEmpty()) recipient.addProperty(FOAF.family_name,r("Recipient last name"))
      letter.addProperty(recipientP,recipient)
      // Forwarded-to place hierarchy (same shape as the source hierarchy).
      val forwardedCountry = if (!r("forwarded_country").isEmpty()) Some(I(PROCOPECSV2RDF.ns+"location_"+encode(r("forwarded_country")),Map("en"->r("forwarded_country")),PROCOPECSV2RDF.Country)) else None
      val forwardedProvince = if (!r("forwarded_province").isEmpty()) Some(I(PROCOPECSV2RDF.ns+"location_"+encode(r("forwarded_province")),Map("en"->r("forwarded_province")),PROCOPECSV2RDF.Province)) else None
      forwardedProvince.foreach(p => forwardedCountry.foreach(c => p.addProperty(CIDOC.place_falls_within,c)))
      val forwardedCity = if (!r("forwarded_city").isEmpty()) Some(
          if (!r("forwarded_country").isEmpty()) I(PROCOPECSV2RDF.ns+"location_"+encode(r("forwarded_city")),Map("en"->r("forwarded_city")),PROCOPECSV2RDF.City) else I(PROCOPECSV2RDF.ns+"location_"+encode(r("forwarded_city")),Map("en"->r("forwarded_city")),PROCOPECSV2RDF.Country)
        ) else None
      forwardedCity.foreach(city => forwardedProvince.orElse(forwardedCountry).foreach(sp => city.addProperty(CIDOC.place_falls_within,sp)))
      forwardedCity.orElse(forwardedProvince.orElse(forwardedCountry)).foreach(p => letter.addProperty(forwardedLocation,p))
      // Destination place hierarchy (same shape as the source hierarchy).
      val destCountry = if (!r("dest_country").isEmpty()) Some(I(PROCOPECSV2RDF.ns+"location_"+encode(r("dest_country")),Map("en"->r("dest_country")),PROCOPECSV2RDF.Country)) else None
      val destProvince = if (!r("dest_province").isEmpty()) Some(I(PROCOPECSV2RDF.ns+"location_"+encode(r("dest_province")),Map("en"->r("dest_province")),PROCOPECSV2RDF.Province)) else None
      destProvince.foreach(p => destCountry.foreach(c => p.addProperty(CIDOC.place_falls_within,c)))
      val destCity = if (!r("dest_city").isEmpty()) Some(
          if (!r("dest_country").isEmpty()) I(PROCOPECSV2RDF.ns+"location_"+encode(r("dest_city")),Map("en"->r("dest_city")),PROCOPECSV2RDF.City) else I(PROCOPECSV2RDF.ns+"location_"+encode(r("dest_city")),Map("en"->r("dest_city")),PROCOPECSV2RDF.Country)
        ) else None
      destCity.foreach(city => destProvince.orElse(destCountry).foreach(sp => city.addProperty(CIDOC.place_falls_within,sp)))
      destCity.orElse(destProvince.orElse(destCountry)).foreach(p => letter.addProperty(destLocation,p))
      // Date handling: values prefixed "(possible date)" get the weaker
      // possibleDate property instead of crm:has_timeSpan.
      if (!r("letdoc_FullDate_WithDay").isEmpty()) {
        val (bdateTime,edateTime) = makeDateTime(r("letdoc_Date_year"),r("letdoc_Date_month"),r("letdoc_Date_date"))
        val date = I(s"${PROCOPECSV2RDF.ns}date_${bdateTime}TO${edateTime}",if (r("letdoc_FullDate_WithDay").startsWith("(possible date)")) r("letdoc_FullDate_WithDay").substring(15).trim else r("letdoc_FullDate_WithDay") ,CIDOC.TimeSpan)
        date.addProperty(CIDOC.begin_of_the_begin,bdateTime,XSDDatatype.XSDdateTime)
        date.addProperty(CIDOC.end_of_the_end,edateTime,XSDDatatype.XSDdateTime)
        if (r("letdoc_FullDate_WithDay").startsWith("(possible date)")) letter.addProperty(possibleDate,date)
        else letter.addProperty(CIDOC.has_timeSpan,date)
      }
      // NOTE(review): these contains-checks with System.exit look like leftover
      // debug assertions guarding against known-bad place hierarchies; consider
      // removing them or turning them into proper validation/logging.
      if (m.contains(ResourceFactory.createResource(PROCOPECSV2RDF.ns+"location_Paris"), CIDOC.place_falls_within,ResourceFactory.createResource(PROCOPECSV2RDF.ns+"location_England"))) {
        println(r)
        System.exit(-1)
      }
      if (m.contains(ResourceFactory.createResource(PROCOPECSV2RDF.ns+"location_Paris"), CIDOC.place_falls_within,ResourceFactory.createResource(PROCOPECSV2RDF.ns+"location_Switzerland"))) {
        println(r)
        System.exit(-2)
      }
    }
    // Register namespace prefixes and serialise the whole model as Turtle.
    m.setNsPrefixes(PrefixMapping.Standard)
    m.setNsPrefix("crm",CIDOC.ns)
    m.setNsPrefix("skos",SKOS.ns)
    m.setNsPrefix("foaf",FOAF.NS)
    m.setNsPrefix("ee",ns)
    m.setNsPrefix("ees",sns)
    m.setNsPrefix("procope",PROCOPECSV2RDF.ns)
    m.setNsPrefix("procopes",PROCOPECSV2RDF.sns)
    RDFDataMgr.write(new FileOutputStream("ee.ttl"), m, RDFFormat.TTL)
  }
}
| jiemakel/anything2rdf | src/main/scala/eecsv2rdf.scala | Scala | mit | 8,057 |
package actors
/**
* The batch processor sub-system is responsible for processing of individual batches.
* It contains a parent processor/scheduler and multiple workers (1 for each user)
*
* The API for the batchProcessor is :
* Accepts:
* SubmitBatch (user: String, batch: Batch)
* - Submit the given batch over for processing. No reply to this message. The results will be logged and saved
* JobStatus (user: String, status: OperationStatus)
* - The Mining sub-system and the DsOperations sub-system are supposed to send this message, one for each job received.
* No reply to this message.
*
* Sends:
* SubmitDsOpJob (user: String, job: DataSetOp)
* - This message is aimed for the DsOperation subsystem. Once the Job is complete/aborted, a JobStatus message is to be sent
* to the batch Processor
* SubmitMineJob (user: String, job: MineOp)
 * - Similar to SubmitDsOpJob and is aimed at the Miner subsystem. Expects a JobStatus once complete.
* Log(...)
 * - It logs the result of completed batch operations but not individual jobs.
* SyError(system: String, msg: String)
* - Has multiple cases of errors
*
* This subsystem also watches the clients logging in and out (LogIn() and LogOut() messages)
*/
package object batchProcessor {
  // Subsystem-level messages shared between the batch-processor actors.
  // IAmFree appears to be sent when the worker for `user` has no job in
  // progress and can accept more work — TODO confirm against the worker actors.
  case class IAmFree(user: String)
}
| ChetanBhasin/Veracious | app/actors/batchProcessor/package.scala | Scala | apache-2.0 | 1,393 |
package com.sksamuel.akka.mailbox
import akka.dispatch.{MessageQueue, MailboxType}
import akka.actor.{ActorSystem, ActorRef}
import com.sksamuel.akka.patterns.{Envelope, PriorityAttribute}
import java.util.{Comparator, PriorityQueue}
import akka.dispatch
import com.typesafe.config.Config
/** @author Stephen Samuel */
/**
 * Mailbox whose queue orders envelopes by the integer value stored under
 * [[PriorityAttribute]] on the wrapped [[Envelope]] — lower values first.
 */
class PriorityMailbox(settings: ActorSystem.Settings, config: Config) extends MailboxType {

  def create(owner: Option[ActorRef], system: Option[ActorSystem]): MessageQueue = new PriorityMessageQueue

  /** A java.util.PriorityQueue exposed through Akka's MessageQueue interface. */
  class PriorityMessageQueue
      extends PriorityQueue[dispatch.Envelope](11, new EnvelopePriorityComparator)
      with MessageQueue {

    /** Drains any remaining envelopes to the dead-letter queue on shutdown. */
    def cleanUp(owner: ActorRef, deadLetters: MessageQueue): Unit =
      if (hasMessages) {
        Iterator
          .continually(dequeue())
          .takeWhile(_ ne null)
          .foreach(envelope => deadLetters.enqueue(owner, envelope))
      }

    def hasMessages: Boolean = size > 0
    def numberOfMessages: Int = size
    def dequeue(): dispatch.Envelope = poll()
    def enqueue(receiver: ActorRef, e: dispatch.Envelope): Unit = add(e)
  }

  /** Compares envelopes by their PriorityAttribute, parsed as an Int. */
  class EnvelopePriorityComparator extends Comparator[dispatch.Envelope] {

    // Extracts the numeric priority from the wrapped patterns.Envelope.
    private def priorityOf(e: dispatch.Envelope): Int =
      e.message.asInstanceOf[Envelope[_]].attributes(PriorityAttribute).toString.toInt

    def compare(o1: dispatch.Envelope, o2: dispatch.Envelope): Int =
      priorityOf(o1) compareTo priorityOf(o2)
  }
}
| stoopbrain/akka-patterns | src/main/scala/com/sksamuel/akka/mailbox/PriorityMailbox.scala | Scala | apache-2.0 | 1,527 |
/*
* Copyright 2019 ACINQ SAS
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package fr.acinq.eclair.tor
import java.net.InetSocketAddress
import org.scalatest.funsuite.AnyFunSuite
/**
* Created by PM on 27/01/2017.
*/
class Socks5ConnectionSpec extends AnyFunSuite {

  test("get proxy address") {
    val proxyAddress = new InetSocketAddress(9050)

    // Builds proxy params toggling only the address-family flags under test.
    def params(useForIPv4: Boolean, useForIPv6: Boolean, useForTor: Boolean): Socks5ProxyParams =
      Socks5ProxyParams(address = proxyAddress, credentials_opt = None, randomizeCredentials = false,
        useForIPv4 = useForIPv4, useForIPv6 = useForIPv6, useForTor = useForTor, useForWatchdogs = true)

    val ipv4Peer = new InetSocketAddress("1.2.3.4", 9735)
    val ipv6Peer = new InetSocketAddress("[fc92:97a3:e057:b290:abd8:9bd6:135d:7e7]", 9735)
    val torPeer = new InetSocketAddress("iq7zhmhck54vcax2vlrdcavq2m32wao7ekh6jyeglmnuuvv3js57r4id.onion", 9735)

    // The proxy is used only when the flag matching the peer's address family is on.
    assert(Socks5ProxyParams.proxyAddress(ipv4Peer, params(useForIPv4 = true, useForIPv6 = true, useForTor = true)).contains(proxyAddress))
    assert(Socks5ProxyParams.proxyAddress(ipv4Peer, params(useForIPv4 = false, useForIPv6 = true, useForTor = true)).isEmpty)
    assert(Socks5ProxyParams.proxyAddress(ipv6Peer, params(useForIPv4 = true, useForIPv6 = true, useForTor = true)).contains(proxyAddress))
    assert(Socks5ProxyParams.proxyAddress(ipv6Peer, params(useForIPv4 = true, useForIPv6 = false, useForTor = true)).isEmpty)
    assert(Socks5ProxyParams.proxyAddress(torPeer, params(useForIPv4 = true, useForIPv6 = true, useForTor = true)).contains(proxyAddress))
    assert(Socks5ProxyParams.proxyAddress(torPeer, params(useForIPv4 = true, useForIPv6 = true, useForTor = false)).isEmpty)
  }
}
| ACINQ/eclair | eclair-core/src/test/scala/fr/acinq/eclair/tor/Socks5ConnectionSpec.scala | Scala | apache-2.0 | 2,979 |
// Copyright: 2010 - 2016 https://github.com/ensime/ensime-server/graphs
// License: http://www.gnu.org/licenses/gpl-3.0.en.html
/*
* This file contains derivative works that require the following
* header to be displayed:
*
* Copyright 2002-2011 EPFL.
* All rights reserved.
*
* Permission to use, copy, modify, and distribute this software in
* source or binary form for any purpose with or without fee is hereby
* granted, provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the EPFL nor the names of its
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
package org.ensime.core
import java.io.File
import java.nio.charset.Charset
import scala.collection.mutable
import scala.reflect.internal.util.{ BatchSourceFile, RangePosition, SourceFile }
import scala.reflect.io.{ PlainFile, VirtualFile }
import scala.tools.nsc.Settings
import scala.tools.nsc.interactive.{ CompilerControl, Global }
import scala.tools.nsc.io.AbstractFile
import scala.tools.nsc.reporters.Reporter
import scala.tools.nsc.util._
import scala.tools.refactoring.analysis.GlobalIndexes
import akka.actor.ActorRef
import org.ensime.api._
import org.ensime.config._
import org.ensime.indexer._
import org.ensime.model._
import org.ensime.util.ensimefile._
import org.ensime.util.file._
import org.ensime.vfs._
import org.slf4j.LoggerFactory
/**
 * Thread-safe facade over the presentation compiler: the `ask*` methods hand
 * work to the compiler thread (via `ask`/`Response`) and convert failures into
 * `None`s or empty defaults instead of letting exceptions escape to callers.
 */
trait RichCompilerControl extends CompilerControl with RefactoringControl with CompletionControl with DocFinding {
  self: RichPresentationCompiler =>
  // Charset used when decoding source contents, taken from compiler settings.
  implicit def charset: Charset = Charset.forName(settings.encoding.value)
  // Runs `op` on the compiler thread; returns None (and logs) on any failure,
  // restoring the interrupt flag when the underlying wait was interrupted.
  def askOption[A](op: => A): Option[A] =
    try {
      Some(ask(() => op))
    } catch {
      case fi: FailedInterrupt =>
        fi.getCause match {
          case e: InterruptedException =>
            Thread.currentThread().interrupt()
            logger.error("interrupted exception in askOption", e)
            None
          case e =>
            logger.error("Error during askOption", e)
            None
        }
      case e: Throwable =>
        logger.error("Error during askOption", e)
        None
    }
  // Doc signature for the symbol (or failing that, the type) at a position.
  def askDocSignatureAtPoint(p: Position): Option[DocSigPair] =
    askOption {
      symbolAt(p).orElse(typeAt(p).map(_.typeSymbol)).flatMap(docSignature(_, Some(p)))
    }.flatten
  def askDocSignatureForSymbol(typeFullName: String, memberName: Option[String],
    signatureString: Option[String]): Option[DocSigPair] =
    askOption {
      val sym = symbolMemberByName(typeFullName, memberName, signatureString)
      docSignature(sym, None)
    }.flatten
  ////////////////////////////////////////////////////////////////////////////////
  // exposed for testing
  def askSymbolFqn(p: Position): Option[FullyQualifiedName] =
    askOption(symbolAt(p).map(toFqn)).flatten
  def askTypeFqn(p: Position): Option[FullyQualifiedName] =
    askOption(typeAt(p).map { tpe => toFqn(tpe.typeSymbol) }).flatten
  def askSymbolByScalaName(name: String, declaredAs: Option[DeclaredAs] = None): Option[Symbol] =
    askOption(toSymbol(name, declaredAs))
  def askSymbolByFqn(fqn: FullyQualifiedName): Option[Symbol] =
    askOption(toSymbol(fqn))
  def askSymbolAt(p: Position): Option[Symbol] =
    askOption(symbolAt(p)).flatten
  def askTypeSymbolAt(p: Position): Option[Symbol] =
    askOption(typeAt(p).map { tpe => tpe.typeSymbol }).flatten
  ////////////////////////////////////////////////////////////////////////////////
  def askSymbolInfoAt(p: Position): Option[SymbolInfo] =
    askOption(symbolAt(p).map(SymbolInfo(_))).flatten
  def askSymbolByName(fqn: String, memberName: Option[String], signatureString: Option[String]): Option[SymbolInfo] =
    askOption {
      SymbolInfo(symbolMemberByName(fqn, memberName, signatureString))
    }
  def askTypeInfoAt(p: Position): Option[TypeInfo] =
    askOption(typeAt(p).map(TypeInfo(_, PosNeededYes))).flatten
  def askTypeInfoByName(name: String): Option[TypeInfo] =
    askOption(TypeInfo(toSymbol(name).tpe, PosNeededYes))
  // Resolves a (possibly dotted) name relative to the scope at `p`: the first
  // segment is matched against scope completions, the remainder is resolved
  // member-by-member from each candidate root until one yields a real type.
  def askTypeInfoByNameAt(name: String, p: Position): Option[TypeInfo] = {
    val nameSegs = name.split("\\.")
    val firstName: String = nameSegs.head
    val x = new Response[List[Member]]()
    askScopeCompletion(p, x)
    (for (
      members <- x.get.left.toOption;
      infos <- askOption {
        val roots = filterMembersByPrefix(
          members, firstName, matchEntire = true, caseSens = true
        ).map { _.sym }
        val restOfPath = nameSegs.drop(1).mkString(".")
        val syms = roots.map { toSymbol(restOfPath, None, _) }
        syms.find(_.tpe != NoType).map { sym => TypeInfo(sym.tpe) }
      }
    ) yield infos).flatten
  }
  def askPackageByPath(path: String): Option[PackageInfo] =
    askOption(PackageInfo.fromPath(path))
  def askReloadFile(f: SourceFile): Unit = {
    askReloadFiles(List(f))
  }
  // Blocks until the reload is scheduled; Right carries any stored exception.
  def askReloadFiles(files: Iterable[SourceFile]): Either[Unit, Throwable] = {
    val x = new Response[Unit]()
    askReload(files.toList, x)
    x.get
  }
  def askLoadedTyped(f: SourceFile): Either[Tree, Throwable] = {
    val x = new Response[Tree]()
    askLoadedTyped(f, true, x)
    x.get
  }
  def askUnloadAllFiles(): Unit = askOption(unloadAllFiles())
  def askUnloadFile(f: SourceFileInfo): Unit = {
    val sourceFile = createSourceFile(f)
    askOption(unloadFile(sourceFile))
  }
  def askRemoveAllDeleted(): Option[Unit] = askOption(removeAllDeleted())
  def askRemoveDeleted(f: File) = askOption(removeDeleted(AbstractFile.getFile(f)))
  // Reloads every configured source file plus everything currently loaded.
  def askReloadAllFiles() = {
    val all = {
      for {
        file <- config.scalaSourceFiles
        source = createSourceFile(file)
      } yield source
    }.toSet ++ activeUnits().map(_.source)
    askReloadFiles(all)
  }
  def loadedFiles: List[SourceFile] = activeUnits().map(_.source)
  def askReloadExistingFiles() =
    askReloadFiles(loadedFiles)
  def askInspectTypeAt(p: Position): Option[TypeInspectInfo] =
    askOption(inspectTypeAt(p)).flatten
  def askInspectTypeByName(name: String): Option[TypeInspectInfo] =
    askOption(inspectType(toSymbol(name).tpe))
  def askCompletePackageMember(path: String, prefix: String): List[CompletionInfo] =
    askOption(completePackageMember(path, prefix)).getOrElse(List.empty)
  def askCompletionsAt(p: Position, maxResults: Int, caseSens: Boolean): CompletionInfoList =
    completionsAt(p, maxResults, caseSens)
  def askReloadAndTypeFiles(files: Iterable[SourceFile]) =
    askOption(reloadAndTypeFiles(files))
  def askUsesOfSymAtPoint(p: Position): List[RangePosition] =
    askOption(usesOfSymbolAtPoint(p).toList).getOrElse(List.empty)
  // force the full path of Set because nsc appears to have a conflicting Set....
  def askSymbolDesignationsInRegion(p: RangePosition, tpes: List[SourceSymbol]): SymbolDesignations =
    askOption(
      new SemanticHighlighting(this).symbolDesignationsInRegion(p, tpes)
    ).getOrElse(SymbolDesignations(RawFile(new File(".").toPath), List.empty))
  def askImplicitInfoInRegion(p: Position): ImplicitInfos =
    ImplicitInfos(
      askOption(
        new ImplicitAnalyzer(this).implicitDetails(p)
      ).getOrElse(List.empty)
    )
  // Requests a FullTypeCheckCompleteEvent once the compiler becomes up-to-date;
  // see RichPresentationCompiler.isOutOfDate for the delivery side.
  def askNotifyWhenReady(): Unit = ask(setNotifyWhenReady)
  // WARNING: be really careful when creating BatchSourceFiles. there
  // are multiple constructors which do weird things, best to be very
  // explicit about what we're doing and only use the primary
  // constructor. Note that scalac appears to have a bug in it whereby
  // it is unable to tell that a VirtualFile (i.e. in-memory) and a
  // non VirtualFile backed BatchSourceFile are actually referring to
  // the same compilation unit. see
  // https://github.com/ensime/ensime-server/issues/1160
  def createSourceFile(file: EnsimeFile): BatchSourceFile =
    createSourceFile(SourceFileInfo(file))
  def createSourceFile(file: File): BatchSourceFile =
    createSourceFile(EnsimeFile(file))
  def createSourceFile(path: String): BatchSourceFile =
    createSourceFile(EnsimeFile(path))
  def createSourceFile(file: AbstractFile): BatchSourceFile =
    createSourceFile(file.path)
  // Builds the BatchSourceFile from disk, an archive entry, explicit contents,
  // or a contents stream — always via the primary constructor (see WARNING).
  def createSourceFile(file: SourceFileInfo): BatchSourceFile = file match {
    case SourceFileInfo(rf @ RawFile(f), None, None) => new BatchSourceFile(
      new PlainFile(f.toFile), rf.readStringDirect().toCharArray
    )
    case SourceFileInfo(ac @ ArchiveFile(archive, entry), None, None) =>
      new BatchSourceFile(
        new VirtualFile(ac.fullPath), ac.readStringDirect().toCharArray
      )
    case SourceFileInfo(rf @ RawFile(f), Some(contents), None) =>
      new BatchSourceFile(new PlainFile(f.toFile), contents.toCharArray)
    case SourceFileInfo(ac @ ArchiveFile(a, e), Some(contents), None) => new BatchSourceFile(
      new VirtualFile(ac.fullPath), contents.toCharArray
    )
    case SourceFileInfo(rf @ RawFile(f), None, Some(contentsIn)) =>
      new BatchSourceFile(new PlainFile(f.toFile), contentsIn.readString()(charset).toCharArray)
    case SourceFileInfo(ac @ ArchiveFile(a, e), None, Some(contentsIn)) => new BatchSourceFile(
      new VirtualFile(ac.fullPath), contentsIn.readString()(charset).toCharArray
    )
  }
  def askLinkPos(sym: Symbol, path: EnsimeFile): Option[Position] =
    askOption(linkPos(sym, createSourceFile(path)))
  def askStructure(fileInfo: SourceFile): List[StructureViewMember] =
    askOption(structureView(fileInfo))
      .getOrElse(List.empty)
  // Debug helper: raw tree dump with full type/kind/mirror information.
  def askRaw(any: Any): String =
    showRaw(any, printTypes = true, printIds = false, printKinds = true, printMirrors = true)
  /**
   * Returns the smallest `Tree`, which position `properlyIncludes` `p`
   */
  def askEnclosingTreePosition(p: Position): Position =
    new PositionLocator(this).enclosingTreePosition(p)
}
class RichPresentationCompiler(
val config: EnsimeConfig,
override val settings: Settings,
val richReporter: Reporter,
val parent: ActorRef,
val indexer: ActorRef,
val search: SearchService
)(
implicit
val vfs: EnsimeVFS
) extends Global(settings, richReporter)
with ModelBuilders with RichCompilerControl
with RefactoringImpl with Completion with Helpers
with PresentationCompilerBackCompat with PositionBackCompat
with StructureViewBuilder
with SymbolToFqn
with FqnToSymbol
with TypeToScalaName {
val logger = LoggerFactory.getLogger(this.getClass)
private val symsByFile = new mutable.HashMap[AbstractFile, mutable.LinkedHashSet[Symbol]] {
override def default(k: AbstractFile) = {
val v = new mutable.LinkedHashSet[Symbol]
put(k, v)
v
}
}
def activeUnits(): List[CompilationUnit] = {
val invalidSet = toBeRemoved.synchronized { toBeRemoved.toSet }
unitOfFile.filter { kv => !invalidSet.contains(kv._1) }.values.toList
}
/** Called from typechecker every time a top-level class or object is entered.*/
override def registerTopLevelSym(sym: Symbol): Unit = {
super.registerTopLevelSym(sym)
symsByFile(sym.sourceFile) += sym
}
def unloadAllFiles(): Unit = {
allSources.foreach(removeUnitOf)
}
def unloadFile(s: SourceFile): Unit = removeUnitOf(s)
/**
* Remove symbols defined by files that no longer exist.
* Note that these symbols will not be collected by
* syncTopLevelSyms, since the units in question will
* never be reloaded again.
*/
def removeAllDeleted(): Unit = {
allSources = allSources.filter { _.file.exists }
val deleted = symsByFile.keys.filter { !_.exists }
for (f <- deleted) {
removeDeleted(f)
}
}
/** Remove symbols defined by file that no longer exist. */
def removeDeleted(f: AbstractFile): Unit = {
val syms = symsByFile(f)
for (s <- syms) {
s.owner.info.decls unlink s
}
symsByFile.remove(f)
unitOfFile.remove(f)
}
private def typePublicMembers(tpe: Type): Iterable[TypeMember] = {
val members = new mutable.LinkedHashMap[Symbol, TypeMember]
def addTypeMember(sym: Symbol, pre: Type, inherited: Boolean, viaView: Symbol): Unit = {
try {
val m = new TypeMember(
sym,
sym.tpe,
sym.isPublic,
inherited,
viaView
)
members(sym) = m
} catch {
case e: Throwable =>
logger.error("Error: Omitting member " + sym + ": " + e)
}
}
for (sym <- tpe.decls) {
addTypeMember(sym, tpe, inherited = false, NoSymbol)
}
for (sym <- tpe.members) {
addTypeMember(sym, tpe, inherited = true, NoSymbol)
}
members.values
}
protected def getMembersForTypeAt(tpe: Type, p: Position): Iterable[Member] = {
if (isNoParamArrowType(tpe)) {
typePublicMembers(typeOrArrowTypeResult(tpe))
} else {
val members: Iterable[Member] = try {
wrapTypeMembers(p)
} catch {
case e: Throwable =>
logger.error("Error retrieving type members:", e)
List.empty
}
// Remove duplicates
// Filter out synthetic things
val bySym = new mutable.LinkedHashMap[Symbol, Member]
for (m <- members ++ typePublicMembers(tpe)) {
if (!m.sym.nameString.contains("$")) {
bySym(m.sym) = m
}
}
bySym.values
}
}
protected def inspectType(tpe: Type): TypeInspectInfo = {
val parents = tpe.parents
new TypeInspectInfo(
TypeInfo(tpe, PosNeededAvail),
prepareSortedInterfaceInfo(typePublicMembers(tpe.asInstanceOf[Type]), parents)
)
}
protected def inspectTypeAt(p: Position): Option[TypeInspectInfo] = {
typeAt(p).map(tpe => {
val members = getMembersForTypeAt(tpe, p)
val parents = tpe.parents
val preparedMembers = prepareSortedInterfaceInfo(members, parents)
new TypeInspectInfo(
TypeInfo(tpe, PosNeededAvail),
preparedMembers
)
}).orElse {
logger.error("ERROR: Failed to get any type information :( ")
None
}
}
private def typeOfTree(t: Tree): Option[Type] = {
val tree = t match {
case Select(qualifier, name) if t.tpe == ErrorType =>
qualifier
case t: ImplDef if t.impl != null =>
t.impl
case t: ValOrDefDef if t.tpt != null =>
t.tpt
case t: ValOrDefDef if t.rhs != null =>
t.rhs
case otherTree =>
otherTree
}
Option(tree.tpe)
}
protected def typeAt(p: Position): Option[Type] = {
wrapTypedTreeAt(p) match {
case Import(_, _) => symbolAt(p).map(_.tpe)
case tree => typeOfTree(tree)
}
}
protected def symbolMemberByName(
name: String, member: Option[String], descriptor: Option[String]
): Symbol = {
val clazz = ClassName.fromFqn(name)
val fqn = (member, descriptor) match {
case (Some(field), None) => FieldName(clazz, field)
case (Some(method), Some(desc)) => MethodName(clazz, method, DescriptorParser.parse(desc))
case _ => clazz
}
toSymbol(fqn)
}
protected def filterMembersByPrefix(members: List[Member], prefix: String,
matchEntire: Boolean, caseSens: Boolean): List[Member] = members.filter { m =>
val prefixUpper = prefix.toUpperCase
val sym = m.sym
val ns = sym.nameString
(((matchEntire && ns == prefix) ||
(!matchEntire && caseSens && ns.startsWith(prefix)) ||
(!matchEntire && !caseSens && ns.toUpperCase.startsWith(prefixUpper)))
&& !sym.nameString.contains("$"))
}
private def noDefinitionFound(tree: Tree) = {
logger.warn("No definition found. Please report to https://github.com/ensime/ensime-server/issues/492 what you expected for " + tree.getClass + ": " + showRaw(tree))
Nil
}
protected def symbolAt(pos: Position): Option[Symbol] = {
val tree = wrapTypedTreeAt(pos)
val wannabes =
tree match {
case Import(expr, selectors) =>
if (expr.pos.includes(pos)) {
@annotation.tailrec
def locate(p: Position, inExpr: Tree): Symbol = inExpr match {
case Select(qualifier, name) =>
if (qualifier.pos.includes(p)) locate(p, qualifier)
else inExpr.symbol
case tree => tree.symbol
}
List(locate(pos, expr))
} else {
selectors.filter(_.namePos <= pos.point).sortBy(_.namePos).lastOption map { sel =>
val tpe = stabilizedType(expr)
List(tpe.member(sel.name), tpe.member(sel.name.toTypeName))
} getOrElse Nil
}
case Annotated(atp, _) =>
List(atp.symbol)
case ap @ Select(qualifier, nme.apply) =>
// If we would like to give user choice if to go to method apply or value
// like Eclipse is doing we would need to return:
// List(qualifier.symbol, ap.symbol)
List(qualifier.symbol)
case st if st.symbol ne null =>
List(st.symbol)
case lit: Literal =>
List(lit.tpe.typeSymbol)
case _ =>
noDefinitionFound(tree)
}
wannabes.find(_.exists)
}
protected def specificOwnerOfSymbolAt(pos: Position): Option[Symbol] = {
val tree = wrapTypedTreeAt(pos)
tree match {
case tree @ Select(qualifier, name) =>
qualifier match {
case t: ApplyImplicitView => t.args.headOption.map(_.tpe.typeSymbol)
case _ => Some(qualifier.tpe.typeSymbol)
}
case _ => None
}
}
protected def linkPos(sym: Symbol, source: SourceFile): Position = {
wrapLinkPos(sym, source)
}
protected def usesOfSymbolAtPoint(point: Position): Iterable[RangePosition] = {
symbolAt(point) match {
case Some(s) =>
class CompilerGlobalIndexes extends GlobalIndexes {
val global = RichPresentationCompiler.this
val sym = s.asInstanceOf[global.Symbol]
val cuIndexes = this.global.unitOfFile.values.map { u =>
CompilationUnitIndex(u.body)
}
val index = GlobalIndex(cuIndexes.toList)
val result = index.occurences(sym).map {
_.pos match {
case p: RangePosition => p
case p =>
new RangePosition(
p.source, p.point, p.point, p.point
)
}
}
}
val gi = new CompilerGlobalIndexes
gi.result
case None => List.empty
}
}
private var notifyWhenReady = false
override def isOutOfDate: Boolean = {
if (notifyWhenReady && !super.isOutOfDate) {
parent ! FullTypeCheckCompleteEvent
notifyWhenReady = false
}
super.isOutOfDate
}
protected def setNotifyWhenReady(): Unit = {
notifyWhenReady = true
}
protected def reloadAndTypeFiles(sources: Iterable[SourceFile]) = {
wrapReloadSources(sources.toList)
sources.foreach { s =>
wrapTypedTree(s, forceReload = true)
}
}
override def askShutdown(): Unit = {
super.askShutdown()
}
/*
* The following functions wrap up operations that interact with
* the presentation compiler. The wrapping just helps with the
* create response / compute / get result pattern.
*
* These units of work should return `Future[T]`.
*/
def wrap[A](compute: Response[A] => Unit, handle: Throwable => A): A = {
val result = new Response[A]
compute(result)
result.get.fold(o => o, handle)
}
  // Reload the source file containing the given position.
  def wrapReloadPosition(p: Position): Unit =
    wrapReloadSource(p.source)
  // Reload a single source file (convenience overload of wrapReloadSources).
  def wrapReloadSource(source: SourceFile): Unit =
    wrapReloadSources(List(source))
  // Reload the given sources, first cancelling any identical reloads that are
  // still queued: those are superseded by this request, so their responses
  // are completed immediately with a unit result.
  def wrapReloadSources(sources: List[SourceFile]): Unit = {
    val superseded = scheduler.dequeueAll {
      case ri: ReloadItem if ri.sources == sources => Some(ri)
      case _ => None
    }
    superseded.foreach(_.response.set(()))
    wrap[Unit](r => new ReloadItem(sources, r).apply(), _ => ())
  }
  // Ask for type-member completions at the given position; an empty list on failure.
  def wrapTypeMembers(p: Position): List[Member] =
    wrap[List[Member]](r => new AskTypeCompletionItem(p, r).apply(), _ => List.empty)
  // Type-check a whole source file, rethrowing any compiler-reported failure.
  def wrapTypedTree(source: SourceFile, forceReload: Boolean): Tree =
    wrap[Tree](r => new AskTypeItem(source, forceReload, r).apply(), t => throw t)
  // Type-check the smallest tree enclosing the position, rethrowing failures.
  def wrapTypedTreeAt(position: Position): Tree =
    wrap[Tree](r => new AskTypeAtItem(position, r).apply(), t => throw t)
  // Look up the definition position of `sym` in `source`, rethrowing failures.
  def wrapLinkPos(sym: Symbol, source: SourceFile): Position =
    wrap[Position](r => new AskLinkPosItem(sym, source, r).apply(), t => throw t)
}
| espinhogr/ensime-server | core/src/main/scala/org/ensime/core/RichPresentationCompiler.scala | Scala | gpl-3.0 | 21,629 |
/*
* Copyright © 2015 Lukas Rosenthaler, Benjamin Geer, Ivan Subotic,
* Tobias Schweizer, Sepideh Alassi, André Kilchenmann, and Sepideh Alassi.
*
* This file is part of Knora.
*
* Knora is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Knora is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public
* License along with Knora. If not, see <http://www.gnu.org/licenses/>.
*/
package org.knora.webapi.messages.v1.responder.valuemessages
import java.io.File
import java.util.UUID
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import org.knora.webapi.messages.v1.responder.resourcemessages.LocationV1
import org.knora.webapi.messages.v1.responder.sipimessages.SipiResponderConversionRequestV1
import org.knora.webapi.messages.v1.responder.standoffmessages.MappingXMLtoStandoff
import org.knora.webapi.messages.v1.responder.usermessages.UserProfileV1
import org.knora.webapi.messages.v1.responder.{KnoraRequestV1, KnoraResponseV1}
import org.knora.webapi.twirl.{StandoffTagAttributeV1, StandoffTagInternalReferenceAttributeV1, StandoffTagV1}
import org.knora.webapi.util.standoff.StandoffTagUtilV1
import org.knora.webapi.util.{DateUtilV1, InputValidation, KnoraIdUtil}
import org.knora.webapi.{BadRequestException, _}
import spray.json._
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// API requests
/**
  * Represents an API request payload that asks the Knora API server to create a new value of a resource property
  * (as opposed to a new version of an existing value). Exactly one of the value parameters must be given.
  *
  * @param res_id         the IRI of the resource in which the value is to be added.
  * @param prop           the property that is to receive the value.
  * @param richtext_value a rich-text object to be used in the value.
  * @param link_value     the IRI of the resource that is the target of a link value.
  * @param int_value      an integer literal to be used in the value.
  * @param decimal_value  a decimal literal to be used in the value.
  * @param boolean_value  a boolean literal to be used in the value.
  * @param uri_value      a URI literal to be used in the value.
  * @param date_value     a date object to be used in the value.
  * @param color_value    a colour literal to be used in the value.
  * @param geom_value     a geometry literal to be used in the value.
  * @param hlist_value    the IRI of a hierarchical list node to be used in the value.
  * @param interval_value an interval (two decimals) to be used in the value.
  * @param geoname_value  a GeoNames code to be used in the value.
  * @param comment        a comment to add to the value.
  */
case class CreateValueApiRequestV1(res_id: IRI,
                                   prop: IRI,
                                   richtext_value: Option[CreateRichtextV1] = None,
                                   link_value: Option[IRI] = None,
                                   int_value: Option[Int] = None,
                                   decimal_value: Option[BigDecimal] = None,
                                   boolean_value: Option[Boolean] = None,
                                   uri_value: Option[String] = None,
                                   date_value: Option[String] = None,
                                   color_value: Option[String] = None,
                                   geom_value: Option[String] = None,
                                   hlist_value: Option[IRI] = None,
                                   interval_value: Option[Seq[BigDecimal]] = None,
                                   geoname_value: Option[String] = None,
                                   comment: Option[String] = None) {

    // Each optionally submitted literal, paired with the IRI of the Knora value
    // class it corresponds to. The order determines the priority in getValueClassIri.
    private def submittedValueTypes: Seq[(Boolean, IRI)] = Seq(
        richtext_value.nonEmpty -> OntologyConstants.KnoraBase.TextValue,
        link_value.nonEmpty -> OntologyConstants.KnoraBase.LinkValue,
        int_value.nonEmpty -> OntologyConstants.KnoraBase.IntValue,
        decimal_value.nonEmpty -> OntologyConstants.KnoraBase.DecimalValue,
        boolean_value.nonEmpty -> OntologyConstants.KnoraBase.BooleanValue,
        uri_value.nonEmpty -> OntologyConstants.KnoraBase.UriValue,
        date_value.nonEmpty -> OntologyConstants.KnoraBase.DateValue,
        color_value.nonEmpty -> OntologyConstants.KnoraBase.ColorValue,
        geom_value.nonEmpty -> OntologyConstants.KnoraBase.GeomValue,
        hlist_value.nonEmpty -> OntologyConstants.KnoraBase.ListValue,
        interval_value.nonEmpty -> OntologyConstants.KnoraBase.IntervalValue,
        geoname_value.nonEmpty -> OntologyConstants.KnoraBase.GeonameValue
    )

    // It is an error to submit more than one value type in a single request.
    if (submittedValueTypes.count(_._1) > 1) {
        throw BadRequestException(s"Different value types were submitted for property $prop")
    }

    /**
      * Returns the type of the given value.
      *
      * @return a value type IRI.
      */
    def getValueClassIri: IRI = submittedValueTypes.collectFirst {
        case (true, valueClassIri) => valueClassIri
    }.getOrElse(throw BadRequestException("No value specified"))
}
/**
  * Represents a richtext object consisting of text, text attributes and resource references.
  * Either `utf8str` (plain text) or `xml` together with `mapping_id` (marked-up text) is expected.
  *
  * @param utf8str a mere string in case of a text without any markup.
  * @param xml xml in case of a text with markup.
  * @param mapping_id Iri of the mapping used to transform XML to standoff.
  */
case class CreateRichtextV1(utf8str: Option[String] = None,
                            xml: Option[String] = None,
                            mapping_id: Option[IRI] = None) {
    // Serialises this object using [[ApiValueV1JsonProtocol]].
    def toJsValue = ApiValueV1JsonProtocol.createRichtextV1Format.write(this)
}
/**
  * Represents a file value to be added to a Knora resource.
  *
  * @param originalFilename the original name of the file.
  * @param originalMimeType the original mime type of the file.
  * @param filename the name of the file to be attached to a Knora-resource (file is temporarily stored by SIPI).
  */
case class CreateFileV1(originalFilename: String,
                        originalMimeType: String,
                        filename: String) {
    // Serialises this object using [[ApiValueV1JsonProtocol]].
    def toJsValue = ApiValueV1JsonProtocol.createFileV1Format.write(this)
}
/**
  * Represents a file on disk to be added to a Knora resource in the context of a bulk import.
  *
  * @param file the file.
  * @param mimeType the file's MIME type.
  */
case class ReadFileV1(file: File, mimeType: String)
/**
  * Represents a quality level of a file value to be added to a Knora resource.
  *
  * @param path the path to the file.
  * @param mimeType the mime type of the file.
  * @param dimX the x dimension of the file, if given (e.g. an image).
  * @param dimY the y dimension of the file, if given (e.g. an image).
  */
case class CreateFileQualityLevelV1(path: String,
                                    mimeType: String,
                                    dimX: Option[Int] = None,
                                    dimY: Option[Int] = None) {
    // Serialises this object using [[ApiValueV1JsonProtocol]].
    def toJsValue = ApiValueV1JsonProtocol.createFileQualityLevelFormat.write(this)
}
/**
  * Represents an API request payload that asks the Knora API server to change a value of a resource property (i.e. to
  * update its version history). At most one of the value parameters may be given.
  *
  * @param richtext_value a rich-text object to be used in the value.
  * @param link_value     the IRI of the resource that is the target of a link value.
  * @param int_value      an integer literal to be used in the value.
  * @param decimal_value  a decimal literal to be used in the value.
  * @param boolean_value  a boolean literal to be used in the value.
  * @param uri_value      a URI literal to be used in the value.
  * @param date_value     a date object to be used in the value.
  * @param color_value    a colour literal to be used in the value.
  * @param geom_value     a geometry literal to be used in the value.
  * @param hlist_value    the IRI of a hierarchical list node to be used in the value.
  * @param interval_value an interval (two decimals) to be used in the value.
  * @param geoname_value  a GeoNames code to be used in the value.
  * @param comment        a comment to add to the value.
  */
case class ChangeValueApiRequestV1(richtext_value: Option[CreateRichtextV1] = None,
                                   link_value: Option[IRI] = None,
                                   int_value: Option[Int] = None,
                                   decimal_value: Option[BigDecimal] = None,
                                   boolean_value: Option[Boolean] = None,
                                   uri_value: Option[String] = None,
                                   date_value: Option[String] = None,
                                   color_value: Option[String] = None,
                                   geom_value: Option[String] = None,
                                   hlist_value: Option[IRI] = None,
                                   interval_value: Option[Seq[BigDecimal]] = None,
                                   geoname_value: Option[String] = None,
                                   comment: Option[String] = None) {

    // Reject requests that submit more than one value type. This implements the
    // former TODO in getValueClassIri and mirrors the equivalent check performed
    // by CreateValueApiRequestV1.
    if (List(
        richtext_value,
        link_value,
        int_value,
        decimal_value,
        boolean_value,
        uri_value,
        date_value,
        color_value,
        geom_value,
        hlist_value,
        interval_value,
        geoname_value).flatten.size > 1) {
        throw BadRequestException("Different value types were submitted")
    }

    /**
      * Returns the type of the given value.
      *
      * @return a value type IRI.
      */
    def getValueClassIri: IRI = {
        if (richtext_value.nonEmpty) OntologyConstants.KnoraBase.TextValue
        else if (link_value.nonEmpty) OntologyConstants.KnoraBase.LinkValue
        else if (int_value.nonEmpty) OntologyConstants.KnoraBase.IntValue
        else if (decimal_value.nonEmpty) OntologyConstants.KnoraBase.DecimalValue
        else if (boolean_value.nonEmpty) OntologyConstants.KnoraBase.BooleanValue
        else if (uri_value.nonEmpty) OntologyConstants.KnoraBase.UriValue
        else if (date_value.nonEmpty) OntologyConstants.KnoraBase.DateValue
        else if (color_value.nonEmpty) OntologyConstants.KnoraBase.ColorValue
        else if (geom_value.nonEmpty) OntologyConstants.KnoraBase.GeomValue
        else if (hlist_value.nonEmpty) OntologyConstants.KnoraBase.ListValue
        else if (interval_value.nonEmpty) OntologyConstants.KnoraBase.IntervalValue
        else if (geoname_value.nonEmpty) OntologyConstants.KnoraBase.GeonameValue
        else throw BadRequestException("No value specified")
    }
}
/**
  * Represents an API request payload that asks the Knora API server to change the file attached to a resource
  * (i.e. to create a new version of its file values).
  *
  * @param file the new file to be attached to the resource (GUI-case).
  */
case class ChangeFileValueApiRequestV1(file: CreateFileV1) {
    // Serialises this object using [[ApiValueV1JsonProtocol]].
    def toJsValue = ApiValueV1JsonProtocol.changeFileValueApiRequestV1Format.write(this)
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Messages
/**
  * An abstract trait representing a message that can be sent to [[org.knora.webapi.responders.v1.ValuesResponderV1]].
  * All value-related request messages in this file extend this trait.
  */
sealed trait ValuesResponderRequestV1 extends KnoraRequestV1
/**
  * Represents a request for a (current) value. A successful response will be a [[ValueGetResponseV1]].
  *
  * @param valueIri the IRI of the value requested.
  * @param userProfile the profile of the user making the request.
  */
case class ValueGetRequestV1(valueIri: IRI, userProfile: UserProfileV1) extends ValuesResponderRequestV1
/**
  * Represents a request for the details of a reification node describing a direct link between two resources.
  * A successful response will be a [[ValueGetResponseV1]] containing a [[LinkValueV1]].
  *
  * @param subjectIri the IRI of the resource that is the source of the link.
  * @param predicateIri the IRI of the property that links the two resources.
  * @param objectIri the IRI of the resource that is the target of the link.
  * @param userProfile the profile of the user making the request.
  */
case class LinkValueGetRequestV1(subjectIri: IRI, predicateIri: IRI, objectIri: IRI, userProfile: UserProfileV1) extends ValuesResponderRequestV1
/**
  * Provides details of a Knora value, in response to a [[ValueGetRequestV1]] or [[LinkValueGetRequestV1]].
  *
  * @param valuetype the IRI of the value's type.
  * @param value the single requested value.
  * @param valuecreator the username of the user who created the value.
  * @param valuecreatorname the name of the user who created the value.
  * @param valuecreationdate the date when the value was created.
  * @param comment the comment on the value, if any.
  * @param rights the user's permission on the value.
  */
case class ValueGetResponseV1(valuetype: IRI,
                              value: ApiValueV1,
                              valuecreator: String,
                              valuecreatorname: String,
                              valuecreationdate: String,
                              comment: Option[String] = None,
                              rights: Int) extends KnoraResponseV1 {
    // Serialises this object using [[ApiValueV1JsonProtocol]].
    def toJsValue = ApiValueV1JsonProtocol.valueGetResponseV1Format.write(this)
}
/**
  * Represents a request for the version history of a value. A successful response will be a [[ValueVersionHistoryGetResponseV1]].
  *
  * @param resourceIri the IRI of the resource that the value belongs to.
  * @param propertyIri the IRI of the property that points to the value.
  * @param currentValueIri the IRI of the current version of the value.
  * @param userProfile the profile of the user making the request.
  */
case class ValueVersionHistoryGetRequestV1(resourceIri: IRI,
                                           propertyIri: IRI,
                                           currentValueIri: IRI,
                                           userProfile: UserProfileV1) extends ValuesResponderRequestV1
/**
  * Provides the version history of a value.
  *
  * @param valueVersions a list of the versions of the value, from newest to oldest.
  */
case class ValueVersionHistoryGetResponseV1(valueVersions: Seq[ValueVersionV1]) extends KnoraResponseV1 {
    // Serialises this object using [[ApiValueV1JsonProtocol]].
    def toJsValue = ApiValueV1JsonProtocol.valueVersionHistoryGetResponseV1Format.write(this)
}
/**
  * Represents a request to add a new value of a resource property (as opposed to a new version of an existing value). A
  * successful response will be an [[CreateValueResponseV1]].
  *
  * @param resourceIndex the index of the resource.
  * @param resourceIri the IRI of the resource to which the value should be added.
  * @param propertyIri the IRI of the property that should receive the value.
  * @param value the value to be added.
  * @param comment an optional comment on the value.
  * @param userProfile the profile of the user making the request.
  * @param apiRequestID the ID of this API request.
  */
case class CreateValueRequestV1(resourceIndex: Int = 0,
                                resourceIri: IRI,
                                propertyIri: IRI,
                                value: UpdateValueV1,
                                comment: Option[String] = None,
                                userProfile: UserProfileV1,
                                apiRequestID: UUID) extends ValuesResponderRequestV1
/**
  * Represents a response to a [[CreateValueRequestV1]].
  *
  * @param value the value that was added.
  * @param comment an optional comment on the value.
  * @param id the IRI of the value that was added.
  * @param rights a code representing the requesting user's permissions on the value.
  */
case class CreateValueResponseV1(value: ApiValueV1,
                                 comment: Option[String] = None,
                                 id: IRI,
                                 rights: Int) extends KnoraResponseV1 {
    // Serialises this object using [[ApiValueV1JsonProtocol]].
    def toJsValue = ApiValueV1JsonProtocol.createValueResponseV1Format.write(this)
}
/**
  * Represents a value that should have been created using the SPARQL returned in a
  * [[GenerateSparqlToCreateMultipleValuesResponseV1]]. To verify that the value was in fact created, send a
  * [[VerifyMultipleValueCreationRequestV1]].
  *
  * @param newValueIri the IRI of the value that should have been created.
  * @param value an [[UpdateValueV1]] representing the value that should have been created.
  */
case class UnverifiedValueV1(newValueIri: IRI, value: UpdateValueV1)
/**
  * Requests verification that new values were created.
  *
  * @param resourceIri the IRI of the resource in which the values should have been created.
  * @param unverifiedValues a [[Map]] of property IRIs to [[UnverifiedValueV1]] objects
  *                         describing the values that should have been created for each property.
  * @param userProfile the profile of the user making the request.
  */
case class VerifyMultipleValueCreationRequestV1(resourceIri: IRI,
                                                unverifiedValues: Map[IRI, Seq[UnverifiedValueV1]],
                                                userProfile: UserProfileV1) extends ValuesResponderRequestV1
/**
  * In response to a [[VerifyMultipleValueCreationRequestV1]], indicates that all requested values were
  * created successfully.
  *
  * @param verifiedValues information about the values that were created, grouped by property IRI.
  */
case class VerifyMultipleValueCreationResponseV1(verifiedValues: Map[IRI, Seq[CreateValueResponseV1]])
/**
  * A holder for an [[UpdateValueV1]] along with an optional comment.
  *
  * @param updateValueV1 the [[UpdateValueV1]].
  * @param comment an optional comment on the value.
  */
case class CreateValueV1WithComment(updateValueV1: UpdateValueV1, comment: Option[String] = None)
/**
  * Requests SPARQL for creating multiple values in a new, empty resource. The resource ''must'' be a new, empty
  * resource, i.e. it must have no values. This message is used only internally by Knora, and is not part of the Knora
  * v1 API. All pre-update checks must already have been performed before this message is sent. Specifically, the
  * sender must ensure that:
  *
  * - The requesting user has permission to add values to the resource.
  * - Each submitted value is consistent with the `knora-base:objectClassConstraint` of the property that is supposed
  *   to point to it.
  * - The resource class has a suitable cardinality for each submitted value.
  * - All required values are provided.
  *
  * In the collection of values to be created, standoff links in text values are allowed to point either to the IRIs
  * of resources that already exist in the triplestore, or to the client's IDs for resources that are being created
  * as part of a bulk import. If client resource IDs are used in standoff links, `clientResourceIDsToResourceIris`
  * must map those IDs to the real IRIs of the resources that are to be created.
  *
  * @param projectIri the project the values belong to.
  * @param resourceIri the resource the values will be attached to.
  * @param resourceClassIri the IRI of the resource's OWL class.
  * @param resourceIndex the index of the resource to be created.
  * @param values the values to be added, with optional comments.
  * @param clientResourceIDsToResourceIris a map of client resource IDs (which may appear in standoff link tags
  *                                        in values) to the IRIs that will be used for those resources.
  * @param userProfile the user that is creating the values.
  * @param apiRequestID the ID of this API request.
  */
case class GenerateSparqlToCreateMultipleValuesRequestV1(projectIri: IRI,
                                                         resourceIri: IRI,
                                                         resourceClassIri: IRI,
                                                         resourceIndex: Int,
                                                         values: Map[IRI, Seq[CreateValueV1WithComment]],
                                                         clientResourceIDsToResourceIris: Map[String, IRI],
                                                         userProfile: UserProfileV1,
                                                         apiRequestID: UUID) extends ValuesResponderRequestV1
/**
  * Represents a response to a [[GenerateSparqlToCreateMultipleValuesRequestV1]], providing strings that can be included
  * in the `WHERE` and `INSERT` clauses of a SPARQL update operation to create the requested values. The `WHERE` clause must
  * also bind the following SPARQL variables:
  *
  * - `?resource`: the IRI of the resource in which the values are being created.
  * - `?resourceClass`: the IRI of the OWL class of that resource.
  * - `?currentTime`: the return value of the SPARQL function `NOW()`.
  *
  * After executing the SPARQL update, the receiver can check whether the values were actually created by sending a
  * [[VerifyMultipleValueCreationRequestV1]].
  *
  * @param whereSparql a string containing statements that must be inserted into the WHERE clause of the SPARQL
  *                    update that will create the values.
  * @param insertSparql a string containing statements that must be inserted into the INSERT clause of the SPARQL
  *                     update that will create the values.
  * @param unverifiedValues a map of property IRIs to [[UnverifiedValueV1]] objects describing
  *                         the values that should have been created.
  */
case class GenerateSparqlToCreateMultipleValuesResponseV1(whereSparql: String,
                                                          insertSparql: String,
                                                          unverifiedValues: Map[IRI, Seq[UnverifiedValueV1]])
/**
  * Represents a request to change the value of a property (by updating its version history). A successful response will
  * be a [[ChangeValueResponseV1]].
  *
  * @param valueIri the IRI of the current value.
  * @param value the new value, or [[None]] if only the value's comment is being changed.
  * @param comment an optional comment on the value.
  * @param userProfile the profile of the user making the request.
  * @param apiRequestID the ID of this API request.
  */
case class ChangeValueRequestV1(valueIri: IRI,
                                value: UpdateValueV1,
                                comment: Option[String] = None,
                                userProfile: UserProfileV1,
                                apiRequestID: UUID) extends ValuesResponderRequestV1
/**
  * Represents a request to change the comment on a value. A successful response will be a [[ChangeValueResponseV1]].
  *
  * @param valueIri the IRI of the current value.
  * @param comment the comment to be added to the new version of the value.
  * @param userProfile the profile of the user making the request.
  * @param apiRequestID the ID of this API request.
  */
case class ChangeCommentRequestV1(valueIri: IRI,
                                  comment: Option[String],
                                  userProfile: UserProfileV1,
                                  apiRequestID: UUID) extends ValuesResponderRequestV1
/**
  * Represents a response to an [[ChangeValueRequestV1]].
  *
  * @param value the value that was added.
  * @param comment an optional comment on the value.
  * @param id the IRI of the value that was added.
  * @param rights a code representing the requesting user's permissions on the value.
  */
case class ChangeValueResponseV1(value: ApiValueV1,
                                 comment: Option[String] = None,
                                 id: IRI,
                                 rights: Int) extends KnoraResponseV1 {
    // Serialises this object using [[ApiValueV1JsonProtocol]].
    def toJsValue = ApiValueV1JsonProtocol.changeValueResponseV1Format.write(this)
}
/**
  * Represents a request to mark a value as deleted.
  *
  * @param valueIri the IRI of the value to be marked as deleted.
  * @param deleteComment an optional comment explaining why the value is being deleted.
  * @param userProfile the profile of the user making the request.
  * @param apiRequestID the ID of this API request.
  */
case class DeleteValueRequestV1(valueIri: IRI,
                                deleteComment: Option[String] = None,
                                userProfile: UserProfileV1,
                                apiRequestID: UUID) extends ValuesResponderRequestV1
/**
  * Represents a response to a [[DeleteValueRequestV1]].
  *
  * @param id the IRI of the value that was marked as deleted. If this was a `LinkValue`, a new version of it
  *           will have been created, and `id` will be the IRI of that new version. Otherwise, `id` will be the IRI
  *           submitted in the [[DeleteValueRequestV1]]. For an explanation of this behaviour, see the chapter
  *           ''Triplestore Updates'' in the Knora API server design documentation.
  */
case class DeleteValueResponseV1(id: IRI) extends KnoraResponseV1 {
    // Serialises this object using [[ApiValueV1JsonProtocol]].
    def toJsValue = ApiValueV1JsonProtocol.deleteValueResponseV1Format.write(this)
}
/**
  * Represents a request to change (update) the file value(s) of a given resource.
  * In case of an image, two file values have to be changed: thumbnail and full quality.
  *
  * @param resourceIri the resource whose file value(s) should be changed.
  * @param file the file to be created and added.
  * @param apiRequestID the ID of this API request.
  * @param userProfile the profile of the user making the request.
  */
case class ChangeFileValueRequestV1(resourceIri: IRI, file: SipiResponderConversionRequestV1, apiRequestID: UUID, userProfile: UserProfileV1) extends ValuesResponderRequestV1
/**
  * Represents a response to a [[ChangeFileValueRequestV1]].
  * Possibly, two file values have been changed (thumb and full quality).
  *
  * @param locations the updated file value(s).
  */
case class ChangeFileValueResponseV1(locations: Vector[LocationV1]) extends KnoraResponseV1 {
    // Serialises this object using [[ApiValueV1JsonProtocol]].
    def toJsValue = ApiValueV1JsonProtocol.changeFileValueresponseV1Format.write(this)
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Components of messages
/**
  * The value of a Knora property, either as represented internally by Knora or as returned to clients in
  * Knora API v1.
  */
sealed trait ValueV1 {

    /**
      * The IRI of the Knora value type corresponding to the type of this `ValueV1`.
      */
    def valueTypeIri: IRI
}
/**
  * The value of a Knora property as represented to clients in Knora API v1. An [[ApiValueV1]] can be serialised as
  * JSON for use in the API.
  */
sealed trait ApiValueV1 extends ValueV1 with Jsonable
/**
  * The value of a Knora property as represented in an update request.
  */
sealed trait UpdateValueV1 extends ValueV1 {

    /**
      * Returns `true` if creating this [[UpdateValueV1]] as a new value would duplicate the specified other value.
      * This means that if resource `R` has property `P` with value `V1`, and `V1` is a duplicate of `V2`, the API server
      * should not add another instance of property `P` with value `V2`. It does not necessarily mean that `V1 == V2`.
      *
      * @param other another [[ValueV1]].
      * @return `true` if `other` is a duplicate of `this`.
      */
    def isDuplicateOfOtherValue(other: ApiValueV1): Boolean

    /**
      * Returns `true` if this [[UpdateValueV1]] would be redundant as a new version of an existing value. This means
      * that if resource `R` has property `P` with value `V1`, and `V2` is redundant given `V1`, we should not add `V2`
      * as a new version of `V1`. It does not necessarily mean that `V1 == V2`.
      *
      * @param currentVersion the current version of the value.
      * @return `true` if this [[UpdateValueV1]] is redundant given `currentVersion`.
      */
    def isRedundant(currentVersion: ApiValueV1): Boolean
}
/**
  * Represents a Knora API v1 property value object and some associated information.
  *
  * @param valueObjectIri the IRI of the value object.
  * @param valueV1 a [[ApiValueV1]] containing the object's literal value.
  * @param valuePermission a code representing the user's permission on the value, if any.
  * @param comment an optional comment on the value.
  * @param order the order of the value among the values of the property (defaults to 0).
  */
case class ValueObjectV1(valueObjectIri: IRI,
                         valueV1: ApiValueV1,
                         valuePermission: Option[Int] = None,
                         comment: Option[String] = None,
                         order: Int = 0)
/**
  * An enumeration of the types of calendars Knora supports. Note: do not use the `withName` method to get instances
  * of the values of this enumeration; use `lookup` instead, because it reports errors better.
  */
object KnoraCalendarV1 extends Enumeration {
    val JULIAN = Value(0, "JULIAN")
    val GREGORIAN = Value(1, "GREGORIAN")
    val JEWISH = Value(2, "JEWISH")
    val REVOLUTIONARY = Value(3, "REVOLUTIONARY")

    // Index of all enumeration values by their string name, used by `lookup`.
    val valueMap: Map[String, Value] = values.map(v => v.toString -> v).toMap

    /**
      * Given the name of a value in this enumeration, returns the value. If the value is not found, throws an
      * [[InconsistentTriplestoreDataException]].
      *
      * @param name the name of the value.
      * @return the requested value.
      */
    def lookup(name: String): Value =
        valueMap.getOrElse(name, throw InconsistentTriplestoreDataException(s"Calendar type not supported: $name"))
}
/**
  * An enumeration of the types of calendar precisions Knora supports. Note: do not use the `withName` method to get instances
  * of the values of this enumeration; use `lookup` instead, because it reports errors better.
  */
object KnoraPrecisionV1 extends Enumeration {
    val DAY = Value(0, "DAY")
    val MONTH = Value(1, "MONTH")
    val YEAR = Value(2, "YEAR")

    // Index of all enumeration values by their string name, used by `lookup`.
    val valueMap: Map[String, Value] = values.map(v => v.toString -> v).toMap

    /**
      * Given the name of a value in this enumeration, returns the value. If the value is not found, throws an
      * [[InconsistentTriplestoreDataException]].
      *
      * @param name the name of the value.
      * @return the requested value.
      */
    def lookup(name: String): Value =
        valueMap.getOrElse(name, throw InconsistentTriplestoreDataException(s"Calendar precision not supported: $name"))
}
/**
  * Represents a [[StandoffTagV1]] for a standoff tag of a certain type (standoff tag class) that is about to be created in the triplestore.
  *
  * @param standoffNode the standoff node to be created.
  * @param standoffTagInstanceIri the standoff node's Iri.
  * @param startParentIri the IRI of the parent of the start tag.
  * @param endParentIri the IRI of the parent of the end tag, if any.
  */
case class CreateStandoffTagV1InTriplestore(standoffNode: StandoffTagV1, standoffTagInstanceIri: IRI, startParentIri: Option[IRI] = None, endParentIri: Option[IRI] = None)
// Common interface of text values: every text value exposes its plain UTF-8 representation.
sealed trait TextValueV1 {
    def utf8str: String
}
/**
* Represents a textual value with additional information in standoff format.
*
* @param utf8str text in mere utf8 representation (including newlines and carriage returns).
* @param standoff attributes of the text in standoff format. For each attribute, several ranges may be given (a list of [[StandoffTagV1]]).
* @param resource_reference referred Knora resources.
* @param mapping the mapping used to create standoff from another format.
*/
case class TextValueWithStandoffV1(utf8str: String,
standoff: Seq[StandoffTagV1],
resource_reference: Set[IRI] = Set.empty[IRI],
mappingIri: IRI,
mapping: MappingXMLtoStandoff) extends TextValueV1 with UpdateValueV1 with ApiValueV1 {
val knoraIdUtil = new KnoraIdUtil
def valueTypeIri = OntologyConstants.KnoraBase.TextValue
def toJsValue = {
// TODO: depending on the given mapping, decide how serialize the text with standoff markup
val xml = StandoffTagUtilV1.convertStandoffTagV1ToXML(utf8str, standoff, mapping)
JsObject(
"xml" -> JsString(xml),
"mapping_id" -> JsString(mappingIri)
)
}
/**
* A convenience method that creates an IRI for each [[StandoffTagV1]] and resolves internal references to standoff node Iris.
*
* @return a list of [[CreateStandoffTagV1InTriplestore]] each representing a [[StandoffTagV1]] object
* along with is standoff tag class and IRI that is going to identify it in the triplestore.
*/
def prepareForSparqlInsert(valueIri: IRI): Seq[CreateStandoffTagV1InTriplestore] = {
// create an Iri for each standoff tag
// internal references to XML ids are not resolved yet
val standoffTagsWithOriginalXMLIDs: Seq[CreateStandoffTagV1InTriplestore] = standoff.map {
case (standoffNode: StandoffTagV1) =>
CreateStandoffTagV1InTriplestore(
standoffNode = standoffNode,
standoffTagInstanceIri = knoraIdUtil.makeRandomStandoffTagIri(valueIri) // generate IRI for new standoff node
)
}
// collect all the standoff tags that contain XML ids and
// map the XML ids to standoff node Iris
val iDsToStandoffNodeIris: Map[IRI, IRI] = standoffTagsWithOriginalXMLIDs.filter {
(standoffTag: CreateStandoffTagV1InTriplestore) =>
// filter those tags out that have an XML id
standoffTag.standoffNode.originalXMLID.isDefined
}.map {
(standoffTagWithID: CreateStandoffTagV1InTriplestore) =>
// return the XML id as a key and the standoff Iri as the value
standoffTagWithID.standoffNode.originalXMLID.get -> standoffTagWithID.standoffTagInstanceIri
}.toMap
// Map the start index of each tag to its IRI, so we can resolve references to parent tags as references to
// tag IRIs. We only care about start indexes here, because only hierarchical tags can be parents, and
// hierarchical tags don't have end indexes.
val startIndexesToStandoffNodeIris: Map[Int, IRI] = standoffTagsWithOriginalXMLIDs.map {
tagWithIndex => tagWithIndex.standoffNode.startIndex -> tagWithIndex.standoffTagInstanceIri
}.toMap
// resolve the original XML ids to standoff Iris every the `StandoffTagInternalReferenceAttributeV1`
val standoffTagsWithNodeReferences: Seq[CreateStandoffTagV1InTriplestore] = standoffTagsWithOriginalXMLIDs.map {
(standoffTag: CreateStandoffTagV1InTriplestore) =>
// resolve original XML ids to standoff node Iris for `StandoffTagInternalReferenceAttributeV1`
val attributesWithStandoffNodeIriReferences: Seq[StandoffTagAttributeV1] = standoffTag.standoffNode.attributes.map {
(attributeWithOriginalXMLID: StandoffTagAttributeV1) =>
attributeWithOriginalXMLID match {
case refAttr: StandoffTagInternalReferenceAttributeV1 =>
// resolve the XML id to the corresponding standoff node Iri
refAttr.copy(value = iDsToStandoffNodeIris(refAttr.value))
case attr => attr
}
}
val startParentIndex: Option[Int] = standoffTag.standoffNode.startParentIndex
val endParentIndex: Option[Int] = standoffTag.standoffNode.endParentIndex
// return standoff tag with updated attributes
standoffTag.copy(
standoffNode = standoffTag.standoffNode.copy(attributes = attributesWithStandoffNodeIriReferences),
startParentIri = startParentIndex.map(parentIndex => startIndexesToStandoffNodeIris(parentIndex)), // If there's a start parent index, get its IRI, otherwise None
endParentIri = endParentIndex.map(parentIndex => startIndexesToStandoffNodeIris(parentIndex)) // If there's an end parent index, get its IRI, otherwise None
)
}
standoffTagsWithNodeReferences
}
/**
  * Returns `true` if the specified object is a [[TextValueV1]] and has the same `utf8str` as this one. We
  * assume that it doesn't make sense for a resource to have two different text values associated with the
  * same property, containing the same text but different markup.
  *
  * @param other another [[ValueV1]].
  * @return `true` if `other` is a duplicate of `this`.
  */
override def isDuplicateOfOtherValue(other: ApiValueV1): Boolean = other match {
    case otherText: TextValueV1 =>
        // Encode this value's text the same way the triplestore stores it before comparing,
        // because the stored string contains escaped sequences while ours does not.
        val encodedUtf8str = InputValidation.toSparqlEncodedString(utf8str, () => throw InvalidStandoffException(s"Could not unescape utf8str $utf8str"), true)
        otherText.utf8str == encodedUtf8str
    case otherValue => throw InconsistentTriplestoreDataException(s"Cannot compare a $valueTypeIri to a ${otherValue.valueTypeIri}")
}
// The string representation of a text value is its plain-text content, without markup.
override def toString = utf8str
/**
  * It's OK to add a new version of a text value as long as something has been changed in it, even if it's only the markup.
  *
  * @param currentVersion the current version of the value.
  * @return `true` if this [[UpdateValueV1]] is redundant given `currentVersion`.
  */
override def isRedundant(currentVersion: ApiValueV1): Boolean = currentVersion match {
    case _: TextValueSimpleV1 => false
    case current: TextValueWithStandoffV1 =>
        // Encode this value's text before comparing, since the stored version contains escaped
        // sequences while ours does not.
        val encodedUtf8str = InputValidation.toSparqlEncodedString(utf8str, () => throw InvalidStandoffException(s"Could not unescape utf8str $utf8str"), true)
        val utf8strIdentical = current.utf8str == encodedUtf8str

        // Sort both standoff sequences before comparing, because their order is irrelevant.
        val currentSorted = current.standoff.sortBy(standoffNode => (standoffNode.standoffTagClassIri, standoffNode.startPosition))
        val thisSorted = this.standoff.sortBy(standoffNode => (standoffNode.standoffTagClassIri, standoffNode.startPosition))
        val standoffIdentical = currentSorted == thisSorted

        // TODO: at the moment, the UUID is created randomly for every new standoff tag. This means that this method always returns false.
        utf8strIdentical && standoffIdentical && current.mappingIri == this.mappingIri
    case other => throw InconsistentTriplestoreDataException(s"Cannot compare a $valueTypeIri to a ${other.valueTypeIri}")
}
}
/**
 * Represents a text value without any standoff markup.
 *
 * @param utf8str the text of the value.
 */
case class TextValueSimpleV1(utf8str: String) extends TextValueV1 with UpdateValueV1 with ApiValueV1 {

    def valueTypeIri = OntologyConstants.KnoraBase.TextValue

    def toJsValue = JsObject("utf8str" -> JsString(utf8str))

    // The string representation is simply the text itself.
    override def toString = utf8str

    /**
     * Returns `true` if the specified object is a [[TextValueV1]] and has the same `utf8str` as this one. We
     * assume that it doesn't make sense for a resource to have two different text values associated with the
     * same property, containing the same text but different markup.
     *
     * @param other another [[ValueV1]].
     * @return `true` if `other` is a duplicate of `this`.
     */
    override def isDuplicateOfOtherValue(other: ApiValueV1): Boolean = other match {
        case otherText: TextValueV1 => otherText.utf8str == utf8str
        case otherValue => throw InconsistentTriplestoreDataException(s"Cannot compare a $valueTypeIri to a ${otherValue.valueTypeIri}")
    }

    /**
     * It's OK to add a new version of a text value as long as something has been changed in it, even if it's only the markup.
     *
     * @param currentVersion the current version of the value.
     * @return `true` if this [[UpdateValueV1]] is redundant given `currentVersion`.
     */
    override def isRedundant(currentVersion: ApiValueV1): Boolean = currentVersion match {
        case that: TextValueSimpleV1 => that == this
        case _: TextValueWithStandoffV1 => false
        case other => throw InconsistentTriplestoreDataException(s"Cannot compare a $valueTypeIri to a ${other.valueTypeIri}")
    }
}
/**
 * Represents a direct link from one resource to another.
 *
 * @param targetResourceIri       the IRI of the resource that the link points to.
 * @param valueLabel              the `rdfs:label` of the resource referred to.
 * @param valueResourceClass      the IRI of the OWL class of the resource that the link points to.
 * @param valueResourceClassLabel the label of the OWL class of the resource that the link points to.
 * @param valueResourceClassIcon  the icon of the OWL class of the resource that the link points to.
 */
case class LinkV1(targetResourceIri: IRI,
                  valueLabel: Option[String] = None,
                  valueResourceClass: Option[IRI] = None,
                  valueResourceClassLabel: Option[String] = None,
                  valueResourceClassIcon: Option[String] = None) extends ApiValueV1 {

    def valueTypeIri = OntologyConstants.KnoraBase.LinkValue

    // Both the string and JSON representations of a link are the target resource's IRI.
    def toJsValue = JsString(targetResourceIri)

    override def toString = targetResourceIri
}
/**
 * Represents a `knora-base:LinkValue`, i.e. a reification of a link between two resources.
 *
 * @param subjectIri     the IRI of the resource that is the source of the link.
 * @param predicateIri   the IRI of the property that links the two resources.
 * @param objectIri      the IRI of the resource that is the target of the link.
 * @param referenceCount the reference count of the `LinkValue`. If the link property is `knora-base:hasStandoffLinkTo`,
 *                       the reference count can be any integer greater than or equal to 0. Otherwise, the reference
 *                       count can only be 0 or 1.
 */
case class LinkValueV1(subjectIri: IRI,
                       predicateIri: IRI,
                       objectIri: IRI,
                       referenceCount: Int) extends ApiValueV1 {

    // The reification is itself typed as knora-base:LinkValue.
    def valueTypeIri = OntologyConstants.KnoraBase.LinkValue

    // Serialized via the dedicated spray-json format so that all four fields appear in the JSON.
    override def toJsValue = ApiValueV1JsonProtocol.linkValueV1Format.write(this)
}
/**
 * Represents a request to update a link.
 *
 * @param targetResourceIri the IRI of the resource that the link should point to.
 * @param targetExists      `true` if the link target already exists, `false` if it is going to be created in the
 *                          same transaction.
 */
case class LinkUpdateV1(targetResourceIri: IRI, targetExists: Boolean = true) extends UpdateValueV1 {

    def valueTypeIri = OntologyConstants.KnoraBase.LinkValue

    override def toString = targetResourceIri

    /**
     * It doesn't make sense to add a link to a resource when we already have a link to the same resource.
     *
     * @param other another [[ValueV1]].
     * @return `true` if `other` is a duplicate of `this`.
     */
    override def isDuplicateOfOtherValue(other: ApiValueV1): Boolean = other match {
        // A duplicate is either a direct link or a link value reification pointing at the same target.
        case linkV1: LinkV1 => targetResourceIri == linkV1.targetResourceIri
        case linkValueV1: LinkValueV1 => targetResourceIri == linkValueV1.objectIri
        case otherValue => throw InconsistentTriplestoreDataException(s"Cannot compare a $valueTypeIri to a ${otherValue.valueTypeIri}")
    }

    /**
     * A link isn't really changed if the new version points to the same resource as the old version.
     *
     * @param currentVersion the current version of the value.
     * @return `true` if this [[UpdateValueV1]] is redundant given `currentVersion`.
     */
    override def isRedundant(currentVersion: ApiValueV1): Boolean = isDuplicateOfOtherValue(currentVersion)
}
/**
 * Represents a request to create a link to a resource that hasn't been created yet, and is known only
 * by the ID that the client has provided for it. Instances of this class will be replaced by instances
 * of [[LinkUpdateV1]] during the preparation for the update.
 *
 * @param clientIDForTargetResource the client's ID for the target resource.
 */
case class LinkToClientIDUpdateV1(clientIDForTargetResource: String) extends UpdateValueV1 {

    def valueTypeIri = OntologyConstants.KnoraBase.LinkValue

    override def toString = clientIDForTargetResource

    // Duplicate/redundancy checks always fail, because the target resource doesn't exist yet
    // and so there is nothing to compare against.
    override def isDuplicateOfOtherValue(other: ApiValueV1): Boolean = false

    override def isRedundant(currentVersion: ApiValueV1): Boolean = false
}
/**
 * Represents the IRI of a Knora hierarchical list.
 *
 * @param hierarchicalListIri the IRI of the hierarchical list.
 */
case class HierarchicalListValueV1(hierarchicalListIri: IRI) extends UpdateValueV1 with ApiValueV1 {

    def valueTypeIri = OntologyConstants.KnoraBase.ListValue

    def toJsValue = JsString(hierarchicalListIri)

    // TODO: implement this correctly — the string representation should be the rdfs:label of the list node.
    override def toString = hierarchicalListIri

    /**
     * Checks if a new list value would duplicate an existing list value.
     *
     * @param other another [[ValueV1]].
     * @return `true` if `other` is a duplicate of `this`.
     */
    override def isDuplicateOfOtherValue(other: ApiValueV1): Boolean = other match {
        case that: HierarchicalListValueV1 => that == this
        case otherValue => throw InconsistentTriplestoreDataException(s"Cannot compare a $valueTypeIri to a ${otherValue.valueTypeIri}")
    }

    /**
     * Checks if a new version of a list value would be redundant given the current version of the value.
     *
     * @param currentVersion the current version of the value.
     * @return `true` if this [[UpdateValueV1]] is redundant given `currentVersion`.
     */
    override def isRedundant(currentVersion: ApiValueV1): Boolean = currentVersion match {
        case that: HierarchicalListValueV1 => that == this
        case other => throw InconsistentTriplestoreDataException(s"Cannot compare a $valueTypeIri to a ${other.valueTypeIri}")
    }
}
/**
 * Represents an integer value.
 *
 * @param ival the integer value.
 */
case class IntegerValueV1(ival: Int) extends UpdateValueV1 with ApiValueV1 {

    def valueTypeIri = OntologyConstants.KnoraBase.IntValue

    def toJsValue = JsNumber(ival)

    override def toString = ival.toString

    /**
     * Checks if a new integer value would duplicate an existing integer value.
     *
     * @param other another [[ValueV1]].
     * @return `true` if `other` is a duplicate of `this`.
     */
    override def isDuplicateOfOtherValue(other: ApiValueV1): Boolean = other match {
        case that: IntegerValueV1 => that == this
        case otherValue => throw InconsistentTriplestoreDataException(s"Cannot compare a $valueTypeIri to a ${otherValue.valueTypeIri}")
    }

    /**
     * Checks if a new version of an integer value would be redundant given the current version of the value.
     *
     * @param currentVersion the current version of the value.
     * @return `true` if this [[UpdateValueV1]] is redundant given `currentVersion`.
     */
    override def isRedundant(currentVersion: ApiValueV1): Boolean = currentVersion match {
        case that: IntegerValueV1 => that == this
        case other => throw InconsistentTriplestoreDataException(s"Cannot compare a $valueTypeIri to a ${other.valueTypeIri}")
    }
}
/**
 * Represents a boolean value.
 *
 * @param bval the boolean value.
 */
case class BooleanValueV1(bval: Boolean) extends UpdateValueV1 with ApiValueV1 {

    def valueTypeIri = OntologyConstants.KnoraBase.BooleanValue

    def toJsValue = JsBoolean(bval)

    override def toString = bval.toString

    /**
     * Checks if a new boolean value would duplicate an existing boolean value. Always returns `true`, because it
     * does not make sense to have two instances of the same boolean property.
     *
     * @param other another [[ValueV1]].
     * @return `true` if `other` is a duplicate of `this`.
     */
    override def isDuplicateOfOtherValue(other: ApiValueV1): Boolean = true

    /**
     * Checks if a new version of an boolean value would be redundant given the current version of the value.
     *
     * @param currentVersion the current version of the value.
     * @return `true` if this [[UpdateValueV1]] is redundant given `currentVersion`.
     */
    override def isRedundant(currentVersion: ApiValueV1): Boolean = currentVersion match {
        case that: BooleanValueV1 => that == this
        case other => throw InconsistentTriplestoreDataException(s"Cannot compare a $valueTypeIri to a ${other.valueTypeIri}")
    }
}
/**
 * Represents a URI value.
 *
 * @param uri the URI value.
 */
case class UriValueV1(uri: String) extends UpdateValueV1 with ApiValueV1 {

    def valueTypeIri = OntologyConstants.KnoraBase.UriValue

    def toJsValue = JsString(uri)

    override def toString = uri

    /**
     * Checks if a new URI value would duplicate an existing URI value.
     *
     * @param other another [[ValueV1]].
     * @return `true` if `other` is a duplicate of `this`.
     */
    override def isDuplicateOfOtherValue(other: ApiValueV1): Boolean = other match {
        case that: UriValueV1 => that == this
        case otherValue => throw InconsistentTriplestoreDataException(s"Cannot compare a $valueTypeIri to a ${otherValue.valueTypeIri}")
    }

    /**
     * Checks if a new version of a URI value would be redundant given the current version of the value.
     *
     * @param currentVersion the current version of the value.
     * @return `true` if this [[UpdateValueV1]] is redundant given `currentVersion`.
     */
    override def isRedundant(currentVersion: ApiValueV1): Boolean = currentVersion match {
        case that: UriValueV1 => that == this
        case other => throw InconsistentTriplestoreDataException(s"Cannot compare a $valueTypeIri to a ${other.valueTypeIri}")
    }
}
/**
 * Represents an arbitrary-precision decimal value.
 *
 * @param dval the decimal value.
 */
case class DecimalValueV1(dval: BigDecimal) extends UpdateValueV1 with ApiValueV1 {

    def valueTypeIri = OntologyConstants.KnoraBase.DecimalValue

    def toJsValue = JsNumber(dval)

    override def toString = dval.toString

    /**
     * Checks if a new decimal value would duplicate an existing decimal value.
     *
     * @param other another [[ValueV1]].
     * @return `true` if `other` is a duplicate of `this`.
     */
    override def isDuplicateOfOtherValue(other: ApiValueV1): Boolean = other match {
        case that: DecimalValueV1 => that == this
        case otherValue => throw InconsistentTriplestoreDataException(s"Cannot compare a $valueTypeIri to a ${otherValue.valueTypeIri}")
    }

    /**
     * Checks if a new version of a decimal value would be redundant given the current version of the value.
     *
     * @param currentVersion the current version of the value.
     * @return `true` if this [[UpdateValueV1]] is redundant given `currentVersion`.
     */
    override def isRedundant(currentVersion: ApiValueV1): Boolean = currentVersion match {
        case that: DecimalValueV1 => that == this
        case other => throw InconsistentTriplestoreDataException(s"Cannot compare a $valueTypeIri to a ${other.valueTypeIri}")
    }
}
/**
 * Represents a time interval value.
 *
 * @param timeval1 an `xsd:decimal` representing the beginning of the interval.
 * @param timeval2 an `xsd:decimal` representing the end of the interval.
 */
case class IntervalValueV1(timeval1: BigDecimal, timeval2: BigDecimal) extends UpdateValueV1 with ApiValueV1 {

    def valueTypeIri = OntologyConstants.KnoraBase.IntervalValue

    def toJsValue = JsObject(
        "timeval1" -> JsNumber(timeval1),
        "timeval2" -> JsNumber(timeval2)
    )

    override def toString = s"$timeval1 - $timeval2"

    /**
     * Checks if a new interval value would duplicate an existing interval value.
     *
     * @param other another [[ValueV1]].
     * @return `true` if `other` is a duplicate of `this`.
     */
    override def isDuplicateOfOtherValue(other: ApiValueV1): Boolean = other match {
        case that: IntervalValueV1 => that == this
        case otherValue => throw InconsistentTriplestoreDataException(s"Cannot compare a $valueTypeIri to a ${otherValue.valueTypeIri}")
    }

    /**
     * Checks if a new version of this interval value would be redundant given the current version of the value.
     *
     * @param currentVersion the current version of the value.
     * @return `true` if this [[UpdateValueV1]] is redundant given `currentVersion`.
     */
    override def isRedundant(currentVersion: ApiValueV1): Boolean = currentVersion match {
        case that: IntervalValueV1 => that == this
        case other => throw InconsistentTriplestoreDataException(s"Cannot compare a $valueTypeIri to a ${other.valueTypeIri}")
    }
}
/**
 * Represents a date value as a period bounded by Julian Day Numbers. Knora stores dates internally in this format.
 *
 * @param dateval1       the beginning of the date (a Julian day number).
 * @param dateval2       the end of the date (a Julian day number).
 * @param calendar       the preferred calendar for representing the date.
 * @param dateprecision1 the precision of the beginning of the date.
 * @param dateprecision2 the precision of the end of the date.
 */
case class JulianDayNumberValueV1(dateval1: Int,
                                  dateval2: Int,
                                  calendar: KnoraCalendarV1.Value,
                                  dateprecision1: KnoraPrecisionV1.Value,
                                  dateprecision2: KnoraPrecisionV1.Value) extends UpdateValueV1 {

    def valueTypeIri = OntologyConstants.KnoraBase.DateValue

    // Duplicate detection works by converting this internal representation to the API
    // representation and comparing the result with the other value.
    override def isDuplicateOfOtherValue(other: ApiValueV1): Boolean = other match {
        case _: DateValueV1 => DateUtilV1.julianDayNumberValueV1ToDateValueV1(this) == other
        case otherValue => throw InconsistentTriplestoreDataException(s"Cannot compare a $valueTypeIri to a ${otherValue.valueTypeIri}")
    }

    override def isRedundant(currentVersion: ApiValueV1): Boolean = isDuplicateOfOtherValue(currentVersion)

    // String representation of the date, used in templates. Rendered with DAY precision only:
    // either an exact day, or a period expressed as a range from one day to another.
    override def toString = {
        val date1 = DateUtilV1.julianDayNumber2DateString(dateval1, calendar, KnoraPrecisionV1.DAY)
        val date2 = DateUtilV1.julianDayNumber2DateString(dateval2, calendar, KnoraPrecisionV1.DAY)

        // If the two bounds render identically, the value is a single exact day, not a period.
        if (date1 == date2) date1 else s"$date1 - $date2"
    }
}
/**
 * Represents a date value as represented in Knora API v1.
 *
 * A [[DateValueV1]] can represent either single date or a period with start and end dates (`dateval1` and `dateval2`).
 * If it represents a single date, `dateval1` will have a value but `dateval2` will be `None`. Both `dateval1` and `dateval2`
 * can indicate degrees of uncertainty, using the following formats:
 *
 * - `YYYY-MM-DD` specifies a particular day, with no uncertainty.
 * - `YYYY-MM` indicates that the year and the month are known, but that the day of the month is uncertain. In effect, this specifies a range of possible dates, from the first day of the month to the last day of the month.
 * - `YYYY` indicates that only the year is known. In effect, this specifies a range of possible dates, from the first day of the year to the last day of the year.
 *
 * The year and month values refer to years and months in the calendar specified by `calendar`.
 *
 * @param dateval1 the start date of the period.
 * @param dateval2 the end date of the period, if any.
 * @param calendar the type of calendar used in the date.
 */
case class DateValueV1(dateval1: String,
                       dateval2: String,
                       calendar: KnoraCalendarV1.Value) extends ApiValueV1 {

    def valueTypeIri = OntologyConstants.KnoraBase.DateValue

    def toJsValue = ApiValueV1JsonProtocol.dateValueV1Format.write(this)

    // If both bounds are equal, the value is a single exact day; otherwise it's a period.
    override def toString = if (dateval1 == dateval2) dateval1 else s"$dateval1 - $dateval2"
}
/**
 * Represents an RGB color value.
 *
 * @param color a hexadecimal string containing the RGB color value.
 */
case class ColorValueV1(color: String) extends UpdateValueV1 with ApiValueV1 {

    def valueTypeIri = OntologyConstants.KnoraBase.ColorValue

    def toJsValue = JsString(color)

    override def toString = color

    /**
     * Checks if a new color value would equal an existing color value.
     *
     * @param other another [[ValueV1]].
     * @return `true` if `other` is a duplicate of `this`.
     */
    override def isDuplicateOfOtherValue(other: ApiValueV1): Boolean = other match {
        case that: ColorValueV1 => that == this
        case otherValue => throw InconsistentTriplestoreDataException(s"Cannot compare a $valueTypeIri to a ${otherValue.valueTypeIri}")
    }

    /**
     * Checks if a new version of this color value would equal the existing version of this color value.
     *
     * @param currentVersion the current version of the value.
     * @return `true` if this [[UpdateValueV1]] is redundant given `currentVersion`.
     */
    override def isRedundant(currentVersion: ApiValueV1): Boolean = currentVersion match {
        case that: ColorValueV1 => that == this
        case other => throw InconsistentTriplestoreDataException(s"Cannot compare a $valueTypeIri to a ${other.valueTypeIri}")
    }
}
/**
 * Represents a geometric shape.
 *
 * @param geom A string containing JSON that describes the shape. TODO: don't use JSON for this (issue 169).
 */
case class GeomValueV1(geom: String) extends UpdateValueV1 with ApiValueV1 {

    def valueTypeIri = OntologyConstants.KnoraBase.GeomValue

    def toJsValue = JsString(geom)

    override def toString = geom

    /**
     * Checks if a new geom value would duplicate an existing geom value.
     *
     * @param other another [[ValueV1]].
     * @return `true` if `other` is a duplicate of `this`.
     */
    override def isDuplicateOfOtherValue(other: ApiValueV1): Boolean = other match {
        case that: GeomValueV1 => that == this
        case otherValue => throw InconsistentTriplestoreDataException(s"Cannot compare a $valueTypeIri to a ${otherValue.valueTypeIri}")
    }

    /**
     * Checks if a new version of a geom value would be redundant given the current version of the value.
     *
     * @param currentVersion the current version of the value.
     * @return `true` if this [[UpdateValueV1]] is redundant given `currentVersion`.
     */
    override def isRedundant(currentVersion: ApiValueV1): Boolean = currentVersion match {
        case that: GeomValueV1 => that == this
        case other => throw InconsistentTriplestoreDataException(s"Cannot compare a $valueTypeIri to a ${other.valueTypeIri}")
    }
}
/**
 * Represents a [[http://www.geonames.org/ GeoNames]] code.
 *
 * @param geonameCode a string representing the GeoNames code.
 */
case class GeonameValueV1(geonameCode: String) extends UpdateValueV1 with ApiValueV1 {

    def valueTypeIri = OntologyConstants.KnoraBase.GeonameValue

    def toJsValue = JsString(geonameCode)

    override def toString = geonameCode

    /**
     * Checks if a new GeoName value would duplicate an existing GeoName value.
     *
     * @param other another [[ValueV1]].
     * @return `true` if `other` is a duplicate of `this`.
     */
    override def isDuplicateOfOtherValue(other: ApiValueV1): Boolean = other match {
        case that: GeonameValueV1 => that == this
        case otherValue => throw InconsistentTriplestoreDataException(s"Cannot compare a $valueTypeIri to a ${otherValue.valueTypeIri}")
    }

    /**
     * Checks if a new version of a GeoName value would be redundant given the current version of the value.
     *
     * @param currentVersion the current version of the value.
     * @return `true` if this [[UpdateValueV1]] is redundant given `currentVersion`.
     */
    override def isRedundant(currentVersion: ApiValueV1): Boolean = currentVersion match {
        case that: GeonameValueV1 => that == this
        case other => throw InconsistentTriplestoreDataException(s"Cannot compare a $valueTypeIri to a ${other.valueTypeIri}")
    }
}
/**
 * The data describing a binary file of any type that can be sent to Knora.
 */
sealed trait FileValueV1 extends UpdateValueV1 with ApiValueV1 {
    // The MIME type of the file as stored internally by Knora.
    val internalMimeType: String
    // The filename of the file as stored internally by Knora.
    val internalFilename: String
    // The filename of the file at the time of import.
    val originalFilename: String
    // The MIME type of the original file, if known.
    val originalMimeType: Option[String]
}
/**
 * A representation of a digital image.
 *
 * @param internalMimeType the MIME-type of the internal representation.
 * @param internalFilename the internal filename of the object.
 * @param originalFilename the original filename of the object at the time of the import.
 * @param originalMimeType the original MIME-type of the object, if known.
 * @param dimX             the X dimension of the object.
 * @param dimY             the Y dimension of the object.
 * @param qualityLevel     the quality level of this image (higher values mean higher resolutions).
 * @param qualityName      a string representation of the qualityLevel.
 * @param isPreview        indicates if the file value is used as a preview (thumbnail).
 */
case class StillImageFileValueV1(internalMimeType: String,
                                 internalFilename: String,
                                 originalFilename: String,
                                 originalMimeType: Option[String] = None,
                                 dimX: Int,
                                 dimY: Int,
                                 qualityLevel: Int,
                                 qualityName: Option[String] = None,
                                 isPreview: Boolean = false) extends FileValueV1 {

    def valueTypeIri = OntologyConstants.KnoraBase.StillImageFileValue

    def toJsValue = ApiValueV1JsonProtocol.stillImageFileValueV1Format.write(this)

    override def toString = originalFilename

    /**
     * Checks if a new still image file value would duplicate an existing still image file value.
     *
     * @param other another [[ValueV1]].
     * @return `true` if `other` is a duplicate of `this`.
     */
    override def isDuplicateOfOtherValue(other: ApiValueV1): Boolean = other match {
        case that: StillImageFileValueV1 => that == this
        case otherValue => throw InconsistentTriplestoreDataException(s"Cannot compare a $valueTypeIri to a ${otherValue.valueTypeIri}")
    }

    /**
     * Checks if a new version of a still image file value would be redundant given the current version of the value.
     *
     * @param currentVersion the current version of the value.
     * @return `true` if this [[UpdateValueV1]] is redundant given `currentVersion`.
     */
    override def isRedundant(currentVersion: ApiValueV1): Boolean = currentVersion match {
        case that: StillImageFileValueV1 => that == this
        case other => throw InconsistentTriplestoreDataException(s"Cannot compare a $valueTypeIri to a ${other.valueTypeIri}")
    }
}
/**
 * A representation of a video file.
 *
 * @param internalMimeType the MIME-type of the internal representation.
 * @param internalFilename the internal filename of the object.
 * @param originalFilename the original filename of the object at the time of the import.
 * @param originalMimeType the original MIME-type of the object, if known.
 */
case class MovingImageFileValueV1(internalMimeType: String,
                                  internalFilename: String,
                                  originalFilename: String,
                                  originalMimeType: Option[String] = None) extends FileValueV1 {

    def valueTypeIri = OntologyConstants.KnoraBase.MovingImageFileValue

    def toJsValue = ApiValueV1JsonProtocol.movingImageFileValueV1Format.write(this)

    override def toString = originalFilename

    /**
     * Checks if a new moving image file value would duplicate an existing moving image file value.
     *
     * @param other another [[ValueV1]].
     * @return `true` if `other` is a duplicate of `this`.
     */
    override def isDuplicateOfOtherValue(other: ApiValueV1): Boolean = other match {
        case that: MovingImageFileValueV1 => that == this
        case otherValue => throw InconsistentTriplestoreDataException(s"Cannot compare a $valueTypeIri to a ${otherValue.valueTypeIri}")
    }

    /**
     * Checks if a new version of a moving image file value would be redundant given the current version of the value.
     *
     * @param currentVersion the current version of the value.
     * @return `true` if this [[UpdateValueV1]] is redundant given `currentVersion`.
     */
    override def isRedundant(currentVersion: ApiValueV1): Boolean = currentVersion match {
        case that: MovingImageFileValueV1 => that == this
        case other => throw InconsistentTriplestoreDataException(s"Cannot compare a $valueTypeIri to a ${other.valueTypeIri}")
    }
}
/**
 * A representation of a text file.
 *
 * @param internalMimeType the MIME-type of the internal representation.
 * @param internalFilename the internal filename of the object.
 * @param originalFilename the original filename of the object at the time of the import.
 * @param originalMimeType the original MIME-type of the object, if known.
 */
case class TextFileValueV1(internalMimeType: String,
                           internalFilename: String,
                           originalFilename: String,
                           originalMimeType: Option[String] = None) extends FileValueV1 {

    def valueTypeIri = OntologyConstants.KnoraBase.TextFileValue

    def toJsValue = ApiValueV1JsonProtocol.textFileValueV1Format.write(this)

    override def toString = originalFilename

    /**
     * Checks if a new text file value would duplicate an existing text file value.
     *
     * @param other another [[ValueV1]].
     * @return `true` if `other` is a duplicate of `this`.
     */
    override def isDuplicateOfOtherValue(other: ApiValueV1): Boolean = other match {
        case that: TextFileValueV1 => that == this
        case otherValue => throw InconsistentTriplestoreDataException(s"Cannot compare a $valueTypeIri to a ${otherValue.valueTypeIri}")
    }

    /**
     * Checks if a new version of a text file value would be redundant given the current version of the value.
     *
     * @param currentVersion the current version of the value.
     * @return `true` if this [[UpdateValueV1]] is redundant given `currentVersion`.
     */
    override def isRedundant(currentVersion: ApiValueV1): Boolean = currentVersion match {
        case that: TextFileValueV1 => that == this
        case other => throw InconsistentTriplestoreDataException(s"Cannot compare a $valueTypeIri to a ${other.valueTypeIri}")
    }
}
/**
 * Represents information about a version of a value.
 *
 * @param valueObjectIri    the IRI of the version.
 * @param valueCreationDate the timestamp of the version.
 * @param previousValue     the IRI of the previous version.
 */
case class ValueVersionV1(valueObjectIri: IRI,
                          valueCreationDate: Option[String],
                          previousValue: Option[IRI]) extends ApiValueV1 {

    // NOTE(review): this reports LinkValue as its type IRI even though it describes a value
    // version, not a link — possibly intentional, but verify against the ontology.
    def valueTypeIri = OntologyConstants.KnoraBase.LinkValue

    def toJsValue = ApiValueV1JsonProtocol.valueVersionV1Format.write(this)
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// JSON formatting
/**
 * A spray-json protocol for generating Knora API v1 JSON for property values.
 *
 * NOTE(review): spray-json implicit `val` formats can depend on declaration order when one
 * format refers to another — verify before reordering any of the declarations below.
 */
object ApiValueV1JsonProtocol extends SprayJsonSupport with DefaultJsonProtocol with NullOptions {

    import org.knora.webapi.messages.v1.responder.resourcemessages.ResourceV1JsonProtocol._

    /**
     * Converts between [[KnoraCalendarV1]] objects and [[JsValue]] objects.
     */
    implicit object KnoraCalendarV1JsonFormat extends JsonFormat[KnoraCalendarV1.Value] {
        // Reads a calendar from a JSON string; rejects any other JSON type.
        def read(jsonVal: JsValue): KnoraCalendarV1.Value = jsonVal match {
            case JsString(str) => KnoraCalendarV1.lookup(str)
            case _ => throw BadRequestException(s"Invalid calendar in JSON: $jsonVal")
        }

        // Writes a calendar as its enum name.
        def write(calendarV1Value: KnoraCalendarV1.Value): JsValue = JsString(calendarV1Value.toString)
    }

    /**
     * Converts between [[KnoraPrecisionV1]] objects and [[JsValue]] objects.
     */
    implicit object KnoraPrecisionV1JsonFormat extends JsonFormat[KnoraPrecisionV1.Value] {
        // Reads a precision from a JSON string; rejects any other JSON type.
        def read(jsonVal: JsValue): KnoraPrecisionV1.Value = jsonVal match {
            case JsString(str) => KnoraPrecisionV1.lookup(str)
            case _ => throw BadRequestException(s"Invalid precision in JSON: $jsonVal")
        }

        // Writes a precision as its enum name.
        def write(precisionV1Value: KnoraPrecisionV1.Value): JsValue = JsString(precisionV1Value.toString)
    }

    /**
     * Converts between [[ApiValueV1]] objects and [[JsValue]] objects.
     */
    implicit object ValueV1JsonFormat extends JsonFormat[ApiValueV1] {
        /**
         * Not implemented.
         */
        def read(jsonVal: JsValue) = ???

        /**
         * Converts an [[ApiValueV1]] to a [[JsValue]].
         *
         * @param valueV1 a [[ApiValueV1]]
         * @return a [[JsValue]].
         */
        def write(valueV1: ApiValueV1): JsValue = valueV1.toJsValue
    }

    // Formats derived from case-class constructors; each jsonFormatN must match the
    // class's field count, and fields are serialized under their parameter names.
    implicit val createFileQualityLevelFormat: RootJsonFormat[CreateFileQualityLevelV1] = jsonFormat4(CreateFileQualityLevelV1)
    implicit val createFileV1Format: RootJsonFormat[CreateFileV1] = jsonFormat3(CreateFileV1)
    implicit val valueGetResponseV1Format: RootJsonFormat[ValueGetResponseV1] = jsonFormat7(ValueGetResponseV1)
    implicit val dateValueV1Format: JsonFormat[DateValueV1] = jsonFormat3(DateValueV1)
    implicit val stillImageFileValueV1Format: JsonFormat[StillImageFileValueV1] = jsonFormat9(StillImageFileValueV1)
    implicit val textFileValueV1Format: JsonFormat[TextFileValueV1] = jsonFormat4(TextFileValueV1)
    implicit val movingImageFileValueV1Format: JsonFormat[MovingImageFileValueV1] = jsonFormat4(MovingImageFileValueV1)
    implicit val valueVersionV1Format: JsonFormat[ValueVersionV1] = jsonFormat3(ValueVersionV1)
    implicit val linkValueV1Format: JsonFormat[LinkValueV1] = jsonFormat4(LinkValueV1)
    implicit val valueVersionHistoryGetResponseV1Format: RootJsonFormat[ValueVersionHistoryGetResponseV1] = jsonFormat1(ValueVersionHistoryGetResponseV1)
    implicit val createRichtextV1Format: RootJsonFormat[CreateRichtextV1] = jsonFormat3(CreateRichtextV1)
    implicit val createValueApiRequestV1Format: RootJsonFormat[CreateValueApiRequestV1] = jsonFormat15(CreateValueApiRequestV1)
    implicit val createValueResponseV1Format: RootJsonFormat[CreateValueResponseV1] = jsonFormat4(CreateValueResponseV1)
    implicit val changeValueApiRequestV1Format: RootJsonFormat[ChangeValueApiRequestV1] = jsonFormat13(ChangeValueApiRequestV1)
    implicit val changeValueResponseV1Format: RootJsonFormat[ChangeValueResponseV1] = jsonFormat4(ChangeValueResponseV1)
    implicit val deleteValueResponseV1Format: RootJsonFormat[DeleteValueResponseV1] = jsonFormat1(DeleteValueResponseV1)
    implicit val changeFileValueApiRequestV1Format: RootJsonFormat[ChangeFileValueApiRequestV1] = jsonFormat1(ChangeFileValueApiRequestV1)
    implicit val changeFileValueresponseV1Format: RootJsonFormat[ChangeFileValueResponseV1] = jsonFormat1(ChangeFileValueResponseV1)
}
| nie-ine/Knora | webapi/src/main/scala/org/knora/webapi/messages/v1/responder/valuemessages/ValueMessagesV1.scala | Scala | agpl-3.0 | 72,800 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.util
import java.util.Properties
import kafka.api.ApiVersion
import kafka.cluster.EndPoint
import kafka.consumer.ConsumerConfig
import kafka.coordinator.OffsetConfig
import kafka.message.{BrokerCompressionCodec, CompressionCodec, Message, MessageSet}
import kafka.utils.CoreUtils
import org.apache.kafka.clients.CommonClientConfigs
import org.apache.kafka.common.config.SaslConfigs
import org.apache.kafka.common.config.{AbstractConfig, ConfigDef, SslConfigs}
import org.apache.kafka.common.metrics.MetricsReporter
import org.apache.kafka.common.protocol.SecurityProtocol
import org.apache.kafka.common.security.auth.PrincipalBuilder
import scala.collection.{Map, immutable}
/**
 * Default values for every broker configuration property, grouped to mirror the
 * sections of KafkaConfig. A default listed here is used whenever the corresponding
 * property is absent from server.properties (see the ConfigDef definitions below).
 */
object Defaults {
  /** ********* Zookeeper Configuration ***********/
  val ZkSessionTimeoutMs = 6000
  val ZkSyncTimeMs = 2000
  val ZkEnableSecureAcls = false
  /** ********* General Configuration ***********/
  val MaxReservedBrokerId = 1000
  val BrokerId = -1
  val MessageMaxBytes = 1000000 + MessageSet.LogOverhead
  val NumNetworkThreads = 3
  val NumIoThreads = 8
  val BackgroundThreads = 10
  val QueuedMaxRequests = 500
  /************* Authorizer Configuration ***********/
  val AuthorizerClassName = ""
  /** ********* Socket Server Configuration ***********/
  val Port = 9092
  // Empty string means "bind to all interfaces" (see HostNameDoc). Using the interned
  // literal instead of `new String("")`, which needlessly allocated a fresh instance.
  val HostName: String = ""
  val SocketSendBufferBytes: Int = 100 * 1024
  val SocketReceiveBufferBytes: Int = 100 * 1024
  val SocketRequestMaxBytes: Int = 100 * 1024 * 1024
  val MaxConnectionsPerIp: Int = Int.MaxValue
  val MaxConnectionsPerIpOverrides: String = ""
  val ConnectionsMaxIdleMs = 10 * 60 * 1000L
  val RequestTimeoutMs = 30000
  /** ********* Log Configuration ***********/
  val NumPartitions = 1
  val LogDir = "/tmp/kafka-logs"
  val LogSegmentBytes = 1 * 1024 * 1024 * 1024
  val LogRollHours = 24 * 7
  val LogRollJitterHours = 0
  val LogRetentionHours = 24 * 7
  val LogRetentionBytes = -1L
  val LogCleanupIntervalMs = 5 * 60 * 1000L
  val Delete = "delete"
  val Compact = "compact"
  val LogCleanupPolicy = Delete
  val LogCleanerThreads = 1
  val LogCleanerIoMaxBytesPerSecond = Double.MaxValue
  val LogCleanerDedupeBufferSize = 500 * 1024 * 1024L
  val LogCleanerIoBufferSize = 512 * 1024
  val LogCleanerDedupeBufferLoadFactor = 0.9d
  val LogCleanerBackoffMs = 15 * 1000
  val LogCleanerMinCleanRatio = 0.5d
  val LogCleanerEnable = false
  val LogCleanerDeleteRetentionMs = 24 * 60 * 60 * 1000L
  val LogIndexSizeMaxBytes = 10 * 1024 * 1024
  val LogIndexIntervalBytes = 4096
  val LogFlushIntervalMessages = Long.MaxValue
  val LogDeleteDelayMs = 60000
  val LogFlushSchedulerIntervalMs = Long.MaxValue
  val LogFlushOffsetCheckpointIntervalMs = 60000
  val LogPreAllocateEnable = false
  val NumRecoveryThreadsPerDataDir = 1
  val AutoCreateTopicsEnable = true
  val MinInSyncReplicas = 1
  /** ********* Replication configuration ***********/
  // Controller-to-broker channels reuse the general request timeout by default.
  val ControllerSocketTimeoutMs = RequestTimeoutMs
  val ControllerMessageQueueSize = Int.MaxValue
  val DefaultReplicationFactor = 1
  val ReplicaLagTimeMaxMs = 10000L
  // Replica fetching defaults are aligned with the old consumer's settings.
  val ReplicaSocketTimeoutMs = ConsumerConfig.SocketTimeout
  val ReplicaSocketReceiveBufferBytes = ConsumerConfig.SocketBufferSize
  val ReplicaFetchMaxBytes = ConsumerConfig.FetchSize
  val ReplicaFetchWaitMaxMs = 500
  val ReplicaFetchMinBytes = 1
  val NumReplicaFetchers = 1
  val ReplicaFetchBackoffMs = 1000
  val ReplicaHighWatermarkCheckpointIntervalMs = 5000L
  val FetchPurgatoryPurgeIntervalRequests = 1000
  val ProducerPurgatoryPurgeIntervalRequests = 1000
  val AutoLeaderRebalanceEnable = true
  val LeaderImbalancePerBrokerPercentage = 10
  val LeaderImbalanceCheckIntervalSeconds = 300
  // NOTE: unclean leader election is on by default in this version; operators who
  // cannot tolerate data loss must explicitly disable it.
  val UncleanLeaderElectionEnable = true
  val InterBrokerSecurityProtocol = SecurityProtocol.PLAINTEXT.toString
  val InterBrokerProtocolVersion = ApiVersion.latestVersion.toString
  /** ********* Controlled shutdown configuration ***********/
  val ControlledShutdownMaxRetries = 3
  val ControlledShutdownRetryBackoffMs = 5000
  val ControlledShutdownEnable = true
  /** ********* Consumer coordinator configuration ***********/
  val ConsumerMinSessionTimeoutMs = 6000
  val ConsumerMaxSessionTimeoutMs = 30000
  /** ********* Offset management configuration ***********/
  // Offset manager defaults are owned by OffsetConfig and re-exported here.
  val OffsetMetadataMaxSize = OffsetConfig.DefaultMaxMetadataSize
  val OffsetsLoadBufferSize = OffsetConfig.DefaultLoadBufferSize
  val OffsetsTopicReplicationFactor = OffsetConfig.DefaultOffsetsTopicReplicationFactor
  val OffsetsTopicPartitions: Int = OffsetConfig.DefaultOffsetsTopicNumPartitions
  val OffsetsTopicSegmentBytes: Int = OffsetConfig.DefaultOffsetsTopicSegmentBytes
  val OffsetsTopicCompressionCodec: Int = OffsetConfig.DefaultOffsetsTopicCompressionCodec.codec
  val OffsetsRetentionMinutes: Int = 24 * 60
  val OffsetsRetentionCheckIntervalMs: Long = OffsetConfig.DefaultOffsetsRetentionCheckIntervalMs
  val OffsetCommitTimeoutMs = OffsetConfig.DefaultOffsetCommitTimeoutMs
  val OffsetCommitRequiredAcks = OffsetConfig.DefaultOffsetCommitRequiredAcks
  /** ********* Quota Configuration ***********/
  val ProducerQuotaBytesPerSecondDefault = ClientQuotaManagerConfig.QuotaBytesPerSecondDefault
  val ConsumerQuotaBytesPerSecondDefault = ClientQuotaManagerConfig.QuotaBytesPerSecondDefault
  val NumQuotaSamples: Int = ClientQuotaManagerConfig.DefaultNumQuotaSamples
  val QuotaWindowSizeSeconds: Int = ClientQuotaManagerConfig.DefaultQuotaWindowSizeSeconds
  val DeleteTopicEnable = false
  val CompressionType = "producer"
  /** ********* Kafka Metrics Configuration ***********/
  val MetricNumSamples = 2
  val MetricSampleWindowMs = 30000
  val MetricReporterClasses = ""
  /** ********* SSL configuration ***********/
  // SSL/SASL defaults are owned by the kafka-clients constants classes.
  val PrincipalBuilderClass = SslConfigs.DEFAULT_PRINCIPAL_BUILDER_CLASS
  val SslProtocol = SslConfigs.DEFAULT_SSL_PROTOCOL
  val SslEnabledProtocols = SslConfigs.DEFAULT_SSL_ENABLED_PROTOCOLS
  val SslKeystoreType = SslConfigs.DEFAULT_SSL_KEYSTORE_TYPE
  val SslTruststoreType = SslConfigs.DEFAULT_SSL_TRUSTSTORE_TYPE
  val SslKeyManagerAlgorithm = SslConfigs.DEFAULT_SSL_KEYMANGER_ALGORITHM
  val SslTrustManagerAlgorithm = SslConfigs.DEFAULT_SSL_TRUSTMANAGER_ALGORITHM
  val SslClientAuthRequired = "required"
  val SslClientAuthRequested = "requested"
  val SslClientAuthNone = "none"
  val SslClientAuth = SslClientAuthNone
  /** ********* Sasl configuration ***********/
  val SaslKerberosKinitCmd = SaslConfigs.DEFAULT_KERBEROS_KINIT_CMD
  val SaslKerberosTicketRenewWindowFactor = SaslConfigs.DEFAULT_KERBEROS_TICKET_RENEW_WINDOW_FACTOR
  val SaslKerberosTicketRenewJitter = SaslConfigs.DEFAULT_KERBEROS_TICKET_RENEW_JITTER
  val SaslKerberosMinTimeBeforeRelogin = SaslConfigs.DEFAULT_KERBEROS_MIN_TIME_BEFORE_RELOGIN
  val SaslKerberosPrincipalToLocalRules = SaslConfigs.DEFAULT_SASL_KERBEROS_PRINCIPAL_TO_LOCAL_RULES
}
object KafkaConfig {
def main(args: Array[String]) {
System.out.println(configDef.toHtmlTable)
}
  /** ********* Zookeeper Configuration ***********/
  // Property-name constants: each value below is the literal key expected in the
  // broker's server.properties file (and in the ConfigDef defined later in this file).
  val ZkConnectProp = "zookeeper.connect"
  val ZkSessionTimeoutMsProp = "zookeeper.session.timeout.ms"
  val ZkConnectionTimeoutMsProp = "zookeeper.connection.timeout.ms"
  val ZkSyncTimeMsProp = "zookeeper.sync.time.ms"
  val ZkEnableSecureAclsProp = "zookeeper.set.acl"
  /** ********* General Configuration ***********/
  val MaxReservedBrokerIdProp = "reserved.broker.max.id"
  val BrokerIdProp = "broker.id"
  val MessageMaxBytesProp = "message.max.bytes"
  val NumNetworkThreadsProp = "num.network.threads"
  val NumIoThreadsProp = "num.io.threads"
  val BackgroundThreadsProp = "background.threads"
  val QueuedMaxRequestsProp = "queued.max.requests"
  // Shared with the Java clients so broker and clients use the same key string.
  val RequestTimeoutMsProp = CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG
  /************* Authorizer Configuration ***********/
  val AuthorizerClassNameProp = "authorizer.class.name"
  /** ********* Socket Server Configuration ***********/
  val PortProp = "port"
  val HostNameProp = "host.name"
  val ListenersProp = "listeners"
  val AdvertisedHostNameProp: String = "advertised.host.name"
  val AdvertisedPortProp = "advertised.port"
  val AdvertisedListenersProp = "advertised.listeners"
  val SocketSendBufferBytesProp = "socket.send.buffer.bytes"
  val SocketReceiveBufferBytesProp = "socket.receive.buffer.bytes"
  val SocketRequestMaxBytesProp = "socket.request.max.bytes"
  val MaxConnectionsPerIpProp = "max.connections.per.ip"
  val MaxConnectionsPerIpOverridesProp = "max.connections.per.ip.overrides"
  val ConnectionsMaxIdleMsProp = "connections.max.idle.ms"
  /** ********* Log Configuration ***********/
  val NumPartitionsProp = "num.partitions"
  val LogDirsProp = "log.dirs"
  val LogDirProp = "log.dir"
  val LogSegmentBytesProp = "log.segment.bytes"
  val LogRollTimeMillisProp = "log.roll.ms"
  val LogRollTimeHoursProp = "log.roll.hours"
  val LogRollTimeJitterMillisProp = "log.roll.jitter.ms"
  val LogRollTimeJitterHoursProp = "log.roll.jitter.hours"
  val LogRetentionTimeMillisProp = "log.retention.ms"
  val LogRetentionTimeMinutesProp = "log.retention.minutes"
  val LogRetentionTimeHoursProp = "log.retention.hours"
  val LogRetentionBytesProp = "log.retention.bytes"
  val LogCleanupIntervalMsProp = "log.retention.check.interval.ms"
  val LogCleanupPolicyProp = "log.cleanup.policy"
  val LogCleanerThreadsProp = "log.cleaner.threads"
  val LogCleanerIoMaxBytesPerSecondProp = "log.cleaner.io.max.bytes.per.second"
  val LogCleanerDedupeBufferSizeProp = "log.cleaner.dedupe.buffer.size"
  val LogCleanerIoBufferSizeProp = "log.cleaner.io.buffer.size"
  val LogCleanerDedupeBufferLoadFactorProp = "log.cleaner.io.buffer.load.factor"
  val LogCleanerBackoffMsProp = "log.cleaner.backoff.ms"
  val LogCleanerMinCleanRatioProp = "log.cleaner.min.cleanable.ratio"
  val LogCleanerEnableProp = "log.cleaner.enable"
  val LogCleanerDeleteRetentionMsProp = "log.cleaner.delete.retention.ms"
  val LogIndexSizeMaxBytesProp = "log.index.size.max.bytes"
  val LogIndexIntervalBytesProp = "log.index.interval.bytes"
  val LogFlushIntervalMessagesProp = "log.flush.interval.messages"
  val LogDeleteDelayMsProp = "log.segment.delete.delay.ms"
  val LogFlushSchedulerIntervalMsProp = "log.flush.scheduler.interval.ms"
  val LogFlushIntervalMsProp = "log.flush.interval.ms"
  val LogFlushOffsetCheckpointIntervalMsProp = "log.flush.offset.checkpoint.interval.ms"
  val LogPreAllocateProp = "log.preallocate"
  val NumRecoveryThreadsPerDataDirProp = "num.recovery.threads.per.data.dir"
  val AutoCreateTopicsEnableProp = "auto.create.topics.enable"
  val MinInSyncReplicasProp = "min.insync.replicas"
  /** ********* Replication configuration ***********/
  val ControllerSocketTimeoutMsProp = "controller.socket.timeout.ms"
  val DefaultReplicationFactorProp = "default.replication.factor"
  val ReplicaLagTimeMaxMsProp = "replica.lag.time.max.ms"
  val ReplicaSocketTimeoutMsProp = "replica.socket.timeout.ms"
  val ReplicaSocketReceiveBufferBytesProp = "replica.socket.receive.buffer.bytes"
  val ReplicaFetchMaxBytesProp = "replica.fetch.max.bytes"
  val ReplicaFetchWaitMaxMsProp = "replica.fetch.wait.max.ms"
  val ReplicaFetchMinBytesProp = "replica.fetch.min.bytes"
  val ReplicaFetchBackoffMsProp = "replica.fetch.backoff.ms"
  val NumReplicaFetchersProp = "num.replica.fetchers"
  val ReplicaHighWatermarkCheckpointIntervalMsProp = "replica.high.watermark.checkpoint.interval.ms"
  val FetchPurgatoryPurgeIntervalRequestsProp = "fetch.purgatory.purge.interval.requests"
  val ProducerPurgatoryPurgeIntervalRequestsProp = "producer.purgatory.purge.interval.requests"
  val AutoLeaderRebalanceEnableProp = "auto.leader.rebalance.enable"
  val LeaderImbalancePerBrokerPercentageProp = "leader.imbalance.per.broker.percentage"
  val LeaderImbalanceCheckIntervalSecondsProp = "leader.imbalance.check.interval.seconds"
  val UncleanLeaderElectionEnableProp = "unclean.leader.election.enable"
  val InterBrokerSecurityProtocolProp = "security.inter.broker.protocol"
  val InterBrokerProtocolVersionProp = "inter.broker.protocol.version"
  /** ********* Controlled shutdown configuration ***********/
  val ControlledShutdownMaxRetriesProp = "controlled.shutdown.max.retries"
  val ControlledShutdownRetryBackoffMsProp = "controlled.shutdown.retry.backoff.ms"
  val ControlledShutdownEnableProp = "controlled.shutdown.enable"
  /** ********* Group coordinator configuration ***********/
  val GroupMinSessionTimeoutMsProp = "group.min.session.timeout.ms"
  val GroupMaxSessionTimeoutMsProp = "group.max.session.timeout.ms"
  /** ********* Offset management configuration ***********/
  val OffsetMetadataMaxSizeProp = "offset.metadata.max.bytes"
  val OffsetsLoadBufferSizeProp = "offsets.load.buffer.size"
  val OffsetsTopicReplicationFactorProp = "offsets.topic.replication.factor"
  val OffsetsTopicPartitionsProp = "offsets.topic.num.partitions"
  val OffsetsTopicSegmentBytesProp = "offsets.topic.segment.bytes"
  val OffsetsTopicCompressionCodecProp = "offsets.topic.compression.codec"
  val OffsetsRetentionMinutesProp = "offsets.retention.minutes"
  val OffsetsRetentionCheckIntervalMsProp = "offsets.retention.check.interval.ms"
  val OffsetCommitTimeoutMsProp = "offsets.commit.timeout.ms"
  val OffsetCommitRequiredAcksProp = "offsets.commit.required.acks"
  /** ********* Quota Configuration ***********/
  val ProducerQuotaBytesPerSecondDefaultProp = "quota.producer.default"
  val ConsumerQuotaBytesPerSecondDefaultProp = "quota.consumer.default"
  val NumQuotaSamplesProp = "quota.window.num"
  val QuotaWindowSizeSecondsProp = "quota.window.size.seconds"
  val DeleteTopicEnableProp = "delete.topic.enable"
  val CompressionTypeProp = "compression.type"
  /** ********* Kafka Metrics Configuration ***********/
  // Metrics, SSL and SASL keys below are likewise shared with the Java clients
  // via the kafka-clients constants classes.
  val MetricSampleWindowMsProp = CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_CONFIG
  val MetricNumSamplesProp: String = CommonClientConfigs.METRICS_NUM_SAMPLES_CONFIG
  val MetricReporterClassesProp: String = CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG
  /** ********* SSL Configuration ****************/
  val PrincipalBuilderClassProp = SslConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG
  val SslProtocolProp = SslConfigs.SSL_PROTOCOL_CONFIG
  val SslProviderProp = SslConfigs.SSL_PROVIDER_CONFIG
  val SslCipherSuitesProp = SslConfigs.SSL_CIPHER_SUITES_CONFIG
  val SslEnabledProtocolsProp = SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG
  val SslKeystoreTypeProp = SslConfigs.SSL_KEYSTORE_TYPE_CONFIG
  val SslKeystoreLocationProp = SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG
  val SslKeystorePasswordProp = SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG
  val SslKeyPasswordProp = SslConfigs.SSL_KEY_PASSWORD_CONFIG
  val SslTruststoreTypeProp = SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG
  val SslTruststoreLocationProp = SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG
  val SslTruststorePasswordProp = SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG
  val SslKeyManagerAlgorithmProp = SslConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG
  val SslTrustManagerAlgorithmProp = SslConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG
  val SslEndpointIdentificationAlgorithmProp = SslConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG
  val SslClientAuthProp = SslConfigs.SSL_CLIENT_AUTH_CONFIG
  /** ********* SASL Configuration ****************/
  val SaslKerberosServiceNameProp = SaslConfigs.SASL_KERBEROS_SERVICE_NAME
  val SaslKerberosKinitCmdProp = SaslConfigs.SASL_KERBEROS_KINIT_CMD
  val SaslKerberosTicketRenewWindowFactorProp = SaslConfigs.SASL_KERBEROS_TICKET_RENEW_WINDOW_FACTOR
  val SaslKerberosTicketRenewJitterProp = SaslConfigs.SASL_KERBEROS_TICKET_RENEW_JITTER
  val SaslKerberosMinTimeBeforeReloginProp = SaslConfigs.SASL_KERBEROS_MIN_TIME_BEFORE_RELOGIN
  val SaslKerberosPrincipalToLocalRulesProp = SaslConfigs.SASL_KERBEROS_PRINCIPAL_TO_LOCAL_RULES
/* Documentation */
/** ********* Zookeeper Configuration ***********/
val ZkConnectDoc = "Zookeeper host string"
val ZkSessionTimeoutMsDoc = "Zookeeper session timeout"
val ZkConnectionTimeoutMsDoc = "The max time that the client waits to establish a connection to zookeeper. If not set, the value in " + ZkSessionTimeoutMsProp + " is used"
val ZkSyncTimeMsDoc = "How far a ZK follower can be behind a ZK leader"
val ZkEnableSecureAclsDoc = "Set client to use secure ACLs"
/** ********* General Configuration ***********/
val MaxReservedBrokerIdDoc = "Max number that can be used for a broker.id"
val BrokerIdDoc = "The broker id for this server. If unset, a unique broker id will be generated." +
"To avoid conflicts between zookeeper generated broker id's and user configured broker id's, generated broker ids" +
"start from " + MaxReservedBrokerIdProp + " + 1."
val MessageMaxBytesDoc = "The maximum size of message that the server can receive"
val NumNetworkThreadsDoc = "the number of network threads that the server uses for handling network requests"
val NumIoThreadsDoc = "The number of io threads that the server uses for carrying out network requests"
val BackgroundThreadsDoc = "The number of threads to use for various background processing tasks"
val QueuedMaxRequestsDoc = "The number of queued requests allowed before blocking the network threads"
val RequestTimeoutMsDoc = CommonClientConfigs.REQUEST_TIMEOUT_MS_DOC
/************* Authorizer Configuration ***********/
val AuthorizerClassNameDoc = "The authorizer class that should be used for authorization"
/** ********* Socket Server Configuration ***********/
val PortDoc = "the port to listen and accept connections on"
val HostNameDoc = "hostname of broker. If this is set, it will only bind to this address. If this is not set, it will bind to all interfaces"
val ListenersDoc = "Listener List - Comma-separated list of URIs we will listen on and their protocols.\\n" +
" Specify hostname as 0.0.0.0 to bind to all interfaces.\\n" +
" Leave hostname empty to bind to default interface.\\n" +
" Examples of legal listener lists:\\n" +
" PLAINTEXT://myhost:9092,TRACE://:9091\\n" +
" PLAINTEXT://0.0.0.0:9092, TRACE://localhost:9093\\n"
val AdvertisedHostNameDoc = "Hostname to publish to ZooKeeper for clients to use. In IaaS environments, this may " +
"need to be different from the interface to which the broker binds. If this is not set, " +
"it will use the value for \\"host.name\\" if configured. Otherwise " +
"it will use the value returned from java.net.InetAddress.getCanonicalHostName()."
val AdvertisedPortDoc = "The port to publish to ZooKeeper for clients to use. In IaaS environments, this may " +
"need to be different from the port to which the broker binds. If this is not set, " +
"it will publish the same port that the broker binds to."
val AdvertisedListenersDoc = "Listeners to publish to ZooKeeper for clients to use, if different than the listeners above." +
" In IaaS environments, this may need to be different from the interface to which the broker binds." +
" If this is not set, the value for \\"listeners\\" will be used."
val SocketSendBufferBytesDoc = "The SO_SNDBUF buffer of the socket sever sockets"
val SocketReceiveBufferBytesDoc = "The SO_RCVBUF buffer of the socket sever sockets"
val SocketRequestMaxBytesDoc = "The maximum number of bytes in a socket request"
val MaxConnectionsPerIpDoc = "The maximum number of connections we allow from each ip address"
val MaxConnectionsPerIpOverridesDoc = "Per-ip or hostname overrides to the default maximum number of connections"
val ConnectionsMaxIdleMsDoc = "Idle connections timeout: the server socket processor threads close the connections that idle more than this"
/** ********* Log Configuration ***********/
val NumPartitionsDoc = "The default number of log partitions per topic"
val LogDirDoc = "The directory in which the log data is kept (supplemental for " + LogDirsProp + " property)"
val LogDirsDoc = "The directories in which the log data is kept. If not set, the value in " + LogDirProp + " is used"
val LogSegmentBytesDoc = "The maximum size of a single log file"
val LogRollTimeMillisDoc = "The maximum time before a new log segment is rolled out (in milliseconds). If not set, the value in " + LogRollTimeHoursProp + " is used"
val LogRollTimeHoursDoc = "The maximum time before a new log segment is rolled out (in hours), secondary to " + LogRollTimeMillisProp + " property"
val LogRollTimeJitterMillisDoc = "The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in " + LogRollTimeJitterHoursProp + " is used"
val LogRollTimeJitterHoursDoc = "The maximum jitter to subtract from logRollTimeMillis (in hours), secondary to " + LogRollTimeJitterMillisProp + " property"
val LogRetentionTimeMillisDoc = "The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in " + LogRetentionTimeMinutesProp + " is used"
val LogRetentionTimeMinsDoc = "The number of minutes to keep a log file before deleting it (in minutes), secondary to " + LogRetentionTimeMillisProp + " property. If not set, the value in " + LogRetentionTimeHoursProp + " is used"
val LogRetentionTimeHoursDoc = "The number of hours to keep a log file before deleting it (in hours), tertiary to " + LogRetentionTimeMillisProp + " property"
val LogRetentionBytesDoc = "The maximum size of the log before deleting it"
val LogCleanupIntervalMsDoc = "The frequency in milliseconds that the log cleaner checks whether any log is eligible for deletion"
val LogCleanupPolicyDoc = "The default cleanup policy for segments beyond the retention window, must be either \\"delete\\" or \\"compact\\""
val LogCleanerThreadsDoc = "The number of background threads to use for log cleaning"
val LogCleanerIoMaxBytesPerSecondDoc = "The log cleaner will be throttled so that the sum of its read and write i/o will be less than this value on average"
val LogCleanerDedupeBufferSizeDoc = "The total memory used for log deduplication across all cleaner threads"
val LogCleanerIoBufferSizeDoc = "The total memory used for log cleaner I/O buffers across all cleaner threads"
val LogCleanerDedupeBufferLoadFactorDoc = "Log cleaner dedupe buffer load factor. The percentage full the dedupe buffer can become. A higher value " +
"will allow more log to be cleaned at once but will lead to more hash collisions"
val LogCleanerBackoffMsDoc = "The amount of time to sleep when there are no logs to clean"
val LogCleanerMinCleanRatioDoc = "The minimum ratio of dirty log to total log for a log to eligible for cleaning"
val LogCleanerEnableDoc = "Should we enable log cleaning?"
val LogCleanerDeleteRetentionMsDoc = "How long are delete records retained?"
val LogIndexSizeMaxBytesDoc = "The maximum size in bytes of the offset index"
val LogIndexIntervalBytesDoc = "The interval with which we add an entry to the offset index"
val LogFlushIntervalMessagesDoc = "The number of messages accumulated on a log partition before messages are flushed to disk "
val LogDeleteDelayMsDoc = "The amount of time to wait before deleting a file from the filesystem"
val LogFlushSchedulerIntervalMsDoc = "The frequency in ms that the log flusher checks whether any log needs to be flushed to disk"
val LogFlushIntervalMsDoc = "The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in " + LogFlushSchedulerIntervalMsProp + " is used"
val LogFlushOffsetCheckpointIntervalMsDoc = "The frequency with which we update the persistent record of the last flush which acts as the log recovery point"
val LogPreAllocateEnableDoc = "Should pre allocate file when create new segment? If you are using Kafka on Windows, you probably need to set it to true."
val NumRecoveryThreadsPerDataDirDoc = "The number of threads per data directory to be used for log recovery at startup and flushing at shutdown"
val AutoCreateTopicsEnableDoc = "Enable auto creation of topic on the server"
val MinInSyncReplicasDoc = "define the minimum number of replicas in ISR needed to satisfy a produce request with required.acks=-1 (or all)"
/** ********* Replication configuration ***********/
val ControllerSocketTimeoutMsDoc = "The socket timeout for controller-to-broker channels"
val ControllerMessageQueueSizeDoc = "The buffer size for controller-to-broker-channels"
val DefaultReplicationFactorDoc = "default replication factors for automatically created topics"
val ReplicaLagTimeMaxMsDoc = "If a follower hasn't sent any fetch requests or hasn't consumed up to the leaders log end offset for at least this time," +
" the leader will remove the follower from isr"
val ReplicaSocketTimeoutMsDoc = "The socket timeout for network requests. Its value should be at least replica.fetch.wait.max.ms"
val ReplicaSocketReceiveBufferBytesDoc = "The socket receive buffer for network requests"
val ReplicaFetchMaxBytesDoc = "The number of byes of messages to attempt to fetch"
val ReplicaFetchWaitMaxMsDoc = "max wait time for each fetcher request issued by follower replicas. This value should always be less than the " +
"replica.lag.time.max.ms at all times to prevent frequent shrinking of ISR for low throughput topics"
val ReplicaFetchMinBytesDoc = "Minimum bytes expected for each fetch response. If not enough bytes, wait up to replicaMaxWaitTimeMs"
val NumReplicaFetchersDoc = "Number of fetcher threads used to replicate messages from a source broker. " +
"Increasing this value can increase the degree of I/O parallelism in the follower broker."
val ReplicaFetchBackoffMsDoc = "The amount of time to sleep when fetch partition error occurs."
val ReplicaHighWatermarkCheckpointIntervalMsDoc = "The frequency with which the high watermark is saved out to disk"
val FetchPurgatoryPurgeIntervalRequestsDoc = "The purge interval (in number of requests) of the fetch request purgatory"
val ProducerPurgatoryPurgeIntervalRequestsDoc = "The purge interval (in number of requests) of the producer request purgatory"
val AutoLeaderRebalanceEnableDoc = "Enables auto leader balancing. A background thread checks and triggers leader balance if required at regular intervals"
val LeaderImbalancePerBrokerPercentageDoc = "The ratio of leader imbalance allowed per broker. The controller would trigger a leader balance if it goes above this value per broker. The value is specified in percentage."
val LeaderImbalanceCheckIntervalSecondsDoc = "The frequency with which the partition rebalance check is triggered by the controller"
val UncleanLeaderElectionEnableDoc = "Indicates whether to enable replicas not in the ISR set to be elected as leader as a last resort, even though doing so may result in data loss"
val InterBrokerSecurityProtocolDoc = "Security protocol used to communicate between brokers. Defaults to plain text."
val InterBrokerProtocolVersionDoc = "Specify which version of the inter-broker protocol will be used.\\n" +
" This is typically bumped after all brokers were upgraded to a new version.\\n" +
" Example of some valid values are: 0.8.0, 0.8.1, 0.8.1.1, 0.8.2, 0.8.2.0, 0.8.2.1, 0.9.0.0, 0.9.0.1 Check ApiVersion for the full list."
/** ********* Controlled shutdown configuration ***********/
val ControlledShutdownMaxRetriesDoc = "Controlled shutdown can fail for multiple reasons. This determines the number of retries when such failure happens"
val ControlledShutdownRetryBackoffMsDoc = "Before each retry, the system needs time to recover from the state that caused the previous failure (Controller fail over, replica lag etc). This config determines the amount of time to wait before retrying."
val ControlledShutdownEnableDoc = "Enable controlled shutdown of the server"
/** ********* Consumer coordinator configuration ***********/
val ConsumerMinSessionTimeoutMsDoc = "The minimum allowed session timeout for registered consumers"
val ConsumerMaxSessionTimeoutMsDoc = "The maximum allowed session timeout for registered consumers"
/** ********* Offset management configuration ***********/
val OffsetMetadataMaxSizeDoc = "The maximum size for a metadata entry associated with an offset commit"
val OffsetsLoadBufferSizeDoc = "Batch size for reading from the offsets segments when loading offsets into the cache."
val OffsetsTopicReplicationFactorDoc = "The replication factor for the offsets topic (set higher to ensure availability). " +
"To ensure that the effective replication factor of the offsets topic is the configured value, " +
"the number of alive brokers has to be at least the replication factor at the time of the " +
"first request for the offsets topic. If not, either the offsets topic creation will fail or " +
"it will get a replication factor of min(alive brokers, configured replication factor)"
val OffsetsTopicPartitionsDoc = "The number of partitions for the offset commit topic (should not change after deployment)"
val OffsetsTopicSegmentBytesDoc = "The offsets topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads"
val OffsetsTopicCompressionCodecDoc = "Compression codec for the offsets topic - compression may be used to achieve \\"atomic\\" commits"
val OffsetsRetentionMinutesDoc = "Log retention window in minutes for offsets topic"
val OffsetsRetentionCheckIntervalMsDoc = "Frequency at which to check for stale offsets"
val OffsetCommitTimeoutMsDoc = "Offset commit will be delayed until all replicas for the offsets topic receive the commit " +
"or this timeout is reached. This is similar to the producer request timeout."
val OffsetCommitRequiredAcksDoc = "The required acks before the commit can be accepted. In general, the default (-1) should not be overridden"
/** ********* Quota Configuration ***********/
val ProducerQuotaBytesPerSecondDefaultDoc = "Any producer distinguished by clientId will get throttled if it produces more bytes than this value per-second"
val ConsumerQuotaBytesPerSecondDefaultDoc = "Any consumer distinguished by clientId/consumer group will get throttled if it fetches more bytes than this value per-second"
val NumQuotaSamplesDoc = "The number of samples to retain in memory"
val QuotaWindowSizeSecondsDoc = "The time span of each sample"
val DeleteTopicEnableDoc = "Enables delete topic. Delete topic through the admin tool will have no effect if this config is turned off"
val CompressionTypeDoc = "Specify the final compression type for a given topic. This configuration accepts the standard compression codecs " +
"('gzip', 'snappy', lz4). It additionally accepts 'uncompressed' which is equivalent to no compression; and " +
"'producer' which means retain the original compression codec set by the producer."
/** ********* Kafka Metrics Configuration ***********/
val MetricSampleWindowMsDoc = CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_DOC
val MetricNumSamplesDoc = CommonClientConfigs.METRICS_NUM_SAMPLES_DOC
val MetricReporterClassesDoc = CommonClientConfigs.METRIC_REPORTER_CLASSES_DOC
/** ********* SSL Configuration ****************/
val PrincipalBuilderClassDoc = SslConfigs.PRINCIPAL_BUILDER_CLASS_DOC
val SslProtocolDoc = SslConfigs.SSL_PROTOCOL_DOC
val SslProviderDoc = SslConfigs.SSL_PROVIDER_DOC
val SslCipherSuitesDoc = SslConfigs.SSL_CIPHER_SUITES_DOC
val SslEnabledProtocolsDoc = SslConfigs.SSL_ENABLED_PROTOCOLS_DOC
val SslKeystoreTypeDoc = SslConfigs.SSL_KEYSTORE_TYPE_DOC
val SslKeystoreLocationDoc = SslConfigs.SSL_KEYSTORE_LOCATION_DOC
val SslKeystorePasswordDoc = SslConfigs.SSL_KEYSTORE_PASSWORD_DOC
val SslKeyPasswordDoc = SslConfigs.SSL_KEY_PASSWORD_DOC
val SslTruststoreTypeDoc = SslConfigs.SSL_TRUSTSTORE_TYPE_DOC
val SslTruststorePasswordDoc = SslConfigs.SSL_TRUSTSTORE_PASSWORD_DOC
val SslTruststoreLocationDoc = SslConfigs.SSL_TRUSTSTORE_LOCATION_DOC
val SslKeyManagerAlgorithmDoc = SslConfigs.SSL_KEYMANAGER_ALGORITHM_DOC
val SslTrustManagerAlgorithmDoc = SslConfigs.SSL_TRUSTMANAGER_ALGORITHM_DOC
val SslEndpointIdentificationAlgorithmDoc = SslConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_DOC
val SslClientAuthDoc = SslConfigs.SSL_CLIENT_AUTH_DOC
/** ********* Sasl Configuration ****************/
val SaslKerberosServiceNameDoc = SaslConfigs.SASL_KERBEROS_SERVICE_NAME_DOC
val SaslKerberosKinitCmdDoc = SaslConfigs.SASL_KERBEROS_KINIT_CMD_DOC
val SaslKerberosTicketRenewWindowFactorDoc = SaslConfigs.SASL_KERBEROS_TICKET_RENEW_WINDOW_FACTOR_DOC
val SaslKerberosTicketRenewJitterDoc = SaslConfigs.SASL_KERBEROS_TICKET_RENEW_JITTER_DOC
val SaslKerberosMinTimeBeforeReloginDoc = SaslConfigs.SASL_KERBEROS_MIN_TIME_BEFORE_RELOGIN_DOC
val SaslKerberosPrincipalToLocalRulesDoc = SaslConfigs.SASL_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_DOC
// Single registry of every broker configuration property. Each .define call
// supplies: property name, type, (optional) default, (optional) validator,
// importance, and a documentation string. AbstractConfig uses this definition
// for parsing, defaulting and validation of the supplied Properties.
private val configDef = {
  import ConfigDef.Importance._
  import ConfigDef.Range._
  import ConfigDef.Type._
  import ConfigDef.ValidString._

  new ConfigDef()

    /** ********* Zookeeper Configuration ***********/
    .define(ZkConnectProp, STRING, HIGH, ZkConnectDoc)
    .define(ZkSessionTimeoutMsProp, INT, Defaults.ZkSessionTimeoutMs, HIGH, ZkSessionTimeoutMsDoc)
    // null default: resolved at read time to the session timeout
    // (see KafkaConfig.zkConnectionTimeoutMs).
    .define(ZkConnectionTimeoutMsProp, INT, null, HIGH, ZkConnectionTimeoutMsDoc)
    .define(ZkSyncTimeMsProp, INT, Defaults.ZkSyncTimeMs, LOW, ZkSyncTimeMsDoc)
    .define(ZkEnableSecureAclsProp, BOOLEAN, Defaults.ZkEnableSecureAcls, HIGH, ZkEnableSecureAclsDoc)

    /** ********* General Configuration ***********/
    // NOTE(review): the final argument is MaxReservedBrokerIdProp (the property
    // *name*) while every other .define in this chain passes a ...Doc string;
    // this looks like it should be MaxReservedBrokerIdDoc — confirm before changing.
    .define(MaxReservedBrokerIdProp, INT, Defaults.MaxReservedBrokerId, atLeast(0), MEDIUM, MaxReservedBrokerIdProp)
    .define(BrokerIdProp, INT, Defaults.BrokerId, HIGH, BrokerIdDoc)
    .define(MessageMaxBytesProp, INT, Defaults.MessageMaxBytes, atLeast(0), HIGH, MessageMaxBytesDoc)
    .define(NumNetworkThreadsProp, INT, Defaults.NumNetworkThreads, atLeast(1), HIGH, NumNetworkThreadsDoc)
    .define(NumIoThreadsProp, INT, Defaults.NumIoThreads, atLeast(1), HIGH, NumIoThreadsDoc)
    .define(BackgroundThreadsProp, INT, Defaults.BackgroundThreads, atLeast(1), HIGH, BackgroundThreadsDoc)
    .define(QueuedMaxRequestsProp, INT, Defaults.QueuedMaxRequests, atLeast(1), HIGH, QueuedMaxRequestsDoc)
    .define(RequestTimeoutMsProp, INT, Defaults.RequestTimeoutMs, HIGH, RequestTimeoutMsDoc)

    /************* Authorizer Configuration ***********/
    .define(AuthorizerClassNameProp, STRING, Defaults.AuthorizerClassName, LOW, AuthorizerClassNameDoc)

    /** ********* Socket Server Configuration ***********/
    .define(PortProp, INT, Defaults.Port, HIGH, PortDoc)
    .define(HostNameProp, STRING, Defaults.HostName, HIGH, HostNameDoc)
    // listeners / advertised.* default to null and are resolved with
    // backward-compatible fallbacks in KafkaConfig.getListeners /
    // getAdvertisedListeners.
    .define(ListenersProp, STRING, null, HIGH, ListenersDoc)
    .define(AdvertisedHostNameProp, STRING, null, HIGH, AdvertisedHostNameDoc)
    .define(AdvertisedPortProp, INT, null, HIGH, AdvertisedPortDoc)
    .define(AdvertisedListenersProp, STRING, null, HIGH, AdvertisedListenersDoc)
    .define(SocketSendBufferBytesProp, INT, Defaults.SocketSendBufferBytes, HIGH, SocketSendBufferBytesDoc)
    .define(SocketReceiveBufferBytesProp, INT, Defaults.SocketReceiveBufferBytes, HIGH, SocketReceiveBufferBytesDoc)
    .define(SocketRequestMaxBytesProp, INT, Defaults.SocketRequestMaxBytes, atLeast(1), HIGH, SocketRequestMaxBytesDoc)
    .define(MaxConnectionsPerIpProp, INT, Defaults.MaxConnectionsPerIp, atLeast(1), MEDIUM, MaxConnectionsPerIpDoc)
    .define(MaxConnectionsPerIpOverridesProp, STRING, Defaults.MaxConnectionsPerIpOverrides, MEDIUM, MaxConnectionsPerIpOverridesDoc)
    .define(ConnectionsMaxIdleMsProp, LONG, Defaults.ConnectionsMaxIdleMs, MEDIUM, ConnectionsMaxIdleMsDoc)

    /** ********* Log Configuration ***********/
    .define(NumPartitionsProp, INT, Defaults.NumPartitions, atLeast(1), MEDIUM, NumPartitionsDoc)
    .define(LogDirProp, STRING, Defaults.LogDir, HIGH, LogDirDoc)
    .define(LogDirsProp, STRING, null, HIGH, LogDirsDoc)
    .define(LogSegmentBytesProp, INT, Defaults.LogSegmentBytes, atLeast(Message.MinHeaderSize), HIGH, LogSegmentBytesDoc)
    // For roll/retention times, the ms property takes precedence and the
    // hours/minutes variants act as fallbacks (see the KafkaConfig accessors).
    .define(LogRollTimeMillisProp, LONG, null, HIGH, LogRollTimeMillisDoc)
    .define(LogRollTimeHoursProp, INT, Defaults.LogRollHours, atLeast(1), HIGH, LogRollTimeHoursDoc)
    .define(LogRollTimeJitterMillisProp, LONG, null, HIGH, LogRollTimeJitterMillisDoc)
    .define(LogRollTimeJitterHoursProp, INT, Defaults.LogRollJitterHours, atLeast(0), HIGH, LogRollTimeJitterHoursDoc)
    .define(LogRetentionTimeMillisProp, LONG, null, HIGH, LogRetentionTimeMillisDoc)
    .define(LogRetentionTimeMinutesProp, INT, null, HIGH, LogRetentionTimeMinsDoc)
    .define(LogRetentionTimeHoursProp, INT, Defaults.LogRetentionHours, HIGH, LogRetentionTimeHoursDoc)
    .define(LogRetentionBytesProp, LONG, Defaults.LogRetentionBytes, HIGH, LogRetentionBytesDoc)
    .define(LogCleanupIntervalMsProp, LONG, Defaults.LogCleanupIntervalMs, atLeast(1), MEDIUM, LogCleanupIntervalMsDoc)
    .define(LogCleanupPolicyProp, STRING, Defaults.LogCleanupPolicy, in(Defaults.Compact, Defaults.Delete), MEDIUM, LogCleanupPolicyDoc)
    .define(LogCleanerThreadsProp, INT, Defaults.LogCleanerThreads, atLeast(0), MEDIUM, LogCleanerThreadsDoc)
    .define(LogCleanerIoMaxBytesPerSecondProp, DOUBLE, Defaults.LogCleanerIoMaxBytesPerSecond, MEDIUM, LogCleanerIoMaxBytesPerSecondDoc)
    .define(LogCleanerDedupeBufferSizeProp, LONG, Defaults.LogCleanerDedupeBufferSize, MEDIUM, LogCleanerDedupeBufferSizeDoc)
    .define(LogCleanerIoBufferSizeProp, INT, Defaults.LogCleanerIoBufferSize, atLeast(0), MEDIUM, LogCleanerIoBufferSizeDoc)
    .define(LogCleanerDedupeBufferLoadFactorProp, DOUBLE, Defaults.LogCleanerDedupeBufferLoadFactor, MEDIUM, LogCleanerDedupeBufferLoadFactorDoc)
    .define(LogCleanerBackoffMsProp, LONG, Defaults.LogCleanerBackoffMs, atLeast(0), MEDIUM, LogCleanerBackoffMsDoc)
    .define(LogCleanerMinCleanRatioProp, DOUBLE, Defaults.LogCleanerMinCleanRatio, MEDIUM, LogCleanerMinCleanRatioDoc)
    .define(LogCleanerEnableProp, BOOLEAN, Defaults.LogCleanerEnable, MEDIUM, LogCleanerEnableDoc)
    .define(LogCleanerDeleteRetentionMsProp, LONG, Defaults.LogCleanerDeleteRetentionMs, MEDIUM, LogCleanerDeleteRetentionMsDoc)
    .define(LogIndexSizeMaxBytesProp, INT, Defaults.LogIndexSizeMaxBytes, atLeast(4), MEDIUM, LogIndexSizeMaxBytesDoc)
    .define(LogIndexIntervalBytesProp, INT, Defaults.LogIndexIntervalBytes, atLeast(0), MEDIUM, LogIndexIntervalBytesDoc)
    .define(LogFlushIntervalMessagesProp, LONG, Defaults.LogFlushIntervalMessages, atLeast(1), HIGH, LogFlushIntervalMessagesDoc)
    .define(LogDeleteDelayMsProp, LONG, Defaults.LogDeleteDelayMs, atLeast(0), HIGH, LogDeleteDelayMsDoc)
    .define(LogFlushSchedulerIntervalMsProp, LONG, Defaults.LogFlushSchedulerIntervalMs, HIGH, LogFlushSchedulerIntervalMsDoc)
    .define(LogFlushIntervalMsProp, LONG, null, HIGH, LogFlushIntervalMsDoc)
    .define(LogFlushOffsetCheckpointIntervalMsProp, INT, Defaults.LogFlushOffsetCheckpointIntervalMs, atLeast(0), HIGH, LogFlushOffsetCheckpointIntervalMsDoc)
    .define(LogPreAllocateProp, BOOLEAN, Defaults.LogPreAllocateEnable, MEDIUM, LogPreAllocateEnableDoc)
    .define(NumRecoveryThreadsPerDataDirProp, INT, Defaults.NumRecoveryThreadsPerDataDir, atLeast(1), HIGH, NumRecoveryThreadsPerDataDirDoc)
    .define(AutoCreateTopicsEnableProp, BOOLEAN, Defaults.AutoCreateTopicsEnable, HIGH, AutoCreateTopicsEnableDoc)
    .define(MinInSyncReplicasProp, INT, Defaults.MinInSyncReplicas, atLeast(1), HIGH, MinInSyncReplicasDoc)

    /** ********* Replication configuration ***********/
    .define(ControllerSocketTimeoutMsProp, INT, Defaults.ControllerSocketTimeoutMs, MEDIUM, ControllerSocketTimeoutMsDoc)
    .define(DefaultReplicationFactorProp, INT, Defaults.DefaultReplicationFactor, MEDIUM, DefaultReplicationFactorDoc)
    .define(ReplicaLagTimeMaxMsProp, LONG, Defaults.ReplicaLagTimeMaxMs, HIGH, ReplicaLagTimeMaxMsDoc)
    .define(ReplicaSocketTimeoutMsProp, INT, Defaults.ReplicaSocketTimeoutMs, HIGH, ReplicaSocketTimeoutMsDoc)
    .define(ReplicaSocketReceiveBufferBytesProp, INT, Defaults.ReplicaSocketReceiveBufferBytes, HIGH, ReplicaSocketReceiveBufferBytesDoc)
    .define(ReplicaFetchMaxBytesProp, INT, Defaults.ReplicaFetchMaxBytes, HIGH, ReplicaFetchMaxBytesDoc)
    .define(ReplicaFetchWaitMaxMsProp, INT, Defaults.ReplicaFetchWaitMaxMs, HIGH, ReplicaFetchWaitMaxMsDoc)
    .define(ReplicaFetchBackoffMsProp, INT, Defaults.ReplicaFetchBackoffMs, atLeast(0), MEDIUM, ReplicaFetchBackoffMsDoc)
    .define(ReplicaFetchMinBytesProp, INT, Defaults.ReplicaFetchMinBytes, HIGH, ReplicaFetchMinBytesDoc)
    .define(NumReplicaFetchersProp, INT, Defaults.NumReplicaFetchers, HIGH, NumReplicaFetchersDoc)
    .define(ReplicaHighWatermarkCheckpointIntervalMsProp, LONG, Defaults.ReplicaHighWatermarkCheckpointIntervalMs, HIGH, ReplicaHighWatermarkCheckpointIntervalMsDoc)
    .define(FetchPurgatoryPurgeIntervalRequestsProp, INT, Defaults.FetchPurgatoryPurgeIntervalRequests, MEDIUM, FetchPurgatoryPurgeIntervalRequestsDoc)
    .define(ProducerPurgatoryPurgeIntervalRequestsProp, INT, Defaults.ProducerPurgatoryPurgeIntervalRequests, MEDIUM, ProducerPurgatoryPurgeIntervalRequestsDoc)
    .define(AutoLeaderRebalanceEnableProp, BOOLEAN, Defaults.AutoLeaderRebalanceEnable, HIGH, AutoLeaderRebalanceEnableDoc)
    .define(LeaderImbalancePerBrokerPercentageProp, INT, Defaults.LeaderImbalancePerBrokerPercentage, HIGH, LeaderImbalancePerBrokerPercentageDoc)
    .define(LeaderImbalanceCheckIntervalSecondsProp, LONG, Defaults.LeaderImbalanceCheckIntervalSeconds, HIGH, LeaderImbalanceCheckIntervalSecondsDoc)
    .define(UncleanLeaderElectionEnableProp, BOOLEAN, Defaults.UncleanLeaderElectionEnable, HIGH, UncleanLeaderElectionEnableDoc)
    .define(InterBrokerSecurityProtocolProp, STRING, Defaults.InterBrokerSecurityProtocol, MEDIUM, InterBrokerSecurityProtocolDoc)
    .define(InterBrokerProtocolVersionProp, STRING, Defaults.InterBrokerProtocolVersion, MEDIUM, InterBrokerProtocolVersionDoc)

    /** ********* Controlled shutdown configuration ***********/
    .define(ControlledShutdownMaxRetriesProp, INT, Defaults.ControlledShutdownMaxRetries, MEDIUM, ControlledShutdownMaxRetriesDoc)
    .define(ControlledShutdownRetryBackoffMsProp, LONG, Defaults.ControlledShutdownRetryBackoffMs, MEDIUM, ControlledShutdownRetryBackoffMsDoc)
    .define(ControlledShutdownEnableProp, BOOLEAN, Defaults.ControlledShutdownEnable, MEDIUM, ControlledShutdownEnableDoc)

    /** ********* Consumer coordinator configuration ***********/
    .define(GroupMinSessionTimeoutMsProp, INT, Defaults.ConsumerMinSessionTimeoutMs, MEDIUM, ConsumerMinSessionTimeoutMsDoc)
    .define(GroupMaxSessionTimeoutMsProp, INT, Defaults.ConsumerMaxSessionTimeoutMs, MEDIUM, ConsumerMaxSessionTimeoutMsDoc)

    /** ********* Offset management configuration ***********/
    .define(OffsetMetadataMaxSizeProp, INT, Defaults.OffsetMetadataMaxSize, HIGH, OffsetMetadataMaxSizeDoc)
    .define(OffsetsLoadBufferSizeProp, INT, Defaults.OffsetsLoadBufferSize, atLeast(1), HIGH, OffsetsLoadBufferSizeDoc)
    .define(OffsetsTopicReplicationFactorProp, SHORT, Defaults.OffsetsTopicReplicationFactor, atLeast(1), HIGH, OffsetsTopicReplicationFactorDoc)
    .define(OffsetsTopicPartitionsProp, INT, Defaults.OffsetsTopicPartitions, atLeast(1), HIGH, OffsetsTopicPartitionsDoc)
    .define(OffsetsTopicSegmentBytesProp, INT, Defaults.OffsetsTopicSegmentBytes, atLeast(1), HIGH, OffsetsTopicSegmentBytesDoc)
    .define(OffsetsTopicCompressionCodecProp, INT, Defaults.OffsetsTopicCompressionCodec, HIGH, OffsetsTopicCompressionCodecDoc)
    .define(OffsetsRetentionMinutesProp, INT, Defaults.OffsetsRetentionMinutes, atLeast(1), HIGH, OffsetsRetentionMinutesDoc)
    .define(OffsetsRetentionCheckIntervalMsProp, LONG, Defaults.OffsetsRetentionCheckIntervalMs, atLeast(1), HIGH, OffsetsRetentionCheckIntervalMsDoc)
    .define(OffsetCommitTimeoutMsProp, INT, Defaults.OffsetCommitTimeoutMs, atLeast(1), HIGH, OffsetCommitTimeoutMsDoc)
    .define(OffsetCommitRequiredAcksProp, SHORT, Defaults.OffsetCommitRequiredAcks, HIGH, OffsetCommitRequiredAcksDoc)
    .define(DeleteTopicEnableProp, BOOLEAN, Defaults.DeleteTopicEnable, HIGH, DeleteTopicEnableDoc)
    .define(CompressionTypeProp, STRING, Defaults.CompressionType, HIGH, CompressionTypeDoc)

    /** ********* Kafka Metrics Configuration ***********/
    .define(MetricNumSamplesProp, INT, Defaults.MetricNumSamples, atLeast(1), LOW, MetricNumSamplesDoc)
    .define(MetricSampleWindowMsProp, LONG, Defaults.MetricSampleWindowMs, atLeast(1), LOW, MetricSampleWindowMsDoc)
    .define(MetricReporterClassesProp, LIST, Defaults.MetricReporterClasses, LOW, MetricReporterClassesDoc)

    /** ********* Quota configuration ***********/
    .define(ProducerQuotaBytesPerSecondDefaultProp, LONG, Defaults.ProducerQuotaBytesPerSecondDefault, atLeast(1), HIGH, ProducerQuotaBytesPerSecondDefaultDoc)
    .define(ConsumerQuotaBytesPerSecondDefaultProp, LONG, Defaults.ConsumerQuotaBytesPerSecondDefault, atLeast(1), HIGH, ConsumerQuotaBytesPerSecondDefaultDoc)
    .define(NumQuotaSamplesProp, INT, Defaults.NumQuotaSamples, atLeast(1), LOW, NumQuotaSamplesDoc)
    .define(QuotaWindowSizeSecondsProp, INT, Defaults.QuotaWindowSizeSeconds, atLeast(1), LOW, QuotaWindowSizeSecondsDoc)

    /** ********* SSL Configuration ****************/
    .define(PrincipalBuilderClassProp, CLASS, Defaults.PrincipalBuilderClass, MEDIUM, PrincipalBuilderClassDoc)
    .define(SslProtocolProp, STRING, Defaults.SslProtocol, MEDIUM, SslProtocolDoc)
    .define(SslProviderProp, STRING, null, MEDIUM, SslProviderDoc)
    .define(SslEnabledProtocolsProp, LIST, Defaults.SslEnabledProtocols, MEDIUM, SslEnabledProtocolsDoc)
    .define(SslKeystoreTypeProp, STRING, Defaults.SslKeystoreType, MEDIUM, SslKeystoreTypeDoc)
    .define(SslKeystoreLocationProp, STRING, null, MEDIUM, SslKeystoreLocationDoc)
    .define(SslKeystorePasswordProp, PASSWORD, null, MEDIUM, SslKeystorePasswordDoc)
    .define(SslKeyPasswordProp, PASSWORD, null, MEDIUM, SslKeyPasswordDoc)
    .define(SslTruststoreTypeProp, STRING, Defaults.SslTruststoreType, MEDIUM, SslTruststoreTypeDoc)
    .define(SslTruststoreLocationProp, STRING, null, MEDIUM, SslTruststoreLocationDoc)
    .define(SslTruststorePasswordProp, PASSWORD, null, MEDIUM, SslTruststorePasswordDoc)
    .define(SslKeyManagerAlgorithmProp, STRING, Defaults.SslKeyManagerAlgorithm, MEDIUM, SslKeyManagerAlgorithmDoc)
    .define(SslTrustManagerAlgorithmProp, STRING, Defaults.SslTrustManagerAlgorithm, MEDIUM, SslTrustManagerAlgorithmDoc)
    .define(SslEndpointIdentificationAlgorithmProp, STRING, null, LOW, SslEndpointIdentificationAlgorithmDoc)
    .define(SslClientAuthProp, STRING, Defaults.SslClientAuth, in(Defaults.SslClientAuthRequired, Defaults.SslClientAuthRequested, Defaults.SslClientAuthNone), MEDIUM, SslClientAuthDoc)
    .define(SslCipherSuitesProp, LIST, null, MEDIUM, SslCipherSuitesDoc)

    /** ********* Sasl Configuration ****************/
    .define(SaslKerberosServiceNameProp, STRING, null, MEDIUM, SaslKerberosServiceNameDoc)
    .define(SaslKerberosKinitCmdProp, STRING, Defaults.SaslKerberosKinitCmd, MEDIUM, SaslKerberosKinitCmdDoc)
    .define(SaslKerberosTicketRenewWindowFactorProp, DOUBLE, Defaults.SaslKerberosTicketRenewWindowFactor, MEDIUM, SaslKerberosTicketRenewWindowFactorDoc)
    .define(SaslKerberosTicketRenewJitterProp, DOUBLE, Defaults.SaslKerberosTicketRenewJitter, MEDIUM, SaslKerberosTicketRenewJitterDoc)
    .define(SaslKerberosMinTimeBeforeReloginProp, LONG, Defaults.SaslKerberosMinTimeBeforeRelogin, MEDIUM, SaslKerberosMinTimeBeforeReloginDoc)
    .define(SaslKerberosPrincipalToLocalRulesProp, LIST, Defaults.SaslKerberosPrincipalToLocalRules, MEDIUM, SaslKerberosPrincipalToLocalRulesDoc)
}
/**
 * Returns the names of all defined broker configuration properties,
 * sorted alphabetically.
 */
def configNames(): List[String] = {
  // Use the explicit JavaConverters decorators instead of the deprecated
  // implicit JavaConversions wrappers.
  import scala.collection.JavaConverters._
  configDef.names().asScala.toList.sorted
}
/**
 * Check that property names are valid: every key in `props` must be a
 * configuration name known to [[configDef]].
 *
 * @throws IllegalArgumentException (via `require`) if an unknown property
 *                                  name is present
 */
def validateNames(props: Properties) {
  // Explicit JavaConverters instead of the deprecated implicit JavaConversions.
  import scala.collection.JavaConverters._
  val names = configDef.names()
  for (name <- props.keySet.asScala)
    require(names.contains(name), "Unknown configuration \"%s\".".format(name))
}
/** Builds a [[KafkaConfig]] directly from the given properties. */
def fromProps(props: Properties): KafkaConfig = {
  KafkaConfig(props)
}
/**
 * Builds a [[KafkaConfig]] by layering `overrides` on top of `defaults`:
 * a key present in both takes its value from `overrides`.
 */
def fromProps(defaults: Properties, overrides: Properties): KafkaConfig = {
  val merged = new Properties()
  // Order matters: the later putAll wins, so overrides shadow defaults.
  Seq(defaults, overrides).foreach(merged.putAll(_))
  fromProps(merged)
}
}
/**
 * Typed, validated view of a broker's configuration. Parsing and defaulting
 * are delegated to [[AbstractConfig]] driven by `KafkaConfig.configDef`;
 * every accessor below reads the resolved value once at construction.
 * Cross-property invariants are checked by `validateValues()`, which runs
 * at construction time (see the call near the end of the class body).
 */
case class KafkaConfig (props: java.util.Map[_, _]) extends AbstractConfig(KafkaConfig.configDef, props) {

  /** ********* Zookeeper Configuration ***********/
  val zkConnect: String = getString(KafkaConfig.ZkConnectProp)
  val zkSessionTimeoutMs: Int = getInt(KafkaConfig.ZkSessionTimeoutMsProp)
  // Falls back to the session timeout when no explicit connection timeout is set.
  val zkConnectionTimeoutMs: Int =
    Option(getInt(KafkaConfig.ZkConnectionTimeoutMsProp)).map(_.toInt).getOrElse(getInt(KafkaConfig.ZkSessionTimeoutMsProp))
  val zkSyncTimeMs: Int = getInt(KafkaConfig.ZkSyncTimeMsProp)
  val zkEnableSecureAcls: Boolean = getBoolean(KafkaConfig.ZkEnableSecureAclsProp)

  /** ********* General Configuration ***********/
  val maxReservedBrokerId: Int = getInt(KafkaConfig.MaxReservedBrokerIdProp)
  // NOTE(review): declared `var` — presumably reassigned externally (e.g. when
  // a broker id is generated at startup); confirm with the callers before
  // tightening to `val`.
  var brokerId: Int = getInt(KafkaConfig.BrokerIdProp)
  val numNetworkThreads = getInt(KafkaConfig.NumNetworkThreadsProp)
  val backgroundThreads = getInt(KafkaConfig.BackgroundThreadsProp)
  val queuedMaxRequests = getInt(KafkaConfig.QueuedMaxRequestsProp)
  val numIoThreads = getInt(KafkaConfig.NumIoThreadsProp)
  val messageMaxBytes = getInt(KafkaConfig.MessageMaxBytesProp)
  val requestTimeoutMs = getInt(KafkaConfig.RequestTimeoutMsProp)

  /************* Authorizer Configuration ***********/
  val authorizerClassName: String = getString(KafkaConfig.AuthorizerClassNameProp)

  /** ********* Socket Server Configuration ***********/
  val hostName = getString(KafkaConfig.HostNameProp)
  val port = getInt(KafkaConfig.PortProp)
  // Advertised host/port default to the bind host/port when not configured.
  val advertisedHostName = Option(getString(KafkaConfig.AdvertisedHostNameProp)).getOrElse(hostName)
  val advertisedPort: java.lang.Integer = Option(getInt(KafkaConfig.AdvertisedPortProp)).getOrElse(port)
  val socketSendBufferBytes = getInt(KafkaConfig.SocketSendBufferBytesProp)
  val socketReceiveBufferBytes = getInt(KafkaConfig.SocketReceiveBufferBytesProp)
  val socketRequestMaxBytes = getInt(KafkaConfig.SocketRequestMaxBytesProp)
  val maxConnectionsPerIp = getInt(KafkaConfig.MaxConnectionsPerIpProp)
  // CSV of host:count pairs parsed into a map of per-IP connection limits.
  val maxConnectionsPerIpOverrides: Map[String, Int] =
    getMap(KafkaConfig.MaxConnectionsPerIpOverridesProp, getString(KafkaConfig.MaxConnectionsPerIpOverridesProp)).map { case (k, v) => (k, v.toInt)}
  val connectionsMaxIdleMs = getLong(KafkaConfig.ConnectionsMaxIdleMsProp)

  /** ********* Log Configuration ***********/
  val autoCreateTopicsEnable = getBoolean(KafkaConfig.AutoCreateTopicsEnableProp)
  val numPartitions = getInt(KafkaConfig.NumPartitionsProp)
  // log.dirs (plural, CSV) takes precedence over the singular log.dir.
  val logDirs = CoreUtils.parseCsvList( Option(getString(KafkaConfig.LogDirsProp)).getOrElse(getString(KafkaConfig.LogDirProp)))
  val logSegmentBytes = getInt(KafkaConfig.LogSegmentBytesProp)
  val logFlushIntervalMessages = getLong(KafkaConfig.LogFlushIntervalMessagesProp)
  val logCleanerThreads = getInt(KafkaConfig.LogCleanerThreadsProp)
  val numRecoveryThreadsPerDataDir = getInt(KafkaConfig.NumRecoveryThreadsPerDataDirProp)
  val logFlushSchedulerIntervalMs = getLong(KafkaConfig.LogFlushSchedulerIntervalMsProp)
  // Widened to Long: the property is defined as INT but consumers use millis as Long.
  val logFlushOffsetCheckpointIntervalMs = getInt(KafkaConfig.LogFlushOffsetCheckpointIntervalMsProp).toLong
  val logCleanupIntervalMs = getLong(KafkaConfig.LogCleanupIntervalMsProp)
  val logCleanupPolicy = getString(KafkaConfig.LogCleanupPolicyProp)
  val offsetsRetentionMinutes = getInt(KafkaConfig.OffsetsRetentionMinutesProp)
  val offsetsRetentionCheckIntervalMs = getLong(KafkaConfig.OffsetsRetentionCheckIntervalMsProp)
  val logRetentionBytes = getLong(KafkaConfig.LogRetentionBytesProp)
  val logCleanerDedupeBufferSize = getLong(KafkaConfig.LogCleanerDedupeBufferSizeProp)
  val logCleanerDedupeBufferLoadFactor = getDouble(KafkaConfig.LogCleanerDedupeBufferLoadFactorProp)
  val logCleanerIoBufferSize = getInt(KafkaConfig.LogCleanerIoBufferSizeProp)
  val logCleanerIoMaxBytesPerSecond = getDouble(KafkaConfig.LogCleanerIoMaxBytesPerSecondProp)
  val logCleanerDeleteRetentionMs = getLong(KafkaConfig.LogCleanerDeleteRetentionMsProp)
  val logCleanerBackoffMs = getLong(KafkaConfig.LogCleanerBackoffMsProp)
  val logCleanerMinCleanRatio = getDouble(KafkaConfig.LogCleanerMinCleanRatioProp)
  val logCleanerEnable = getBoolean(KafkaConfig.LogCleanerEnableProp)
  val logIndexSizeMaxBytes = getInt(KafkaConfig.LogIndexSizeMaxBytesProp)
  val logIndexIntervalBytes = getInt(KafkaConfig.LogIndexIntervalBytesProp)
  val logDeleteDelayMs = getLong(KafkaConfig.LogDeleteDelayMsProp)
  // ms-level properties take precedence over the hours-level fallbacks below.
  val logRollTimeMillis: java.lang.Long = Option(getLong(KafkaConfig.LogRollTimeMillisProp)).getOrElse(60 * 60 * 1000L * getInt(KafkaConfig.LogRollTimeHoursProp))
  val logRollTimeJitterMillis: java.lang.Long = Option(getLong(KafkaConfig.LogRollTimeJitterMillisProp)).getOrElse(60 * 60 * 1000L * getInt(KafkaConfig.LogRollTimeJitterHoursProp))
  val logFlushIntervalMs: java.lang.Long = Option(getLong(KafkaConfig.LogFlushIntervalMsProp)).getOrElse(getLong(KafkaConfig.LogFlushSchedulerIntervalMsProp))
  // Resolved from ms/minutes/hours properties; -1 means unlimited retention.
  val logRetentionTimeMillis = getLogRetentionTimeMillis
  val minInSyncReplicas = getInt(KafkaConfig.MinInSyncReplicasProp)
  val logPreAllocateEnable: java.lang.Boolean = getBoolean(KafkaConfig.LogPreAllocateProp)

  /** ********* Replication configuration ***********/
  val controllerSocketTimeoutMs: Int = getInt(KafkaConfig.ControllerSocketTimeoutMsProp)
  val defaultReplicationFactor: Int = getInt(KafkaConfig.DefaultReplicationFactorProp)
  val replicaLagTimeMaxMs = getLong(KafkaConfig.ReplicaLagTimeMaxMsProp)
  val replicaSocketTimeoutMs = getInt(KafkaConfig.ReplicaSocketTimeoutMsProp)
  val replicaSocketReceiveBufferBytes = getInt(KafkaConfig.ReplicaSocketReceiveBufferBytesProp)
  val replicaFetchMaxBytes = getInt(KafkaConfig.ReplicaFetchMaxBytesProp)
  val replicaFetchWaitMaxMs = getInt(KafkaConfig.ReplicaFetchWaitMaxMsProp)
  val replicaFetchMinBytes = getInt(KafkaConfig.ReplicaFetchMinBytesProp)
  val replicaFetchBackoffMs = getInt(KafkaConfig.ReplicaFetchBackoffMsProp)
  val numReplicaFetchers = getInt(KafkaConfig.NumReplicaFetchersProp)
  val replicaHighWatermarkCheckpointIntervalMs = getLong(KafkaConfig.ReplicaHighWatermarkCheckpointIntervalMsProp)
  val fetchPurgatoryPurgeIntervalRequests = getInt(KafkaConfig.FetchPurgatoryPurgeIntervalRequestsProp)
  val producerPurgatoryPurgeIntervalRequests = getInt(KafkaConfig.ProducerPurgatoryPurgeIntervalRequestsProp)
  val autoLeaderRebalanceEnable = getBoolean(KafkaConfig.AutoLeaderRebalanceEnableProp)
  val leaderImbalancePerBrokerPercentage = getInt(KafkaConfig.LeaderImbalancePerBrokerPercentageProp)
  val leaderImbalanceCheckIntervalSeconds = getLong(KafkaConfig.LeaderImbalanceCheckIntervalSecondsProp)
  val uncleanLeaderElectionEnable: java.lang.Boolean = getBoolean(KafkaConfig.UncleanLeaderElectionEnableProp)
  // Both conversions throw if the configured string is not a known value.
  val interBrokerSecurityProtocol = SecurityProtocol.valueOf(getString(KafkaConfig.InterBrokerSecurityProtocolProp))
  val interBrokerProtocolVersion = ApiVersion(getString(KafkaConfig.InterBrokerProtocolVersionProp))

  /** ********* Controlled shutdown configuration ***********/
  val controlledShutdownMaxRetries = getInt(KafkaConfig.ControlledShutdownMaxRetriesProp)
  val controlledShutdownRetryBackoffMs = getLong(KafkaConfig.ControlledShutdownRetryBackoffMsProp)
  val controlledShutdownEnable = getBoolean(KafkaConfig.ControlledShutdownEnableProp)

  /** ********* Group coordinator configuration ***********/
  val groupMinSessionTimeoutMs = getInt(KafkaConfig.GroupMinSessionTimeoutMsProp)
  val groupMaxSessionTimeoutMs = getInt(KafkaConfig.GroupMaxSessionTimeoutMsProp)

  /** ********* Offset management configuration ***********/
  val offsetMetadataMaxSize = getInt(KafkaConfig.OffsetMetadataMaxSizeProp)
  val offsetsLoadBufferSize = getInt(KafkaConfig.OffsetsLoadBufferSizeProp)
  val offsetsTopicReplicationFactor = getShort(KafkaConfig.OffsetsTopicReplicationFactorProp)
  val offsetsTopicPartitions = getInt(KafkaConfig.OffsetsTopicPartitionsProp)
  val offsetCommitTimeoutMs = getInt(KafkaConfig.OffsetCommitTimeoutMsProp)
  val offsetCommitRequiredAcks = getShort(KafkaConfig.OffsetCommitRequiredAcksProp)
  val offsetsTopicSegmentBytes = getInt(KafkaConfig.OffsetsTopicSegmentBytesProp)
  // null when no codec id is configured; otherwise resolved to a CompressionCodec.
  val offsetsTopicCompressionCodec = Option(getInt(KafkaConfig.OffsetsTopicCompressionCodecProp)).map(value => CompressionCodec.getCompressionCodec(value)).orNull

  /** ********* Metric Configuration **************/
  val metricNumSamples = getInt(KafkaConfig.MetricNumSamplesProp)
  val metricSampleWindowMs = getLong(KafkaConfig.MetricSampleWindowMsProp)
  val metricReporterClasses: java.util.List[MetricsReporter] = getConfiguredInstances(KafkaConfig.MetricReporterClassesProp, classOf[MetricsReporter])

  /** ********* SSL Configuration **************/
  val principalBuilderClass = getClass(KafkaConfig.PrincipalBuilderClassProp)
  val sslProtocol = getString(KafkaConfig.SslProtocolProp)
  val sslProvider = getString(KafkaConfig.SslProviderProp)
  val sslEnabledProtocols = getList(KafkaConfig.SslEnabledProtocolsProp)
  val sslKeystoreType = getString(KafkaConfig.SslKeystoreTypeProp)
  val sslKeystoreLocation = getString(KafkaConfig.SslKeystoreLocationProp)
  val sslKeystorePassword = getPassword(KafkaConfig.SslKeystorePasswordProp)
  val sslKeyPassword = getPassword(KafkaConfig.SslKeyPasswordProp)
  val sslTruststoreType = getString(KafkaConfig.SslTruststoreTypeProp)
  val sslTruststoreLocation = getString(KafkaConfig.SslTruststoreLocationProp)
  val sslTruststorePassword = getPassword(KafkaConfig.SslTruststorePasswordProp)
  val sslKeyManagerAlgorithm = getString(KafkaConfig.SslKeyManagerAlgorithmProp)
  val sslTrustManagerAlgorithm = getString(KafkaConfig.SslTrustManagerAlgorithmProp)
  val sslClientAuth = getString(KafkaConfig.SslClientAuthProp)
  // NOTE(review): name drops the "Suites" suffix of the underlying property
  // (ssl.cipher.suites); renaming would break external callers, so left as is.
  val sslCipher = getList(KafkaConfig.SslCipherSuitesProp)

  /** ********* Sasl Configuration **************/
  val saslKerberosServiceName = getString(KafkaConfig.SaslKerberosServiceNameProp)
  val saslKerberosKinitCmd = getString(KafkaConfig.SaslKerberosKinitCmdProp)
  val saslKerberosTicketRenewWindowFactor = getDouble(KafkaConfig.SaslKerberosTicketRenewWindowFactorProp)
  val saslKerberosTicketRenewJitter = getDouble(KafkaConfig.SaslKerberosTicketRenewJitterProp)
  val saslKerberosMinTimeBeforeRelogin = getLong(KafkaConfig.SaslKerberosMinTimeBeforeReloginProp)
  val saslKerberosPrincipalToLocalRules = getList(KafkaConfig.SaslKerberosPrincipalToLocalRulesProp)

  /** ********* Quota Configuration **************/
  val producerQuotaBytesPerSecondDefault = getLong(KafkaConfig.ProducerQuotaBytesPerSecondDefaultProp)
  val consumerQuotaBytesPerSecondDefault = getLong(KafkaConfig.ConsumerQuotaBytesPerSecondDefaultProp)
  val numQuotaSamples = getInt(KafkaConfig.NumQuotaSamplesProp)
  val quotaWindowSizeSeconds = getInt(KafkaConfig.QuotaWindowSizeSecondsProp)

  val deleteTopicEnable = getBoolean(KafkaConfig.DeleteTopicEnableProp)
  val compressionType = getString(KafkaConfig.CompressionTypeProp)

  // Resolved endpoint maps; see getListeners / getAdvertisedListeners below
  // for the fallback rules.
  val listeners = getListeners
  val advertisedListeners = getAdvertisedListeners

  // Resolves log retention: the ms property wins, then minutes, then hours.
  // Any negative configured value is normalized to -1 (unlimited).
  private def getLogRetentionTimeMillis: Long = {
    val millisInMinute = 60L * 1000L
    val millisInHour = 60L * millisInMinute

    val millis: java.lang.Long =
      Option(getLong(KafkaConfig.LogRetentionTimeMillisProp)).getOrElse(
        Option(getInt(KafkaConfig.LogRetentionTimeMinutesProp)) match {
          case Some(mins) => millisInMinute * mins
          case None => getInt(KafkaConfig.LogRetentionTimeHoursProp) * millisInHour
        })

    if (millis < 0) return -1
    millis
  }

  // Parses a CSV "k:v,k:v" property value; wraps parse failures with the
  // offending property name for a clearer error.
  private def getMap(propName: String, propValue: String): Map[String, String] = {
    try {
      CoreUtils.parseCsvMap(propValue)
    } catch {
      case e: Exception => throw new IllegalArgumentException("Error parsing configuration property '%s': %s".format(propName, e.getMessage))
    }
  }

  // Ensures that a listener list has no duplicate ports or protocols.
  // Port 0 (ephemeral, used by unit tests) is exempt from the port check.
  private def validateUniquePortAndProtocol(listeners: String) {

    val endpoints = try {
      val listenerList = CoreUtils.parseCsvList(listeners)
      listenerList.map(listener => EndPoint.createEndPoint(listener))
    } catch {
      case e: Exception => throw new IllegalArgumentException("Error creating broker listeners from '%s': %s".format(listeners, e.getMessage))
    }
    // filter port 0 for unit tests
    val endpointsWithoutZeroPort = endpoints.map(ep => ep.port).filter(_ != 0)
    val distinctPorts = endpointsWithoutZeroPort.distinct
    val distinctProtocols = endpoints.map(ep => ep.protocolType).distinct

    require(distinctPorts.size == endpointsWithoutZeroPort.size, "Each listener must have a different port")
    require(distinctProtocols.size == endpoints.size, "Each listener must have a different protocol")
  }

  // If the user did not define listeners but did define host or port, let's use them in backward compatible way
  // If none of those are defined, we default to PLAINTEXT://:9092
  private def getListeners(): immutable.Map[SecurityProtocol, EndPoint] = {
    if (getString(KafkaConfig.ListenersProp) != null) {
      validateUniquePortAndProtocol(getString(KafkaConfig.ListenersProp))
      CoreUtils.listenerListToEndPoints(getString(KafkaConfig.ListenersProp))
    } else {
      CoreUtils.listenerListToEndPoints("PLAINTEXT://" + hostName + ":" + port)
    }
  }

  // If the user defined advertised listeners, we use those
  // If he didn't but did define advertised host or port, we'll use those and fill in the missing value from regular host / port or defaults
  // If none of these are defined, we'll use the listeners
  private def getAdvertisedListeners(): immutable.Map[SecurityProtocol, EndPoint] = {
    if (getString(KafkaConfig.AdvertisedListenersProp) != null) {
      validateUniquePortAndProtocol(getString(KafkaConfig.AdvertisedListenersProp))
      CoreUtils.listenerListToEndPoints(getString(KafkaConfig.AdvertisedListenersProp))
    } else if (getString(KafkaConfig.AdvertisedHostNameProp) != null || getInt(KafkaConfig.AdvertisedPortProp) != null) {
      CoreUtils.listenerListToEndPoints("PLAINTEXT://" + advertisedHostName + ":" + advertisedPort)
    } else {
      getListeners()
    }
  }

  // Instantiates and configures each non-empty reporter class name.
  // NOTE(review): not referenced within this class as shown — possibly dead
  // code superseded by getConfiguredInstances above; confirm before removing.
  private def getMetricClasses(metricClasses: java.util.List[String]): java.util.List[MetricsReporter] = {
    val reporterList = new util.ArrayList[MetricsReporter]()
    val iterator = metricClasses.iterator()
    while (iterator.hasNext) {
      val reporterName = iterator.next()
      if (!reporterName.isEmpty) {
        val reporter: MetricsReporter = CoreUtils.createObject[MetricsReporter](reporterName)
        reporter.configure(originals)
        reporterList.add(reporter)
      }
    }
    reporterList
  }

  // NOTE(review): not referenced within this class as shown; confirm usage
  // before removing.
  private def getPrincipalBuilderClass(principalBuilderClass: String): PrincipalBuilder = {
    CoreUtils.createObject[PrincipalBuilder](principalBuilderClass)
  }

  // Runs during construction: every val above is initialized before this call,
  // so moving it earlier in the class body would break validation.
  validateValues()

  // Cross-property invariants that a single-property validator cannot express.
  private def validateValues() {
    require(brokerId >= -1 && brokerId <= maxReservedBrokerId, "broker.id must be equal or greater than -1 and not greater than reserved.broker.max.id")
    require(logRollTimeMillis >= 1, "log.roll.ms must be equal or greater than 1")
    require(logRollTimeJitterMillis >= 0, "log.roll.jitter.ms must be equal or greater than 0")
    require(logRetentionTimeMillis >= 1 || logRetentionTimeMillis == -1, "log.retention.ms must be unlimited (-1) or, equal or greater than 1")
    require(logDirs.size > 0)
    require(logCleanerDedupeBufferSize / logCleanerThreads > 1024 * 1024, "log.cleaner.dedupe.buffer.size must be at least 1MB per cleaner thread.")
    require(replicaFetchWaitMaxMs <= replicaSocketTimeoutMs, "replica.socket.timeout.ms should always be at least replica.fetch.wait.max.ms" +
      " to prevent unnecessary socket timeouts")
    require(replicaFetchMaxBytes >= messageMaxBytes, "replica.fetch.max.bytes should be equal or greater than message.max.bytes")
    require(replicaFetchWaitMaxMs <= replicaLagTimeMaxMs, "replica.fetch.wait.max.ms should always be at least replica.lag.time.max.ms" +
      " to prevent frequent changes in ISR")
    require(offsetCommitRequiredAcks >= -1 && offsetCommitRequiredAcks <= offsetsTopicReplicationFactor,
      "offsets.commit.required.acks must be greater or equal -1 and less or equal to offsets.topic.replication.factor")
    require(BrokerCompressionCodec.isValid(compressionType), "compression.type : " + compressionType + " is not valid." +
      " Valid options are " + BrokerCompressionCodec.brokerCompressionOptions.mkString(","))
  }
}
| eljefe6a/kafka | core/src/main/scala/kafka/server/KafkaConfig.scala | Scala | apache-2.0 | 65,780 |
package com.twitter.finagle.server
import com.twitter.finagle.util.{InetSocketAddressUtil, StackRegistry}
import com.twitter.logging.Level
import com.twitter.util.Time
import java.net.SocketAddress
import java.util.concurrent.ConcurrentHashMap
import java.util.function.{Function => JFunction}
import java.util.logging.Logger
import scala.collection.JavaConverters._
/**
 * Process-global registry of Finagle servers: maps labeled bind addresses
 * and tracks active connections per server.
 */
private[twitter] object ServerRegistry extends StackRegistry {
  private val log = Logger.getLogger(getClass.getName)
  // Guarded by `synchronized` on this object: maps a bound address to the
  // label the user supplied via the deprecated "<label>=<addr>" syntax.
  private var addrNames = Map[SocketAddress, String]()

  def registryName: String = "server"

  // This is a terrible hack until we have a better
  // way of labeling addresses.
  def register(addr: String): SocketAddress = synchronized {
    addr.split("=", 2) match {
      case Array(hostPort) =>
        // No label: the single element is the whole address string.
        val Seq(ia) = InetSocketAddressUtil.parseHosts(hostPort)
        ia
      case Array(name, hostPort) =>
        log.log(Level.WARNING, "Labeling servers with the <label>=<addr>" +
          " syntax is deprecated! Configure your server with a" +
          " com.twitter.finagle.param.Label instead.")
        val Seq(ia) = InetSocketAddressUtil.parseHosts(hostPort)
        addrNames += (ia -> name)
        ia
    }
  }

  /** Returns the label registered for `addr`, if any. */
  def nameOf(addr: SocketAddress): Option[String] = synchronized {
    addrNames.get(addr)
  }

  /**
   * Maps a server's local address to a [[ConnectionRegistry]] of all its active connections.
   */
  private[this] val registries = new ConcurrentHashMap[SocketAddress, ConnectionRegistry]()

  /** Returns (creating on first use) the connection registry for `localAddr`. */
  def connectionRegistry(localAddr: SocketAddress): ConnectionRegistry =
    registries.computeIfAbsent(localAddr, connRegFn)

  private[this] val connRegFn = new JFunction[SocketAddress, ConnectionRegistry] {
    def apply(localAddr: SocketAddress): ConnectionRegistry =
      new ConnectionRegistry(localAddr)
  }

  /** Metadata recorded for each active connection. */
  private[server] case class ConnectionInfo(establishedAt: Time)

  /**
   * Used to maintain a registry of connections to a server, represented by the remote
   * [[SocketAddress]], to [[ConnectionInfo]].
   */
  private[server] class ConnectionRegistry(localAddr: SocketAddress) {
    private[this] val map = new ConcurrentHashMap[SocketAddress, ConnectionInfo]

    /**
     * Records a newly established connection and returns the stored
     * [[ConnectionInfo]].
     *
     * Fix: this previously returned `ConcurrentHashMap.put`'s result, which
     * is the *previous* mapping — i.e. null on first registration — despite
     * the non-null return type. We now return the info actually stored.
     */
    def register(remoteAddr: SocketAddress): ConnectionInfo = {
      val info = ConnectionInfo(Time.now)
      map.put(remoteAddr, info)
      info
    }

    /** Removes the connection for `remoteAddr`, if present. */
    def unregister(remoteAddr: SocketAddress): Unit = map.remove(remoteAddr)

    /** Snapshot iterator over the remote addresses of active connections. */
    def iterator: Iterator[SocketAddress] = map.keySet.iterator.asScala
  }
}
| koshelev/finagle | finagle-core/src/main/scala/com/twitter/finagle/server/ServerRegistry.scala | Scala | apache-2.0 | 2,451 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming
import scala.collection.mutable.Queue
import org.openqa.selenium.WebDriver
import org.openqa.selenium.htmlunit.HtmlUnitDriver
import org.scalatest._
import org.scalatest.concurrent.Eventually._
import org.scalatest.selenium.WebBrowser
import org.scalatest.time.SpanSugar._
import org.apache.spark._
import org.apache.spark.internal.config.UI.UI_ENABLED
import org.apache.spark.ui.SparkUICssErrorHandler
/**
* Selenium tests for the Spark Streaming Web UI.
*/
class UISeleniumSuite
  extends SparkFunSuite with WebBrowser with Matchers with BeforeAndAfterAll with TestSuiteBase {
  // Headless HtmlUnit browser shared across all tests; created in beforeAll, disposed in afterAll.
  implicit var webDriver: WebDriver = _
  override def beforeAll(): Unit = {
    super.beforeAll()
    // Suppress CSS parse errors from Spark's stylesheets that HtmlUnit cannot handle.
    webDriver = new HtmlUnitDriver {
      getWebClient.setCssErrorHandler(new SparkUICssErrorHandler)
    }
  }
  override def afterAll(): Unit = {
    // Quit the browser first, but ensure the parent cleanup still runs even if quit() throws.
    try {
      if (webDriver != null) {
        webDriver.quit()
      }
    } finally {
      super.afterAll()
    }
  }
  /**
   * Create a test SparkStreamingContext with the SparkUI enabled.
   */
  private def newSparkStreamingContext(): StreamingContext = {
    val conf = new SparkConf()
      .setMaster("local")
      .setAppName("test")
      .set(UI_ENABLED, true)
    val ssc = new StreamingContext(conf, Seconds(1))
    assert(ssc.sc.ui.isDefined, "Spark UI is not started!")
    ssc
  }
  // Registers two output operations over a single queued RDD: the first runs two clean
  // actions; the second runs one clean action plus one that deliberately fails a single
  // task (and swallows the resulting SparkException so the stream keeps running).
  private def setupStreams(ssc: StreamingContext): Unit = {
    val rdds = Queue(ssc.sc.parallelize(1 to 4, 4))
    val inputStream = ssc.queueStream(rdds)
    inputStream.foreachRDD { rdd =>
      rdd.foreach(_ => {})
      rdd.foreach(_ => {})
    }
    inputStream.foreachRDD { rdd =>
      rdd.foreach(_ => {})
      try {
        rdd.foreach { _ =>
          // Failing the task with id 15 to ensure only one task fails
          if (TaskContext.get.taskAttemptId() % 15 == 0) {
            throw new RuntimeException("Oops")
          }
        }
      } catch {
        case e: SparkException if e.getMessage.contains("Oops") =>
      }
    }
  }
  // End-to-end check that the Streaming tab is attached while the context runs and
  // detached after it stops, and that every page renders the expected content.
  test("attaching and detaching a Streaming tab") {
    withStreamingContext(newSparkStreamingContext()) { ssc =>
      setupStreams(ssc)
      ssc.start()
      val sparkUI = ssc.sparkContext.ui.get
      // The streaming tab contributes exactly five handlers to the root UI.
      sparkUI.getDelegatingHandlers.count(_.getContextPath.contains("/streaming")) should be (5)
      eventually(timeout(10.seconds), interval(50.milliseconds)) {
        go to (sparkUI.webUrl.stripSuffix("/"))
        find(cssSelector( """ul li a[href*="streaming"]""")) should not be (None)
      }
      eventually(timeout(10.seconds), interval(50.milliseconds)) {
        // check whether streaming page exists
        go to (sparkUI.webUrl.stripSuffix("/") + "/streaming")
        val h3Text = findAll(cssSelector("h3")).map(_.text).toSeq
        h3Text should contain("Streaming Statistics")
        // Check stat table
        val statTableHeaders = findAll(cssSelector("#stat-table th")).map(_.text).toSeq
        statTableHeaders.exists(
          _.matches("Timelines \\(Last \\d+ batches, \\d+ active, \\d+ completed\\)")
        ) should be (true)
        statTableHeaders should contain ("Histograms")
        val statTableCells = findAll(cssSelector("#stat-table td")).map(_.text).toSeq
        statTableCells.exists(_.contains("Input Rate")) should be (true)
        statTableCells.exists(_.contains("Scheduling Delay")) should be (true)
        statTableCells.exists(_.contains("Processing Time")) should be (true)
        statTableCells.exists(_.contains("Total Delay")) should be (true)
        // Check batch tables
        val h4Text = findAll(cssSelector("h4")).map(_.text).toSeq
        h4Text.exists(_.matches("Active Batches \\(\\d+\\)")) should be (true)
        h4Text.exists(_.matches("Completed Batches \\(last \\d+ out of \\d+\\)")) should be (true)
        findAll(cssSelector("""#active-batches-table th""")).map(_.text).toSeq should be {
          List("Batch Time", "Records", "Scheduling Delay (?)", "Processing Time (?)",
            "Output Ops: Succeeded/Total", "Status")
        }
        findAll(cssSelector("""#completed-batches-table th""")).map(_.text).toSeq should be {
          List("Batch Time", "Records", "Scheduling Delay (?)", "Processing Time (?)",
            "Total Delay (?)", "Output Ops: Succeeded/Total")
        }
        val batchLinks =
          findAll(cssSelector("""#completed-batches-table a""")).flatMap(_.attribute("href")).toSeq
        batchLinks.size should be >= 1
        // Check a normal batch page
        go to (batchLinks.last) // Last should be the first batch, so it will have some jobs
        val summaryText = findAll(cssSelector("li strong")).map(_.text).toSeq
        summaryText should contain ("Batch Duration:")
        summaryText should contain ("Input data size:")
        summaryText should contain ("Scheduling delay:")
        summaryText should contain ("Processing time:")
        summaryText should contain ("Total delay:")
        findAll(cssSelector("""#batch-job-table th""")).map(_.text).toSeq should be {
          List("Output Op Id", "Description", "Output Op Duration", "Status", "Job Id",
            "Job Duration", "Stages: Succeeded/Total", "Tasks (for all stages): Succeeded/Total",
            "Error")
        }
        // Check we have 2 output op ids
        val outputOpIds = findAll(cssSelector(".output-op-id-cell")).toSeq
        outputOpIds.map(_.attribute("rowspan")) should be (List(Some("2"), Some("2")))
        outputOpIds.map(_.text) should be (List("0", "1"))
        // Check job ids
        val jobIdCells = findAll(cssSelector( """#batch-job-table a""")).toSeq
        jobIdCells.map(_.text) should be (List("0", "1", "2", "3"))
        val jobLinks = jobIdCells.flatMap(_.attribute("href"))
        jobLinks.size should be (4)
        // Check stage progress
        findAll(cssSelector(""".stage-progress-cell""")).map(_.text).toList should be (
          List("1/1", "1/1", "1/1", "0/1 (1 failed)"))
        // Check job progress
        findAll(cssSelector(""".progress-cell""")).map(_.text).toList should be (
          List("4/4", "4/4", "4/4", "3/4 (1 failed)"))
        // Check stacktrace
        val errorCells = findAll(cssSelector(""".stacktrace-details""")).map(_.underlying).toSeq
        errorCells should have size 1
        // Can't get the inner (invisible) text without running JS
        // Check the job link in the batch page is right
        go to (jobLinks(0))
        val jobDetails = findAll(cssSelector("li strong")).map(_.text).toSeq
        jobDetails should contain("Status:")
        jobDetails should contain("Completed Stages:")
        // Check a batch page without id
        go to (sparkUI.webUrl.stripSuffix("/") + "/streaming/batch/")
        webDriver.getPageSource should include ("Missing id parameter")
        // Check a non-exist batch
        go to (sparkUI.webUrl.stripSuffix("/") + "/streaming/batch/?id=12345")
        webDriver.getPageSource should include ("does not exist")
      }
      ssc.stop(false)
      // After stop, all streaming handlers must be detached and the tab gone.
      sparkUI.getDelegatingHandlers.count(_.getContextPath.contains("/streaming")) should be (0)
      eventually(timeout(10.seconds), interval(50.milliseconds)) {
        go to (sparkUI.webUrl.stripSuffix("/"))
        find(cssSelector( """ul li a[href*="streaming"]""")) should be(None)
      }
      eventually(timeout(10.seconds), interval(50.milliseconds)) {
        go to (sparkUI.webUrl.stripSuffix("/") + "/streaming")
        val h3Text = findAll(cssSelector("h3")).map(_.text).toSeq
        h3Text should not contain("Streaming Statistics")
      }
    }
  }
}
| bdrillard/spark | streaming/src/test/scala/org/apache/spark/streaming/UISeleniumSuite.scala | Scala | apache-2.0 | 8,412 |
/*
* Copyright (c) 2014, Cloudera, Inc. All Rights Reserved.
*
* Cloudera, Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"). You may not use this file except in
* compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* This software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for
* the specific language governing permissions and limitations under the
* License.
*/
package com.cloudera.oryx.api.serving
import com.cloudera.oryx.api.KeyMessage
import com.typesafe.config.Config
import java.util.Objects
import org.apache.hadoop.conf.Configuration
import org.slf4j.{LoggerFactory, Logger}
/**
* Convenience implementation of [[ScalaServingModelManager]] that provides several default implementations.
*
* @param config Oryx [[Config]] object
* @tparam U type of update message read/written
* @since 2.0.0
*/
abstract class AbstractScalaServingModelManager[U](private val config: Config) extends ScalaServingModelManager[U] {
  import AbstractScalaServingModelManager._
  // Read once at construction; controls whether the serving layer accepts writes.
  private val readOnly = config.getBoolean("oryx.serving.api.read-only")
  /**
   * @since 2.0.0
   */
  override def getConfig: Config = config
  /**
   * @since 2.0.0
   */
  override def isReadOnly: Boolean = readOnly
  /**
   * Consumes all updates, dispatching each key/message pair to [[consumeKeyMessage]].
   * Keys must be non-null. Exceptions raised for an individual message are logged
   * and swallowed so that one bad message cannot halt the update stream.
   */
  override def consume(updateIterator: Iterator[KeyMessage[String, U]], hadoopConf: Configuration): Unit = {
    updateIterator.foreach { km =>
      val key = km.getKey
      val message = km.getMessage
      try {
        Objects.requireNonNull(key)
        consumeKeyMessage(key, message, hadoopConf)
      } catch {
        // Deliberately catch-and-log: processing continues with the next message.
        case e: Exception =>
          log.warn("Exception while processing message", e)
          log.warn("Key/message were {} : {}", key, message)
      }
    }
  }
  /**
   * Convenience method that is called by the default implementation of
   * [[ScalaServingModelManager.consume()]], to process one key-message pair.
   * It does nothing, except log the message. This should generally be overridden
   * if and only if [[ScalaServingModelManager.consume()]] is not.
   *
   * @param key key to process (non-null)
   * @param message message to process
   * @param hadoopConf Hadoop configuration for process
   * @since 2.3.0
   */
  def consumeKeyMessage(key: String, message: U, hadoopConf: Configuration): Unit = {
    log.info("{} : {}", key, message)
  }
}
object AbstractScalaServingModelManager {
  // Shared SLF4J logger for all concrete manager implementations.
  val log: Logger = LoggerFactory.getLogger(classOf[AbstractScalaServingModelManager[_]])
}
| srowen/oryx | framework/oryx-api/src/main/scala/com/cloudera/oryx/api/serving/AbstractScalaServingModelManager.scala | Scala | apache-2.0 | 2,633 |
package redbot.bots
import java.net.URL
import java.util.concurrent.Executors
import redbot.bots.FeedBot._
import redbot.cmd.Command
import redbot.discord.Permission.ManageChannels
import redbot.discord.Snowflake._
import redbot.discord._
import redbot.utils.{DataStore, InputUtils, JoinMap, Logger}
import regex.Grinch
import scala.concurrent.Future
import scala.util.{Success, Failure}
import scala.collection.mutable
import scala.collection.JavaConverters._
/**
 * Bot command that lets users register "crushes" on other users via PM.
 * When two users crush on each other, both parties are notified of the match.
 */
case class CrushBot(client: Client) extends CommandBot {
  private implicit val ec = scala.concurrent.ExecutionContext.global

  override def commands: Seq[Command[_]] = Vector(
    Command("crush <username#0000>",
      "Marks the specified user as one of your crushes") ( msg => {
      case gr"crush $user(.+)+" =>
        client.getChannel(msg.channelId).onComplete {
          case Failure(e) => Logger.error(e)("User" -> msg.user, "Channel" -> msg.channelId)
          case Success(c) if !c.isPM =>
            msg.reply("This command can only be used in PMs.")
          case _ =>
            // Fetch every user in every guild the bot shares; wrap each lookup in
            // Success/Failure so one failed lookup doesn't fail the whole batch.
            Future.sequence(
              (for {
                g <- client.getGuilds()
                uid <- client.getMembers(g)
              } yield client.getUser(uid)).
              map(fu => fu.map(Success(_)).recover{
                case e => Failure(e)
              })
            ).map(_
              .collect { case Success(u) => (u,
                InputUtils.distance(u.fullUsername, user, ignoreCaps = true))
              }.filter(_._2 < 7) // keep only near-matches (edit distance < 7)
            ).foreach { found =>
              if (found.isEmpty)
                msg.reply("User not found. I might not be in a server with them!")
              else {
                val closest = found.minBy(_._2)
                if (closest._2 != 0) {
                  // Near miss: suggest the closest known username.
                  // (Fixed: the concatenated sentences were missing a separating space.)
                  msg.reply(s"User not found. Closest match: `${closest._1.username}#${closest._1.discrim}`." +
                    " Rerun the command with the full username if that's who you want.")
                } else {
                  val u = closest._1
                  val s = crushes.getOrElseUpdate(msg.user.id, Set.empty)
                  crushes(msg.user.id) = s + u.id
                  // Mutual crush: notify the other party and report the match.
                  if (crushes.getOrElseUpdate(u.id, Set.empty) contains msg.user.id) {
                    client.getPM(u.id).foreach { c =>
                      client.sendMessage(c, s"You matched with ${msg.user.fullUsername}!")
                    }
                    msg.reply("It's a match! I've sent a message their way.")
                  } else {
                    msg.reply("Crush noted! You'll get a message if you match.")
                  }
                  saveCrushes()
                }
              }
            }
        }
    }
    ))

  import CrushBot._

  // userId -> set of userIds they have crushed on; loaded from the data store on startup.
  private val crushes: mutable.Map[User.Id, Set[User.Id]] =
    DataStore.getOrElse("crush_map", mutable.Map.empty[User.Id, Set[User.Id]])

  // Persist the crush map after every mutation.
  private def saveCrushes(): Unit = DataStore.store("crush_map", crushes)
}
object CrushBot {
  import play.api.libs.json._
  // JSON (de)serialization of user ids as raw longs.
  private implicit val userIdFormat: Format[User.Id] = Format(
    Reads.LongReads.map(_.asId[User.Id]),
    Writes.LongWrites
  )
  // Persist the crush map as an array of (userId, crushSet) pairs.
  private implicit val crushesFormat: Format[mutable.Map[User.Id, Set[User.Id]]] = Format(
    Reads.ArrayReads[(User.Id, Set[User.Id])].map(a => mutable.Map(a.toSeq: _*)),
    Writes.arrayWrites[(User.Id, Set[User.Id])].contramap(m => m.toArray)
  )
}
| JamesGallicchio/RedBot | src/main/scala/redbot/bots/CrushBot.scala | Scala | mit | 3,490 |
package org.eichelberger.sfc.study.composition
import org.eichelberger.sfc.examples.Geohash
import org.eichelberger.sfc.study.composition.CompositionSampleData._
import org.eichelberger.sfc.study.composition.SchemaStudy.TestCase
import org.eichelberger.sfc.utils.Timing
import org.eichelberger.sfc._
import org.eichelberger.sfc.SpaceFillingCurve._
import org.eichelberger.sfc.study._
import org.joda.time.{DateTimeZone, DateTime}
/*
Need the ability to force the planner to bail out early,
because we've declared in advance that we don't want more
than RRR ranges returned.
Need a new metric:
Something to represent the percentage of (expected) false-positives
that result from the current range (approximation).
*/
/**
 * Benchmarks several composed space-filling-curve schemas against the standard
 * point/query test suite, writing per-label aggregates to TSV and JSON outputs.
 */
object SchemaStudy extends App {
  // Destination for study results: a mirrored TSV file plus a JSON file under /tmp.
  val output: OutputDestination = {
    val columns = OutputMetadata(Seq(
      ColumnSpec("when", isQuoted = true),
      ColumnSpec("curve", isQuoted = true),
      ColumnSpec("test.type", isQuoted = true),
      ColumnSpec("label", isQuoted = true),
      ColumnSpec("precision", isQuoted = false),
      ColumnSpec("replications", isQuoted = false),
      ColumnSpec("dimensions", isQuoted = false),
      ColumnSpec("plys", isQuoted = false),
      ColumnSpec("avg.ranges", isQuoted = false),
      ColumnSpec("avg.cells", isQuoted = false),
      ColumnSpec("cells.per.second", isQuoted = false),
      ColumnSpec("cells.per.range", isQuoted = false),
      ColumnSpec("seconds.per.cell", isQuoted = false),
      ColumnSpec("seconds.per.range", isQuoted = false),
      ColumnSpec("score", isQuoted = false),
      ColumnSpec("adj.score", isQuoted = false),
      ColumnSpec("seconds", isQuoted = false)
    ))
    val baseFile = "schema"
    new MultipleOutput(Seq(
      new MirroredTSV(s"/tmp/$baseFile.tsv", columns, writeHeader = true),
      new JSON(s"/tmp/$baseFile.js", columns)
        with FileOutputDestination { def fileName = s"/tmp/$baseFile.js" }
    ))
  }
  import TestLevels._
  val testLevel = Large
  // standard test suite of points and queries
  val n = getN(testLevel)
  val pointQueryPairs = getPointQueryPairs(testLevel)
  val points: Seq[XYZTPoint] = pointQueryPairs.map(_._1)
  val cells: Seq[Cell] = pointQueryPairs.map(_._2)
  val labels: Seq[String] = pointQueryPairs.map(_._3)
  val uniqueLabels = labels.toSet.toSeq
  /** A curve under test together with the column ordering of its query cells. */
  case class TestCase(curve: ComposedCurve, columnOrder: String)
  // Supported column orderings for query cells.
  val TXY = "T ~ X ~ Y"
  val T0XYT1 = "T0 ~ X ~ Y ~ T1"
  val XYT = "X ~ Y ~ T"
  /**
   * Runs every query in the suite against `testCase`'s curve, then writes one
   * aggregate output row per (net) query label.
   */
  def testSchema(testCase: TestCase): Unit = {
    val TestCase(curve, columnOrder) = testCase
    println(s"[SCHEMA ${curve.name}]")
    // conduct all queries against this curve
    val results: List[(String, Seq[OrdinalPair], Long)] = pointQueryPairs.map{
      case (point, rawCell, label) =>
        val x = rawCell.dimensions.head
        val y = rawCell.dimensions(1)
        val t = rawCell.dimensions(3)
        // Re-order (and, for the split-time schema, duplicate) the query dimensions
        // to match the curve's column layout.
        val cell = columnOrder match {
          case s if s == TXY =>
            Cell(Seq(t, x, y))
          case s if s == T0XYT1 =>
            Cell(Seq(t, x, y, t))
          case s if s == XYT =>
            Cell(Seq(x, y, t))
          case _ =>
            throw new Exception(s"Unhandled column order: $columnOrder")
        }
        curve.clearCache()
        val (ranges, msElapsed) = Timing.time(() => {
          val itr = curve.getRangesCoveringCell(cell)
          val list = itr.toList
          list
        })
        // compute a net label (only needed for 3D curves)
        val netLabel = label.take(2) + label.takeRight(1)
        (netLabel, ranges, msElapsed)
    }.toList
    // aggregate by label
    val aggregates = results.groupBy(_._1)
    aggregates.foreach {
      case (aggLabel, group) =>
        var totalCells = 0L
        var totalRanges = 0L
        var totalMs = 0L
        group.foreach {
          case (_, ranges, ms) =>
            totalRanges = totalRanges + ranges.size
            totalCells = totalCells + ranges.map(_.size).sum
            totalMs = totalMs + ms
        }
        val m = group.size.toDouble
        val avgRanges = totalRanges.toDouble / m
        val avgCells = totalCells.toDouble / m
        val seconds = totalMs.toDouble / 1000.0
        val avgCellsPerSecond = totalCells / seconds
        // FIX: was `totalRanges / seconds`, which duplicated ranges-per-second and
        // corrupted the cells.per.range, score, and adj.score columns. Cells-per-range
        // is cells divided by ranges; guard against zero ranges.
        val avgCellsPerRange = if (totalRanges > 0) totalCells.toDouble / totalRanges else 0.0
        val avgSecondsPerCell = seconds / totalCells
        val avgSecondsPerRange = seconds / totalRanges
        // score rewards curves that return many cells quickly in few, fat ranges
        val avgScore = avgCellsPerSecond * avgCellsPerRange
        val avgAdjScore = avgCellsPerSecond * Math.log(1.0 + avgCellsPerRange)
        val data = Seq(
          DateTime.now().toString,
          curve.name,
          "ranges",
          aggLabel,
          curve.M,
          n,
          curve.numLeafNodes,
          curve.plys,
          avgRanges,
          avgCells,
          avgCellsPerSecond,
          avgCellsPerRange,
          avgSecondsPerCell,
          avgSecondsPerRange,
          avgScore,
          avgAdjScore,
          seconds
        )
        output.println(data)
    }
  }
  val BasePrecision = 9
  // Geohash dimensions
  val gh = new Geohash(BasePrecision << 1L)
  val subGH0 = new SubDimension[Seq[Any]]("gh0", gh.pointToIndex, gh.M, 0, BasePrecision * 2 / 3)
  val subGH1 = new SubDimension[Seq[Any]]("gh1", gh.pointToIndex, gh.M, BasePrecision * 2 / 3, BasePrecision * 2 / 3)
  val subGH2 = new SubDimension[Seq[Any]]("gh2", gh.pointToIndex, gh.M, BasePrecision * 4 / 3, (BasePrecision << 1L) - BasePrecision * 4 / 3)
  // geographic dimensions
  val dimX = DefaultDimensions.createLongitude(BasePrecision)
  // NOTE(review): dimY is also built with createLongitude; for a latitude axis this
  // looks like it should be createLatitude — confirm against DefaultDimensions.
  val dimY = DefaultDimensions.createLongitude(BasePrecision)
  // time dimensions
  val dimTime = DefaultDimensions.createNearDateTime(BasePrecision)
  val dimT0 = new SubDimension[DateTime]("t0", dimTime.index, dimTime.precision, 0, dimTime.precision >> 1)
  val dimT1 = new SubDimension[DateTime]("t1", dimTime.index, dimTime.precision, dimTime.precision >> 1, dimTime.precision - (dimTime.precision >> 1))
  /** The four schemas under comparison. */
  def getCurves: Seq[TestCase] = Seq(
    // R(t, gh)
    TestCase(new ComposedCurve(RowMajorCurve(dimTime.precision, gh.M), Seq(dimTime, gh)), TXY),
    // R(gh, t)
    TestCase(new ComposedCurve(RowMajorCurve(gh.M, dimTime.precision), Seq(gh, dimTime)), XYT),
    // R(t0, gh, t1)
    TestCase(new ComposedCurve(RowMajorCurve(dimT0.precision, gh.M, dimT1.precision), Seq(dimT0, gh, dimT1)), T0XYT1),
    // R(t0, Z(x, y, t1))
    TestCase(
      new ComposedCurve(
        RowMajorCurve(dimT0.precision, dimX.precision + dimY.precision + dimT1.precision),
        Seq(dimT0, new ComposedCurve(
          ZCurve(dimX.precision, dimY.precision, dimT1.precision),
          Seq(dimX, dimY, dimT1)))),
      T0XYT1)
  )
  getCurves.foreach(testSchema)
  output.close()
}
| cne1x/sfseize | src/main/scala/org/eichelberger/sfc/study/composition/SchemaStudy.scala | Scala | apache-2.0 | 6,882 |
package com.github.andyglow.config
import java.time.temporal.TemporalAmount
import java.time.{Period, Duration => JDuration}
import com.typesafe.config._
import scala.collection.generic.CanBuildFrom
import scala.concurrent.duration.{Duration, FiniteDuration}
import scala.language.higherKinds
/** Typeclass that extracts a value of type `T` from a [[Config]] at a given path. */
trait FromConf[T] extends ((Config, String) => T)
object FromConf {
  /** Summons the implicit instance and applies it: `FromConf[Int](conf, "a.b")`. */
  def apply[T: FromConf](
    config: Config,
    path: String): T = {
    val fc = implicitly[FromConf[T]]
    fc(config, path)
  }
  // Primitive and standard-type instances delegate to Config's typed getters.
  implicit val stringFC: FromConf[String] = _ getString _
  implicit val booleanFC: FromConf[Boolean] = _ getBoolean _
  implicit val intFC: FromConf[Int] = _ getInt _
  implicit val doubleFC: FromConf[Double] = _ getDouble _
  implicit val longFC: FromConf[Long] = _ getLong _
  // Scala durations are derived from the Java duration, reduced to the coarsest unit.
  implicit val finDurFC: FromConf[FiniteDuration] = (c, p) => Duration.fromNanos((c getDuration p).toNanos).toCoarsest.asInstanceOf[FiniteDuration]
  implicit val durationFC: FromConf[Duration] = (c, p) => Duration.fromNanos((c getDuration p).toNanos).toCoarsest
  implicit val jDurationFC: FromConf[JDuration] = _ getDuration _
  implicit val periodFC: FromConf[Period] = _ getPeriod _
  implicit val temporalFC: FromConf[TemporalAmount] = _ getTemporal _
  implicit val configListFC: FromConf[ConfigList] = _ getList _
  implicit val configFC: FromConf[Config] = _ getConfig _
  implicit val objectFC: FromConf[ConfigObject] = _ getObject _
  implicit val memorySizeFC: FromConf[ConfigMemorySize] = _ getMemorySize _
  implicit val sizeInBytesFC: FromConf[SizeInBytes] = (c, p) => SizeInBytes(c getBytes p)
  // Missing paths yield None rather than throwing ConfigException.Missing.
  implicit def optFC[T: FromConf]: FromConf[Option[T]] =
    (c, p) => if (c.hasPath(p)) Some(FromConf[T](c, p)) else None
  // Builds any collection C[T] from a ConfigList via the 2.12 CanBuildFrom machinery.
  implicit def traversableFC[C[_], T](implicit ct: ConfType[T], cbf: CanBuildFrom[C[T], T, C[T]]): FromConf[C[T]] = {
    (c, p) =>
      import scala.collection.JavaConverters._
      val from = c.getList(p)
      val to = cbf()
      to.sizeHint(from.size())
      from.asScala foreach { e =>
        to += ct.make(e, p)
      }
      to.result()
  }
  // Fallback: any type with a ConfType instance can be read from a raw ConfigValue.
  implicit def fromConfType[T](implicit ct: ConfType[T]): FromConf[T] = (c, p) => ct.make(c.getValue(p), p)
}
| andyglow/typesafe-config-scala | src/main/scala-2.12/com/github/andyglow/config/FromConf.scala | Scala | gpl-3.0 | 2,332 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala
package tools.nsc
package reporters
import java.io.{BufferedReader, PrintWriter}
import scala.reflect.internal.util.Position
/** This class implements a Reporter that displays messages on a text console. */
class ConsoleReporter(val settings: Settings, val reader: BufferedReader, val writer: PrintWriter, val echoWriter: PrintWriter) extends FilteringReporter with PrintReporter {
  // Convenience constructor: read stdin, report diagnostics on stderr, echo on stdout.
  def this(settings: Settings) = this(settings, Console.in, new PrintWriter(Console.err, true), new PrintWriter(Console.out, true))
  // Convenience constructor: echo to the same writer used for diagnostics.
  def this(settings: Settings, reader: BufferedReader, writer: PrintWriter) = this(settings, reader, writer, writer)
  // Render one diagnostic at `pos` with the given severity via PrintReporter.display.
  def doReport(pos: Position, msg: String, severity: Severity): Unit = display(pos, msg, severity)
  /** Emits summary warning/error counts (warnings suppressed by -nowarn), then delegates. */
  override def finish(): Unit = {
    import reflect.internal.util.StringOps.countElementsAsString
    if (!settings.nowarn && hasWarnings)
      echo(countElementsAsString(warningCount, WARNING.toString.toLowerCase))
    if (hasErrors)
      echo(countElementsAsString(errorCount, ERROR.toString.toLowerCase))
    super.finish()
  }
}
| scala/scala | src/compiler/scala/tools/nsc/reporters/ConsoleReporter.scala | Scala | apache-2.0 | 1,374 |
package filodb.memory.format.vectors
import java.nio.ByteBuffer
import debox.Buffer
import filodb.memory.format._
class DoubleVectorTest extends NativeVectorTest {
  describe("DoubleMaskedAppendableVector") {
    // Exercises NA handling, appending/growth, freezing, and DeltaDelta optimization.
    it("should append a list of all NAs and read all NAs back") {
      val builder = DoubleVector.appendingVector(memFactory, 100)
      builder.addNA
      builder.isAllNA should be (true)
      builder.noNAs should be (false)
      val sc = builder.freeze(memFactory)
      DoubleVector(acc, sc).length(acc, sc) shouldEqual 1
      // Just to make sure this does not throw an exception
      DoubleVector(acc, sc)(acc, sc, 0)
      DoubleVector(acc, sc)
        .toBuffer(acc, sc) shouldEqual Buffer.empty[Double]
    }
    it("should encode a mix of NAs and Doubles and decode iterate and skip NAs") {
      val cb = DoubleVector.appendingVector(memFactory, 5)
      cb.addNA
      cb.addData(101)
      cb.addData(102.5)
      cb.addData(103)
      cb.addNA
      cb.isAllNA should be (false)
      cb.noNAs should be (false)
      val sc = cb.freeze(memFactory)
      DoubleVector(acc, sc)
        .length(acc, sc) shouldEqual 5
      DoubleVector(acc, sc)(acc, sc, 1) shouldEqual 101.0
      DoubleVector(acc, sc)
        .toBuffer(acc, sc) shouldEqual Buffer(101, 102.5, 103)
    }
    it("should be able to append lots of Doubles off-heap and grow vector") {
      // Initial capacity is half the data size, forcing at least one grow.
      val numDoubles = 1000
      val builder = DoubleVector.appendingVector(memFactory, numDoubles / 2)
      (0 until numDoubles).map(_.toDouble).foreach(builder.addData)
      builder.length should equal (numDoubles)
      builder.isAllNA should be (false)
      builder.noNAs should be (true)
    }
    it("should be able to return minMax accurately with NAs") {
      val cb = DoubleVector.appendingVector(memFactory, 5)
      cb.addNA
      cb.addData(10.1)
      cb.addData(102)
      cb.addData(1.03E9)
      cb.addNA
      val inner = cb.asInstanceOf[GrowableVector[Double]].inner.asInstanceOf[MaskedDoubleAppendingVector]
      inner.minMax should equal ((10.1, 1.03E9))
    }
    it("should be able to freeze() and minimize bytes used") {
      val builder = DoubleVector.appendingVector(memFactory, 100)
      // Test numBytes to make sure it's accurate
      builder.numBytes should equal (12 + 16 + 8)   // 2 long words needed for 100 bits
      (0 to 4).map(_.toDouble).foreach(builder.addData)
      builder.numBytes should equal (12 + 16 + 8 + 40)
      val frozen = builder.freeze(memFactory)
      BinaryVector.totalBytes(acc, frozen) should equal (12 + 8 + 8 + 40)  // bitmask truncated
      DoubleVector(acc, frozen).length(acc, frozen) shouldEqual 5
      DoubleVector(acc, frozen)
        .toBuffer(acc, frozen) shouldEqual Buffer.fromIterable((0 to 4).map(_.toDouble))
    }
    it("should be able to optimize all integral vector to DeltaDeltaVector") {
      val orig = (0 to 9).map(_.toDouble)
      val builder = DoubleVector(memFactory, orig)
      val optimized = builder.optimize(memFactory)
      DoubleVector(acc, optimized).length(acc, optimized) shouldEqual 10
      DoubleVector(acc, optimized).toBuffer(acc, optimized).toList shouldEqual orig
      DoubleVector(acc, optimized)(acc, optimized, 0) shouldEqual 0.0
      DoubleVector(acc, optimized)(acc, optimized, 2) shouldEqual 2.0
      // Const DeltaDeltaVector (since this is linearly increasing)
      BinaryVector.totalBytes(acc, optimized) shouldEqual 24
    }
    it("should be able to read from on-heap DeltaDeltaVector") {
      val orig = (0 to 9).map(_.toDouble)
      val builder = DoubleVector(memFactory, orig)
      val optimized = builder.optimize(memFactory)
      val bytes = DoubleVector(acc, optimized).toBytes(acc, optimized)
      // The same encoded bytes must be readable through all three accessor flavors.
      val onHeapAcc = Seq(MemoryReader.fromArray(bytes),
                          MemoryReader.fromByteBuffer(BinaryVector.asBuffer(optimized)),
                          MemoryReader.fromByteBuffer(ByteBuffer.wrap(bytes)))
      onHeapAcc.foreach { a =>
        DoubleVector(a, 0).toBuffer(a, 0).toList shouldEqual orig
      }
    }
    it("should encode some edge cases correctly to DDV") {
      val orig = Seq(55.0, 60.0) ++ Seq.fill(10)(60.0)
      val appender = DoubleVector.appendingVectorNoNA(memFactory, 100)
      orig.foreach(appender.addData)
      appender.length shouldEqual orig.length
      val optimized = appender.optimize(memFactory)
      DoubleVector(acc, optimized).length(acc, optimized) shouldEqual orig.length
      DoubleVector(acc, optimized).toBuffer(acc, optimized).toList shouldEqual orig
      BinaryVector.totalBytes(acc, optimized) should be > (24)   // Not const DDV!
    }
    it("should be able to optimize off-heap No NA integral vector to DeltaDeltaVector") {
      val builder = DoubleVector.appendingVectorNoNA(memFactory, 100)
      // Use higher numbers to verify they can be encoded efficiently too
      (100000 to 100004).map(_.toDouble).foreach(builder.addData)
      val optimized = builder.optimize(memFactory)
      DoubleVector(acc, optimized).length(acc, optimized) shouldEqual 5
      DoubleVector(acc, optimized)
        .toBuffer(acc, optimized) shouldEqual Buffer.fromIterable((100000 to 100004).map(_.toDouble))
      DoubleVector(acc, optimized)(acc, optimized, 2) shouldEqual 100002.0
      // Const DeltaDeltaVector (since this is linearly increasing)
      BinaryVector.totalBytes(acc, optimized) shouldEqual 24
    }
    it("should iterate with startElement > 0") {
      val orig = Seq(1000, 2001.1, 2999.99, 5123.4, 5250, 6004, 7678)
      val builder = DoubleVector.appendingVectorNoNA(memFactory, orig.length)
      orig.foreach(builder.addData)
      builder.length shouldEqual orig.length
      val frozen = builder.optimize(memFactory)
      (2 to 5).foreach { start =>
        DoubleVector(acc, frozen).toBuffer(acc, frozen, start).toList shouldEqual orig.drop(start)
      }
    }
    it("should sum uncompressed double vectors and ignore NaN values") {
      val orig = Seq(1000, 2001.1, 2999.99, 5123.4, 5250, 6004, 7678)
      val builder = DoubleVector.appendingVectorNoNA(memFactory, orig.length + 2)
      orig.foreach(builder.addData)
      val reader = builder.reader.asDoubleReader
      reader.sum(acc, builder.addr, 2, orig.length - 1) shouldEqual (orig.drop(2).sum)
      // Now add a NaN
      builder.addData(Double.NaN)
      builder.length shouldEqual (orig.length + 1)
      reader.sum(acc, builder.addr, 2, orig.length) shouldEqual (orig.drop(2).sum)
    }
    it("should not allow sum() with out of bound indices") {
      val orig = Seq(1000, 2001.1, 2999.99, 5123.4, 5250, 6004, 7678)
      val builder = DoubleVector.appendingVectorNoNA(memFactory, orig.length + 2)
      orig.foreach(builder.addData)
      val optimized = builder.optimize(memFactory)
      builder.reader.asDoubleReader.sum(acc, builder.addr, 0, 4) shouldEqual orig.take(5).sum
      intercept[IllegalArgumentException] {
        builder.reader.asDoubleReader.sum(acc, builder.addr, 1, orig.length)
      }
      val readVect = DoubleVector(acc, optimized)
      intercept[IllegalArgumentException] {
        readVect.sum(acc, optimized, 1, orig.length)
      }
    }
    it("should support resetting and optimizing AppendableVector multiple times") {
      val cb = DoubleVector.appendingVector(memFactory, 5)
      // Use large numbers on purpose so cannot optimize to Doubles or const
      val orig = Seq(11.11E101, -2.2E-176, 1.77E88)
      cb.addNA()
      orig.foreach(cb.addData)
      cb.copyToBuffer.toList shouldEqual orig
      val optimized = cb.optimize(memFactory)
      DoubleVector(acc, optimized).toBuffer(acc, optimized).toList shouldEqual orig
      // Now the optimize should not have damaged original vector
      cb.copyToBuffer.toList shouldEqual orig
      cb.reset()
      val orig2 = orig.map(_ * 2)
      orig2.foreach(cb.addData)
      val opt2 = cb.optimize(memFactory)
      DoubleVector(acc, opt2).toBuffer(acc, opt2).toList shouldEqual orig2
      cb.copyToBuffer.toList shouldEqual orig2
    }
  }
  describe("bugs") {
    // Regression: iteration starting mid-vector must yield the same samples as
    // iterating from the beginning and dropping the prefix.
    it("should enumerate same samples regardless of where start enumeration from") {
      val data = scala.io.Source.fromURL(getClass.getResource("/timeseries_bug1.txt"))
                      .getLines.map(_.split(' '))
                      .map(ArrayStringRowReader).toSeq
      val origValues = data.map(_.getDouble(1))
      val timestampAppender = LongBinaryVector.appendingVectorNoNA(memFactory, data.length)
      val valuesAppender = DoubleVector.appendingVectorNoNA(memFactory, data.length)
      data.foreach { reader =>
        timestampAppender.addData(reader.getLong(0))
        valuesAppender.addData(reader.getDouble(1))
      }
      val tsEncoded = timestampAppender.optimize(memFactory)
      val valuesEncoded = valuesAppender.optimize(memFactory)
      val tsReader = LongBinaryVector(acc, tsEncoded)
      val dReader = DoubleVector(acc, valuesEncoded)
      val samples = new collection.mutable.ArrayBuffer[Double]
      for { i <- 0 until timestampAppender.length by 10 } {
        samples.clear()
        val iter = dReader.iterate(acc, valuesEncoded, i)
        (i until timestampAppender.length).foreach(_ => samples.append(iter.next))
        samples shouldEqual origValues.drop(i)
      }
    }
  }
  // Counter-style vectors: a decrease between consecutive samples ("drop",
  // i.e. a counter reset) is detected and folded into a cumulative
  // DoubleCorrection so corrected read-out values stay monotonic.
  describe("counter correction") {
    // Strictly increasing fixture: contains no drops by construction.
    val orig = Seq(1000, 2001.1, 2999.99, 5123.4, 5250, 6004, 7678)
    val builder = DoubleVector.appendingVectorNoNA(memFactory, orig.length)
    orig.foreach(builder.addData)
    builder.length shouldEqual orig.length
    val frozen = builder.optimize(memFactory)
    val reader = DoubleVector(acc, frozen)
    it("should detect drop correctly at beginning of chunk and adjust CorrectionMeta") {
      reader.detectDropAndCorrection(acc, frozen, NoCorrection) shouldEqual NoCorrection
      // No drop in first value, correction should be returned unchanged
      val corr1 = DoubleCorrection(999.9, 100.0)
      reader.detectDropAndCorrection(acc, frozen, corr1) shouldEqual corr1
      // Drop in first value (previous value 1201.2 > first value 1000),
      // so the dropped amount is added to the running correction.
      val corr2 = DoubleCorrection(1201.2, 100.0)
      reader.detectDropAndCorrection(acc, frozen, corr2) shouldEqual DoubleCorrection(1201.2, 100.0 + 1201.2)
    }
    it("should return correctedValue with correction adjustment even if vector has no drops") {
      reader.correctedValue(acc, frozen, 1, NoCorrection) shouldEqual 2001.1
      val corr1 = DoubleCorrection(999.9, 100.0)
      // raw value at index 3 (5123.4) plus the incoming correction (100.0)
      reader.correctedValue(acc, frozen, 3, corr1) shouldEqual 5223.4
    }
    it("should updateCorrections correctly") {
      // No drops in this vector, so only the "last value" part advances.
      reader.updateCorrection(acc, frozen, NoCorrection) shouldEqual DoubleCorrection(7678, 0.0)
      val corr1 = DoubleCorrection(999.9, 50.0)
      reader.updateCorrection(acc, frozen, corr1) shouldEqual DoubleCorrection(7678, 50.0)
    }
    it("should detect drops with DoubleCounterAppender and carry flag to optimized version") {
      val cb = DoubleVector.appendingVectorNoNA(memFactory, 10, detectDrops = true)
      cb.addData(101)
      cb.addData(102.5)
      // So far, no drops
      PrimitiveVectorReader.dropped(acc, cb.addr) shouldEqual false
      // Add dropped value, flag should be set to true
      cb.addData(9)
      PrimitiveVectorReader.dropped(acc, cb.addr) shouldEqual true
      // Add more values, no drops, flag should still be true
      cb.addData(13.3)
      cb.addData(21.1)
      PrimitiveVectorReader.dropped(acc, cb.addr) shouldEqual true
      // Optimize, flag should still be true in optimized version
      val sc = cb.optimize(memFactory)
      PrimitiveVectorReader.dropped(acc, sc) shouldEqual true
      DoubleVector(acc, sc).toBuffer(acc, sc) shouldEqual Buffer(101, 102.5, 9, 13.3, 21.1)
      // Make sure the correcting version of the reader is returned as well
      cb.reader shouldBe a[CorrectingDoubleVectorReader]
      DoubleVector(acc, sc) shouldBe a[CorrectingDoubleVectorReader]
    }
    it("should read out corrected values properly") {
      // One drop after index 1 (102.5 -> 9): corrected values past that point
      // include the 102.5 that was "lost" at the reset.
      val orig = Seq(101, 102.5, 9, 13.3, 21.1)
      val cb = DoubleVector.appendingVectorNoNA(memFactory, 10, detectDrops = true)
      orig.foreach(cb.addData)
      cb.length shouldEqual orig.length
      val sc = cb.optimize(memFactory)
      val reader = DoubleVector(acc, sc)
      reader shouldBe a[CorrectingDoubleVectorReader]
      reader.correctedValue(acc, sc, 1, NoCorrection) shouldEqual 102.5
      reader.correctedValue(acc, sc, 2, NoCorrection) shouldEqual 111.5
      reader.correctedValue(acc, sc, 4, NoCorrection) shouldEqual (21.1 + 102.5)
      val corr1 = DoubleCorrection(999.9, 50.0)
      reader.correctedValue(acc, sc, 1, corr1) shouldEqual 152.5
      reader.correctedValue(acc, sc, 2, corr1) shouldEqual 161.5
      reader.correctedValue(acc, sc, 4, corr1) shouldEqual (21.1 + 102.5 + 50)
      reader.updateCorrection(acc, sc, corr1) shouldEqual DoubleCorrection(21.1, 102.5 + 50.0)
    }
    it("should read out length and values correctly for corrected vectors") {
      // Raw (uncorrected) read-out must be unaffected by drop detection.
      val orig = Seq(4419.00, 4511.00, 4614.00, 4724.00, 4909.00, 948.00, 1000.00, 1095.00, 1102.00, 1201.00)
      val cb = DoubleVector.appendingVectorNoNA(memFactory, 10, detectDrops = true)
      orig.foreach(cb.addData)
      cb.length shouldEqual orig.length
      val sc = cb.optimize(memFactory)
      val reader = DoubleVector(acc, sc)
      reader shouldBe a[CorrectingDoubleVectorReader]
      reader.length(acc, sc) shouldEqual orig.length
      reader.toBuffer(acc, sc).toList shouldEqual orig
    }
  }
} | tuplejump/FiloDB | memory/src/test/scala/filodb.memory/format/vectors/DoubleVectorTest.scala | Scala | apache-2.0 | 13,464 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ml.dmlc.mxnet
import java.io.File
import java.util.concurrent.atomic.AtomicInteger
import ml.dmlc.mxnet.NDArrayConversions._
import org.scalatest.{Matchers, BeforeAndAfterAll, FunSuite}
/**
 * Unit tests for the Scala NDArray API: construction, element access,
 * arithmetic and math operators, slicing/reshaping, persistence
 * (save/load/serialize) and dispose/dependency management.
 */
class NDArraySuite extends FunSuite with BeforeAndAfterAll with Matchers {
  // Monotonically increasing counter used to build unique temp-file names
  // for the save/load tests so repeated runs don't collide.
  private val sequence: AtomicInteger = new AtomicInteger(0)
  test("to java array") {
    val ndarray = NDArray.zeros(2, 2)
    assert(ndarray.toArray === Array(0f, 0f, 0f, 0f))
  }
  test("to scalar") {
    val ndzeros = NDArray.zeros(1)
    assert(ndzeros.toScalar === 0f)
    val ndones = NDArray.ones(1)
    assert(ndones.toScalar === 1f)
  }
  // toScalar is only valid for arrays holding exactly one element.
  test ("call toScalar on an ndarray which is not a scalar") {
    intercept[Exception] { NDArray.zeros(1, 1).toScalar }
  }
  test("size and shape") {
    val ndzeros = NDArray.zeros(4, 1)
    assert(ndzeros.shape === Shape(4, 1))
    assert(ndzeros.size === 4)
  }
  test("dtype") {
    // Float32 is the default element type.
    val arr = NDArray.zeros(3, 2)
    assert(arr.dtype === DType.Float32)
  }
  test("set scalar value") {
    val ndarray = NDArray.empty(2, 1)
    ndarray.set(10f)
    assert(ndarray.toArray === Array(10f, 10f))
  }
  test("copy from java array") {
    val ndarray = NDArray.empty(4, 1)
    ndarray.set(Array(1f, 2f, 3f, 4f))
    assert(ndarray.toArray === Array(1f, 2f, 3f, 4f))
  }
  test("plus") {
    val ndzeros = NDArray.zeros(2, 1)
    val ndones = ndzeros + 1f
    assert(ndones.toArray === Array(1f, 1f))
    assert((ndones + ndzeros).toArray === Array(1f, 1f))
    assert((1 + ndones).toArray === Array(2f, 2f))
    // in-place
    ndones += ndones
    assert(ndones.toArray === Array(2f, 2f))
  }
  test("minus") {
    val ndones = NDArray.ones(2, 1)
    val ndzeros = ndones - 1f
    assert(ndzeros.toArray === Array(0f, 0f))
    assert((ndones - ndzeros).toArray === Array(1f, 1f))
    assert((ndzeros - ndones).toArray === Array(-1f, -1f))
    assert((ndones - 1).toArray === Array(0f, 0f))
    // in-place
    ndones -= ndones
    assert(ndones.toArray === Array(0f, 0f))
  }
  test("multiplication") {
    val ndones = NDArray.ones(2, 1)
    val ndtwos = ndones * 2
    assert(ndtwos.toArray === Array(2f, 2f))
    assert((ndones * ndones).toArray === Array(1f, 1f))
    assert((ndtwos * ndtwos).toArray === Array(4f, 4f))
    // in-place
    ndtwos *= ndtwos
    assert(ndtwos.toArray === Array(4f, 4f))
  }
  test("division") {
    val ndones = NDArray.ones(2, 1)
    val ndzeros = ndones - 1f
    val ndhalves = ndones / 2
    assert(ndhalves.toArray === Array(0.5f, 0.5f))
    assert((ndhalves / ndhalves).toArray === Array(1f, 1f))
    assert((ndones / ndones).toArray === Array(1f, 1f))
    assert((ndzeros / ndones).toArray === Array(0f, 0f))
    // in-place
    ndhalves /= ndhalves
    assert(ndhalves.toArray === Array(1f, 1f))
  }
  test("full") {
    val arr = NDArray.full(Shape(1, 2), 3f)
    assert(arr.shape === Shape(1, 2))
    assert(arr.toArray === Array(3f, 3f))
  }
  test("clip") {
    // Values are clamped to the inclusive range [2, 5].
    val ndarray = NDArray.empty(3, 2)
    ndarray.set(Array(1f, 2f, 3f, 4f, 5f, 6f))
    assert(NDArray.clip(ndarray, 2f, 5f).toArray === Array(2f, 2f, 3f, 4f, 5f, 5f))
  }
  test("sqrt") {
    val ndarray = NDArray.empty(4, 1)
    ndarray.set(Array(0f, 1f, 4f, 9f))
    assert(NDArray.sqrt(ndarray).toArray === Array(0f, 1f, 2f, 3f))
  }
  test("rsqrt") {
    // Reciprocal square root: 1 / sqrt(x).
    val ndarray = NDArray.array(Array(1f, 4f), shape = Shape(2, 1))
    assert(NDArray.rsqrt(ndarray).toArray === Array(1f, 0.5f))
  }
  test("norm") {
    // L2 norm: sqrt(1^2 + 2^2 + 3^2) = sqrt(14).
    val ndarray = NDArray.empty(3, 1)
    ndarray.set(Array(1f, 2f, 3f))
    val normed = NDArray.norm(ndarray)
    assert(normed.shape === Shape(1))
    assert(normed.toScalar === math.sqrt(14.0).toFloat +- 1e-3f)
  }
  test("one hot encode") {
    val indices = NDArray.array(Array(1f, 0f, 2f), shape = Shape(3))
    val array = NDArray.empty(3, 3)
    NDArray.onehotEncode(indices, array)
    assert(array.shape === Shape(3, 3))
    assert(array.toArray === Array(0f, 1f, 0f,
                                   1f, 0f, 0f,
                                   0f, 0f, 1f))
  }
  test("dot") {
    // Matrix product of (1x2) and (2x1): 1*3 + 2*4 = 11.
    val arr1 = NDArray.array(Array(1f, 2f), shape = Shape(1, 2))
    val arr2 = NDArray.array(Array(3f, 4f), shape = Shape(2, 1))
    val res = NDArray.dot(arr1, arr2)
    assert(res.shape === Shape(1, 1))
    assert(res.toArray === Array(11f))
  }
  test("power") {
    // power supports scalar^array, array^scalar and array^array forms.
    val arr = NDArray.array(Array(3f, 5f), shape = Shape(2, 1))
    val arrPower1 = NDArray.power(2f, arr)
    assert(arrPower1.shape === Shape(2, 1))
    assert(arrPower1.toArray === Array(8f, 32f))
    val arrPower2 = NDArray.power(arr, 2f)
    assert(arrPower2.shape === Shape(2, 1))
    assert(arrPower2.toArray === Array(9f, 25f))
    val arrPower3 = NDArray.power(arr, arr)
    assert(arrPower3.shape === Shape(2, 1))
    assert(arrPower3.toArray === Array(27f, 3125f))
  }
  test("choose_element_0index") {
    // Picks one element per row according to the given column indices.
    val arr = NDArray.array(Array(1f, 2f, 3f, 4f, 6f, 5f), shape = Shape(2, 3))
    val indices = NDArray.array(Array(0f, 1f), shape = Shape(2))
    val res = NDArray.choose_element_0index(arr, indices)
    assert(res.toArray === Array(1f, 6f))
  }
  test("copy to") {
    val source = NDArray.array(Array(1f, 2f, 3f), shape = Shape(1, 3))
    val dest = NDArray.empty(1, 3)
    source.copyTo(dest)
    assert(dest.shape === Shape(1, 3))
    assert(dest.toArray === Array(1f, 2f, 3f))
  }
  test("abs") {
    val arr = NDArray.array(Array(-1f, -2f, 3f), shape = Shape(3, 1))
    assert(NDArray.abs(arr).toArray === Array(1f, 2f, 3f))
  }
  test("sign") {
    val arr = NDArray.array(Array(-1f, -2f, 3f), shape = Shape(3, 1))
    assert(NDArray.sign(arr).toArray === Array(-1f, -1f, 1f))
  }
  test("round") {
    val arr = NDArray.array(Array(1.5f, 2.1f, 3.7f), shape = Shape(3, 1))
    assert(NDArray.round(arr).toArray === Array(2f, 2f, 4f))
  }
  test("ceil") {
    val arr = NDArray.array(Array(1.5f, 2.1f, 3.7f), shape = Shape(3, 1))
    assert(NDArray.ceil(arr).toArray === Array(2f, 3f, 4f))
  }
  test("floor") {
    val arr = NDArray.array(Array(1.5f, 2.1f, 3.7f), shape = Shape(3, 1))
    assert(NDArray.floor(arr).toArray === Array(1f, 2f, 3f))
  }
  test("square") {
    val arr = NDArray.array(Array(1f, 2f, 3f), shape = Shape(3, 1))
    assert(NDArray.square(arr).toArray === Array(1f, 4f, 9f))
  }
  test("exp") {
    val arr = NDArray.ones(1)
    assert(NDArray.exp(arr).toScalar === 2.71828f +- 1e-3f)
  }
  test("log") {
    val arr = NDArray.empty(1)
    arr.set(10f)
    assert(NDArray.log(arr).toScalar === 2.302585f +- 1e-5f)
  }
  test("cos") {
    val arr = NDArray.empty(1)
    arr.set(12f)
    assert(NDArray.cos(arr).toScalar === 0.8438539f +- 1e-5f)
  }
  test("sin") {
    val arr = NDArray.empty(1)
    arr.set(12f)
    assert(NDArray.sin(arr).toScalar === -0.536572918f +- 1e-5f)
  }
  test("max") {
    val arr = NDArray.array(Array(1.5f, 2.1f, 3.7f), shape = Shape(3, 1))
    assert(NDArray.max(arr).toScalar === 3.7f +- 1e-3f)
  }
  test("maximum") {
    // Element-wise maximum of two arrays.
    val arr1 = NDArray.array(Array(1.5f, 2.1f, 3.7f), shape = Shape(3, 1))
    val arr2 = NDArray.array(Array(4f, 1f, 3.5f), shape = Shape(3, 1))
    val arr = NDArray.maximum(arr1, arr2)
    assert(arr.shape === Shape(3, 1))
    assert(arr.toArray === Array(4f, 2.1f, 3.7f))
  }
  test("min") {
    val arr = NDArray.array(Array(1.5f, 2.1f, 3.7f), shape = Shape(3, 1))
    assert(NDArray.min(arr).toScalar === 1.5f +- 1e-3f)
  }
  test("minimum") {
    // Element-wise minimum of two arrays.
    val arr1 = NDArray.array(Array(1.5f, 2.1f, 3.7f), shape = Shape(3, 1))
    val arr2 = NDArray.array(Array(4f, 1f, 3.5f), shape = Shape(3, 1))
    val arr = NDArray.minimum(arr1, arr2)
    assert(arr.shape === Shape(3, 1))
    assert(arr.toArray === Array(1.5f, 1f, 3.5f))
  }
  test("sum") {
    val arr = NDArray.array(Array(1f, 2f, 3f, 4f), shape = Shape(2, 2))
    assert(NDArray.sum(arr).toScalar === 10f +- 1e-3f)
  }
  test("argmaxChannel") {
    // Index of the maximum within each row.
    val arr = NDArray.array(Array(1f, 2f, 4f, 3f), shape = Shape(2, 2))
    val argmax = NDArray.argmax_channel(arr)
    assert(argmax.shape === Shape(2))
    assert(argmax.toArray === Array(1f, 0f))
  }
  test("concatenate axis-0") {
    val arr1 = NDArray.array(Array(1f, 2f, 4f, 3f, 3f, 3f), shape = Shape(2, 3))
    val arr2 = NDArray.array(Array(8f, 7f, 6f), shape = Shape(1, 3))
    val arr = NDArray.concatenate(arr1, arr2)
    assert(arr.shape === Shape(3, 3))
    assert(arr.toArray === Array(1f, 2f, 4f, 3f, 3f, 3f, 8f, 7f, 6f))
  }
  test("concatenate axis-1") {
    val arr1 = NDArray.array(Array(1f, 2f, 3f, 4f), shape = Shape(2, 2))
    val arr2 = NDArray.array(Array(5f, 6f), shape = Shape(2, 1))
    val arr = NDArray.concatenate(Array(arr1, arr2), axis = 1)
    assert(arr.shape === Shape(2, 3))
    assert(arr.toArray === Array(1f, 2f, 5f, 3f, 4f, 6f))
  }
  test("transpose") {
    val arr = NDArray.array(Array(1f, 2f, 4f, 3f, 3f, 3f), shape = Shape(2, 3))
    assert(arr.toArray === Array(1f, 2f, 4f, 3f, 3f, 3f))
    assert(arr.T.shape === Shape(3, 2))
    assert(arr.T.toArray === Array(1f, 3f, 2f, 3f, 4f, 3f))
  }
  test("save and load with names") {
    val filename
      = s"${System.getProperty("java.io.tmpdir")}/ndarray-${sequence.getAndIncrement}.bin"
    try {
      val ndarray = NDArray.array(Array(1f, 2f, 3f), shape = Shape(3, 1))
      NDArray.save(filename, Map("local" -> ndarray))
      val (keys, arrays) = NDArray.load(filename)
      assert(keys.length === 1)
      assert(keys(0) === "local")
      assert(arrays.length === 1)
      val loadedArray = arrays(0)
      assert(loadedArray.shape === Shape(3, 1))
      assert(loadedArray.toArray === Array(1f, 2f, 3f))
    } finally {
      // Always remove the temp file, even when an assertion fails.
      val file = new File(filename)
      file.delete()
    }
  }
  test("save and load without names") {
    // Saving an Array (instead of a Map) produces an unnamed entry list.
    val filename
      = s"${System.getProperty("java.io.tmpdir")}/ndarray-${sequence.getAndIncrement}.bin"
    try {
      val ndarray = NDArray.array(Array(1f, 2f, 3f), shape = Shape(3, 1))
      NDArray.save(filename, Array(ndarray))
      val (keys, arrays) = NDArray.load(filename)
      assert(keys.length === 0)
      assert(arrays.length === 1)
      val loadedArray = arrays(0)
      assert(loadedArray.shape === Shape(3, 1))
      assert(loadedArray.toArray === Array(1f, 2f, 3f))
    } finally {
      // Always remove the temp file, even when an assertion fails.
      val file = new File(filename)
      file.delete()
    }
  }
  test("get context") {
    val ndarray = NDArray.ones(3, 2)
    val ctx = ndarray.context
    assert(ctx.deviceType === "cpu")
    assert(ctx.deviceId === 0)
  }
  test("equals") {
    // Equality requires both identical shape and identical contents.
    val ndarray1 = NDArray.array(Array(1f, 2f, 3f), shape = Shape(3, 1))
    val ndarray2 = NDArray.array(Array(1f, 2f, 3f), shape = Shape(3, 1))
    val ndarray3 = NDArray.array(Array(1f, 2f, 3f), shape = Shape(1, 3))
    val ndarray4 = NDArray.array(Array(3f, 2f, 3f), shape = Shape(3, 1))
    ndarray1 shouldEqual ndarray2
    ndarray1 shouldNot equal(ndarray3)
    ndarray1 shouldNot equal(ndarray4)
  }
  test("slice") {
    val arr = NDArray.array(Array(1f, 2f, 3f, 4f, 5f, 6f), shape = Shape(3, 2))
    val arr1 = arr.slice(1)
    assert(arr1.shape === Shape(1, 2))
    assert(arr1.toArray === Array(3f, 4f))
    val arr2 = arr.slice(1, 3)
    assert(arr2.shape === Shape(2, 2))
    assert(arr2.toArray === Array(3f, 4f, 5f, 6f))
  }
  test("at") {
    // Unlike slice, at drops the leading dimension.
    val arr = NDArray.array(Array(1f, 2f, 3f, 4f, 5f, 6f), shape = Shape(3, 2))
    val arr1 = arr.at(1)
    assert(arr1.shape === Shape(2))
    assert(arr1.toArray === Array(3f, 4f))
  }
  test("reshape") {
    val arr = NDArray.array(Array(1f, 2f, 3f, 4f, 5f, 6f), shape = Shape(3, 2))
    val arr1 = arr.reshape(Array(2, 3))
    assert(arr1.shape === Shape(2, 3))
    assert(arr1.toArray === Array(1f, 2f, 3f, 4f, 5f, 6f))
    // The reshaped array shares the underlying data: mutating the original
    // is visible through it (verified by the assertion below).
    arr.set(1f)
    assert(arr1.toArray === Array(1f, 1f, 1f, 1f, 1f, 1f))
  }
  test("dispose deps") {
    val arr1 = NDArray.ones(1, 2)
    val arr2 = NDArray.ones(1, 2)
    val arr3 = NDArray.ones(1, 2)
    val arrWithDeps = (arr1 + arr2) + arr3
    // arr1, arr2, arr3 plus (presumably) the intermediate (arr1 + arr2)
    assert(arrWithDeps.dependencies.size === 4)
    assert(arrWithDeps.dependencies.contains(arr1.handle))
    assert(arrWithDeps.dependencies.contains(arr2.handle))
    assert(arrWithDeps.dependencies.contains(arr3.handle))
    assert(!arr1.isDisposed)
    assert(!arr2.isDisposed)
    assert(!arr3.isDisposed)
    // disposeDeps() disposes the whole dependency chain.
    val arrNoDeps = (arr1 + arr2 + arr3).disposeDeps()
    assert(arrNoDeps.dependencies.isEmpty)
    assert(arr1.isDisposed)
    assert(arr2.isDisposed)
    assert(arr3.isDisposed)
  }
  test("dispose deps except") {
    val arr1 = NDArray.ones(1, 2)
    val arr2 = NDArray.ones(1, 2)
    val arr3 = NDArray.ones(1, 2)
    val arr1_2 = arr1 + arr2
    val arr = (arr1 + arr2 + arr1_2 + arr3).disposeDepsExcept(arr1_2)
    // since arr1_2 depends on arr1 & arr2
    // arr1 & arr2 will not be disposed either
    assert(arr.dependencies.size === 3)
    assert(arr.dependencies.contains(arr1.handle))
    assert(arr.dependencies.contains(arr2.handle))
    assert(arr.dependencies.contains(arr1_2.handle))
    assert(!arr1.isDisposed)
    assert(!arr2.isDisposed)
    assert(!arr1_2.isDisposed)
    assert(arr3.isDisposed)
  }
  test("serialize and deserialize") {
    // Byte-level round trip must preserve shape and contents.
    val arr = NDArray.ones(1, 2) * 3
    val bytes = arr.serialize()
    val arrCopy = NDArray.deserialize(bytes)
    assert(arr === arrCopy)
  }
  test("dtype int32") {
    val arr = NDArray.ones(Shape(1, 2), dtype = DType.Int32) * 2
    assert(arr.dtype === DType.Int32)
    // 2 elements x 4 bytes each
    assert(arr.internal.getRaw.length === 8)
    assert(arr.internal.toFloatArray === Array(2f, 2f))
    assert(arr.internal.toIntArray === Array(2, 2))
    assert(arr.internal.toDoubleArray === Array(2d, 2d))
    assert(arr.internal.toByteArray === Array(2.toByte, 2.toByte))
  }
  test("dtype uint8") {
    val arr = NDArray.ones(Shape(1, 2), dtype = DType.UInt8) * 2
    assert(arr.dtype === DType.UInt8)
    // 2 elements x 1 byte each
    assert(arr.internal.getRaw.length === 2)
    assert(arr.internal.toFloatArray === Array(2f, 2f))
    assert(arr.internal.toIntArray === Array(2, 2))
    assert(arr.internal.toDoubleArray === Array(2d, 2d))
    assert(arr.internal.toByteArray === Array(2.toByte, 2.toByte))
  }
  test("dtype float64") {
    val arr = NDArray.ones(Shape(1, 2), dtype = DType.Float64) * 2
    assert(arr.dtype === DType.Float64)
    // 2 elements x 8 bytes each
    assert(arr.internal.getRaw.length === 16)
    assert(arr.internal.toFloatArray === Array(2f, 2f))
    assert(arr.internal.toIntArray === Array(2, 2))
    assert(arr.internal.toDoubleArray === Array(2d, 2d))
    assert(arr.internal.toByteArray === Array(2.toByte, 2.toByte))
  }
}
| rishita/mxnet | scala-package/core/src/test/scala/ml/dmlc/mxnet/NDArraySuite.scala | Scala | apache-2.0 | 15,252 |
package com.twitter.finagle.httpx
import com.twitter.finagle.httpx.netty.HttpMessageProxy
import org.jboss.netty.handler.codec.http.HttpMessage
import scala.collection.mutable
import scala.collection.JavaConverters._
/**
* Mutable message headers map.
*
* Header names are case-insensitive. For example, get("accept") is the same as
* get("Accept").
*
* The map is a multi-map. Use getAll() to get all values for a key. Use add()
* to append a key-value.
*/
abstract class HeaderMap
  extends mutable.Map[String, String]
  with mutable.MapLike[String, String, HeaderMap] {
  /** All values stored under `key`; a header name may map to multiple values. */
  def getAll(key: String): Iterable[String]
  /** Add a header but don't replace existing header(s). */
  def add(k: String, v: String)
  // Required by MapLike so transformations (map/filter/...) return a HeaderMap.
  override def empty: HeaderMap = new MapHeaderMap(mutable.Map.empty)
}
/** Mutable-Map-backed HeaderMap */
/**
 * [[HeaderMap]] backed by a mutable Map from header name to the sequence of
 * values for that name.
 *
 * Per the contract documented on [[HeaderMap]], header names are
 * case-insensitive: lookups, additions and removals all match names
 * regardless of case, while the originally supplied casing is preserved
 * for iteration. (The original implementation was case-insensitive only in
 * `get`/`-=`; `getAll`, `add` and `+=` are now consistent with it.)
 */
class MapHeaderMap(underlying: mutable.Map[String, Seq[String]]) extends HeaderMap {

  // The key already stored in `underlying` that matches `key`
  // case-insensitively, if one exists.
  private[this] def canonicalKey(key: String): Option[String] =
    underlying.keysIterator.find(_.equalsIgnoreCase(key))

  /** All values for `key`, matched case-insensitively. */
  def getAll(key: String): Iterable[String] =
    canonicalKey(key) flatMap underlying.get getOrElse Nil

  /** Append a value for `key` (case-insensitive) without replacing existing values. */
  def add(k: String, v: String) = {
    val key = canonicalKey(k) getOrElse k
    underlying(key) = underlying.getOrElse(key, Nil) :+ v
    this
  }

  // For Map/MapLike: first value for `key`, matched case-insensitively.
  def get(key: String): Option[String] =
    canonicalKey(key) flatMap underlying.get flatMap { _.headOption }

  // For Map/MapLike: flattens multi-valued headers into (key, value) pairs.
  def iterator: Iterator[(String, String)] = {
    for ((k, vs) <- underlying.iterator; v <- vs) yield
      (k, v)
  }

  // For Map/MapLike: replaces any existing value(s) for the key, regardless
  // of case, so a later `get` cannot observe a stale case-variant entry.
  def += (kv: (String, String)) = {
    underlying.retain { case (k, _) => !k.equalsIgnoreCase(kv._1) }
    underlying(kv._1) = Seq(kv._2)
    this
  }

  // For Map/MapLike: removes all case-variants of `key`.
  def -= (key: String) = {
    underlying.retain { case (a, b) => !a.equalsIgnoreCase(key) }
    this
  }

  override def keys: Iterable[String] =
    underlying.keys

  override def keySet: Set[String] =
    underlying.keySet.toSet

  override def keysIterator: Iterator[String] =
    underlying.keysIterator
}
object MapHeaderMap {
  /**
   * Build a [[MapHeaderMap]] from (name, value) pairs.
   *
   * Duplicate names (in any casing) are merged into a single multi-valued
   * entry; note that this factory stores header names lower-cased.
   */
  def apply(headers: Tuple2[String, String]*): MapHeaderMap = {
    val map = headers
      .groupBy { case (k, v) => k.toLowerCase }
      .mapValues { case values => values.map { _._2 } } // remove keys
    new MapHeaderMap(mutable.Map() ++ map)
  }
}
/**
 * Mutable HttpMessage-backed HeaderMap.
 *
 * Reads and writes go straight to the underlying message's headers, so
 * changes made here are visible to anything else holding the message.
 * Case-sensitivity follows the underlying Netty headers implementation
 * (assumed case-insensitive for lookups — confirm against the Netty
 * version in use).
 */
private[finagle] class MessageHeaderMap(httpMessage: HttpMessageProxy) extends HeaderMap {
  // Option(...) maps a null return from the Java API to None.
  def get(key: String): Option[String] =
    Option(httpMessage.headers.get(key))
  def iterator: Iterator[(String, String)] =
    httpMessage.headers.iterator.asScala map { entry =>
      (entry.getKey, entry.getValue)
    }
  override def keys: Iterable[String] =
    httpMessage.headers.names.asScala
  // Materializes the key set on every call.
  override def keySet: Set[String] =
    keys.toSet
  override def keysIterator: Iterator[String] =
    keySet.iterator
  override def contains(key: String): Boolean =
    httpMessage.headers.contains(key)
  // set(...) replaces any existing values for the header.
  def += (kv: (String, String)) = {
    httpMessage.headers.set(kv._1, kv._2)
    this
  }
  def -= (key: String) = {
    httpMessage.headers.remove(key)
    this
  }
  def getAll(key: String): Iterable[String] =
    httpMessage.headers.getAll(key).asScala
  // add(...) appends without replacing existing values.
  def add(k: String, v: String) = {
    httpMessage.headers.add(k, v)
    this
  }
}
object HeaderMap {
  /**
   * Create HeaderMap from header list. Convenience method for testing.
   * Duplicate names are merged into multi-valued headers (see [[MapHeaderMap]]).
   */
  def apply(headers: Tuple2[String, String]*): HeaderMap =
    MapHeaderMap(headers: _*)
}
| kristofa/finagle | finagle-httpx/src/main/scala/com/twitter/finagle/httpx/HeaderMap.scala | Scala | apache-2.0 | 3,320 |
package de.juergens.time
import java.time.{LocalDate, Month, Year}
import de.juergens.util.{Ordinal, Up}
import org.junit.Assert._
import org.junit.Test
class MonthAdjusterTest {
@Test
@throws[Exception]
def adjustInto: Unit = {
val adjuster = MonthAdjuster(Ordinal(1),Month.JULY,Up)
val expected = LocalDate.parse("1776-07-31")
val actual = adjuster.adjustInto(Year.of(1775).atMonth(Month.DECEMBER).atDay(4))
assertEquals( expected, actual )
}
} | hjuergens/date-parser | date-rule-combinators/src/test/scala/de/juergens/time/MonthAdjusterTest.scala | Scala | apache-2.0 | 474 |
/**
* Copyright (C) 2013 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.fr
import org.orbeon.scaxon.XML._
import org.orbeon.saxon.om.NodeInfo
import XMLNames._
/**
 * Operations over Form Runner container elements (sections and grids):
 * predicates for repeat/container detection and traversal helpers for
 * ancestor/descendant containers.
 */
trait FormRunnerContainerOps extends FormRunnerControlOps {
  // The Form Builder body is an xf:group carrying the "fb-body" class.
  def isFBBody(node: NodeInfo) = (node self XFGroupTest) && node.attClasses("fb-body")
  // Values of the @repeat attribute marking repeated containers.
  val RepeatContentToken = "content"
  val LegacyRepeatContentToken = "true"
  // Predicates
  val IsGrid: NodeInfo ⇒ Boolean = _ self FRGridTest
  val IsSection: NodeInfo ⇒ Boolean = _ self FRSectionTest
  def isRepeatable(node: NodeInfo) =
    IsGrid(node) || IsSection(node)
  // Modern marker: repeat="content".
  def isContentRepeat(node: NodeInfo) =
    isRepeatable(node) && node.attValue("repeat") == RepeatContentToken
  // Older markers: repeat="true" or any of the min/max occurrence attributes.
  def isLegacyRepeat(node: NodeInfo) =
    ! isContentRepeat(node) &&
    isRepeatable(node) && (
      node.attValue("repeat") == LegacyRepeatContentToken ||
      node.att("minOccurs").nonEmpty ||
      node.att("maxOccurs").nonEmpty ||
      node.att("min").nonEmpty ||
      node.att("max").nonEmpty
    )
  def isRepeat(node: NodeInfo) =
    isContentRepeat(node) || isLegacyRepeat(node)
  val IsContainer: NodeInfo ⇒ Boolean =
    node ⇒ (node self FRContainerTest) || isFBBody(node)
  def controlRequiresNestedIterationElement(node: NodeInfo) =
    isRepeat(node)
  // Namespace URL a section template component must match
  private val ComponentURI = """^http://orbeon.org/oxf/xml/form-builder/component/([^/]+)/([^/]+)$""".r
  val IsSectionTemplateContent: NodeInfo ⇒ Boolean =
    container ⇒ (container parent * exists IsSection) && ComponentURI.findFirstIn(container.namespaceURI).nonEmpty
  // XForms callers: get the name for a section or grid element or null (the empty sequence)
  def getContainerNameOrEmpty(elem: NodeInfo) = getControlNameOpt(elem).orNull
  // Find ancestor sections and grids (including non-repeated grids) from leaf to root
  def findAncestorContainers(descendant: NodeInfo, includeSelf: Boolean = false) =
    (if (includeSelf) descendant ancestorOrSelf * else descendant ancestor *) filter IsContainer
  // Find ancestor section and grid names from root to leaf
  // Don't return non-repeated fr:grid until an enclosing element is needed. See:
  // - https://github.com/orbeon/orbeon-forms/issues/2173
  // - https://github.com/orbeon/orbeon-forms/issues/1947
  def findContainerNamesForModel(descendant: NodeInfo, includeSelf: Boolean = false): Seq[String] = {
    val namesWithContainers =
      for {
        container ← findAncestorContainers(descendant, includeSelf)
        name ← getControlNameOpt(container)
        if ! (IsGrid(container) && ! isRepeat(container))
      } yield
        name → container
    // Repeated sections add an intermediary iteration element
    val namesFromLeaf =
      namesWithContainers flatMap {
        case (name, container) ⇒
          findRepeatIterationName(descendant, name).toList ::: name :: Nil
      }
    namesFromLeaf.reverse
  }
  // A container's children containers
  // NOTE(review): `\\` is the descendant axis in scaxon while the name says
  // "children" — confirm whether the child axis (`\`) was intended here.
  def childrenContainers(container: NodeInfo) =
    container \\ * filter IsContainer
  // A container's children grids (including repeated grids)
  // NOTE(review): same `\\` vs `\` question as childrenContainers above.
  def childrenGrids(container: NodeInfo) =
    container \\ * filter IsGrid
  // Find all ancestor repeats from leaf to root
  def findAncestorRepeats(descendantOrSelf: NodeInfo, includeSelf: Boolean = false) =
    findAncestorContainers(descendantOrSelf, includeSelf) filter isRepeat
  def findAncestorRepeatNames(descendantOrSelf: NodeInfo, includeSelf: Boolean = false) =
    findAncestorRepeats(descendantOrSelf, includeSelf) flatMap getControlNameOpt
  // Find all ancestor sections from leaf to root
  def findAncestorSections(descendantOrSelf: NodeInfo, includeSelf: Boolean = false) =
    findAncestorContainers(descendantOrSelf, includeSelf) filter IsSection
  //@XPathFunction
  def findRepeatIterationNameOrEmpty(inDoc: NodeInfo, controlName: String) =
    findRepeatIterationName(inDoc, controlName) getOrElse ""
  // The iteration element name for a repeated control, if the control repeats.
  def findRepeatIterationName(inDoc: NodeInfo, controlName: String): Option[String] =
    for {
      control ← findControlByName(inDoc, controlName)
      if controlRequiresNestedIterationElement(control)
      bind ← findBindByName(inDoc, controlName)
      iterationBind ← bind / XFBindTest headOption // there should be only a single nested bind
    } yield
      getBindNameOrEmpty(iterationBind)
}
| wesley1001/orbeon-forms | src/main/scala/org/orbeon/oxf/fr/FormRunnerContainerOps.scala | Scala | lgpl-2.1 | 5,084 |
package org.skycastle.util.mesh
import com.jme3.math.{Vector2f, Vector3f}
/**
 * Shape for one segment separator on a lathe.
 *
 * Theta is the angle around the trunk in radians.
 */
trait Segment {
  /** 3D position on the segment outline at angle `theta` (radians). */
  def pos(theta: Float): Vector3f
  /** Texture (UV) coordinate on the segment outline at angle `theta` (radians). */
  def texturePos(theta: Float): Vector2f
  /** 3D position of the segment's center. */
  def centerPos: Vector3f
  /** Texture (UV) coordinate of the segment's center. */
  def centerTexturePos: Vector2f
} | zzorn/skycastle | src/main/scala/org/skycastle/util/mesh/Segment.scala | Scala | gpl-2.0 | 339 |
/*
*
*/
package see.values
/* A generic number: common supertype for all numeric values in the
 * interpreter, providing the arithmetic operator surface. */
private[see] abstract class Number extends Comparable {
    override def selType = 'Number
    override def isType(typeId: Symbol) = (typeId == 'Number) || super.isType(typeId)
    // Unary operations, implemented by each concrete number type.
    def negate: Number
    def abs: Number
    // Binary primitives (dispatch targets for the public operators below).
    protected def add_(rhs: Number): Number
    protected def sub_(rhs: Number): Number
    protected def mul_(rhs: Number): Number
    protected def div_(rhs: Number): Number
    protected def mod_(rhs: Number): Number
    protected def pwr_(rhs: Number): Number
    // Ensures, operation will be performed upon richest necessary type.
    // We could probably use some intrinsics mechanism instead, but
    // first I'm not fond of that mechanism at all, and second it
    // still would not allow e.g n1 + n2 with n1: Number, n2: Number
    // NOTE(review): `propagate` is defined outside this file (on a
    // supertype); assumed to widen `this` to the richer operand type —
    // confirm in Comparable.
    private final def np(n: Number) = propagate(n).asInstanceOf[Number]
    final def +(rhs: Number) = np(rhs).add_(rhs)
    final def -(rhs: Number) = np(rhs).sub_(rhs)
    final def *(rhs: Number) = np(rhs).mul_(rhs)
    final def /(rhs: Number) = np(rhs).div_(rhs)
    final def %(rhs: Number) = np(rhs).mod_(rhs)
    final def **(rhs: Number) = np(rhs).pwr_(rhs)
    // for convenience:
    def toInt = toLong.toInt
}
private[see] object Number {
    // Makes Number usable wherever an Ordering is required (sorting, min/max);
    // comparison itself is delegated to Comparable's companion.
    implicit object ord extends Ordering[Number] {
        def compare(l: Number, r: Number): Int = Comparable(l, r)
    }
}
/* Defines bitwise operations and shifts, available on integral numbers only. */
private[see] abstract class IntLike extends Number {
    override def selType = 'Integral
    override def isType(typeId: Symbol) =
        (typeId == 'Integral) || super.isType(typeId)
    // Bitwise complement.
    def ~ : IntLike
    // Binary primitives — same dispatch scheme as in Number.
    protected def and_(rhs: IntLike): IntLike
    protected def or_(rhs: IntLike): IntLike
    protected def xor_(rhs: IntLike): IntLike
    protected def lsh_(rhs: IntLike): IntLike
    protected def rsh_(rhs: IntLike): IntLike
    protected def gcd_(rhs: IntLike): IntLike
    // Coerces `this` to the richest integral type required (see np in Number).
    private final def ip(n: Number) = propagate(n).asInstanceOf[IntLike]
    final def &(rhs: IntLike) = ip(rhs).and_(rhs)
    final def |(rhs: IntLike) = ip(rhs).or_(rhs)
    final def ^(rhs: IntLike) = ip(rhs).xor_(rhs)
    final def <<(rhs: IntLike) = ip(rhs).lsh_(rhs)
    final def >>(rhs: IntLike) = ip(rhs).rsh_(rhs)
    final def gcd(rhs: IntLike) = ip(rhs).gcd_(rhs)
}
| acruise/see | src/main/scala/see/values/Numbers.scala | Scala | bsd-3-clause | 2,242 |
/***
* Copyright 2014 Rackspace US, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.rackspace.com.papi.components.checker.step.results
/** Exception carrying a descriptive message; thrown when no results are available (per the class name — confirm usage at call sites). */
class NoResultsException( val message : String ) extends Exception( message )
| tylerroyal/api-checker | core/src/main/scala/com/rackspace/com/papi/components/checker/step/results/NoResultsException.scala | Scala | apache-2.0 | 766 |
package beam.agentsim.agents.ridehail.allocation
import beam.agentsim.agents.ridehail.RideHailManager
import beam.utils.{PointToPlot, SpatialPlot}
import java.awt.Color
/**
* BEAM
*/
// Empty placeholder; the repositioning debug helpers live in the companion object.
class DebugRepositioning {}
object DebugRepositioning {

  private val SecondsPerHour = 3600

  /**
   * Once per simulated hour (excluding tick 0 and anything past 24h), writes
   * a spatial debug image for the given hour: every ride-hail vehicle's
   * current location as a black dot, plus (when TNC iteration stats are
   * available) each TAZ with requested rides as a red dot whose size grows
   * logarithmically with demand.
   *
   * @param tick            current simulation time in seconds
   * @param rideHailManager source of vehicle locations, stats and output paths
   */
  def produceRepositioningDebugImages(tick: Int, rideHailManager: RideHailManager): Unit = {
    // Note: `tick` is already an Int, so the original `tick.toInt` calls were redundant.
    if (tick > 0 && tick % SecondsPerHour == 0 && tick < 24 * SecondsPerHour) {
      val spatialPlot = new SpatialPlot(1100, 1100, 50)
      // One black point per vehicle at its current UTM location.
      for (veh <- rideHailManager.resources.values) {
        spatialPlot.addPoint(
          PointToPlot(
            rideHailManager.vehicleManager.getRideHailAgentLocation(veh.id).currentLocationUTM.loc,
            Color.BLACK,
            5
          )
        )
      }
      // One red point per TAZ with demand in [tick, tick + 1h); size scales with log(demand).
      rideHailManager.tncIterationStats.foreach { tncIterationStats =>
        val tazEntries = tncIterationStats.getCoordinatesWithRideHailStatsEntry(tick, tick + SecondsPerHour)
        for (tazEntry <- tazEntries.filter(_._2.sumOfRequestedRides > 0)) {
          spatialPlot.addPoint(
            PointToPlot(
              tazEntry._1,
              Color.RED,
              10 + Math.log(tazEntry._2.sumOfRequestedRides).toInt
            )
          )
        }
      }
      val matsimServices = rideHailManager.beamServices.matsimServices
      val iteration = "it." + matsimServices.getIterationNumber
      // File name keeps the historical spelling ("Initally") so existing
      // tooling that looks for these images keeps working.
      spatialPlot.writeImage(
        matsimServices.getControlerIO
          .getIterationFilename(
            matsimServices.getIterationNumber,
            tick / SecondsPerHour + "locationOfAgentsInitally.png"
          )
          .replace(iteration, iteration + "/rideHailDebugging")
      )
    }
  }
}
| colinsheppard/beam | src/main/scala/beam/agentsim/agents/ridehail/allocation/DebugRepositioning.scala | Scala | gpl-3.0 | 1,668 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.api.scala.runtime
import org.apache.flink.api.common.ExecutionConfig
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.common.typeutils.TypeSerializer
import org.apache.flink.api.java.typeutils.runtime.kryo.KryoSerializer
import org.apache.flink.api.scala._
import org.apache.flink.api.scala.typeutils.TraversableSerializer
import org.junit.Assert._
import org.junit.{Assert, Test}
import scala.collection.immutable.{BitSet, LinearSeq}
import scala.collection.mutable
import scala.ref.WeakReference
import scala.reflect.internal.util.ScalaClassLoader.URLClassLoader
/** Tests for [[TraversableSerializer]].
  *
  * Two groups of tests:
  *  1. Round-trip serialization of various Scala collection types (delegated to
  *     [[TraversableSerializerTestInstance]] via `runTests`).
  *  2. Equality semantics of `TraversableSerializer.Key`, the cache key that pairs a
  *     weakly-referenced ClassLoader with the compiled CanBuildFrom code string.
  */
class TraversableSerializerTest {
  // Note: SortedMap and SortedSet are serialized with Kryo
  @Test
  def testSeq(): Unit = {
    val testData = Array(Seq(1,2,3), Seq(2,3))
    runTests(testData)
  }
  @Test
  def testIndexedSeq(): Unit = {
    val testData = Array(IndexedSeq(1,2,3), IndexedSeq(2,3))
    runTests(testData)
  }
  @Test
  def testLinearSeq(): Unit = {
    val testData = Array(LinearSeq(1,2,3), LinearSeq(2,3))
    runTests(testData)
  }
  @Test
  def testMap(): Unit = {
    val testData = Array(Map("Hello" -> 1, "World" -> 2), Map("Foo" -> 42))
    runTests(testData)
  }
  @Test
  def testSet(): Unit = {
    val testData = Array(Set(1,2,3,3), Set(2,3))
    runTests(testData)
  }
  @Test
  def testBitSet(): Unit = {
    val testData = Array(BitSet(1,2,3,4), BitSet(2,3,2))
    runTests(testData)
  }
  @Test
  def testMutableList(): Unit = {
    val testData = Array(mutable.MutableList(1,2,3), mutable.MutableList(2,3,2))
    runTests(testData)
  }
  // Elements that are themselves composite (tuples / POJOs) inside a traversable.
  @Test
  def testWithCaseClass(): Unit = {
    val testData = Array(Seq((1, "String"), (2, "Foo")), Seq((4, "String"), (3, "Foo")))
    runTests(testData)
  }
  @Test
  def testWithPojo(): Unit = {
    val testData = Array(Seq(new Pojo("hey", 1)), Seq(new Pojo("Ciao", 2), new Pojo("Foo", 3)))
    runTests(testData)
  }
  @Test
  def testWithMixedPrimitives(): Unit = {
    // Does not work yet because the GenericTypeInfo used for the elements will
    // have a typeClass of Object, and therefore not deserialize the elements correctly.
    // It does work when used in a Job, though. Because the Objects get cast to
    // the correct type in the user function.
    val testData = Array(Seq(1, 1L, 1d, true, "Hello"), Seq(2, 2L, 2d, false, "Ciao"))
    runTests(testData)
  }
  // --- TraversableSerializer.Key equality: same loader + same code => same key ---
  @Test
  def sameClassLoaderAndCodeShouldProvideEqualKeys(): Unit = {
    val classLoaderA = new URLClassLoader(Seq.empty[java.net.URL], null)
    val keyA = TraversableSerializer.Key(classLoaderA, "code")
    val keyB = TraversableSerializer.Key(classLoaderA, "code")
    assertEquals(keyA, keyB)
  }
  @Test
  def differentClassLoadersProvideNonEqualKeys(): Unit = {
    val classLoaderA = new URLClassLoader(Seq.empty[java.net.URL], null)
    val classLoaderB = new URLClassLoader(Seq.empty[java.net.URL], null)
    val keyA = TraversableSerializer.Key(classLoaderA, "code")
    val keyB = TraversableSerializer.Key(classLoaderB, "code")
    assertNotEquals(keyA, keyB)
  }
  // A key whose weak ClassLoader reference has been cleared must not equal a live one.
  @Test
  def expiredReferenceShouldProduceNonEqualKeys(): Unit = {
    val classLoaderA = new URLClassLoader(Seq.empty[java.net.URL], null)
    val keyA = TraversableSerializer.Key(classLoaderA, "code")
    val keyB = keyA.copy(classLoaderRef = WeakReference(null))
    assertNotEquals(keyA, keyB)
  }
  // null stands for the bootstrap ClassLoader; two such keys with equal code must match.
  @Test
  def bootStrapClassLoaderShouldProduceTheSameKeys(): Unit = {
    val keyA = TraversableSerializer.Key(null, "a")
    val keyB = TraversableSerializer.Key(null, "a")
    assertEquals(keyA, keyB)
  }
  @Test
  def differentCanBuildFromCodeShouldProduceDifferentKeys(): Unit = {
    val classLoaderA = new URLClassLoader(Seq.empty[java.net.URL], null)
    val keyA = TraversableSerializer.Key(classLoaderA, "a")
    val keyB = TraversableSerializer.Key(classLoaderA, "b")
    assertNotEquals(keyA, keyB)
  }
  // Derives the serializer for T from its implicit TypeInformation and runs the
  // full serializer test battery; any exception is converted into a test failure.
  private final def runTests[T : TypeInformation](instances: Array[T]) {
    try {
      val typeInfo = implicitly[TypeInformation[T]]
      val serializer = typeInfo.createSerializer(new ExecutionConfig)
      val typeClass = typeInfo.getTypeClass
      val test = new TraversableSerializerTestInstance[T](serializer, typeClass, -1, instances)
      test.testAll()
    } catch {
      case e: Exception =>
        System.err.println(e.getMessage)
        e.printStackTrace()
        Assert.fail(e.getMessage)
    }
  }
}
/** Mutable POJO element type used by the serializer round-trip tests.
  *
  * Keeps a public no-arg constructor (POJO requirement) and defines value
  * equality over both fields.
  */
class Pojo(var name: String, var count: Int) {
  def this() = this("", -1)

  override def equals(other: Any): Boolean = {
    other match {
      case oP: Pojo => name == oP.name && count == oP.count
      case _ => false
    }
  }

  // Fix: `equals` was overridden without `hashCode`, violating the
  // equals/hashCode contract — equal Pojos could hash to different buckets
  // (e.g. inside a Set or HashMap used by a test or serializer).
  override def hashCode(): Int = 31 * name.## + count
}
/** Serializer test battery for traversable types.
  *
  * Extends the standard Scala serializer test instance and adds a deep-copy
  * check specific to traversables: copying a collection of mutable elements
  * must also copy the elements themselves.
  */
class TraversableSerializerTestInstance[T](
    serializer: TypeSerializer[T],
    typeClass: Class[T],
    length: Int,
    testData: Array[T])
  extends ScalaSpecialTypesSerializerTestInstance[T](serializer, typeClass, length, testData) {
  @Test
  override def testAll(): Unit = {
    super.testAll()
    testTraversableDeepCopy()
  }
  @Test
  def testTraversableDeepCopy(): Unit = {
    val serializer = getSerializer
    val elementSerializer = serializer.asInstanceOf[TraversableSerializer[_, _]].elementSerializer
    val data = getTestData
    // check for deep copy if type is immutable and not serialized with Kryo
    // elements of traversable should not have reference equality
    if (!elementSerializer.isImmutableType && !elementSerializer.isInstanceOf[KryoSerializer[_]]) {
      data.foreach { datum =>
        val original = datum.asInstanceOf[Traversable[_]].toIterable
        val copy = serializer.copy(datum).asInstanceOf[Traversable[_]].toIterable
        // Pairwise comparison: each copied element must be a distinct object (ne),
        // non-AnyRef (primitive) pairs are fine and fall through to the wildcard.
        copy.zip(original).foreach { case (c: AnyRef, o: AnyRef) =>
          assertTrue("Copy of mutable element has reference equality.", c ne o)
          case _ => // ok
        }
      }
    }
  }
  @Test
  override def testInstantiate(): Unit = {
    try {
      val serializer: TypeSerializer[T] = getSerializer
      val instance: T = serializer.createInstance
      assertNotNull("The created instance must not be null.", instance)
      val tpe: Class[T] = getTypeClass
      assertNotNull("The test is corrupt: type class is null.", tpe)
      // We cannot check this because Collection Instances are not always of the type
      // that the user writes, they might have generated names.
      // assertEquals("Type of the instantiated object is wrong.", tpe, instance.getClass)
    }
    catch {
      case e: Exception =>
        System.err.println(e.getMessage)
        e.printStackTrace()
        fail("Exception in test: " + e.getMessage)
    }
  }
}
| hequn8128/flink | flink-scala/src/test/scala/org/apache/flink/api/scala/runtime/TraversableSerializerTest.scala | Scala | apache-2.0 | 7,430 |
import doodle.core.Image._
import doodle.syntax._
import doodle.jvm.Java2DCanvas._
import doodle.backend.StandardInterpreter._
import doodle.core.{Color, Image}
/** Creative Scala chapter 6 exercises: eager parameter evaluation and a
  * recursively-built row of boxes drawn with Doodle.
  */
object Chapter6 extends App {

  /** Squares an integer. */
  def square(x: Int): Int = x * x

  /** Halves a double. */
  def halve(x: Double): Double = x / 2

  /** Adds three ints, printing when the calculation actually happens —
    * demonstrates that arguments are evaluated before the method body runs.
    */
  def sumThreeNumbers(a: Int, b: Int, c: Int): Int = {
    println("inside method - calculation is done here")
    val sum = a + b + c
    println(sum)
    sum
  }

  // Each argument block prints its label as it is evaluated (a, b, c), then the method runs.
  sumThreeNumbers({ println("a"); 2 }, { println("b"); 2 }, { println("c"); 2 })

  /** A small solid red tile. */
  val box =
    rectangle(20, 20)
      .lineWidth(5)
      .lineColor(Color.red)
      .fillColor(Color.red)

  /** Lays `noOfBoxes` tiles side by side, recursively. */
  def stackingBoxes(noOfBoxes: Int): Image =
    if (noOfBoxes == 0) Image.empty
    else box beside stackingBoxes(noOfBoxes - 1)

  stackingBoxes(5).draw
}
| hbollini1/creative-scala-template | src/main/scala/Chapter6.scala | Scala | apache-2.0 | 757 |
package pureconfig.module.cats
import cats.instances.either._
import cats.instances.int._
import cats.instances.tuple._
import cats.instances.unit._
import cats.kernel.laws.discipline.{MonoidTests, SemigroupTests}
import cats.laws.discipline._
import com.typesafe.config.{Config, ConfigValue}
import org.scalatest.funsuite.AnyFunSuite
import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks
import org.typelevel.discipline.scalatest.FunSuiteDiscipline
import pureconfig._
import pureconfig.error.ConfigReaderFailures
import pureconfig.module.cats.arbitrary._
import pureconfig.module.cats.eq._
import pureconfig.module.cats.instances._
/** Discipline law-checking suite: verifies that the cats instances supplied by
  * pureconfig's cats module satisfy the corresponding cats-laws rule sets.
  * Each `checkAll` registers one property-based law bundle under the given name.
  */
class CatsLawsSuite extends AnyFunSuite with ScalaCheckDrivenPropertyChecks with FunSuiteDiscipline {
  // ConfigReader accumulates errors as ConfigReaderFailures, hence ApplicativeError laws.
  checkAll(
    "ConfigReader[Int]",
    ApplicativeErrorTests[ConfigReader, ConfigReaderFailures].applicativeError[Int, Int, Int]
  )
  checkAll("ConfigWriter[Int]", ContravariantSemigroupalTests[ConfigWriter].contravariantSemigroupal[Int, Int, Int])
  checkAll("ConfigConvert[Int]", InvariantSemigroupalTests[ConfigConvert].invariantSemigroupal[Int, Int, Int])
  // Typesafe-config value/object combination laws.
  checkAll("ConfigValue", SemigroupTests[ConfigValue].semigroup)
  checkAll("Config", MonoidTests[Config].monoid)
  checkAll("ConfigReaderFailures", SemigroupTests[ConfigReaderFailures].semigroup)
  checkAll("ConfigObjectSource", MonoidTests[ConfigObjectSource].monoid)
}
| pureconfig/pureconfig | modules/cats/src/test/scala/pureconfig/module/cats/CatsLawsSuite.scala | Scala | mpl-2.0 | 1,388 |
package example
import scala.language/*=>scala.language.*/.implicitConversions/*=>scala.language.implicitConversions.*/
// NOTE(review): this file is a scalameta/semanticdb expect-test fixture — the
// inline /*=>…*/ and /*<=…*/ markers are generated symbol expectations.
// Do not hand-edit the markers; regenerate them via the expect-test tooling.
class ImplicitConversion/*<=example.ImplicitConversion#*/ {
  implicit def string2Number/*<=example.ImplicitConversion#string2Number().*/(
      string/*<=example.ImplicitConversion#string2Number().(string)*/: String/*=>scala.Predef.String#*/
  ): Int/*=>scala.Int#*/ = 42
  val message/*<=example.ImplicitConversion#message.*/ = ""
  val number/*<=example.ImplicitConversion#number.*/ = 42
  val tuple/*<=example.ImplicitConversion#tuple.*/ = (1, 2)
  val char/*<=example.ImplicitConversion#char.*/: Char/*=>scala.Char#*/ = 'a'
  // extension methods
  message/*=>example.ImplicitConversion#message.*/
    .stripSuffix/*=>scala.collection.immutable.StringLike#stripSuffix().*/("h")
  tuple/*=>example.ImplicitConversion#tuple.*/ +/*=>scala.Predef.any2stringadd#`+`().*/ "Hello"
  // implicit conversions
  val x/*<=example.ImplicitConversion#x.*/: Int/*=>scala.Int#*/ = message/*=>example.ImplicitConversion#message.*/
  // interpolators
  s/*=>scala.StringContext#s().*/"Hello $message/*=>example.ImplicitConversion#message.*/ $number/*=>example.ImplicitConversion#number.*/"
  s/*=>scala.StringContext#s().*/"""Hello
     |$message/*=>example.ImplicitConversion#message.*/
     |$number/*=>example.ImplicitConversion#number.*/""".stripMargin/*=>scala.collection.immutable.StringLike#stripMargin(+1).*/
  val a/*<=example.ImplicitConversion#a.*/: Int/*=>scala.Int#*/ = char/*=>scala.Char#toInt().*/
  val b/*<=example.ImplicitConversion#b.*/: Long/*=>scala.Long#*/ = char/*=>scala.Char#toLong().*/
}
| scalameta/scalameta | tests/jvm/src/test/resources/example/ImplicitConversion.scala | Scala | bsd-3-clause | 1,628 |
package japgolly.microlibs.nonempty
import cats.instances.vector.{catsKernelStdEqForVector, catsKernelStdOrderForVector}
import cats.{NonEmptyTraverse, _}
import japgolly.univeq.UnivEq
import scala.annotation.nowarn
import scala.collection.immutable.Range
import scala.collection.{AbstractIterator, Factory}
import scala.math.Ordering
/** A `Vector` guaranteed to contain at least one element, split as `head +: tail`.
  *
  * Immutable and covariant in `A`. `equals`/`hashCode` are structural over
  * `(head, tail)`. Operations that could otherwise produce an empty result
  * (`filter`, `initNonEmpty`, …) return `Option[NonEmptyVector[A]]`.
  */
final class NonEmptyVector[+A](val head: A, val tail: Vector[A]) {
  override def toString = "NonEmpty" + whole.toString
  override def hashCode = head.## * 31 + tail.##
  override def equals(o: Any) = o match {
    case that: NonEmptyVector[Any] => this.head == that.head && this.tail == that.tail
    case _ => false
  }
  /** Total number of elements (always ≥ 1). */
  def length: Int =
    tail.length + 1
  /** Element at index `i`; throws `IndexOutOfBoundsException` when out of range. */
  def unsafeApply(i: Int): A =
    if (i == 0)
      head
    else
      tail(i - 1)
  /** Element at index `i`, or `None` when out of range. */
  def apply(i: Int): Option[A] =
    try {
      Some(unsafeApply(i))
    } catch {
      case _: IndexOutOfBoundsException => None
    }
  /** All elements except the last (may be empty). */
  def init: Vector[A] =
    if (tail.isEmpty)
      Vector.empty
    else
      head +: tail.init
  def initNonEmpty: Option[NonEmptyVector[A]] = NonEmptyVector option init
  def tailNonEmpty: Option[NonEmptyVector[A]] = NonEmptyVector option tail
  def map[B](f: A => B): NonEmptyVector[B] =
    NonEmptyVector(f(head), tail map f)
  def flatMap[B](f: A => NonEmptyVector[B]): NonEmptyVector[B] =
    reduceMapLeft1(f)(_ ++ _)
  def foreach[U](f: A => U): Unit = {
    f(head)
    tail foreach f
  }
  def foreachWithIndex[U](f: (A, Int) => U): Unit = {
    f(head, 0)
    var i = 0
    for (a <- tail) {
      i += 1
      f(a, i)
    }
  }
  def indices: Range =
    0 until length
  def forall(f: A => Boolean): Boolean =
    f(head) && tail.forall(f)
  def exists(f: A => Boolean): Boolean =
    f(head) || tail.exists(f)
  def find(f: A => Boolean): Option[A] =
    if (f(head)) Some(head) else tail.find(f)
  /** Transforms only the tail, keeping the head — preserves non-emptiness for free. */
  def mapTail[B >: A](f: Vector[A] => Vector[B]): NonEmptyVector[B] =
    NonEmptyVector(head, f(tail))
  def mapWithIndex[B](f: (A, Int) => B): NonEmptyVector[B] = {
    val h = f(head, 0)
    var i = 0
    var t = Vector.empty[B]
    for (a <- tail) {
      i += 1
      t :+= f(a, i)
    }
    NonEmptyVector(h, t)
  }
  def :+[B >: A](a: B): NonEmptyVector[B] =
    mapTail(_ :+ a)
  def +:[B >: A](a: B): NonEmptyVector[B] =
    NonEmptyVector(a, head +: tail)
  def ++[B >: A](as: IterableOnce[B]): NonEmptyVector[B] =
    mapTail(_ ++ as)
  def ++[B >: A](b: NonEmptyVector[B]): NonEmptyVector[B] =
    ++(b.whole)
  def ++:[B >: A](as: Vector[B]): NonEmptyVector[B] =
    if (as.isEmpty) this else NonEmptyVector(as.head, as.tail ++ whole)
  def last: A =
    if (tail.isEmpty) head else tail.last
  /** The full underlying `Vector` (`head +: tail`). */
  def whole: Vector[A] =
    head +: tail
  def reverse: NonEmptyVector[A] =
    if (tail.isEmpty) this else NonEmptyVector.end(tail.reverse, head)
  def foldLeft[B](z: B)(f: (B, A) => B): B =
    tail.foldLeft(f(z, head))(f)
  /** Fold where the seed is derived from the head via `g`. */
  def foldMapLeft1[B](g: A => B)(f: (B, A) => B): B =
    tail.foldLeft(g(head))(f)
  /** Map every element through `f` and combine the results left-to-right with `g`. */
  def reduceMapLeft1[B](f: A => B)(g: (B, B) => B): B =
    foldMapLeft1(f)((b, a) => g(b, f(a)))
  def reduce[B >: A](f: (B, B) => B): B =
    reduceMapLeft1[B](a => a)(f)
  // Reduce bullshit red in IntelliJ
  // def traverseD[L, B](f: A => L \/ B): L \/ NonEmptyVector[B] =
  //   NonEmptyVector.traverse1.traverseU(this)(f)
  def intercalate[B >: A](b: B): NonEmptyVector[B] =
    intercalateF(b)(a => a)
  /** Maps elements through `f`, inserting `b` between consecutive results. */
  def intercalateF[B](b: B)(f: A => B): NonEmptyVector[B] = {
    val r = implicitly[Factory[B, Vector[B]]].newBuilder
    for (a <- tail) {
      r += b
      r += f(a)
    }
    NonEmptyVector(f(head), r.result())
  }
  def filter(f: A => Boolean): Option[NonEmptyVector[A]] =
    NonEmptyVector.option(whole filter f)
  def filterNot(f: A => Boolean): Option[NonEmptyVector[A]] =
    filter(!f(_))
  def iterator: Iterator[A] =
    whole.iterator
  def mapToNES[B: UnivEq](f: A => B): NonEmptySet[B] =
    NonEmptySet force iterator.map(f).toSet
  def toNES[B >: A : UnivEq]: NonEmptySet[B] =
    NonEmptySet(head, tail.toSet[B])
  // Applies a Vector transformation known not to change the element count.
  private def safeTrans[B](f: Vector[A] => Vector[B]): NonEmptyVector[B] =
    NonEmptyVector force f(whole)
  def sorted[B >: A](implicit ord: Ordering[B]) = safeTrans(_.sorted[B])
  def sortBy[B](f: A => B)(implicit ord: Ordering[B]) = safeTrans(_ sortBy f)
  def sortWith(lt: (A, A) => Boolean) = safeTrans(_ sortWith lt)
  /** Partitions by `f`, returning whichever side ends up non-empty in a typed position:
    * `Left` when the Left-side is non-empty, else `Right`.
    */
  def partitionD[B, C](f: A => Either[B, C]): Either[(NonEmptyVector[B], Vector[C]), (Vector[B], NonEmptyVector[C])] = {
    var bs = Vector.empty[B]
    var cs = Vector.empty[C]
    for (a <- tail)
      f(a) match {
        case Left(b) => bs :+= b
        case Right(c) => cs :+= c
      }
    f(head) match {
      case Left(b) => Left((NonEmptyVector(b, bs), cs))
      case Right(c) => Right((bs, NonEmptyVector(c, cs)))
    }
  }
  /** Partitions all elements by predicate `f`. The first component is the
    * non-empty side: the matching elements when any exist, otherwise the
    * non-matching elements.
    *
    * Fix: previously only `tail` was partitioned — `head` was silently dropped,
    * and a single-element vector crashed on `force` of an empty result.
    * Iterating `whole` includes the head and guarantees at least one element.
    */
  def partitionB(f: A => Boolean): (NonEmptyVector[A], Vector[A]) = {
    var ts = Vector.empty[A]
    var fs = Vector.empty[A]
    for (a <- whole)
      if (f(a))
        ts :+= a
      else
        fs :+= a
    if (ts.nonEmpty)
      (NonEmptyVector force ts, fs)
    else
      (NonEmptyVector force fs, ts)
  }
  /**
   * Peels away elements from the end until there are no elements left.
   *
   * Example:
   *
   * NonEmptyVector(2,4,6,8) will yield
   *
   *   NonEmptyVector(2,4,6,8)
   *   NonEmptyVector(2,4,6)
   *   NonEmptyVector(2,4)
   *   NonEmptyVector(2)
   */
  def peelFromEnd: Iterator[NonEmptyVector[A]] =
    new AbstractIterator[NonEmptyVector[A]] {
      var cur: NonEmptyVector[A] = NonEmptyVector.this
      override def hasNext = cur ne null
      override def next() = {
        val r = cur
        cur = r.initNonEmpty.orNull
        r
      }
    }
  def mkString(start: String, sep: String, end: String): String =
    whole.mkString(start, sep, end)
  def mkString(sep: String): String = mkString("", sep, "")
  def mkString: String = mkString("")
  def to[B](factory: Factory[A, B]): B =
    factory.fromSpecific(whole)
}
// =====================================================================================================================
/** Companion: constructors, builders and cats instances for [[NonEmptyVector]]. */
object NonEmptyVector extends NonEmptyVectorImplicits0 {
  /** Single-element vector. */
  def one[A](h: A): NonEmptyVector[A] =
    new NonEmptyVector(h, Vector.empty)
  /** Avoids failed type-inference with NonEmptyVector(Vector.empty[Int], Vector.empty[Int]) */
  def varargs[A](h: A, t: A*): NonEmptyVector[A] =
    apply(h, t.toVector)
  def apply[A](h: A, t: A*): NonEmptyVector[A] =
    apply(h, t.toVector)
  def apply[A](h: A, t: Vector[A]): NonEmptyVector[A] =
    new NonEmptyVector(h, t)
  /** Builds from an optional init plus a guaranteed last element. */
  def endOV[A](init: Option[Vector[A]], last: A): NonEmptyVector[A] =
    init.fold(one(last))(end(_, last))
  def endO[A](init: Option[NonEmptyVector[A]], last: A): NonEmptyVector[A] =
    init.fold(one(last))(_ :+ last)
  /** Builds from an init (possibly empty) plus a guaranteed last element. */
  def end[A](init: Vector[A], last: A): NonEmptyVector[A] =
    if (init.isEmpty)
      one(last)
    else
      new NonEmptyVector(init.head, init.tail :+ last)
  /** Catamorphism over a possibly-empty Vector. */
  def maybe[A, B](v: Vector[A], empty: => B)(f: NonEmptyVector[A] => B): B =
    if (v.isEmpty) empty else f(NonEmptyVector(v.head, v.tail))
  /** `Some` when `v` is non-empty, else `None`. */
  def option[A](v: Vector[A]): Option[NonEmptyVector[A]] =
    maybe[A, Option[NonEmptyVector[A]]](v, None)(Some.apply)
  /** Unsafe: assumes `v` is non-empty (throws on empty input). */
  def force[A](v: Vector[A]): NonEmptyVector[A] =
    apply(v.head, v.tail)
  def unwrapOption[A](o: Option[NonEmptyVector[A]]): Vector[A] =
    o.fold(Vector.empty[A])(_.whole)
  def newBuilder[A](head: A): Builder[A] =
    new Builder(head)
  def newBuilderNE[A](as: NonEmptyVector[A]): Builder[A] = {
    val b = newBuilder(as.head)
    b ++= as.tail
    b
  }
  /** Mutable builder seeded with a mandatory head element. */
  final class Builder[A](head: A) {
    private[this] val tail = Vector.newBuilder[A]
    def +=(a: A): Unit = {
      tail += a
      ()
    }
    def ++=(as: IterableOnce[A]): Unit = {
      tail ++= as
      ()
    }
    def ++=(as: NonEmptyVector[A]): Unit = {
      this += as.head
      this ++= as.tail
    }
    def result(): NonEmptyVector[A] =
      NonEmptyVector(head, tail.result())
  }
  @nowarn("cat=unused")
  implicit def univEq[A: UnivEq]: UnivEq[NonEmptyVector[A]] =
    UnivEq.force
  /** Semigroup by concatenation (no Monoid possible — no empty value). */
  implicit def semigroup[A]: Semigroup[NonEmptyVector[A]] =
    new Semigroup[NonEmptyVector[A]] {
      override def combine(a: NonEmptyVector[A], b: NonEmptyVector[A]) = a ++ b
    }
  /** cats NonEmptyTraverse instance; folds/traversals are defined head-first. */
  implicit def nonEmptyTraverse: NonEmptyTraverse[NonEmptyVector] = new NonEmptyTraverse[NonEmptyVector] {
    override def foldLeft[A, B](fa: NonEmptyVector[A], z: B)(f: (B, A) => B): B =
      fa.foldLeft(z)(f)
    override def reduceLeftTo[A, B](fa: NonEmptyVector[A])(f: A => B)(g: (B, A) => B): B =
      fa.tail.foldLeft(f(fa.head))(g)
    override def foldRight[A, B](fa: NonEmptyVector[A], lb: Eval[B])(f: (A, Eval[B]) => Eval[B]): Eval[B] =
      fa.whole.reverseIterator.foldLeft(lb)((b, a) => f(a, b))
    override def reduceRightTo[A, B](fa: NonEmptyVector[A])(f: A => B)(g: (A, Eval[B]) => Eval[B]): Eval[B] =
      fa.init.reverseIterator.foldLeft(Eval.later(f(fa.last)))((b, a) => g(a, b))
    override def size[A](fa: NonEmptyVector[A]) =
      fa.length
    override def map[A, B](fa: NonEmptyVector[A])(f: A => B): NonEmptyVector[B] =
      fa map f
    override def nonEmptyTraverse[G[_], A, B](fa: NonEmptyVector[A])(f: A => G[B])(implicit ap: Apply[G]): G[NonEmptyVector[B]] = {
      val gh = f(fa.head)
      if (fa.tail.isEmpty)
        ap.map(gh)(one)
      else {
        val gz = ap.map(gh)(_ => Vector.empty[B])
        val gt = fa.tail.foldLeft(gz)((q, a) => ap.map2(q, f(a))(_ :+ _))
        ap.map2(gh, gt)(new NonEmptyVector(_, _))
      }
    }
  }
  /** Extractor matching only single-element vectors: `case NonEmptyVector.Sole(a)`. */
  object Sole {
    def unapply[A](v: NonEmptyVector[A]) = new Unapply(v)
    final class Unapply[A](val v: NonEmptyVector[A]) extends AnyVal {
      def isEmpty = v.tail.nonEmpty
      def get = v.head
    }
  }
}
trait NonEmptyVectorImplicits1 {
  /** Total order for non-empty vectors: compares the underlying `Vector`s. */
  implicit def order[A: Order]: Order[NonEmptyVector[A]] = {
    val vectorOrder = implicitly[Order[Vector[A]]]
    Order.from((x, y) => vectorOrder.compare(x.whole, y.whole))
  }
}
trait NonEmptyVectorImplicits0 extends NonEmptyVectorImplicits1 {
  /** Equality for non-empty vectors: element-wise over the underlying `Vector`s. */
  implicit def equality[A: Eq]: Eq[NonEmptyVector[A]] = {
    val vectorEq = implicitly[Eq[Vector[A]]]
    Eq.instance((x, y) => vectorEq.eqv(x.whole, y.whole))
  }
}
| japgolly/microlibs-scala | nonempty/shared/src/main/scala/japgolly/microlibs/nonempty/NonEmptyVector.scala | Scala | apache-2.0 | 10,057 |
package com.betfair.service
/**
* Created by geraint on 02/09/15.
*/
/** Signals a failure from the Betfair NG service layer.
  *
  * Fix: previously extended bare `Throwable` without passing `message` along,
  * so `getMessage` returned `null` and logs/stack traces lost the detail.
  * Extending `Exception(message)` keeps it a `Throwable` (backward compatible)
  * while making `getMessage` meaningful.
  */
final case class BetfairServiceNGException(message: String) extends Exception(message)
| city81/betfair-service-ng | src/main/scala/com/betfair/service/BetfairServiceNGException.scala | Scala | bsd-2-clause | 150 |
package com.github.mrpowers.spark.daria.sql
import org.apache.hadoop.fs._
import org.apache.spark.SparkContext
import org.apache.spark.sql.{SaveMode, DataFrame}
import scala.util.Try
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.io.IOUtils
import java.io.IOException
import com.github.mrpowers.spark.daria.hadoop.FsHelpers
/** Helpers for writing a DataFrame out as a single named file instead of a
  * Spark part-file directory. Both methods write to a temporary folder first,
  * then move/merge the result into `filename`.
  */
object DariaWriters {
  // tmpFolder should look like s3a://bucket/data/src
  // filename should look like s3a://bucket/data/dest/my_cool_file.csv
  /** Repartitions `df` to one partition, writes it to `tmpFolder`, then renames
    * the single part file to `filename`. Only suitable for small DataFrames.
    *
    * NOTE(review): if no part file with the expected extension exists,
    * `oneFile.getOrElse("")` makes `new Path("")` throw — presumably
    * unreachable after a successful write, but worth confirming.
    * NOTE(review): the Boolean result of `fs.rename` is ignored, so a failed
    * rename is silent — confirm this is intended.
    */
  def writeSingleFile(
    df: DataFrame, // must be small
    format: String = "csv", // csv, parquet
    sc: SparkContext, // pass in spark.sparkContext
    tmpFolder: String, // will be deleted, so make sure it doesn't already exist
    filename: String, // the full filename you want outputted
    saveMode: String = "error" // Spark default is error, overwrite and append are also common
  ): Unit = {
    df.repartition(1)
      .write
      .mode(saveMode)
      .format(format)
      .save(tmpFolder)
    val conf = sc.hadoopConfiguration
    val src = new Path(tmpFolder)
    val fs = src.getFileSystem(conf)
    // Find the single part file by its format extension (e.g. ".csv").
    val oneFile = fs.listStatus(src).map(x => x.getPath.toString()).find(x => x.endsWith(format))
    val srcFile = new Path(oneFile.getOrElse(""))
    val dest = new Path(filename)
    fs.rename(srcFile, dest)
  }
  /** Writes `df` with its existing partitioning to `tmpFolder`, then merges all
    * part files into a single `filename` via `FsHelpers.dariaCopyMerge`.
    * Scales better than `writeSingleFile` because no repartition(1) is needed.
    */
  def writeThenMerge(
    df: DataFrame,
    format: String = "csv", // csv, parquet
    sc: SparkContext, // pass in spark.sparkContext
    tmpFolder: String, // will be deleted, so make sure it doesn't already exist
    filename: String, // the full filename you want outputted
    saveModeForTmpFolder: String = "error" // Spark default is error, overwrite and append are also common
  ): Unit = {
    df.write
      .mode(saveModeForTmpFolder)
      .format(format)
      .save(tmpFolder)
    FsHelpers.dariaCopyMerge(tmpFolder, filename, sc)
  }
}
| MrPowers/spark-daria | src/main/scala/com/github/mrpowers/spark/daria/sql/DariaWriters.scala | Scala | mit | 2,041 |
import scala.language.higherKinds
/** Type-level model of the myth "Kuafu chases the sun" (夸父追日).
  *
  * Members (identifiers are Chinese):
  *  - 太阳的路 — "the sun's path", a type-level list plus its value witness
  *  - 夸父的路 — "Kuafu's path", likewise
  *  - 追逐     — "chase", the next state after one step of the chase
  *
  * NOTE(review): `HList` is declared elsewhere in this project — only its use
  * as an upper bound is visible here.
  */
trait Zhuri {
  type 太阳的路 <: HList
  def 太阳的路: 太阳的路
  type 夸父的路 <: HList
  def 夸父的路: 夸父的路
  type 追逐 <: Zhuri
  def 追逐: 追逐
}
/** Concrete chase state: one 追逐 (chase) step moves the head of the sun's path
  * onto the end of Kuafu's path, at both the type level (`TT#Tail`,
  * `KK#Add[TT#Head]`) and the value level (`tail` / `add` / `head`).
  *
  * NOTE(review): relies on `HList` (defined elsewhere) exposing `Head`/`Tail`/`Add`
  * type members and matching `head`/`tail`/`add` methods.
  */
class ZhuriImpl[TT <: HList, KK <: HList](override val 太阳的路: TT, override val 夸父的路: KK) extends Zhuri {
  override type 太阳的路 = TT
  override type 夸父的路 = KK
  override type 追逐 = ZhuriImpl[TT#Tail, KK#Add[TT#Head]]
  override def 追逐: ZhuriImpl[TT#Tail, KK#Add[TT#Head]] = new ZhuriImpl(太阳的路.tail, 夸父的路.add(太阳的路.head))
}
// Initial chase state: both paths start as 路 (presumably the empty path —
// `路` serves as both the type and its value witness; declared elsewhere).
class Init extends ZhuriImpl[路, 路](路, 路)
// Singleton convenience instance of the initial state.
object Init extends Init
| djx314/ubw | raw11-夸父逐日/src/main/scala/Zhuri.scala | Scala | bsd-3-clause | 681 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.predictionio.data.storage.elasticsearch
import grizzled.slf4j.Logging
import org.apache.predictionio.data.storage.Channel
import org.apache.predictionio.data.storage.Channels
import org.apache.predictionio.data.storage.StorageClientConfig
import org.elasticsearch.ElasticsearchException
import org.elasticsearch.client.Client
import org.elasticsearch.index.query.FilterBuilders.termFilter
import org.json4s.DefaultFormats
import org.json4s.JsonDSL._
import org.json4s.native.JsonMethods._
import org.json4s.native.Serialization.read
import org.json4s.native.Serialization.write
/** Elasticsearch 1.x backed implementation of the [[Channels]] store.
  *
  * On construction, ensures the index and the "channels" mapping exist
  * (with `name` stored as a not_analyzed string). IDs are generated from the
  * shared [[ESSequences]] counter. All ES failures are logged and converted
  * into empty/false results rather than propagated.
  */
class ESChannels(client: Client, config: StorageClientConfig, index: String)
  extends Channels with Logging {
  implicit val formats = DefaultFormats.lossless
  private val estype = "channels"
  private val seq = new ESSequences(client, config, index)
  private val seqName = "channels"
  // Bootstrap: create the index and the type mapping if missing.
  val indices = client.admin.indices
  val indexExistResponse = indices.prepareExists(index).get
  if (!indexExistResponse.isExists) {
    indices.prepareCreate(index).get
  }
  val typeExistResponse = indices.prepareTypesExists(index).setTypes(estype).get
  if (!typeExistResponse.isExists) {
    val json =
      (estype ->
        ("properties" ->
          ("name" -> ("type" -> "string") ~ ("index" -> "not_analyzed"))))
    indices.preparePutMapping(index).setType(estype).
      setSource(compact(render(json))).get
  }
  /** Inserts a channel; id 0 means "assign the next free sequence id".
    * Returns the assigned id, or None when the write failed.
    */
  def insert(channel: Channel): Option[Int] = {
    val id =
      if (channel.id == 0) {
        // Roll the sequence until an unused id is found.
        var roll = seq.genNext(seqName)
        while (!get(roll).isEmpty) roll = seq.genNext(seqName)
        roll
      } else channel.id
    val realChannel = channel.copy(id = id)
    if (update(realChannel)) Some(id) else None
  }
  /** Fetches a channel by id; None if absent or on ES error.
    * The NullPointerException case covers a missing document whose source is null.
    */
  def get(id: Int): Option[Channel] = {
    try {
      val response = client.prepareGet(
        index,
        estype,
        id.toString).get()
      Some(read[Channel](response.getSourceAsString))
    } catch {
      case e: ElasticsearchException =>
        error(e.getMessage)
        None
      case e: NullPointerException => None
    }
  }
  /** All channels belonging to the given app; empty on ES error. */
  def getByAppid(appid: Int): Seq[Channel] = {
    try {
      val builder = client.prepareSearch(index).setTypes(estype).
        setPostFilter(termFilter("appid", appid))
      ESUtils.getAll[Channel](client, builder)
    } catch {
      case e: ElasticsearchException =>
        error(e.getMessage)
        Seq[Channel]()
    }
  }
  /** Upserts the channel document keyed by its id; false on ES error. */
  def update(channel: Channel): Boolean = {
    try {
      val response = client.prepareIndex(index, estype, channel.id.toString).
        setSource(write(channel)).get()
      true
    } catch {
      case e: ElasticsearchException =>
        error(e.getMessage)
        false
    }
  }
  /** Best-effort delete; errors are logged and swallowed. */
  def delete(id: Int): Unit = {
    try {
      client.prepareDelete(index, estype, id.toString).get
    } catch {
      case e: ElasticsearchException =>
        error(e.getMessage)
    }
  }
}
| himanshudhami/PredictionIO | storage/elasticsearch1/src/main/scala/org/apache/predictionio/data/storage/elasticsearch/ESChannels.scala | Scala | apache-2.0 | 3,723 |
package chess.builders
import chess.Color
import chess.TipoDeTrebejo._
import chess.Trebejo
/** Fluent builder that fixes a piece type and produces pieces of that type. */
class TrebejoBuilder(val tipo: TipoDeTrebejo) {

  /** Creates a piece of this builder's type in colour `c`, with zero moves made. */
  def deColor(c: Color): Trebejo = {
    val movimientosIniciales = 0
    new Trebejo(tipo, c, movimientosIniciales)
  }
}
package com.twitter.finagle.service
import RetryPolicy._
import com.twitter.conversions.DurationOps._
import com.twitter.finagle.{
ChannelClosedException,
Failure,
FailureFlags,
TimeoutException,
WriteException
}
import com.twitter.util._
import org.scalatest.FunSpec
/** Behavioural tests for [[RetryPolicy]]: the built-in exception classifiers
  * (WriteExceptionsOnly, TimeoutAndWriteExceptionsOnly, RetryableWriteException),
  * and the combinators `filter`, `filterEach`, `limit`, `combine`, plus the
  * `Never`/`none` no-retry policies.
  */
class RetryPolicyTest extends FunSpec {
  // Drives `policy` with the given exception stream, collecting the backoff
  // durations it yields until the policy stops retrying.
  def getBackoffs(
    policy: RetryPolicy[Try[Nothing]],
    exceptions: Stream[Exception]
  ): Stream[Duration] =
    exceptions match {
      case Stream.Empty => Stream.empty
      case e #:: tail =>
        policy(Throw(e)) match {
          case None => Stream.empty
          case Some((backoff, p2)) => backoff #:: getBackoffs(p2, tail)
        }
    }
  describe("RetryPolicy") {
    val NoExceptions: PartialFunction[Try[Nothing], Boolean] = {
      case _ => false
    }
    val timeoutExc = new TimeoutException {
      protected val timeout = 0.seconds
      protected val explanation = "!"
    }
    it("should WriteExceptionsOnly") {
      val weo = WriteExceptionsOnly orElse NoExceptions
      assert(!weo(Throw(new Exception)))
      assert(weo(Throw(WriteException(new Exception))))
      assert(!weo(Throw(Failure(new Exception, FailureFlags.Interrupted))))
      // it's important that this failure isn't retried, despite being "retryable".
      // interrupted futures should never be retried.
      assert(!weo(Throw(Failure(new Exception, FailureFlags.Interrupted | FailureFlags.Retryable))))
      assert(weo(Throw(Failure(new Exception, FailureFlags.Retryable))))
      assert(!weo(Throw(Failure(new Exception, FailureFlags.Rejected | FailureFlags.NonRetryable))))
      assert(!weo(Throw(timeoutExc)))
    }
    it("should TimeoutAndWriteExceptionsOnly") {
      val taweo = TimeoutAndWriteExceptionsOnly orElse NoExceptions
      assert(!taweo(Throw(new Exception)))
      assert(taweo(Throw(WriteException(new Exception))))
      assert(!taweo(Throw(Failure(new Exception, FailureFlags.Interrupted))))
      assert(taweo(Throw(Failure(timeoutExc, FailureFlags.Interrupted))))
      assert(taweo(Throw(timeoutExc)))
      assert(taweo(Throw(new com.twitter.util.TimeoutException(""))))
    }
    it("RetryableWriteException matches retryable exception") {
      val retryable = Seq(Failure.rejected("test"), WriteException(new Exception))
      val nonRetryable =
        Seq(
          Failure("test", FailureFlags.Interrupted),
          new Exception,
          new ChannelClosedException,
          Failure("boo", FailureFlags.NonRetryable)
        )
      retryable.foreach {
        case RetryPolicy.RetryableWriteException(_) =>
        case _ => fail("should match RetryableWriteException")
      }
      nonRetryable.foreach {
        case RetryPolicy.RetryableWriteException(_) =>
          fail("should not match RetryableWriteException")
        case _ =>
      }
    }
  }
  // Helpers for the filter/filterEach tests: an exception carrying an Int payload.
  case class IException(i: Int) extends Exception
  val iExceptionsOnly: PartialFunction[Try[Nothing], Boolean] = {
    case Throw(IException(_)) => true
  }
  val iGreaterThan1: Try[Nothing] => Boolean = {
    case Throw(IException(i)) if i > 1 => true
    case _ => false
  }
  // `filter` checks the predicate only on the first failure.
  describe("RetryPolicy.filter/filterEach") {
    val backoffs = Stream(10.milliseconds, 20.milliseconds, 30.milliseconds)
    val policy = RetryPolicy.backoff(backoffs)(iExceptionsOnly).filter(iGreaterThan1)
    it("returns None if filter rejects") {
      val actual = getBackoffs(policy, Stream(IException(0), IException(1)))
      assert(actual == Stream.empty)
    }
    it("returns underlying result if filter accepts first") {
      val actual = getBackoffs(policy, Stream(IException(2), IException(0)))
      assert(actual == backoffs.take(2))
    }
  }
  // `filterEach` re-checks the predicate on every failure.
  describe("RetryPolicy.filterEach") {
    val backoffs = Stream(10.milliseconds, 20.milliseconds, 30.milliseconds)
    val policy = RetryPolicy.backoff(backoffs)(iExceptionsOnly).filterEach(iGreaterThan1)
    it("returns None if filterEach rejects") {
      val actual = getBackoffs(policy, Stream(IException(0), IException(1)))
      assert(actual == Stream.empty)
    }
    it("returns underlying result if filterEach accepts") {
      val actual = getBackoffs(policy, Stream(IException(2), IException(2), IException(0)))
      assert(actual == backoffs.take(2))
    }
  }
  // `limit` takes its max-retries by-name, so mutating the var changes the cap.
  describe("RetryPolicy.limit") {
    var currentMaxRetries: Int = 0
    val maxBackoffs = Stream.fill(3)(10.milliseconds)
    val policy =
      RetryPolicy
        .backoff(maxBackoffs)(RetryPolicy.ChannelClosedExceptionsOnly)
        .limit(currentMaxRetries)
    it("limits retries dynamically") {
      for (i <- 0 until 5) {
        currentMaxRetries = i
        val backoffs = getBackoffs(policy, Stream.fill(3)(new ChannelClosedException()))
        assert(backoffs == maxBackoffs.take(i min 3))
      }
    }
  }
  // `combine` tracks each sub-policy's retry budget independently.
  describe("RetryPolicy.combine") {
    val channelClosedBackoff = 10.milliseconds
    val writeExceptionBackoff = 0.milliseconds
    val combinedPolicy =
      RetryPolicy.combine(
        RetryPolicy.backoff(Backoff.const(Duration.Zero).take(2))(RetryPolicy.WriteExceptionsOnly),
        RetryPolicy
          .backoff(Stream.fill(3)(channelClosedBackoff))(RetryPolicy.ChannelClosedExceptionsOnly)
      )
    it("return None for unmatched exception") {
      val backoffs = getBackoffs(combinedPolicy, Stream(new UnsupportedOperationException))
      assert(backoffs == Stream.empty)
    }
    it("mimicks first policy") {
      val backoffs = getBackoffs(combinedPolicy, Stream.fill(4)(WriteException(new Exception)))
      assert(backoffs == Stream.fill(2)(writeExceptionBackoff))
    }
    it("mimicks second policy") {
      val backoffs = getBackoffs(combinedPolicy, Stream.fill(4)(new ChannelClosedException()))
      assert(backoffs == Stream.fill(3)(channelClosedBackoff))
    }
    it("interleaves backoffs") {
      val exceptions = Stream(
        new ChannelClosedException(),
        WriteException(new Exception),
        WriteException(new Exception),
        new ChannelClosedException(),
        WriteException(new Exception)
      )
      val backoffs = getBackoffs(combinedPolicy, exceptions)
      val expectedBackoffs = Stream(
        channelClosedBackoff,
        writeExceptionBackoff,
        writeExceptionBackoff,
        channelClosedBackoff
      )
      assert(backoffs == expectedBackoffs)
    }
  }
  describe("RetryPolicy.Never") {
    val never = RetryPolicy.Never.asInstanceOf[RetryPolicy[Try[Int]]]
    it("should not retry") {
      assert(None == never(Return(1)))
      assert(None == never(Throw(new RuntimeException)))
    }
  }
  describe("RetryPolicy.none") {
    val nah = RetryPolicy.none
    it("should not retry") {
      assert(None == nah((1, Return(1))))
      assert(None == nah((1, Throw(new RuntimeException))))
    }
  }
}
| luciferous/finagle | finagle-core/src/test/scala/com/twitter/finagle/service/RetryPolicyTest.scala | Scala | apache-2.0 | 6,780 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package common
import enums.ApplicationType
import models.ProtectionModel
/** String helpers for building protection form-field names and keystore keys. */
object Strings {

  /** Adds a safe `toIntOpt` parse to `String`. */
  implicit class StringImprovements(val s: String) {
    import scala.util.control.Exception._

    /** Parses the string as an Int, yielding None on a malformed number. */
    def toIntOpt: Option[Int] = catching(classOf[NumberFormatException]).opt(s.toInt)
  }

  /** Prefixes a field name according to the protection type being applied for. */
  def nameString(name: String)(implicit protectionType: ApplicationType.Value): String =
    protectionType match {
      case ApplicationType.FP2016 => s"fp16${name.capitalize}"
      case ApplicationType.IP2014 => s"ip14${name.capitalize}"
      case _                      => name
    }

  /** Keystore key for an amendable protection, e.g. "openIP2016Amendment". */
  def keyStoreProtectionName(protection: ProtectionModel): String =
    s"${statusString(protection.status)}${protectionTypeString(protection.protectionType)}Amendment"

  /** Keystore key for a non-amendable protection, e.g. "openIP2016". */
  def keyStoreNonAmendableProtectionName(protection: ProtectionModel): String =
    s"${statusString(protection.status).toLowerCase}${protectionTypeString(protection.protectionType).toUpperCase}"

  /** Keystore key for fetching an amendment by raw type and status strings. */
  def keyStoreAmendFetchString(protectionType: String, status: String): String =
    s"${status.toLowerCase}${protectionType.toUpperCase}Amendment"

  // Canonical display names per raw protection type; anything else maps to "notRecorded".
  private val protectionTypeNames: Map[String, String] = Map(
    "FP2016"   -> "FP2016",
    "IP2014"   -> "IP2014",
    "IP2016"   -> "IP2016",
    "Primary"  -> "primary",
    "Enhanced" -> "enhanced",
    "Fixed"    -> "fixed",
    "FP2014"   -> "FP2014"
  )

  def protectionTypeString(modelProtectionType: Option[String]): String =
    modelProtectionType.flatMap(protectionTypeNames.get).getOrElse("notRecorded")

  // Canonical display names per raw status; anything else maps to "notRecorded".
  private val statusNames: Map[String, String] = Map(
    "Open"         -> "open",
    "Dormant"      -> "dormant",
    "Withdrawn"    -> "withdrawn",
    "Expired"      -> "expired",
    "Unsuccessful" -> "unsuccessful",
    "Rejected"     -> "rejected"
  )

  def statusString(modelStatus: Option[String]): String =
    modelStatus.flatMap(statusNames.get).getOrElse("notRecorded")
}
| hmrc/pensions-lifetime-allowance-frontend | app/common/Strings.scala | Scala | apache-2.0 | 2,452 |
/*
* Copyright (c) 2018. Yuriy Stul
*/
package com.stulsoft.poc.pomtest4.scala
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.{FunSuite, Matchers}
/**
* @author Yuriy Stul
*/
@RunWith(classOf[JUnitRunner])
class SomeScalaClassTest extends FunSuite with Matchers {

  test("testFoo") {
    // foo() is expected to produce a non-null result.
    val result = (new SomeScalaClass).foo()
    result should not be null
  }
}
| ysden123/poc | pom-experiments/pomtest4/src/test/scala/com/stulsoft/poc/pomtest4/scala/SomeScalaClassTest.scala | Scala | mit | 394 |
package scalacl
package impl
import scalacl._
import scala.collection.generic.CanBuildFrom
import com.nativelibs4java.opencl._
import com.nativelibs4java.opencl.util._
import org.bridj.Pointer
import org.bridj.PointerIO
import scala.math._
object CLDataIO {
  /** The three ways a value is handed to a generated OpenCL kernel. */
  sealed abstract class ArgType
  /** Passed by value (`const <type> name`). */
  object Value extends ArgType
  /** Passed as a read-only global pointer (`__global const <type>*`). */
  object InputPointer extends ArgType
  /** Passed as a writable global pointer (`__global <type>*`). */
  object OutputPointer extends ArgType
}
/**
 * Describes how values of type T move between Scala and OpenCL: how many
 * underlying buffers back one logical element, how items are read/written
 * through BridJ pointers or plain arrays, and which OpenCL source fragments
 * (argument declarations, per-item expressions) are generated for the type.
 */
trait CLDataIO[T] {
  implicit val t: ClassManifest[T]
  // Number of underlying buffers/arrays used per logical element
  // (1 for scalars; the sum of component counts for tuples).
  val elementCount: Int
  val pointerIO: PointerIO[T]
  // Size in bytes of one element.
  def elementSize: Int
  // Flattened scalar components of this IO (a scalar IO returns itself).
  def elements: Seq[CLDataIO[Any]]
  // OpenCL type name used in generated kernel source.
  def clType: String
  // Kernel source that zeroes a buffer of this element type.
  lazy val clearSource = new CLSimpleCode("""
    __kernel void clearKernel(int size, __global """ + clType + """* buffer) {
      int i = get_global_id(0);
      if (i >= size)
        return;
      buffer[i] = (""" + clType + """)0;
    }
  """)
  /** Enqueues the clear kernel over the whole buffer; returns the completion event. */
  def clear(buffer: CLBuffer[T], evts: CLEvent*)(implicit context: Context): CLEvent = {
    val kernel = clearSource.getKernel(context)
    // The kernel object is shared, so argument setting + enqueue must be atomic.
    kernel.synchronized {
      val size = buffer.getElementCount.toInt
      kernel.setArgs(size, buffer)
      kernel.enqueueNDRange(context.queue, Array(size), evts:_*)
    }
  }
  // (scalar OpenCL type, arity) used by reductions; only overridden by scalar
  // IOs and by tuples of one uniform scalar type.
  def reductionType: (OpenCLType, Int) = error("Not a reductible type : " + this)
  def createBuffers(length: Int)(implicit context: Context): Array[CLGuardedBuffer[Any]]
  def openCLKernelArgDeclarations(nameBasis: String, argType: CLDataIO.ArgType, offset: Int): Seq[String]
  def openCLKernelNthItemExprs(nameBasis: String, argType: CLDataIO.ArgType, offset: Int, n: String): Seq[(String, List[Int])]
  def openCLIntermediateKernelTupleElementsExprs(expr: String): Seq[(String, List[Int])]
  def openCLIthTupleElementNthItemExpr(nameBasis: String, argType: CLDataIO.ArgType, offset: Int, indexes: List[Int], n: String): String
  // Convenience overloads delegating to the (offset, index) variants with offset 0;
  // each asserts that the caller passed exactly elementCount backing stores.
  def extract(arrays: Array[Array[Any]], index: Int): T = {
    assert(elementCount == arrays.length)
    extract(arrays, 0, index)
  }
  def extract(arrays: Array[CLGuardedBuffer[Any]], index: Int): CLFuture[T] = {
    assert(elementCount == arrays.length)
    extract(arrays, 0, index)
    //CLInstantFuture(extract(arrays.map(_.toArray.asInstanceOf[Array[Any]]), index))
  }
  def store(v: T, arrays: Array[CLGuardedBuffer[Any]], index: Int): Unit = {
    assert(elementCount == arrays.length)
    store(v, arrays, 0, index)
  }
  def extract(pointers: Array[Pointer[Any]], index: Int): T = {
    //extract(pointers.map(_.toArray.asInstanceOf[Array[Any]]), index)
    assert(elementCount == pointers.length)
    extract(pointers, 0, index)
  }
  def store(v: T, pointers: Array[Pointer[Any]], index: Int): Unit = {
    assert(elementCount == pointers.length)
    store(v, pointers, 0, index)
  }
  // Core accessors implemented by concrete IOs: read/write one element at
  // `index`, starting at backing-store slot `offset`.
  def extract(arrays: Array[CLGuardedBuffer[Any]], offset: Int, index: Int): CLFuture[T]
  def store(v: T, arrays: Array[CLGuardedBuffer[Any]], offset: Int, index: Int): Unit
  def extract(pointers: Array[Pointer[Any]], offset: Int, index: Int): T
  def extract(arrays: Array[Array[Any]], offset: Int, index: Int): T
  def store(v: T, pointers: Array[Pointer[Any]], offset: Int, index: Int): Unit
  def exprs(arrayExpr: String): Seq[String]
  /** Materializes the guarded buffers into a Scala array of T. */
  def toArray(arrays: Array[CLGuardedBuffer[Any]]): Array[T] = {
    val size = arrays.head.buffer.getElementCount.toInt
    if (arrays.length == 1) {
      // Single backing buffer: read it out directly.
      arrays.head.withReadablePointer(p => p.toArray.asInstanceOf[Array[T]])
    } else {
      // Multiple backing buffers (tuples): rebuild each element.
      val out = new Array[T](size)
      copyToArray(arrays, out, 0, size)
      out
    }
  }
  /** Copies elements [start, start+length) (or all, when length < 0) into `out`. */
  def copyToArray[B >: T](arrays: Array[CLGuardedBuffer[Any]], out: Array[B], start: Int = 0, length: Int = -1) = {
    assert(elementCount == arrays.length)
    val pointers = arrays.map(_.toPointer)
    val size = pointers(0).getValidElements.toInt
    var i = start
    val sup = if (length < 0) size else min(size, start + length)
    while (i < sup) {
      out(i) = extract(pointers, i)
      i += 1
    }
  }
}
object CLTupleDataIO {
  // Arities for which a built-in OpenCL vector type name exists (used by
  // CLTupleDataIO.isOpenCLTuple to decide between e.g. "int4" and a struct).
  val builtInArities: Set[Int] = Set(1, 2, 4, 8)
}
/**
 * Data IO for tuple types: composes per-component IOs (`ios`), flattening
 * their backing stores side by side. `values` decomposes a T into its
 * components; `tuple` rebuilds a T from them.
 */
class CLTupleDataIO[T](ios: Array[CLDataIO[Any]], values: T => Array[Any], tuple: Array[Any] => T)(implicit override val t: ClassManifest[T]) extends CLDataIO[T] {
  assert(!ios.isEmpty)
  // Tuples have no single flat memory layout on the Scala side.
  override lazy val pointerIO: PointerIO[T] =
    error("Cannot create PointerIO for tuples !")
  // Codegen methods delegate to each component IO, shifting by its slot offset.
  override def openCLKernelArgDeclarations(nameBasis: String, argType: CLDataIO.ArgType, offset: Int): Seq[String] =
    iosAndOffsets.flatMap { case (io, ioOffset) => io.openCLKernelArgDeclarations(nameBasis, argType, offset + ioOffset) }
  // Prepends the component position `i` to each returned index path.
  override def openCLKernelNthItemExprs(nameBasis: String, argType: CLDataIO.ArgType, offset: Int, n: String): Seq[(String, List[Int])] =
    iosAndOffsets.zipWithIndex.flatMap {
      case ((io, ioOffset), i) =>
        io.openCLKernelNthItemExprs(nameBasis, argType, offset + ioOffset, n).map {
          case (s, indexes) => (s, i :: indexes)
        }
    }
  //override def openCLIntermediateKernelNthItemExprs(argType: CLDataIO.ArgType, offset: Int, n: String): Seq[String] =
  //  iosAndOffsets.flatMap { case (io, ioOffset) => io.openCLIntermediateKernelNthItemExprs(argType, offset + ioOffset, n) }
  /*override def openCLTupleShuffleNthFieldExprs(argType: CLDataIO.ArgType, offset: Int, n: String, shuffleExpr: String): Seq[String] =
    if (!isOpenCLTuple)
      error("Type " + this + " is not an OpenCL tuple !")
    else
      shuffleExpr.view.map(_ match {
        case 'x' => 0
        case 'y' => 1
        case 'z' => 2
        case 'w' => 3
      }).flatMap(i => {
        val (io, ioOffset) = iosAndOffsets(i)
        io.openCLKernelNthItemExprs(argType, offset + ioOffset, n).map(_._1)
      })
  */
  // Walks the index path: head selects the component, tail recurses into it.
  override def openCLIthTupleElementNthItemExpr(nameBasis: String, argType: CLDataIO.ArgType, offset: Int, indexes: List[Int], n: String): String = {
    val (io, ioOffset) = iosAndOffsets(indexes.head)
    io.openCLIthTupleElementNthItemExpr(nameBasis, argType, offset + ioOffset, indexes.tail, n)
  }
  // Generates "expr._1", "expr._2", ... accessors for each component.
  override def openCLIntermediateKernelTupleElementsExprs(expr: String): Seq[(String, List[Int])] = {
    ios.zipWithIndex.flatMap {
      case (io, i) =>
        io.openCLIntermediateKernelTupleElementsExprs(expr + "._" + (i + 1)).map {
          case (x, indexes) =>
            (x, i :: indexes)
        }
    }
  }
  override def elementSize = ios.map(_.elementSize).sum
  override def elements: Seq[CLDataIO[Any]] =
    ios.flatMap(_.elements)
  val types = ios.map(_.clType)
  val uniqTypes = types.toSet
  // True when all components share one scalar type and the arity maps to a
  // built-in OpenCL vector type (e.g. float4) rather than a struct.
  val isOpenCLTuple = {
    uniqTypes.size == 1 &&
    CLTupleDataIO.builtInArities.contains(ios.size) &&
    ios.head.isInstanceOf[CLValDataIO[_]]
  }
  override def clType = {
    if (isOpenCLTuple)
      uniqTypes.head + ios.size
    else
      "struct { " + types.reduceLeft(_ + "; " + _) + "; }"
  }
  override def reductionType = if (isOpenCLTuple)
    (ios.head.reductionType._1, ios.size)
  else
    super.reductionType
  override def createBuffers(length: Int)(implicit context: Context): Array[CLGuardedBuffer[Any]] =
    ios.flatMap(_.createBuffers(length))
  // Pairs each component IO with its starting slot in the flattened backing
  // stores; elementCount is the total number of slots.
  val (iosAndOffsets, elementCount) = {
    var off = 0
    (
      ios.map(io => {
        val o = off
        off += io.elementCount
        (io, o)
      }),
      off
    )
  }
  override def extract(arrays: Array[CLGuardedBuffer[Any]], offset: Int, index: Int): CLFuture[T] =
    new CLTupleFuture(
      iosAndOffsets.map(p => {
        val (io, ioOffset) = p
        io.extract(arrays, offset + ioOffset, index)
      }),
      tuple
    )
  override def store(v: T, arrays: Array[CLGuardedBuffer[Any]], offset: Int, index: Int): Unit =
    iosAndOffsets.zip(values(v)).foreach { case ((io, ioOffset), vi) => io.store(vi, arrays, offset + ioOffset, index) }
  // Hot path: manual while loops avoid intermediate collections.
  override def extract(pointers: Array[Pointer[Any]], offset: Int, index: Int): T = {
    var i = 0
    val length = iosAndOffsets.length
    val data = new Array[Any](length)
    while (i < length) {
      val (io, ioOffset) = iosAndOffsets(i)
      data(i) = io.extract(pointers, offset + ioOffset, index)
      i += 1
    }
    tuple(data)
    /*
    tuple(iosAndOffsets.map(p => {
      val (io, ioOffset) = p
      io.extract(pointers, offset + ioOffset, index)
    }))
    */
  }
  override def extract(arrays: Array[Array[Any]], offset: Int, index: Int): T = {
    var i = 0
    val length = iosAndOffsets.length
    val data = new Array[Any](length)
    while (i < length) {
      val (io, ioOffset) = iosAndOffsets(i)
      data(i) = io.extract(arrays, offset + ioOffset, index)
      i += 1
    }
    tuple(data)
  }
  override def store(v: T, pointers: Array[Pointer[Any]], offset: Int, index: Int): Unit = {
    var i = 0
    val length = iosAndOffsets.length
    val vals = values(v)
    while (i < length) {
      val (io, ioOffset) = iosAndOffsets(i)
      val vi = vals(i)
      io.store(vi, pointers, offset + ioOffset, index)
      i += 1
    }
    //iosAndOffsets.zip(values(v)).foreach { case ((io, ioOffset), vi) => io.store(vi, pointers, offset + ioOffset, index) }
  }
  override def exprs(arrayExpr: String): Seq[String] =
    ios.zipWithIndex.flatMap { case (io, i) => io.exprs(arrayExpr + "._" + (i + 1)) }
  override def toString =
    "(" + ios.mkString(", ") + ")"
}
/**
 * Base data IO for scalar (AnyVal) element types: one backing buffer per
 * element, layout given by BridJ's PointerIO for the runtime class.
 */
abstract class CLValDataIO[T <: AnyVal](implicit override val t: ClassManifest[T]) extends CLDataIO[T] {
  // Scalars occupy exactly one backing-store slot.
  override val elementCount = 1
  override val pointerIO: PointerIO[T] =
    PointerIO.getInstance(t.erasure)
  override def elementSize = pointerIO.getTargetSize.toInt
  override def elements: Seq[CLDataIO[Any]] =
    Seq(this.asInstanceOf[CLDataIO[Any]])
  /*override def openCLTupleShuffleNthFieldExprs(argType: CLDataIO.ArgType, offset: Int, n: String, shuffleExpr: String): Seq[String] =
    error("Calling tuple shuffle field '" + shuffleExpr + "' on scalar type " + this)*/
  // One declaration, named nameBasis + offset; qualifiers depend on argType.
  override def openCLKernelArgDeclarations(nameBasis: String, argType: CLDataIO.ArgType, offset: Int): Seq[String] = {
    Seq(
      (
        argType match {
          case CLDataIO.InputPointer =>
            "__global const " + clType + "* " + nameBasis
          case CLDataIO.OutputPointer =>
            "__global " + clType + "* " + nameBasis
          case CLDataIO.Value =>
            "const " + clType + " " + nameBasis
        }
      ) + offset
    )
  }
  // Values are referenced directly; pointers are indexed with [n].
  override def openCLKernelNthItemExprs(nameBasis: String, argType: CLDataIO.ArgType, offset: Int, n: String) =
    Seq(
      (
        argType match {
          case CLDataIO.Value =>
            nameBasis + offset
          case CLDataIO.InputPointer =>
            nameBasis + offset + "[" + n + "]"
          case CLDataIO.OutputPointer =>
            nameBasis + offset + "[" + n + "]"
        },
        List(0)
      )
    )
  override def openCLIntermediateKernelTupleElementsExprs(expr: String): Seq[(String, List[Int])] =
    Seq((expr, List(0)))
  // Scalars only have the trivial index path List(0).
  override def openCLIthTupleElementNthItemExpr(nameBasis: String, argType: CLDataIO.ArgType, offset: Int, indexes: List[Int], n: String): String = {
    if (indexes != List(0))
      error("There is only one item in this array of " + this + " (trying to access item " + indexes + ")")
    openCLKernelNthItemExprs(nameBasis, argType, offset, n)(0)._1
  }
  // Maps JVM class names onto OpenCL type names (e.g. "character" -> "short").
  override def clType = t.erasure.getSimpleName.toLowerCase match {
    case "sizet" => "size_t"
    case "boolean" => CLFilteredArray.presenceCLType
    case "character" => "short"
    case n => n
  }
  override def toString = t.erasure.getSimpleName + " /* " + clType + "*/"
  override def createBuffers(length: Int)(implicit context: Context): Array[CLGuardedBuffer[Any]] =
    Array(new CLGuardedBuffer[T](length)(context, this).asInstanceOf[CLGuardedBuffer[Any]])
  override def extract(arrays: Array[CLGuardedBuffer[Any]], offset: Int, index: Int): CLFuture[T] =
    arrays(offset).asInstanceOf[CLGuardedBuffer[T]].apply(index)
  //override def extract(pointers: Array[Pointer[Any]], offset: Int, index: Int): T =
  //  pointers(offset).asInstanceOf[Pointer[T]].get(index)
  //override def store(v: T, pointers: Array[Pointer[Any]], offset: Int, index: Int): Unit =
  //  pointers(offset).asInstanceOf[Pointer[T]].set(index, v)
  override def store(v: T, arrays: Array[CLGuardedBuffer[Any]], offset: Int, index: Int): Unit =
    arrays(offset).asInstanceOf[CLGuardedBuffer[T]].update(index, v)
  override def exprs(arrayExpr: String): Seq[String] =
    Seq(arrayExpr)
  // Single-buffer fast path: no per-element reassembly needed.
  override def toArray(arrays: Array[CLGuardedBuffer[Any]]): Array[T] = {
    assert(elementCount == arrays.length)
    arrays(0).asInstanceOf[CLGuardedBuffer[T]].toArray
  }
}
/** Scalar IO for Int (4 bytes per element). */
object CLIntDataIO extends CLValDataIO[Int] {
  override def reductionType = (OpenCLType.Int, 1)
  override def extract(pointers: Array[Pointer[Any]], offset: Int, index: Int): Int =
    // Widen before scaling by the element size: `index * 4` would overflow
    // 32-bit arithmetic for large indices, while BridJ offsets are longs.
    pointers(offset).getIntAtOffset(index.toLong * 4)
  override def extract(arrays: Array[Array[Any]], offset: Int, index: Int): Int =
    arrays(offset).asInstanceOf[Array[Int]](index)
  override def store(v: Int, pointers: Array[Pointer[Any]], offset: Int, index: Int): Unit =
    pointers(offset).setIntAtOffset(index.toLong * 4, v)
}
/** Scalar IO for Short (2 bytes per element). */
object CLShortDataIO extends CLValDataIO[Short] {
  override def reductionType = (OpenCLType.Short, 1)
  override def extract(pointers: Array[Pointer[Any]], offset: Int, index: Int): Short =
    // Widen before scaling: `index * 2` could overflow Int; BridJ takes a long.
    pointers(offset).getShortAtOffset(index.toLong * 2)
  override def extract(arrays: Array[Array[Any]], offset: Int, index: Int): Short =
    arrays(offset).asInstanceOf[Array[Short]](index)
  override def store(v: Short, pointers: Array[Pointer[Any]], offset: Int, index: Int): Unit =
    pointers(offset).setShortAtOffset(index.toLong * 2, v)
}
/** Scalar IO for Byte (1 byte per element; OpenCL type "char"). */
object CLByteDataIO extends CLValDataIO[Byte] {
  override def reductionType = (OpenCLType.Byte, 1)
  override def clType = "char"
  override def extract(pointers: Array[Pointer[Any]], offset: Int, index: Int): Byte =
    pointers(offset).getByteAtOffset(index * 1)
  override def extract(arrays: Array[Array[Any]], offset: Int, index: Int): Byte =
    arrays(offset).asInstanceOf[Array[Byte]](index)
  override def store(v: Byte, pointers: Array[Pointer[Any]], offset: Int, index: Int): Unit =
    pointers(offset).setByteAtOffset(index * 1, v)
}
/** Scalar IO for Boolean, stored as one byte (0 = false, non-zero = true). */
object CLBooleanDataIO extends CLValDataIO[Boolean] {
  override def reductionType = (OpenCLType.Byte, 1)
  override def extract(pointers: Array[Pointer[Any]], offset: Int, index: Int): Boolean =
    pointers(offset).getByteAtOffset(index * 1) != 0
  override def extract(arrays: Array[Array[Any]], offset: Int, index: Int): Boolean =
    arrays(offset).asInstanceOf[Array[Boolean]](index)
  override def store(v: Boolean, pointers: Array[Pointer[Any]], offset: Int, index: Int): Unit =
    pointers(offset).setByteAtOffset(index * 1, if (v) 1 else 0)
}
/** Scalar IO for Char (2 bytes per element). */
object CLCharDataIO extends CLValDataIO[Char] {
  override def reductionType = (OpenCLType.Char, 1)
  override def extract(pointers: Array[Pointer[Any]], offset: Int, index: Int): Char =
    // Widen before scaling: `index * 2` could overflow Int; BridJ takes a long.
    pointers(offset).getCharAtOffset(index.toLong * 2)
  override def extract(arrays: Array[Array[Any]], offset: Int, index: Int): Char =
    arrays(offset).asInstanceOf[Array[Char]](index)
  override def store(v: Char, pointers: Array[Pointer[Any]], offset: Int, index: Int): Unit =
    pointers(offset).setCharAtOffset(index.toLong * 2, v)
}
/** Scalar IO for Long (8 bytes per element). */
object CLLongDataIO extends CLValDataIO[Long] {
  override def reductionType = (OpenCLType.Long, 1)
  override def extract(pointers: Array[Pointer[Any]], offset: Int, index: Int): Long =
    // Widen before scaling: `index * 8` could overflow Int; BridJ takes a long.
    pointers(offset).getLongAtOffset(index.toLong * 8)
  override def extract(arrays: Array[Array[Any]], offset: Int, index: Int): Long =
    arrays(offset).asInstanceOf[Array[Long]](index)
  override def store(v: Long, pointers: Array[Pointer[Any]], offset: Int, index: Int): Unit =
    pointers(offset).setLongAtOffset(index.toLong * 8, v)
}
/** Scalar IO for Float (4 bytes per element). */
object CLFloatDataIO extends CLValDataIO[Float] {
  override def reductionType = (OpenCLType.Float, 1)
  override def extract(pointers: Array[Pointer[Any]], offset: Int, index: Int): Float =
    // Widen before scaling: `index * 4` could overflow Int; BridJ takes a long.
    pointers(offset).getFloatAtOffset(index.toLong * 4)
  override def extract(arrays: Array[Array[Any]], offset: Int, index: Int): Float =
    arrays(offset).asInstanceOf[Array[Float]](index)
  override def store(v: Float, pointers: Array[Pointer[Any]], offset: Int, index: Int): Unit =
    pointers(offset).setFloatAtOffset(index.toLong * 4, v)
}
/** Scalar IO for Double (8 bytes per element). */
object CLDoubleDataIO extends CLValDataIO[Double] {
  override def reductionType = (OpenCLType.Double, 1)
  override def extract(pointers: Array[Pointer[Any]], offset: Int, index: Int): Double =
    // Widen before scaling: `index * 8` could overflow Int; BridJ takes a long.
    pointers(offset).getDoubleAtOffset(index.toLong * 8)
  override def extract(arrays: Array[Array[Any]], offset: Int, index: Int): Double =
    arrays(offset).asInstanceOf[Array[Double]](index)
  override def store(v: Double, pointers: Array[Pointer[Any]], offset: Int, index: Int): Unit =
    pointers(offset).setDoubleAtOffset(index.toLong * 8, v)
}
/*
class CLRangeDataIO(implicit val t: ClassManifest[Int]) extends CLDataIO[Int] {
override val elementCount = 1
override val pointerIO: PointerIO[Int] =
PointerIO.getInstance(t.erasure)
override def elements: Seq[CLDataIO[Any]] =
Seq(this.asInstanceOf[CLDataIO[Any]])
override def openCLKernelArgDeclarations(argType: CLDataIO.ArgType, offset: Int): Seq[String] = {
assert(argType != )
Seq("int rangeLow" + offset)
}
override def openCLKernelNthItemExprs(argType: CLDataIO.ArgType, offset: Int, n: String) =
Seq(("(rangeLow" + offset + " + " + n + ")", List(0)))
override def openCLIntermediateKernelTupleElementsExprs(expr: String): Seq[(String, List[Int])] =
Seq((expr, List(0))) // TODO ?
override def openCLIthTupleElementNthItemExpr(argType: CLDataIO.ArgType, offset: Int, indexes: List[Int], n: String): String = {
if (indexes != List(0))
error("There is only one item in this array of " + this + " (trying to access item " + indexes + ")")
openCLKernelNthItemExprs(argType, offset, n)(0)._1
}
override def clType = "int"
override def toString = "int range"
override def createBuffers(length: Int)(implicit context: Context): Array[CLGuardedBuffer[Any]] =
Array(new CLGuardedBuffer[Int](2).asInstanceOf[CLGuardedBuffer[Any]])
override def extract(arrays: Array[CLGuardedBuffer[Any]], offset: Int, index: Int): CLFuture[Int] = {
val arr = arrays(offset).asInstanceOf[CLGuardedBuffer[Int]]
error("not implemented")
}
override def extract(pointers: Array[Pointer[Any]], offset: Int, index: Int): Int =
pointers(offset).asInstanceOf[Pointer[Int]].get(0) + index.toInt
override def extract(arrays: Array[Array[Any]], offset: Int, index: Int): Int =
arrays(offset).asInstanceOf[Array[Int]](0) + index.toInt
override def store(v: Int, pointers: Array[Pointer[Any]], offset: Int, index: Int): Unit =
error("Int ranges are immutable !")
override def store(v: Int, arrays: Array[CLGuardedBuffer[Any]], offset: Int, index: Int): Unit =
error("Int ranges are immutable !")
override def exprs(arrayExpr: String): Seq[String] =
Seq(arrayExpr)
override def toArray(arrays: Array[CLGuardedBuffer[Any]]): Array[Int] = {
assert(elementCount == arrays.length)
val Array(low, length, by) = arrays(0).asInstanceOf[CLGuardedBuffer[Int]].toArray
(low.toInt until length.toInt).toArray
}
}
*/
| nativelibs4java/ScalaCL | Old/Collections/src/main/scala/scalacl/impl/CLDataIO.scala | Scala | bsd-3-clause | 19,270 |
/*
* Copyright (C) 2009-2018 Lightbend Inc. <https://www.lightbend.com>
*/
package play.api.libs.ws.ahc
import java.util
import akka.stream.Materializer
import akka.util.{ ByteString, Timeout }
import org.specs2.concurrent.ExecutionEnv
import org.specs2.matcher.FutureMatchers
import org.specs2.mock.Mockito
import org.specs2.mutable.Specification
import play.api.Application
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.libs.oauth.{ ConsumerKey, OAuthCalculator, RequestToken }
import play.api.libs.ws._
import play.api.mvc._
import play.api.test.{ DefaultAwaitTimeout, FutureAwaits, Helpers, WithServer }
import play.shaded.ahc.io.netty.handler.codec.http.DefaultHttpHeaders
import play.shaded.ahc.org.asynchttpclient.Realm.AuthScheme
import play.shaded.ahc.org.asynchttpclient.cookie.{ Cookie => AHCCookie }
import play.shaded.ahc.org.asynchttpclient.{ Param, Request => AHCRequest, Response => AHCResponse }
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.language.implicitConversions
class AhcWSSpec(implicit ee: ExecutionEnv) extends Specification with Mockito with FutureMatchers with FutureAwaits with DefaultAwaitTimeout {
sequential
  // Tests that inspect the underlying AsyncHttpClient request built by the
  // Play WS wrapper, without performing any network I/O.
  "Ahc WSClient" should {
    "support several query string values for a parameter" in {
      val client = mock[StandaloneAhcWSClient]
      val r: AhcWSRequest = makeAhcRequest("http://playframework.com/").withQueryStringParameters("foo" -> "foo1", "foo" -> "foo2").asInstanceOf[AhcWSRequest]
      val req: AHCRequest = r.underlying.buildRequest()
      import scala.collection.JavaConverters._
      val paramsList: Seq[Param] = req.getQueryParams.asScala
      // Both values must survive, i.e. the second must not overwrite the first.
      paramsList.exists(p => (p.getName == "foo") && (p.getValue == "foo1")) must beTrue
      paramsList.exists(p => (p.getName == "foo") && (p.getValue == "foo2")) must beTrue
      paramsList.count(p => p.getName == "foo") must beEqualTo(2)
    }
    /*
    "AhcWSRequest.setHeaders using a builder with direct map" in new WithApplication {
      val request = new AhcWSRequest(mock[AhcWSClient], "GET", None, None, Map.empty, EmptyBody, new RequestBuilder("GET"))
      val headerMap: Map[String, Seq[String]] = Map("key" -> Seq("value"))
      val ahcRequest = request.setHeaders(headerMap).build
      ahcRequest.getHeaders.containsKey("key") must beTrue
    }
    "AhcWSRequest.setQueryString" in new WithApplication {
      val request = new AhcWSRequest(mock[AhcWSClient], "GET", None, None, Map.empty, EmptyBody, new RequestBuilder("GET"))
      val queryString: Map[String, Seq[String]] = Map("key" -> Seq("value"))
      val ahcRequest = request.setQueryString(queryString).build
      ahcRequest.getQueryParams().containsKey("key") must beTrue
    }
    "support several query string values for a parameter" in new WithApplication {
      val req = WS.url("http://playframework.com/")
        .withQueryString("foo" -> "foo1", "foo" -> "foo2").asInstanceOf[AhcWSRequestHolder]
        .prepare().build
      req.getQueryParams.get("foo").contains("foo1") must beTrue
      req.getQueryParams.get("foo").contains("foo2") must beTrue
      req.getQueryParams.get("foo").size must equalTo(2)
    }
    */
    "support http headers" in {
      val client = mock[StandaloneAhcWSClient]
      import scala.collection.JavaConverters._
      // Repeated header names must accumulate, not replace.
      val req: AHCRequest = makeAhcRequest("http://playframework.com/")
        .addHttpHeaders("key" -> "value1", "key" -> "value2").asInstanceOf[AhcWSRequest]
        .underlying.buildRequest()
      req.getHeaders.getAll("key").asScala must containTheSameElementsAs(Seq("value1", "value2"))
    }
  }
def makeAhcRequest(url: String): AhcWSRequest = {
implicit val materializer = mock[Materializer]
val client = mock[StandaloneAhcWSClient]
val standalone = new StandaloneAhcWSRequest(client, "http://playframework.com/")
AhcWSRequest(standalone)
}
  // A user-supplied Content-Type must win over any inferred one.
  "not make Content-Type header if there is Content-Type in headers already" in {
    import scala.collection.JavaConverters._
    val req: AHCRequest = makeAhcRequest("http://playframework.com/")
      .addHttpHeaders("content-type" -> "fake/contenttype; charset=utf-8")
      .withBody(<aaa>value1</aaa>)
      .asInstanceOf[AhcWSRequest].underlying
      .buildRequest()
    req.getHeaders.getAll("Content-Type").asScala must_== Seq("fake/contenttype; charset=utf-8")
  }
  // A Map body defaults to application/x-www-form-urlencoded encoding.
  "Have form params on POST of content type application/x-www-form-urlencoded" in {
    val client = mock[StandaloneAhcWSClient]
    val req: AHCRequest = makeAhcRequest("http://playframework.com/")
      .withBody(Map("param1" -> Seq("value1")))
      .asInstanceOf[AhcWSRequest].underlying
      .buildRequest()
    (new String(req.getByteData, "UTF-8")) must_== ("param1=value1")
  }
  "Have form body on POST of content type text/plain" in {
    val client = mock[StandaloneAhcWSClient]
    val formEncoding = java.net.URLEncoder.encode("param1=value1", "UTF-8")
    val req: AHCRequest = makeAhcRequest("http://playframework.com/")
      .addHttpHeaders("Content-Type" -> "text/plain")
      .withBody("HELLO WORLD")
      .asInstanceOf[AhcWSRequest].underlying
      .buildRequest()
    (new String(req.getByteData, "UTF-8")) must be_==("HELLO WORLD")
    val headers = req.getHeaders
    // Content-Length is left for AHC to compute.
    headers.get("Content-Length") must beNull
  }
  // Regression test for #5221: an explicitly form-encoded string body must be
  // kept as raw bytes, not re-parsed into form parameters.
  "Have form body on POST of content type application/x-www-form-urlencoded explicitly set" in {
    val client = mock[StandaloneAhcWSClient]
    val req: AHCRequest = makeAhcRequest("http://playframework.com/")
      .addHttpHeaders("Content-Type" -> "application/x-www-form-urlencoded") // set content type by hand
      .withBody("HELLO WORLD") // and body is set to string (see #5221)
      .asInstanceOf[AhcWSRequest].underlying
      .buildRequest()
    (new String(req.getByteData, "UTF-8")) must be_==("HELLO WORLD") // should result in byte data.
  }
  // The signature calculator hook must be invoked while building the request.
  "support a custom signature calculator" in {
    val client = mock[StandaloneAhcWSClient]
    var called = false
    val calc = new play.shaded.ahc.org.asynchttpclient.SignatureCalculator with WSSignatureCalculator {
      override def calculateAndAddSignature(
        request: play.shaded.ahc.org.asynchttpclient.Request,
        requestBuilder: play.shaded.ahc.org.asynchttpclient.RequestBuilderBase[_]): Unit = {
        called = true
      }
    }
    val req = makeAhcRequest("http://playframework.com/").sign(calc)
      .asInstanceOf[AhcWSRequest].underlying
      .buildRequest()
    called must beTrue
  }
  // When signed (OAuth), form bodies must be exposed as form params so the
  // signature covers them; the raw byte body must not be populated.
  "Have form params on POST of content type application/x-www-form-urlencoded when signed" in {
    val client = mock[StandaloneAhcWSClient]
    import scala.collection.JavaConverters._
    val consumerKey = ConsumerKey("key", "secret")
    val requestToken = RequestToken("token", "secret")
    val calc = OAuthCalculator(consumerKey, requestToken)
    val req: AHCRequest = makeAhcRequest("http://playframework.com/").withBody(Map("param1" -> Seq("value1")))
      .sign(calc)
      .asInstanceOf[AhcWSRequest].underlying
      .buildRequest()
    // Note we use getFormParams instead of getByteData here.
    req.getFormParams.asScala must containTheSameElementsAs(List(new play.shaded.ahc.org.asynchttpclient.Param("param1", "value1")))
    req.getByteData must beNull // should NOT result in byte data.
    val headers = req.getHeaders
    headers.get("Content-Length") must beNull
  }
  // An explicit (even bogus) Content-Length set by the user is preserved for
  // unsigned requests.
  "Not remove a user defined content length header" in {
    val client = mock[StandaloneAhcWSClient]
    val consumerKey = ConsumerKey("key", "secret")
    val requestToken = RequestToken("token", "secret")
    val calc = OAuthCalculator(consumerKey, requestToken)
    val req: AHCRequest = makeAhcRequest("http://playframework.com/")
      .withBody(Map("param1" -> Seq("value1")))
      .addHttpHeaders("Content-Length" -> "9001") // add a meaningless content length here...
      .asInstanceOf[AhcWSRequest].underlying
      .buildRequest()
    (new String(req.getByteData, "UTF-8")) must be_==("param1=value1") // should result in byte data.
    val headers = req.getHeaders
    headers.get("Content-Length") must_== ("9001")
  }
  // ...but signing re-encodes the body as form params, so a stale user-set
  // Content-Length must be dropped (see #5221).
  "Remove a user defined content length header if we are parsing body explicitly when signed" in {
    val client = mock[StandaloneAhcWSClient]
    import scala.collection.JavaConverters._
    val consumerKey = ConsumerKey("key", "secret")
    val requestToken = RequestToken("token", "secret")
    val calc = OAuthCalculator(consumerKey, requestToken)
    val req: AHCRequest = makeAhcRequest("http://playframework.com/")
      .withBody(Map("param1" -> Seq("value1")))
      .addHttpHeaders("Content-Length" -> "9001") // add a meaningless content length here...
      .sign(calc) // this is signed, so content length is no longer valid per #5221
      .asInstanceOf[AhcWSRequest].underlying
      .buildRequest()
    val headers = req.getHeaders
    req.getByteData must beNull // should NOT result in byte data.
    req.getFormParams.asScala must containTheSameElementsAs(List(new play.shaded.ahc.org.asynchttpclient.Param("param1", "value1")))
    headers.get("Content-Length") must beNull // no content length!
  }
  "Verify Content-Type header is passed through correctly" in {
    import scala.collection.JavaConverters._
    val req: AHCRequest = makeAhcRequest("http://playframework.com/")
      .addHttpHeaders("Content-Type" -> "text/plain; charset=US-ASCII")
      .withBody("HELLO WORLD")
      .asInstanceOf[AhcWSRequest].underlying
      .buildRequest()
    req.getHeaders.getAll("Content-Type").asScala must_== Seq("text/plain; charset=US-ASCII")
  }
  // Binary payloads must not be transcoded or otherwise mangled.
  "POST binary data as is" in {
    val binData = ByteString((0 to 511).map(_.toByte).toArray)
    val req: AHCRequest = makeAhcRequest("http://playframework.com/").addHttpHeaders("Content-Type" -> "application/x-custom-bin-data").withBody(binData)
      .asInstanceOf[AhcWSRequest].underlying
      .buildRequest()
    ByteString(req.getByteData) must_== binData
  }
  "support a virtual host" in {
    val req: AHCRequest = makeAhcRequest("http://playframework.com/")
      .withVirtualHost("192.168.1.1")
      .asInstanceOf[AhcWSRequest].underlying
      .buildRequest()
    req.getVirtualHost must be equalTo "192.168.1.1"
  }
  "support follow redirects" in {
    val req: AHCRequest = makeAhcRequest("http://playframework.com/")
      .withFollowRedirects(follow = true)
      .asInstanceOf[AhcWSRequest].underlying
      .buildRequest()
    req.getFollowRedirect must beEqualTo(true)
  }
  // Finite timeouts are carried over in milliseconds.
  "support finite timeout" in {
    val req: AHCRequest = makeAhcRequest("http://playframework.com/")
      .withRequestTimeout(1000.millis)
      .asInstanceOf[AhcWSRequest].underlying
      .buildRequest()
    req.getRequestTimeout must be equalTo 1000
  }
  // Duration.Inf maps onto AHC's sentinel value of -1.
  "support infinite timeout" in {
    val req: AHCRequest = makeAhcRequest("http://playframework.com/")
      .withRequestTimeout(Duration.Inf)
      .asInstanceOf[AhcWSRequest].underlying
      .buildRequest()
    req.getRequestTimeout must be equalTo -1
  }
  "not support negative timeout" in {
    makeAhcRequest("http://playframework.com/").withRequestTimeout(-1.millis) should throwAn[IllegalArgumentException]
  }
  // Timeouts are stored as Int millis, so anything above Int.MaxValue is rejected.
  "not support a timeout greater than Int.MaxValue" in {
    makeAhcRequest("http://playframework.com/").withRequestTimeout((Int.MaxValue.toLong + 1).millis) should throwAn[IllegalArgumentException]
  }
  // Proxy credentials default to BASIC auth when no ntlmDomain is given.
  "support a proxy server with basic" in {
    val proxy = DefaultWSProxyServer(protocol = Some("https"), host = "localhost", port = 8080, principal = Some("principal"), password = Some("password"))
    val req: AHCRequest = makeAhcRequest("http://playframework.com/").withProxyServer(proxy)
      .asInstanceOf[AhcWSRequest].underlying.buildRequest()
    val actual = req.getProxyServer
    actual.getHost must be equalTo "localhost"
    actual.getPort must be equalTo 8080
    actual.getRealm.getPrincipal must be equalTo "principal"
    actual.getRealm.getPassword must be equalTo "password"
    actual.getRealm.getScheme must be equalTo AuthScheme.BASIC
  }
  // An "ntlm" protocol plus ntlmDomain selects NTLM auth on the proxy realm.
  "support a proxy server with NTLM" in {
    val proxy = DefaultWSProxyServer(protocol = Some("ntlm"), host = "localhost", port = 8080, principal = Some("principal"), password = Some("password"), ntlmDomain = Some("somentlmdomain"))
    val req: AHCRequest = makeAhcRequest("http://playframework.com/").withProxyServer(proxy).asInstanceOf[AhcWSRequest].underlying.buildRequest()
    val actual = req.getProxyServer
    actual.getHost must be equalTo "localhost"
    actual.getPort must be equalTo 8080
    actual.getRealm.getPrincipal must be equalTo "principal"
    actual.getRealm.getPassword must be equalTo "password"
    actual.getRealm.getNtlmDomain must be equalTo "somentlmdomain"
    actual.getRealm.getScheme must be equalTo AuthScheme.NTLM
  }
  // Digest auth requires a server challenge first, so preemptive auth must be off.
  "Set Realm.UsePreemptiveAuth to false when WSAuthScheme.DIGEST being used" in {
    val req = makeAhcRequest("http://playframework.com/")
      .withAuth("usr", "pwd", WSAuthScheme.DIGEST)
      .asInstanceOf[AhcWSRequest].underlying
      .buildRequest()
    req.getRealm.isUsePreemptiveAuth must beFalse
  }
  "Set Realm.UsePreemptiveAuth to true when WSAuthScheme.DIGEST not being used" in {
    val req = makeAhcRequest("http://playframework.com/")
      .withAuth("usr", "pwd", WSAuthScheme.BASIC)
      .asInstanceOf[AhcWSRequest].underlying
      .buildRequest()
    req.getRealm.isUsePreemptiveAuth must beTrue
  }
  // A proxy without credentials must not carry an auth realm at all.
  "support a proxy server" in {
    val proxy = DefaultWSProxyServer(host = "localhost", port = 8080)
    val req: AHCRequest = makeAhcRequest("http://playframework.com/").withProxyServer(proxy)
      .asInstanceOf[AhcWSRequest].underlying
      .buildRequest()
    val actual = req.getProxyServer
    actual.getHost must be equalTo "localhost"
    actual.getPort must be equalTo 8080
    actual.getRealm must beNull
  }
def patchFakeApp = {
val routes: (Application) => PartialFunction[(String, String), Handler] = { app: Application =>
{
case ("PATCH", "/") =>
val action = app.injector.instanceOf(classOf[DefaultActionBuilder])
action {
Results.Ok(play.api.libs.json.Json.parse(
"""{
| "data": "body"
|}
""".stripMargin))
}
}
}
GuiceApplicationBuilder().appRoutes(routes).build()
}
"support patch method" in new WithServer(patchFakeApp) {
// NOTE: if you are using a client proxy like Privoxy or Polipo, your proxy may not support PATCH & return 400.
{
val wsClient = app.injector.instanceOf(classOf[play.api.libs.ws.WSClient])
val futureResponse = wsClient.url(s"http://localhost:${Helpers.testServerPort}/").patch("body")
// This test experiences CI timeouts. Give it more time.
val reallyLongTimeout = Timeout(defaultAwaitTimeout.duration * 3)
val rep = await(futureResponse)(reallyLongTimeout)
rep.status must ===(200)
(rep.json \\ "data").asOpt[String] must beSome("body")
}
}
def gzipFakeApp = {
import java.io._
import java.util.zip._
lazy val Action = ActionBuilder.ignoringBody
val routes: Application => PartialFunction[(String, String), Handler] = {
app =>
{
case ("GET", "/") => Action { request =>
request.headers.get("Accept-Encoding") match {
case Some(encoding) if encoding.contains("gzip") =>
val os = new ByteArrayOutputStream
val gzipOs = new GZIPOutputStream(os)
gzipOs.write("gziped response".getBytes("utf-8"))
gzipOs.close()
Results.Ok(os.toByteArray).as("text/plain").withHeaders("Content-Encoding" -> "gzip")
case _ =>
Results.Ok("plain response")
}
}
}
}
GuiceApplicationBuilder()
.configure("play.ws.compressionEnabled" -> true)
.appRoutes(routes)
.build()
}
"support gziped encoding" in new WithServer(gzipFakeApp) {
val client = app.injector.instanceOf[WSClient]
val req = client.url("http://localhost:" + port + "/").get()
val rep = Await.result(req, 1.second)
rep.body must ===("gziped response")
}
"Ahc WS Response" should {
"get cookies from an AHC response" in {
val ahcResponse: AHCResponse = mock[AHCResponse]
val (name, value, wrap, domain, path, maxAge, secure, httpOnly) =
("someName", "someValue", true, "example.com", "/", 1000L, false, false)
val ahcCookie: AHCCookie = new AHCCookie(name, value, wrap, domain, path, maxAge, secure, httpOnly)
ahcResponse.getCookies returns util.Arrays.asList(ahcCookie)
val response = makeAhcResponse(ahcResponse)
val cookies: Seq[WSCookie] = response.cookies
val cookie = cookies.head
cookie.name must ===(name)
cookie.value must ===(value)
cookie.domain must beSome(domain)
cookie.path must beSome(path)
cookie.maxAge must beSome(maxAge)
cookie.secure must beFalse
}
"get a single cookie from an AHC response" in {
val ahcResponse: AHCResponse = mock[AHCResponse]
val (name, value, wrap, domain, path, maxAge, secure, httpOnly) =
("someName", "someValue", true, "example.com", "/", 1000L, false, false)
val ahcCookie: AHCCookie = new AHCCookie(name, value, wrap, domain, path, maxAge, secure, httpOnly)
ahcResponse.getCookies returns util.Arrays.asList(ahcCookie)
val response = makeAhcResponse(ahcResponse)
val optionCookie = response.cookie("someName")
optionCookie must beSome[WSCookie].which {
cookie =>
cookie.name must ===(name)
cookie.value must ===(value)
cookie.domain must beSome(domain)
cookie.path must beSome(path)
cookie.maxAge must beSome(maxAge)
cookie.secure must beFalse
}
}
"return -1 values of expires and maxAge as None" in {
val ahcResponse: AHCResponse = mock[AHCResponse]
val ahcCookie: AHCCookie = new AHCCookie("someName", "value", true, "domain", "path", -1L, false, false)
ahcResponse.getCookies returns util.Arrays.asList(ahcCookie)
val response = makeAhcResponse(ahcResponse)
val optionCookie = response.cookie("someName")
optionCookie must beSome[WSCookie].which { cookie =>
cookie.maxAge must beNone
}
}
"get the body as bytes from the AHC response" in {
val ahcResponse: AHCResponse = mock[AHCResponse]
val bytes = ByteString(-87, -72, 96, -63, -32, 46, -117, -40, -128, -7, 61, 109, 80, 45, 44, 30)
ahcResponse.getResponseBodyAsBytes returns bytes.toArray
val response = makeAhcResponse(ahcResponse)
response.bodyAsBytes must_== bytes
}
"get headers from an AHC response in a case insensitive map" in {
val ahcResponse: AHCResponse = mock[AHCResponse]
val ahcHeaders = new DefaultHttpHeaders(true)
ahcHeaders.add("Foo", "bar")
ahcHeaders.add("Foo", "baz")
ahcHeaders.add("Bar", "baz")
ahcResponse.getHeaders returns ahcHeaders
val response = makeAhcResponse(ahcResponse)
val headers = response.headers
headers must beEqualTo(Map("Foo" -> Seq("bar", "baz"), "Bar" -> Seq("baz")))
headers.contains("foo") must beTrue
headers.contains("Foo") must beTrue
headers.contains("BAR") must beTrue
headers.contains("Bar") must beTrue
}
}
def makeAhcResponse(ahcResponse: AHCResponse): AhcWSResponse = {
AhcWSResponse(StandaloneAhcWSResponse(ahcResponse))
}
"Ahc WS Config" should {
"support overriding secure default values" in {
val ahcConfig = new AhcConfigBuilder().modifyUnderlying { builder =>
builder.setCompressionEnforced(false)
builder.setFollowRedirect(false)
}.build()
ahcConfig.isCompressionEnforced must beFalse
ahcConfig.isFollowRedirect must beFalse
ahcConfig.getConnectTimeout must_== 120000
ahcConfig.getRequestTimeout must_== 120000
ahcConfig.getReadTimeout must_== 120000
}
}
}
| Shenker93/playframework | framework/src/play-ahc-ws/src/test/scala/play/api/libs/ws/ahc/AhcWSSpec.scala | Scala | apache-2.0 | 20,104 |
/*
* Copyright 2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package phase
import org.scalatest._
import scalaz.Scalaz._
import scalaz._
/**
 * Behavioural tests for PhasesChain / ValidatingPhasesChain composition:
 * single phases, chained phases, empty chains, failure short-circuiting and
 * mixing plain and validating chains.
 */
class PhasesChainTest extends fixture.FunSpec with Matchers {
  /**
   * Per-test fixture: executes a chain through a ChainRunner backed by a
   * MultiPhasedProgressMock so the visited phase names can be asserted.
   */
  class FixtureParam {
    // Populated by apply(); inspected by the assertion helpers below.
    var progress: MultiPhasedProgressMock = _
    // Runs `chain` on `in` with a fresh progress mock and returns the output.
    def apply[In, Out](chain: PhasesChain[In, Out], in: In): Out = {
      progress = new MultiPhasedProgressMock
      val runner = new ChainRunner(chain, progress)
      runner.run(in)
    }
    // Asserts that exactly `expected` phases were both entered and exited.
    def phasesInOutsEquals(expected: List[String]) {
      progress.phasesIns shouldEqual expected
      progress.phasesOuts shouldEqual expected
    }
  }
  protected def withFixture(test: OneArgTest): Outcome = test(new FixtureParam)
  describe("Simple Phase") {
    it ("should work single") { run =>
      // A one-phase chain: the phase sees the input and its result is returned.
      var arg: Int = -1
      val single = PhasesChain("single") {(i:Int) => arg = i; 1}
      single.phasesCount shouldBe 1
      val result = run(single, 0)
      arg shouldBe 0
      result shouldBe 1
      run.phasesInOutsEquals("single" :: Nil)
    }
    it ("should work for chain") { run =>
      // Each phase receives the previous phase's output (0 -> 1 -> 2 -> 3).
      var args = Map[String, Any]()
      val chained =
        PhasesChain("p1") { (arg:Int) => args += "p1" -> arg; 1} ::
          PhasesChain("p2") { (arg:Int) => args += "p2" -> arg; 2} ::
          PhasesChain("p3") { (arg:Int) => args += "p3" -> arg; 3}
      chained.phasesCount shouldBe 3
      val result = run(chained, 0)
      args("p1") shouldBe 0
      args("p2") shouldBe 1
      args("p3") shouldBe 2
      result shouldBe 3
      run.phasesInOutsEquals("p1" :: "p2" :: "p3" :: Nil)
    }
    it ("should do nothing for nil phase") { run =>
      // An empty chain is the identity: input passes through untouched.
      val empty = PhasesChain.empty[Int]
      empty.phasesCount shouldBe 0
      val result = run(empty, 0)
      result shouldBe 0
      run.phasesInOutsEquals(Nil)
    }
  }
  describe("Validating Phase") {
    it ("should work single") { run =>
      val single = ValidatingPhasesChain("single") {(i:Int) => arg = i; 1.success }
      single.phasesCount shouldBe 1
      val result = run(single, 0)
      arg shouldBe 0
      result shouldBe 1.success
      run.phasesInOutsEquals("single" :: Nil)
    }
    it ("should work for chain") { run =>
      // Successful validations thread the unwrapped value to the next phase.
      var args = Map[String, Any]()
      val chained =
        ValidatingPhasesChain("p1") { (arg:Int) => args += "p1" -> arg; 1.success} ::
          ValidatingPhasesChain("p2") { (arg:Int) => args += "p2" -> arg; 2.success} ::
          ValidatingPhasesChain("p3") { (arg:Int) => args += "p3" -> arg; 3.success}
      chained.phasesCount shouldBe 3
      val result = run(chained, 0)
      args("p1") shouldBe 0
      args("p2") shouldBe 1
      args("p3") shouldBe 2
      result shouldBe 3.success
      run.phasesInOutsEquals("p1" :: "p2" :: "p3" :: Nil)
    }
    it ("should break the chain if failure occurs") { run =>
      // A Failure in p2 must short-circuit: p3..p5 never run, the failure is
      // the overall result, and the skipped phases are reported as moved-over
      // progress instead of entered phases.
      var args = Map[String, Any]()
      val chained =
        ValidatingPhasesChain("p1") { (arg:Int) => args += "p1" -> arg; 1.success} ::
          ValidatingPhasesChain("p2") { (arg:Int) => args += "p2" -> arg; "breaking".failure} ::
          ValidatingPhasesChain("p3") { (arg:Int) => args += "p3" -> arg; 3.success} ::
          ValidatingPhasesChain("p4") { (arg:Int) => args += "p4" -> arg; 4.success} ::
          ValidatingPhasesChain("p5") { (arg:Int) => args += "p5" -> arg; 5.success}
      chained.phasesCount shouldBe 5
      val result = run(chained, 0)
      args("p1") shouldBe 0
      args("p2") shouldBe 1
      args.get("p3") shouldBe empty
      args.get("p4") shouldBe empty
      args.get("p5") shouldBe empty
      result shouldBe "breaking".failure
      run.phasesInOutsEquals("p1" :: "p2" :: Nil)
      run.progress.movedProgresses shouldBe 3
    }
    it ("should do nothing for nil phase") { run =>
      // The empty validating chain wraps the input in a Success.
      val empty = ValidatingPhasesChain.empty[Int]
      empty.phasesCount shouldBe 0
      val result = run(empty, 0)
      result shouldBe 0.success
      run.phasesInOutsEquals(Nil)
    }
  }
  describe("mixed phase and validating phase") {
    it ("should proper move") { run =>
      // A validating sub-chain that fails feeds its Failure into the trailing
      // plain phase p4, which unwraps it via valueOr.
      var args = Map[String, Any]()
      val chainedValidating =
        ValidatingPhasesChain("p1") { (arg:Int) => args += "p1" -> arg; 1.success} ::
          ValidatingPhasesChain("p2") { (arg:Int) => args += "p2" -> arg; "breaking".failure} ::
          ValidatingPhasesChain("p3") { (arg:Int) => args += "p3" -> arg; 3.success}
      val chainedAll =
        chainedValidating ::
          PhasesChain("p4") { (arg: Validation[String, Int]) => args += "p4" -> arg; arg valueOr { _ => -1 }}
      chainedAll.phasesCount shouldBe 4
      val result = run(chainedAll, 0)
      args("p1") shouldBe 0
      args("p2") shouldBe 1
      args.get("p3") shouldBe empty
      args("p4") shouldBe "breaking".failure
      result shouldBe -1
      run.phasesInOutsEquals("p1" :: "p2" :: "p4" :: Nil)
      run.progress.movedProgresses shouldBe 1
    }
  }
}
} | arkadius/scala-phases-chain | src/test/scala/phase/PhasesChainTest.scala | Scala | apache-2.0 | 5,473 |
package polyite.schedule
import isl.BasicMap
import isl.Isl.TypeAliases._
//class Dependence(val map : isl.BasicMap, val weakConstr : isl.BasicSet,
// val strongConstr : isl.BasicSet) {
/**
 * An effectively immutable representation of a data dependence: the dependence
 * relation itself plus the sets of schedule-coefficient vectors that carry it
 * weakly or strongly. The textual forms of all isl objects are cached so that
 * the dependence can be re-materialized in another isl context (see
 * [[transferToCtx]]) and used as a cheap equality/hash key.
 */
class Dependence private () {
  private var m : isl.BasicMap = null
  private var wConstr : isl.BasicSet = null
  private var sConstr : isl.BasicSet = null
  private var mStr : String = null
  private var wConstrStr : String = null
  private var sConstrStr : String = null
  private var tupleNameIn : String = null
  private var tupleNameOut : String = null

  private def this(map : isl.BasicMap, wConstr : isl.BasicSet,
    sConstr : isl.BasicSet, mStr : String, wConstrStr : String,
    sConstrStr : String, tupleNameIn : String, tupleNameOut : String) = {
    this()
    this.m = map
    this.wConstr = wConstr
    // was the postfix form `this sConstr = sConstr`; use explicit selection
    this.sConstr = sConstr
    this.mStr = mStr
    this.wConstrStr = wConstrStr
    this.sConstrStr = sConstrStr
    this.tupleNameIn = tupleNameIn
    this.tupleNameOut = tupleNameOut
  }

  /** Public constructor: caches the string forms and tuple names of `map`. */
  def this(map : isl.BasicMap, weakConstr : isl.BasicSet, strongConstr : isl.BasicSet) = this(
    map, weakConstr, strongConstr, map.toString(), weakConstr.toString(), strongConstr.toString(), map.getTupleName(T_IN), map.getTupleName(T_OUT))

  def map : isl.BasicMap = m
  def weakConstr : isl.BasicSet = wConstr
  def strongConstr : isl.BasicSet = sConstr

  override def toString() : String = mStr

  // Equality and hashing are based on the cached textual form of the relation,
  // so Dependences originating from different isl contexts compare equal.
  override def equals(o : Any) : Boolean = o match {
    case other : Dependence => other.mStr == mStr
    case _ => false
  }

  override def hashCode() : Int = mStr.hashCode()

  def getTupleNameIn() : String = tupleNameIn
  def getTupleNameOut() : String = tupleNameOut

  /** Re-parses this dependence's cached strings inside the given isl context. */
  def transferToCtx(ctx : isl.Ctx) : Dependence = {
    new Dependence(isl.BasicMap.readFromStr(ctx, mStr),
      isl.BasicSet.readFromStr(ctx, wConstrStr),
      isl.BasicSet.readFromStr(ctx, sConstrStr), mStr, wConstrStr, sConstrStr, tupleNameIn, tupleNameOut)
  }
}
package pregnaware.frontend
import akka.actor.{ActorContext, ActorRefFactory}
import akka.event.Logging._
import akka.util.Timeout
import com.typesafe.scalalogging.StrictLogging
import pregnaware.frontend.services.naming.{NamingServiceBackend, NamingServiceFrontEnd}
import pregnaware.frontend.services.user.{UserServiceBackend, UserServiceFrontEnd}
import pregnaware.utils.ExecutionWrapper
import spray.routing._
import scala.concurrent.ExecutionContext
/** Support user login */
abstract class FrontEndHttpService extends HttpService
  with UserServiceFrontEnd
  with NamingServiceFrontEnd
  with ExecutionWrapper
  with StrictLogging {
  /** The routes defined by this service: user and naming routes, mounted under
    * the FrontEndSvc prefix, with request/response logging at INFO level. */
  val routes =
    pathPrefix(FrontEndHttpService.serviceName) {
      logRequest("REST API", InfoLevel) {
        logResponse("REST API", InfoLevel) {
          userServiceRoutes ~ namingServiceRoutes
        }
      }
    }
}
object FrontEndHttpService {
  /** Path prefix under which all front-end routes are served. */
  val serviceName = "FrontEndSvc"

  /**
   * Builds a concrete [[FrontEndHttpService]], wiring in session persistence
   * and the backend services, plus the actor/execution-context implicits
   * required by spray's HttpService and the ExecutionWrapper trait.
   */
  def apply(
    persistence: SessionPersistence, userSvc: UserServiceBackend, namingSvc: NamingServiceBackend)
    (implicit ac: ActorContext, ec: ExecutionContext, to: Timeout): FrontEndHttpService = {

    new FrontEndHttpService {
      // Needed for ExecutionWrapper
      implicit override final def context: ActorContext = ac
      implicit override final def executor: ExecutionContext = ec
      implicit override final def timeout: Timeout = to

      // Needed for HttpService
      implicit override final def actorRefFactory: ActorRefFactory = ac

      override def getSessionPersistence = persistence
      override def getUserService = userSvc
      override def getNamingService = namingSvc
    }
  }
}
package ohnosequences.statika
package object aws {
  // A Compatible whose environment is specifically a Linux AMI environment.
  type AnyLinuxAMICompatible = AnyCompatible { type Environment <: AnyLinuxAMIEnvironment }
  // type AnyAMICompatible = AnyCompatible { type Environment <: AnyAMIEnvironment }
  // Alias pinning both the environment and bundle type parameters.
  type AMICompatible[E <: AnyLinuxAMIEnvironment, B <: AnyBundle] = Compatible[E,B]
  // Pimps Linux-AMI compatibles with the extra syntax defined in LinuxAMICompSyntax.
  implicit def linuxAMICompSyntax[C <: AnyLinuxAMICompatible]
    (comp: C): LinuxAMICompSyntax[C] = LinuxAMICompSyntax(comp)
}
| ohnosequences/aws-statika | src/main/scala/package.scala | Scala | agpl-3.0 | 443 |
package gh.test.gh3
import gh3.models.GH3Sender
import net.liftweb.json._
import org.scalatest.{FlatSpec, Matchers}
/** Tests parsing of GitHub webhook "sender" payloads into [[GH3Sender]]. */
class GH3SenderTest extends FlatSpec with Matchers
{
    // The two original test cases used byte-identical payloads; sharing a
    // single fixture removes that duplication.
    private val senderJson = parse(
        """
          |{
          |  "login": "baxterthehacker",
          |  "id": 6752317,
          |  "avatar_url": "https://avatars.githubusercontent.com/u/6752317?v=3",
          |  "gravatar_id": "",
          |  "url": "https://api.github.com/users/baxterthehacker",
          |  "html_url": "https://github.com/baxterthehacker",
          |  "followers_url": "https://api.github.com/users/baxterthehacker/followers",
          |  "following_url": "https://api.github.com/users/baxterthehacker/following{/other_user}",
          |  "gists_url": "https://api.github.com/users/baxterthehacker/gists{/gist_id}",
          |  "starred_url": "https://api.github.com/users/baxterthehacker/starred{/owner}{/repo}",
          |  "subscriptions_url": "https://api.github.com/users/baxterthehacker/subscriptions",
          |  "organizations_url": "https://api.github.com/users/baxterthehacker/orgs",
          |  "repos_url": "https://api.github.com/users/baxterthehacker/repos",
          |  "events_url": "https://api.github.com/users/baxterthehacker/events{/privacy}",
          |  "received_events_url": "https://api.github.com/users/baxterthehacker/received_events",
          |  "type": "User",
          |  "site_admin": false
          | }
        """.stripMargin)

    "Valid sender" should "be correctly parsed" in
    {
        GH3Sender(senderJson).isDefined shouldBe true
    }

    "An other valid sender" should "be correctly parsed" in
    {
        GH3Sender(senderJson).isDefined shouldBe true
    }
}
| mgoeminne/github_etl | src/test/scala/gh/test/gh3/GH3SenderTest.scala | Scala | mit | 3,325 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.admin
import joptsimple._
import java.util.Properties
import kafka.utils._
import org.I0Itec.zkclient.ZkClient
import scala.collection._
import scala.collection.JavaConversions._
import kafka.cluster.Broker
import kafka.log.LogConfig
import kafka.consumer.Whitelist
/**
 * Command line tool for administering Kafka topics through ZooKeeper:
 * create, alter, delete, list and describe.
 */
object TopicCommand {

  def main(args: Array[String]): Unit = {
    val opts = new TopicCommandOptions(args)

    // should have exactly one action
    val actions = Seq(opts.createOpt, opts.deleteOpt, opts.listOpt, opts.alterOpt, opts.describeOpt).count(opts.options.has _)
    if(actions != 1) {
      System.err.println("Command must include exactly one action: --list, --describe, --create, --delete, or --alter")
      opts.parser.printHelpOn(System.err)
      System.exit(1)
    }

    opts.checkArgs()

    val zkClient = new ZkClient(opts.options.valueOf(opts.zkConnectOpt), 30000, 30000, ZKStringSerializer)

    try {
      if(opts.options.has(opts.createOpt))
        createTopic(zkClient, opts)
      else if(opts.options.has(opts.alterOpt))
        alterTopic(zkClient, opts)
      else if(opts.options.has(opts.deleteOpt))
        deleteTopic(zkClient, opts)
      else if(opts.options.has(opts.listOpt))
        listTopics(zkClient, opts)
      else if(opts.options.has(opts.describeOpt))
        describeTopic(zkClient, opts)
    } catch {
      // Was a bare `case e =>`: a pattern like that in a catch block silently
      // means Throwable, so say it explicitly.
      case e: Throwable =>
        println("Error while executing topic command " + e.getMessage)
        println(Utils.stackTrace(e))
    } finally {
      zkClient.close()
    }
  }

  /** Resolves the topics addressed by --topic (interpreted as a whitelist
   *  pattern), or all topics when the option is absent; always sorted. */
  private def getTopics(zkClient: ZkClient, opts: TopicCommandOptions): Seq[String] = {
    val allTopics = ZkUtils.getAllTopics(zkClient).sorted
    if (opts.options.has(opts.topicOpt)) {
      val topicsSpec = opts.options.valueOf(opts.topicOpt)
      val topicsFilter = new Whitelist(topicsSpec)
      allTopics.filter(topicsFilter.isTopicAllowed)
    } else
      allTopics
  }

  /** Creates a topic either from an explicit --replica-assignment or from
   *  --partitions/--replication-factor. */
  def createTopic(zkClient: ZkClient, opts: TopicCommandOptions) {
    val topic = opts.options.valueOf(opts.topicOpt)
    val configs = parseTopicConfigsToBeAdded(opts)
    if (opts.options.has(opts.replicaAssignmentOpt)) {
      val assignment = parseReplicaAssignment(opts.options.valueOf(opts.replicaAssignmentOpt))
      AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkClient, topic, assignment, configs)
    } else {
      CommandLineUtils.checkRequiredArgs(opts.parser, opts.options, opts.partitionsOpt, opts.replicationFactorOpt)
      val partitions = opts.options.valueOf(opts.partitionsOpt).intValue
      val replicas = opts.options.valueOf(opts.replicationFactorOpt).intValue
      AdminUtils.createTopic(zkClient, topic, partitions, replicas, configs)
    }
    println("Created topic \"%s\".".format(topic))
  }

  /** Applies config overrides/removals and/or adds partitions to every
   *  matched topic. */
  def alterTopic(zkClient: ZkClient, opts: TopicCommandOptions) {
    val topics = getTopics(zkClient, opts)
    topics.foreach { topic =>
      if(opts.options.has(opts.configOpt) || opts.options.has(opts.deleteConfigOpt)) {
        val configsToBeAdded = parseTopicConfigsToBeAdded(opts)
        val configsToBeDeleted = parseTopicConfigsToBeDeleted(opts)
        // compile the final set of configs
        val configs = AdminUtils.fetchTopicConfig(zkClient, topic)
        configs.putAll(configsToBeAdded)
        configsToBeDeleted.foreach(config => configs.remove(config))
        AdminUtils.changeTopicConfig(zkClient, topic, configs)
        println("Updated config for topic \"%s\".".format(topic))
      }
      if(opts.options.has(opts.partitionsOpt)) {
        println("WARNING: If partitions are increased for a topic that has a key, the partition " +
          "logic or ordering of the messages will be affected")
        val nPartitions = opts.options.valueOf(opts.partitionsOpt).intValue
        val replicaAssignmentStr = opts.options.valueOf(opts.replicaAssignmentOpt)
        AdminUtils.addPartitions(zkClient, topic, nPartitions, replicaAssignmentStr)
        println("Adding partitions succeeded!")
      }
    }
  }

  /** Queues every matched topic for deletion. */
  def deleteTopic(zkClient: ZkClient, opts: TopicCommandOptions) {
    val topics = getTopics(zkClient, opts)
    topics.foreach { topic =>
      AdminUtils.deleteTopic(zkClient, topic)
      println("Topic \"%s\" queued for deletion.".format(topic))
    }
  }

  /** Prints the matched topic names, one per line. */
  def listTopics(zkClient: ZkClient, opts: TopicCommandOptions) {
    val topics = getTopics(zkClient, opts)
    for(topic <- topics)
      println(topic)
  }

  /** Describes the matched topics: configs and per-partition leader/replica/ISR
   *  details, optionally filtered to under-replicated, unavailable or
   *  config-overridden topics. */
  def describeTopic(zkClient: ZkClient, opts: TopicCommandOptions) {
    val topics = getTopics(zkClient, opts)
    // `opts.options.has(...)` is already a Boolean; the former
    // `if (...) true else false` wrappers were redundant.
    val reportUnderReplicatedPartitions = opts.options.has(opts.reportUnderReplicatedPartitionsOpt)
    val reportUnavailablePartitions = opts.options.has(opts.reportUnavailablePartitionsOpt)
    val reportOverriddenConfigs = opts.options.has(opts.topicsWithOverridesOpt)
    val liveBrokers = ZkUtils.getAllBrokersInCluster(zkClient).map(_.id).toSet
    for (topic <- topics) {
      ZkUtils.getPartitionAssignmentForTopics(zkClient, List(topic)).get(topic) match {
        case Some(topicPartitionAssignment) =>
          val describeConfigs: Boolean = !reportUnavailablePartitions && !reportUnderReplicatedPartitions
          val describePartitions: Boolean = !reportOverriddenConfigs
          val sortedPartitions = topicPartitionAssignment.toList.sortWith((m1, m2) => m1._1 < m2._1)
          if (describeConfigs) {
            val configs = AdminUtils.fetchTopicConfig(zkClient, topic)
            if (!reportOverriddenConfigs || !configs.isEmpty) {
              val numPartitions = topicPartitionAssignment.size
              val replicationFactor = topicPartitionAssignment.head._2.size
              println("Topic:%s\tPartitionCount:%d\tReplicationFactor:%d\tConfigs:%s"
                .format(topic, numPartitions, replicationFactor, configs.map(kv => kv._1 + "=" + kv._2).mkString(",")))
            }
          }
          if (describePartitions) {
            for ((partitionId, assignedReplicas) <- sortedPartitions) {
              val inSyncReplicas = ZkUtils.getInSyncReplicasForPartition(zkClient, topic, partitionId)
              val leader = ZkUtils.getLeaderForPartition(zkClient, topic, partitionId)
              if ((!reportUnderReplicatedPartitions && !reportUnavailablePartitions) ||
                  (reportUnderReplicatedPartitions && inSyncReplicas.size < assignedReplicas.size) ||
                  (reportUnavailablePartitions && (!leader.isDefined || !liveBrokers.contains(leader.get)))) {
                print("\tTopic: " + topic)
                print("\tPartition: " + partitionId)
                print("\tLeader: " + (if(leader.isDefined) leader.get else "none"))
                print("\tReplicas: " + assignedReplicas.mkString(","))
                println("\tIsr: " + inSyncReplicas.mkString(","))
              }
            }
          }
        case None =>
          println("Topic " + topic + " doesn't exist!")
      }
    }
  }

  /** Renders a broker as "id (host:port)". */
  def formatBroker(broker: Broker) = broker.id + " (" + broker.host + ":" + broker.port + ")"

  /** Parses repeated --config key=val options into Properties, validated
   *  against the known log configs. */
  def parseTopicConfigsToBeAdded(opts: TopicCommandOptions): Properties = {
    val configsToBeAdded = opts.options.valuesOf(opts.configOpt).map(_.split("""\s*=\s*"""))
    require(configsToBeAdded.forall(config => config.length == 2),
      "Invalid topic config: all configs to be added must be in the format \"key=val\".")
    val props = new Properties
    configsToBeAdded.foreach(pair => props.setProperty(pair(0).trim, pair(1).trim))
    LogConfig.validate(props)
    props
  }

  /** Parses repeated --deleteConfig options, validating the config names. */
  def parseTopicConfigsToBeDeleted(opts: TopicCommandOptions): Seq[String] = {
    if (opts.options.has(opts.deleteConfigOpt)) {
      val configsToBeDeleted = opts.options.valuesOf(opts.deleteConfigOpt).map(_.trim())
      val propsToBeDeleted = new Properties
      configsToBeDeleted.foreach(propsToBeDeleted.setProperty(_, ""))
      LogConfig.validateNames(propsToBeDeleted)
      configsToBeDeleted
    }
    else
      Seq.empty
  }

  /** Parses "b1:b2,b3:b4,..." into a partition -> replica-list map; every
   *  partition must declare the same replication factor. */
  def parseReplicaAssignment(replicaAssignmentList: String): Map[Int, List[Int]] = {
    val partitionList = replicaAssignmentList.split(",")
    val ret = new mutable.HashMap[Int, List[Int]]()
    for (i <- 0 until partitionList.size) {
      val brokerList = partitionList(i).split(":").map(s => s.trim().toInt)
      ret.put(i, brokerList.toList)
      if (ret(i).size != ret(0).size)
        // mkString gives a readable replica list; Array.toString printed a JVM hash.
        throw new AdminOperationException("Partition " + i + " has different replication factor: " + brokerList.mkString(":"))
    }
    ret.toMap
  }

  /** Command line option definitions and cross-option validation. */
  class TopicCommandOptions(args: Array[String]) {
    val parser = new OptionParser
    val zkConnectOpt = parser.accepts("zookeeper", "REQUIRED: The connection string for the zookeeper connection in the form host:port. " +
                                                  "Multiple URLS can be given to allow fail-over.")
                           .withRequiredArg
                           .describedAs("urls")
                           .ofType(classOf[String])
    val listOpt = parser.accepts("list", "List all available topics.")
    val createOpt = parser.accepts("create", "Create a new topic.")
    val alterOpt = parser.accepts("alter", "Alter the configuration for the topic.")
    val deleteOpt = parser.accepts("delete", "Delete the topic.")
    val describeOpt = parser.accepts("describe", "List details for the given topics.")
    val helpOpt = parser.accepts("help", "Print usage information.")
    val topicOpt = parser.accepts("topic", "The topic to be create, alter, delete, or describe. Can also accept a regular " +
                                           "expression except for --create option")
                         .withRequiredArg
                         .describedAs("topic")
                         .ofType(classOf[String])
    val configOpt = parser.accepts("config", "A topic configuration override for the topic being created or altered.")
                          .withRequiredArg
                          .describedAs("name=value")
                          .ofType(classOf[String])
    val deleteConfigOpt = parser.accepts("deleteConfig", "A topic configuration override to be removed for an existing topic")
                          .withRequiredArg
                          .describedAs("name")
                          .ofType(classOf[String])
    val partitionsOpt = parser.accepts("partitions", "The number of partitions for the topic being created or " +
      "altered (WARNING: If partitions are increased for a topic that has a key, the partition logic or ordering of the messages will be affected")
                           .withRequiredArg
                           .describedAs("# of partitions")
                           .ofType(classOf[java.lang.Integer])
    val replicationFactorOpt = parser.accepts("replication-factor", "The replication factor for each partition in the topic being created.")
                           .withRequiredArg
                           .describedAs("replication factor")
                           .ofType(classOf[java.lang.Integer])
    val replicaAssignmentOpt = parser.accepts("replica-assignment", "A list of manual partition-to-broker assignments for the topic being created or altered.")
                           .withRequiredArg
                           .describedAs("broker_id_for_part1_replica1 : broker_id_for_part1_replica2 , " +
                                        "broker_id_for_part2_replica1 : broker_id_for_part2_replica2 , ...")
                           .ofType(classOf[String])
    val reportUnderReplicatedPartitionsOpt = parser.accepts("under-replicated-partitions",
                                                            "if set when describing topics, only show under replicated partitions")
    val reportUnavailablePartitionsOpt = parser.accepts("unavailable-partitions",
                                                            "if set when describing topics, only show partitions whose leader is not available")
    val topicsWithOverridesOpt = parser.accepts("topics-with-overrides",
                                                "if set when describing topics, only show topics that have overridden configs")

    val options = parser.parse(args : _*)

    val allTopicLevelOpts: Set[OptionSpec[_]] = Set(alterOpt, createOpt, deleteOpt, describeOpt, listOpt)

    /** Verifies required options are present and flags are only combined with
     *  actions they apply to. */
    def checkArgs() {
      // check required args
      CommandLineUtils.checkRequiredArgs(parser, options, zkConnectOpt)
      if (!options.has(listOpt) && !options.has(describeOpt))
        CommandLineUtils.checkRequiredArgs(parser, options, topicOpt)

      // check invalid args
      CommandLineUtils.checkInvalidArgs(parser, options, configOpt, allTopicLevelOpts -- Set(alterOpt, createOpt))
      CommandLineUtils.checkInvalidArgs(parser, options, deleteConfigOpt, allTopicLevelOpts -- Set(alterOpt))
      CommandLineUtils.checkInvalidArgs(parser, options, partitionsOpt, allTopicLevelOpts -- Set(alterOpt, createOpt))
      CommandLineUtils.checkInvalidArgs(parser, options, replicationFactorOpt, allTopicLevelOpts -- Set(createOpt))
      CommandLineUtils.checkInvalidArgs(parser, options, replicaAssignmentOpt,
        allTopicLevelOpts -- Set(alterOpt, createOpt) + partitionsOpt + replicationFactorOpt)
      CommandLineUtils.checkInvalidArgs(parser, options, reportUnderReplicatedPartitionsOpt,
        allTopicLevelOpts -- Set(describeOpt) + reportUnavailablePartitionsOpt + topicsWithOverridesOpt)
      CommandLineUtils.checkInvalidArgs(parser, options, reportUnavailablePartitionsOpt,
        allTopicLevelOpts -- Set(describeOpt) + reportUnderReplicatedPartitionsOpt + topicsWithOverridesOpt)
      CommandLineUtils.checkInvalidArgs(parser, options, topicsWithOverridesOpt,
        allTopicLevelOpts -- Set(describeOpt) + reportUnderReplicatedPartitionsOpt + reportUnavailablePartitionsOpt)
    }
  }
}
| relango/kafka | core/src/main/scala/kafka/admin/TopicCommand.scala | Scala | apache-2.0 | 14,658 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.integration.spark.testsuite.complexType
import java.sql.Timestamp
import scala.collection.mutable
import org.apache.spark.sql.Row
import org.apache.spark.sql.test.util.QueryTest
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties
class TestCompactionComplexType extends QueryTest with BeforeAndAfterAll {
// scalastyle:off lineLength
  // Snapshot of the compaction threshold as configured before this suite runs
  // (falls back to the carbon default when unset). beforeAll() overrides the
  // live value with "2,3"; afterAll() restores this captured value so other
  // suites see the original configuration.
  private val compactionThreshold = CarbonProperties.getInstance()
    .getProperty(CarbonCommonConstants.COMPACTION_SEGMENT_LEVEL_THRESHOLD,
      CarbonCommonConstants.DEFAULT_SEGMENT_LEVEL_THRESHOLD)
override protected def beforeAll(): Unit = {
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.COMPACTION_SEGMENT_LEVEL_THRESHOLD, "2,3")
}
override protected def afterAll(): Unit = {
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.COMPACTION_SEGMENT_LEVEL_THRESHOLD, compactionThreshold)
.addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT)
sql("DROP TABLE IF EXISTS compactComplex")
}
test("test INT with struct and array, Encoding INT-->BYTE") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:int,name:string,marks:array<int>>) " +
"STORED AS carbondata")
sql(
s"load data inpath '$resourcesPath/adap.csv' into table adaptive options('delimiter'=','," +
"'quotechar'='\\"','fileheader'='roll,student','complex_delimiter_level_1'='$'," +
"'complex_delimiter_level_2'=':')")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(20, 30, 40)))),
Row(2, Row(600, "abc", mutable.WrappedArray.make(Array(20, 30, 40)))),
Row(3, Row(600, "abc", mutable.WrappedArray.make(Array(20, 30, 40))))))
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:int,name:string,marks:array<int>>) " +
"STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('id', 500, 'name', 'abc', 'marks', array(20,30,40)))")
sql("insert into adaptive values(2,named_struct('id', 600, 'name', 'abc', 'marks', array(30,30,40)))")
sql("insert into adaptive values(3,named_struct('id', 700, 'name', 'abc', 'marks', array(40,30,40)))")
sql("insert into adaptive values(4,named_struct('id', 800, 'name', 'abc', 'marks', array(50,30,40)))")
sql("alter table adaptive compact 'major'").collect()
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(20, 30, 40)))),
Row(2, Row(600, "abc", mutable.WrappedArray.make(Array(30, 30, 40)))),
Row(3, Row(700, "abc", mutable.WrappedArray.make(Array(40, 30, 40)))),
Row(4, Row(800, "abc", mutable.WrappedArray.make(Array(50, 30, 40))))))
}
test("test INT with struct and array, Encoding INT-->SHORT") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:int,name:string,marks:array<int>>) " +
"STORED AS carbondata")
sql(s"load data inpath '$resourcesPath/adap_int1.csv' into table adaptive " +
"options('delimiter'=',','quotechar'='\\"','fileheader'='roll,student'," +
"'complex_delimiter_level_1'='$','complex_delimiter_level_2'=':')")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(2, Row(700, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(3, Row(800, "abc", mutable.WrappedArray.make(Array(200, 300, 400))))))
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:int,name:string,marks:array<int>>) " +
"STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('id', 500, 'name', 'abc', 'marks', array(200,300,400)))")
sql("insert into adaptive values(2,named_struct('id', 600, 'name', 'abc', 'marks', array(300,300,400)))")
sql("insert into adaptive values(3,named_struct('id', 700, 'name', 'abc', 'marks', array(400,300,400)))")
sql("insert into adaptive values(4,named_struct('id', 800, 'name', 'abc', 'marks', array(500,300,400)))")
sql("alter table adaptive compact 'major'").collect()
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(2, Row(600, "abc", mutable.WrappedArray.make(Array(300, 300, 400)))),
Row(3, Row(700, "abc", mutable.WrappedArray.make(Array(400, 300, 400)))),
Row(4, Row(800, "abc", mutable.WrappedArray.make(Array(500, 300, 400))))))
}
test("test INT with struct and array, Encoding INT-->SHORT INT") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:int,name:string,marks:array<int>>) " +
"STORED AS carbondata")
sql(s"load data inpath '$resourcesPath/adap_int2.csv' into table adaptive " +
"options('delimiter'=',','quotechar'='\\"','fileheader'='roll,student'," +
"'complex_delimiter_level_1'='$','complex_delimiter_level_2'=':')")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(50000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
Row(2, Row(70000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
Row(3, Row(100000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000))))))
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:int,name:string,marks:array<int>>) " +
"STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('id', 50000, 'name', 'abc', 'marks', array(2000000,3000000,4000000)))")
sql("insert into adaptive values(2,named_struct('id', 70000, 'name', 'abc', 'marks', array(2000000,4000000,4000000)))")
sql("insert into adaptive values(3,named_struct('id', 100000, 'name', 'abc', 'marks', array(2000000,5000000,4000000)))")
sql("insert into adaptive values(4,named_struct('id', 200000, 'name', 'abc', 'marks', array(2000000,6000000,4000000)))")
sql("alter table adaptive compact 'major'").collect()
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(50000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
Row(2, Row(70000, "abc", mutable.WrappedArray.make(Array(2000000, 4000000, 4000000)))),
Row(3, Row(100000, "abc", mutable.WrappedArray.make(Array(2000000, 5000000, 4000000)))),
Row(4, Row(200000, "abc", mutable.WrappedArray.make(Array(2000000, 6000000, 4000000))))))
}
test("test INT with struct and array, Encoding INT-->INT") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:int,name:string,marks:array<int>>) " +
"STORED AS carbondata")
sql(s"load data inpath '$resourcesPath/adap_int3.csv' into table adaptive " +
"options('delimiter'=',','quotechar'='\\"','fileheader'='roll,student'," +
"'complex_delimiter_level_1'='$','complex_delimiter_level_2'=':')")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(500000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
Row(2, Row(7000000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
Row(3, Row(10000000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000))))))
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:int,name:string,marks:array<int>>) " +
"STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('id', 500000, 'name', 'abc', 'marks', array(200,300,52000000)))")
sql("insert into adaptive values(2,named_struct('id', 700000, 'name', 'abc', 'marks', array(210,350,52000000)))")
sql("insert into adaptive values(3,named_struct('id', 10000000, 'name', 'abc', 'marks', array(200,300,52000000)))")
sql("insert into adaptive values(4,named_struct('id', 10000001, 'name', 'abd', 'marks', array(250,450,62000000)))")
sql("alter table adaptive compact 'major'").collect()
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(500000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
Row(2, Row(700000, "abc", mutable.WrappedArray.make(Array(210, 350, 52000000)))),
Row(3, Row(10000000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
Row(4, Row(10000001, "abd", mutable.WrappedArray.make(Array(250, 450, 62000000))))))
}
test("test SMALLINT with struct and array SMALLINT --> BYTE") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:smallint,name:string," +
"marks:array<smallint>>) STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('id', 100, 'name', 'abc', 'marks', array(20,30,40)))")
sql("insert into adaptive values(2,named_struct('id', 200, 'name', 'abc', 'marks', array(30,40,50)))")
sql("insert into adaptive values(3,named_struct('id', 300, 'name', 'abd', 'marks', array(30,41,55)))")
sql("insert into adaptive values(4,named_struct('id', 400, 'name', 'abe', 'marks', array(30,42,56)))")
sql("alter table adaptive compact 'major'").collect()
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(100, "abc", mutable.WrappedArray.make(Array(20, 30, 40)))),
Row(2, Row(200, "abc", mutable.WrappedArray.make(Array(30, 40, 50)))),
Row(3, Row(300, "abd", mutable.WrappedArray.make(Array(30, 41, 55)))),
Row(4, Row(400, "abe", mutable.WrappedArray.make(Array(30, 42, 56))))))
}
test("test SMALLINT with struct and array SMALLINT --> SHORT") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:smallint,name:string," +
"marks:array<smallint>>) STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('id', 500, 'name', 'abc', 'marks', array(200,300,400)))")
sql("insert into adaptive values(2,named_struct('id', 8000, 'name', 'abc', 'marks', array(300,410,500)))")
sql("insert into adaptive values(3,named_struct('id', 9000, 'name', 'abee', 'marks', array(310,420,400)))")
sql("insert into adaptive values(4,named_struct('id', 9900, 'name', 'abfffffffffffffff', 'marks', array(320,430,500)))")
sql("alter table adaptive compact 'major'").collect()
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(2, Row(8000, "abc", mutable.WrappedArray.make(Array(300, 410, 500)))),
Row(3, Row(9000, "abee", mutable.WrappedArray.make(Array(310, 420, 400)))),
Row(4, Row(9900, "abfffffffffffffff", mutable.WrappedArray.make(Array(320, 430, 500))))))
sql("insert into adaptive values(5,named_struct('id', 500, 'name', 'abc', 'marks', array(200,310,400)))")
sql("insert into adaptive values(6,named_struct('id', 8000, 'name', 'abc', 'marks', array(300,310,500)))")
sql("insert into adaptive values(7,named_struct('id', 9000, 'name', 'abee', 'marks', array(310,320,400)))")
sql("insert into adaptive values(8,named_struct('id', 9900, 'name', 'abfffffffffffffffeeee', 'marks', array(320,330,500)))")
sql("alter table adaptive compact 'major'").collect()
sql("SHOW SEGMENTS FOR TABLE adaptive").collect()
sql("clean files for table adaptive").collect()
sql("SHOW SEGMENTS FOR TABLE adaptive").collect()
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(2, Row(8000, "abc", mutable.WrappedArray.make(Array(300, 410, 500)))),
Row(3, Row(9000, "abee", mutable.WrappedArray.make(Array(310, 420, 400)))),
Row(4, Row(9900, "abfffffffffffffff", mutable.WrappedArray.make(Array(320, 430, 500)))),
Row(5, Row(500, "abc", mutable.WrappedArray.make(Array(200, 310, 400)))),
Row(6, Row(8000, "abc", mutable.WrappedArray.make(Array(300, 310, 500)))),
Row(7, Row(9000, "abee", mutable.WrappedArray.make(Array(310, 320, 400)))),
Row(8, Row(9900, "abfffffffffffffffeeee", mutable.WrappedArray.make(Array(320, 330, 500))))))
}
test("test BigInt with struct and array BIGINT --> BYTE") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:bigint,name:string," +
"marks:array<bigint>>) STORED AS carbondata")
sql("insert into adaptive values(11,named_struct('id', 1, 'name', 'abc', 'marks', array(21,30,40)))")
sql("insert into adaptive values(12,named_struct('id', 1, 'name', 'ab1', 'marks', array(22,30,40)))")
sql("insert into adaptive values(13,named_struct('id', 1, 'name', 'ab2', 'marks', array(23,30,40)))")
sql("insert into adaptive values(14,named_struct('id', 1, 'name', 'ab3', 'marks', array(24,30,40)))")
sql("insert into adaptive values(15,named_struct('id', 1, 'name', 'ab4', 'marks', array(25,30,40)))")
sql("insert into adaptive values(16,named_struct('id', 1, 'name', 'ab5', 'marks', array(26,30,40)))")
sql("insert into adaptive values(17,named_struct('id', 1, 'name', 'ab6', 'marks', array(27,30,40)))")
sql("insert into adaptive values(18,named_struct('id', 1, 'name', 'ab7', 'marks', array(28,30,40)))")
sql("insert into adaptive values(19,named_struct('id', 1, 'name', 'ab8', 'marks', array(29,30,40)))")
sql("insert into adaptive values(20,named_struct('id', 1, 'name', 'ab9', 'marks', array(30,30,40)))")
sql("insert into adaptive values(21,named_struct('id', 1, 'name', 'ab10', 'marks', array(31,30,40)))")
sql("insert into adaptive values(22,named_struct('id', 1, 'name', 'ab11', 'marks', array(32,30,40)))")
sql("alter table adaptive compact 'major'").collect()
sql("SHOW SEGMENTS FOR TABLE adaptive").collect()
sql("clean files for table adaptive").collect()
sql("SHOW SEGMENTS FOR TABLE adaptive").collect()
checkAnswer(sql("select * from adaptive"),
Seq(Row(11, Row(1, "abc", mutable.WrappedArray.make(Array(21, 30, 40)))),
Row(12, Row(1, "ab1", mutable.WrappedArray.make(Array(22, 30, 40)))),
Row(13, Row(1, "ab2", mutable.WrappedArray.make(Array(23, 30, 40)))),
Row(14, Row(1, "ab3", mutable.WrappedArray.make(Array(24, 30, 40)))),
Row(15, Row(1, "ab4", mutable.WrappedArray.make(Array(25, 30, 40)))),
Row(16, Row(1, "ab5", mutable.WrappedArray.make(Array(26, 30, 40)))),
Row(17, Row(1, "ab6", mutable.WrappedArray.make(Array(27, 30, 40)))),
Row(18, Row(1, "ab7", mutable.WrappedArray.make(Array(28, 30, 40)))),
Row(19, Row(1, "ab8", mutable.WrappedArray.make(Array(29, 30, 40)))),
Row(20, Row(1, "ab9", mutable.WrappedArray.make(Array(30, 30, 40)))),
Row(21, Row(1, "ab10", mutable.WrappedArray.make(Array(31, 30, 40)))),
Row(22, Row(1, "ab11", mutable.WrappedArray.make(Array(32, 30, 40))))
))
}
test("test BigInt with struct and array BIGINT --> SHORT") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:bigint,name:string," +
"marks:array<bigint>>) STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('id', 500, 'name', 'abc', 'marks', array(200,300,400)))")
sql("insert into adaptive values(2,named_struct('id', 8000, 'name', 'abc', 'marks', array(300,400,500)))")
sql("insert into adaptive values(3,named_struct('id', 9000, 'name', 'abc', 'marks', array(300,400,500)))")
sql("insert into adaptive values(4,named_struct('id', 10000, 'name', 'abc', 'marks', array(300,400,500)))")
sql("alter table adaptive compact'major'")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(2, Row(8000, "abc", mutable.WrappedArray.make(Array(300, 400, 500)))),
Row(3, Row(9000, "abc", mutable.WrappedArray.make(Array(300, 400, 500)))),
Row(4, Row(10000, "abc", mutable.WrappedArray.make(Array(300, 400, 500))))))
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:BIGINT,name:string,marks:array<BIGINT>>)" +
" " +
"STORED AS carbondata")
(0 to 3).foreach { _ =>
sql(s"load data inpath '$resourcesPath/adap_int1.csv' into table adaptive " +
"options('delimiter'=',','quotechar'='\\"','fileheader'='roll,student'," +
"'complex_delimiter_level_1'='$','complex_delimiter_level_2'=':')")
}
sql("alter table adaptive compact'major'")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(2, Row(700, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(3, Row(800, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(2, Row(700, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(3, Row(800, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(2, Row(700, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(3, Row(800, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(2, Row(700, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(3, Row(800, "abc", mutable.WrappedArray.make(Array(200, 300, 400))))
))
}
test("test BigInt with struct and array BIGINT --> SHORT INT") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:bigint,name:string," +
"marks:array<bigint>>) STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('id', 50000, 'name', 'abc', 'marks', array(2000000,3000000,4000000)))")
sql("insert into adaptive values(2,named_struct('id', 70000, 'name', 'abc', 'marks', array(2000000,3000000,4000000)))")
sql("insert into adaptive values(3,named_struct('id', 100000, 'name', 'abc', 'marks', array(2000000,3000000,4000000)))")
sql("insert into adaptive values(1,named_struct('id', 50000, 'name', 'abc', 'marks', array(2000000,3000000,4000000)))")
sql("insert into adaptive values(2,named_struct('id', 70000, 'name', 'abc', 'marks', array(2000000,3000000,4000000)))")
sql("insert into adaptive values(3,named_struct('id', 100000, 'name', 'abc', 'marks', array(2000000,3000000,4000000)))")
sql("insert into adaptive values(1,named_struct('id', 50000, 'name', 'abc', 'marks', array(2000000,3000000,4000000)))")
sql("insert into adaptive values(2,named_struct('id', 70000, 'name', 'abc', 'marks', array(2000000,3000000,4000000)))")
sql("insert into adaptive values(3,named_struct('id', 100000, 'name', 'abc', 'marks', array(2000000,3000000,4000000)))")
sql("insert into adaptive values(1,named_struct('id', 50000, 'name', 'abc', 'marks', array(2000000,3000000,4000000)))")
sql("insert into adaptive values(2,named_struct('id', 70000, 'name', 'abc', 'marks', array(2000000,3000000,4000000)))")
sql("insert into adaptive values(3,named_struct('id', 100000, 'name', 'abc', 'marks', array(2000000,3000000,4000000)))")
sql("alter table adaptive compact'major'")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(50000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
Row(2, Row(70000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
Row(3, Row(100000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
Row(1, Row(50000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
Row(2, Row(70000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
Row(3, Row(100000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
Row(1, Row(50000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
Row(2, Row(70000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
Row(3, Row(100000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
Row(1, Row(50000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
Row(2, Row(70000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
Row(3, Row(100000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000))))
))
sql("Drop table if exists adaptive")
sql("create table adaptive(roll int, student struct<id:BIGINT,name:string," +
"marks:array<BIGINT>>) STORED AS carbondata")
(0 to 3).foreach { _ =>
sql(s"load data inpath '$resourcesPath/adap_int2.csv' into table adaptive " +
"options('delimiter'=',','quotechar'='\\"','fileheader'='roll,student'," +
"'complex_delimiter_level_1'='$','complex_delimiter_level_2'=':')")
}
sql("alter table adaptive compact'major'")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(50000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
Row(2, Row(70000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
Row(3, Row(100000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
Row(1, Row(50000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
Row(2, Row(70000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
Row(3, Row(100000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
Row(1, Row(50000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
Row(2, Row(70000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
Row(3, Row(100000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
Row(1, Row(50000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
Row(2, Row(70000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
Row(3, Row(100000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000))))
))
}
test("test BIGINT with struct and array, Encoding INT-->INT") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:BIGINT,name:string,marks:array<BIGINT>>)" +
" " +
"STORED AS carbondata")
(0 to 3).foreach { _ =>
sql(s"load data inpath '$resourcesPath/adap_int3.csv' into table adaptive " +
"options('delimiter'=',','quotechar'='\\"','fileheader'='roll,student'," +
"'complex_delimiter_level_1'='$','complex_delimiter_level_2'=':')")
}
sql("alter table adaptive compact'major'")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(500000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
Row(2, Row(7000000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
Row(3, Row(10000000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
Row(1, Row(500000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
Row(2, Row(7000000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
Row(3, Row(10000000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
Row(1, Row(500000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
Row(2, Row(7000000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
Row(3, Row(10000000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
Row(1, Row(500000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
Row(2, Row(7000000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
Row(3, Row(10000000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000))))
))
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:BIGINT,name:string,marks:array<BIGINT>>)" +
" " +
"STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('id', 500000, 'name', 'abc', 'marks', array(200,300,52000000)))")
sql("insert into adaptive values(2,named_struct('id', 700000, 'name', 'abc', 'marks', array(200,300,52000000)))")
sql("insert into adaptive values(3,named_struct('id', 10000000, 'name','abc', 'marks', array(200,300,52000000)))")
sql("insert into adaptive values(1,named_struct('id', 500000, 'name', 'abc', 'marks', array(200,300,52000000)))")
sql("insert into adaptive values(2,named_struct('id', 700000, 'name', 'abc', 'marks', array(200,300,52000000)))")
sql("insert into adaptive values(3,named_struct('id', 10000000, 'name','abc', 'marks', array(200,300,52000000)))")
sql("insert into adaptive values(1,named_struct('id', 500000, 'name', 'abc', 'marks', array(200,300,52000000)))")
sql("insert into adaptive values(2,named_struct('id', 700000, 'name', 'abc', 'marks', array(200,300,52000000)))")
sql("insert into adaptive values(3,named_struct('id', 10000000, 'name','abc', 'marks', array(200,300,52000000)))")
sql("insert into adaptive values(1,named_struct('id', 500000, 'name', 'abc', 'marks', array(200,300,52000000)))")
sql("insert into adaptive values(2,named_struct('id', 700000, 'name', 'abc', 'marks', array(200,300,52000000)))")
sql("insert into adaptive values(3,named_struct('id', 10000000, 'name','abc', 'marks', array(200,300,52000000)))")
sql("alter table adaptive compact 'major' ")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(500000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
Row(2, Row(700000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
Row(3, Row(10000000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
Row(1, Row(500000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
Row(2, Row(700000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
Row(3, Row(10000000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
Row(1, Row(500000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
Row(2, Row(700000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
Row(3, Row(10000000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
Row(1, Row(500000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
Row(2, Row(700000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
Row(3, Row(10000000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000))))
))
}
test("test Double with Struct and Array DOUBLE --> BYTE") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:double,name:string," +
"marks:array<double>>) STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('id', 1.323, 'name', 'abc', 'marks', array(2.2,3.3,4.4)))")
sql("insert into adaptive values(2,named_struct('id', 1.324, 'name', 'abc', 'marks', array(2.2,3.3,4.4)))")
sql("insert into adaptive values(3,named_struct('id', 1.325, 'name', 'abc', 'marks', array(2.2,3.3,4.4)))")
sql("insert into adaptive values(4,named_struct('id', 1.326, 'name', 'abc', 'marks', array(2.2,3.3,4.4)))")
sql("alter table adaptive compact 'major' ")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(1.323, "abc", mutable.WrappedArray.make(Array(2.2, 3.3, 4.4)))),
Row(2, Row(1.324, "abc", mutable.WrappedArray.make(Array(2.2, 3.3, 4.4)))),
Row(3, Row(1.325, "abc", mutable.WrappedArray.make(Array(2.2, 3.3, 4.4)))),
Row(4, Row(1.326, "abc", mutable.WrappedArray.make(Array(2.2, 3.3, 4.4))))))
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:double,name:string,marks:array<double>>)" +
" " +
"STORED AS carbondata")
(0 to 3).foreach { _ =>
sql(s"load data inpath '$resourcesPath/adap_double1.csv' into table adaptive " +
"options('delimiter'=',','quotechar'='\\"','fileheader'='roll,student'," +
"'complex_delimiter_level_1'='$','complex_delimiter_level_2'=':')")
}
sql("alter table adaptive compact 'major' ")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(1.323, "abc", mutable.WrappedArray.make(Array(2.2, 3.3, 4.4)))),
Row(2, Row(1.323, "abc", mutable.WrappedArray.make(Array(2.2, 3.3, 4.4)))),
Row(3, Row(1.323, "abc", mutable.WrappedArray.make(Array(2.2, 3.3, 4.4)))),
Row(1, Row(1.323, "abc", mutable.WrappedArray.make(Array(2.2, 3.3, 4.4)))),
Row(2, Row(1.323, "abc", mutable.WrappedArray.make(Array(2.2, 3.3, 4.4)))),
Row(3, Row(1.323, "abc", mutable.WrappedArray.make(Array(2.2, 3.3, 4.4)))),
Row(1, Row(1.323, "abc", mutable.WrappedArray.make(Array(2.2, 3.3, 4.4)))),
Row(2, Row(1.323, "abc", mutable.WrappedArray.make(Array(2.2, 3.3, 4.4)))),
Row(3, Row(1.323, "abc", mutable.WrappedArray.make(Array(2.2, 3.3, 4.4)))),
Row(1, Row(1.323, "abc", mutable.WrappedArray.make(Array(2.2, 3.3, 4.4)))),
Row(2, Row(1.323, "abc", mutable.WrappedArray.make(Array(2.2, 3.3, 4.4)))),
Row(3, Row(1.323, "abc", mutable.WrappedArray.make(Array(2.2, 3.3, 4.4))))
))
}
test("test Double with Struct and Array DOUBLE --> SHORT") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:double,name:string," +
"marks:array<double>>) STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('id', 1.323, 'name', 'abc', 'marks', array(20.2,30.3,40.4)))")
sql("insert into adaptive values(2,named_struct('id', 1.324, 'name', 'abc', 'marks', array(20.2,30.3,40.5)))")
sql("insert into adaptive values(3,named_struct('id', 1.325, 'name', 'abc', 'marks', array(20.2,30.3,40.6)))")
sql("insert into adaptive values(4,named_struct('id', 1.326, 'name', 'abc', 'marks', array(20.2,30.3,40.7)))")
sql("alter table adaptive compact 'major' ")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(1.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 40.4)))),
Row(2, Row(1.324, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 40.5)))),
Row(3, Row(1.325, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 40.6)))),
Row(4, Row(1.326, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 40.7))))
))
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:double,name:string,marks:array<double>>)" +
" " +
"STORED AS carbondata")
(0 to 3).foreach { _ =>
sql(s"load data inpath '$resourcesPath/adap_double2.csv' into table adaptive " +
"options('delimiter'=',','quotechar'='\\"','fileheader'='roll,student'," +
"'complex_delimiter_level_1'='$','complex_delimiter_level_2'=':')")
}
sql("alter table adaptive compact 'major' ")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(1.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 40.4)))),
Row(2, Row(2.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 40.4)))),
Row(3, Row(4.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 40.4)))),
Row(1, Row(1.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 40.4)))),
Row(2, Row(2.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 40.4)))),
Row(3, Row(4.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 40.4)))),
Row(1, Row(1.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 40.4)))),
Row(2, Row(2.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 40.4)))),
Row(3, Row(4.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 40.4)))),
Row(1, Row(1.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 40.4)))),
Row(2, Row(2.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 40.4)))),
Row(3, Row(4.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 40.4))))
))
}
test("test Double with Struct and Array DOUBLE --> SHORT INT") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:double,name:string," +
"marks:array<double>>) STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('id', 10.323, 'name', 'abc', 'marks', array(20.2,30.3,501.423)))")
sql("insert into adaptive values(2,named_struct('id', 10.323, 'name', 'abc', 'marks', array(20.2,30.3,502.421)))")
sql("insert into adaptive values(3,named_struct('id', 10.323, 'name', 'abc', 'marks', array(20.2,30.3,503.422)))")
sql("insert into adaptive values(4,named_struct('id', 10.323, 'name', 'abc', 'marks', array(20.2,30.3,504.424)))")
sql("alter table adaptive compact 'major' ")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(10.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 501.423)))),
Row(2, Row(10.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 502.421)))),
Row(3, Row(10.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 503.422)))),
Row(4, Row(10.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 504.424))))
))
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:double,name:string,marks:array<double>>)" +
" " +
"STORED AS carbondata")
(0 to 3).foreach { _ =>
sql(s"load data inpath '$resourcesPath/adap_double3.csv' into table adaptive " +
"options('delimiter'=',','quotechar'='\\"','fileheader'='roll,student'," +
"'complex_delimiter_level_1'='$','complex_delimiter_level_2'=':')")
}
sql("alter table adaptive compact 'major' ")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(1.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 500.423)))),
Row(2, Row(2.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 500.423)))),
Row(3, Row(50.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 500.423)))),
Row(1, Row(1.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 500.423)))),
Row(2, Row(2.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 500.423)))),
Row(3, Row(50.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 500.423)))),
Row(1, Row(1.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 500.423)))),
Row(2, Row(2.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 500.423)))),
Row(3, Row(50.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 500.423)))),
Row(1, Row(1.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 500.423)))),
Row(2, Row(2.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 500.423)))),
Row(3, Row(50.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 500.423))))
))
}
  test("test Double with Struct and Array DOUBLE --> INT") {
    // Integral parts up to ~90000 exceed SHORT range, so the adaptive encoder
    // is expected to fall back to an INT-backed encoding for these doubles.
    sql("Drop table if exists adaptive")
    sql(
      "create table adaptive(roll int, student struct<id:double,name:string," +
      "marks:array<double>>) STORED AS carbondata")
    sql("insert into adaptive values(1,named_struct('id', 1000.323, 'name', 'abc', 'marks', array(20.2,30.3,60000.423)))")
    sql("insert into adaptive values(2,named_struct('id', 1000.324, 'name', 'abc', 'marks', array(20.2,30.3,70000.424)))")
    sql("insert into adaptive values(3,named_struct('id', 1000.325, 'name', 'abc', 'marks', array(20.2,30.3,80000.425)))")
    sql("insert into adaptive values(4,named_struct('id', 1000.326, 'name', 'abc', 'marks', array(20.2,30.3,90000.426)))")
    // Major compaction merges the four single-row segments; values must be
    // readable unchanged afterwards.
    sql("alter table adaptive compact 'major' ")
    checkAnswer(sql("select * from adaptive"),
      Seq(Row(1, Row(1000.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 60000.423)))),
        Row(2, Row(1000.324, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 70000.424)))),
        Row(3, Row(1000.325, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 80000.425)))),
        Row(4, Row(1000.326, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 90000.426))))
      ))
    // Same scenario driven by four identical CSV loads instead of inserts;
    // every loaded batch must survive the major compaction.
    sql("Drop table if exists adaptive")
    sql(
      "create table adaptive(roll int, student struct<id:double,name:string,marks:array<double>>)" +
      " " +
      "STORED AS carbondata")
    (0 to 3).foreach { _ =>
      sql(s"load data inpath '$resourcesPath/adap_double4.csv' into table adaptive " +
        "options('delimiter'=',','quotechar'='\"','fileheader'='roll,student'," +
        "'complex_delimiter_level_1'='$','complex_delimiter_level_2'=':')")
    }
    sql("alter table adaptive compact 'major' ")
    checkAnswer(sql("select * from adaptive"),
      Seq(Row(1, Row(1.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 50000.423)))),
        Row(2, Row(2.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 50000.423)))),
        Row(3, Row(50000.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 50000.423)))),
        Row(1, Row(1.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 50000.423)))),
        Row(2, Row(2.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 50000.423)))),
        Row(3, Row(50000.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 50000.423)))),
        Row(1, Row(1.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 50000.423)))),
        Row(2, Row(2.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 50000.423)))),
        Row(3, Row(50000.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 50000.423)))),
        Row(1, Row(1.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 50000.423)))),
        Row(2, Row(2.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 50000.423)))),
        Row(3, Row(50000.323, "abc", mutable.WrappedArray.make(Array(20.2, 30.3, 50000.423))))
      ))
  }
  test("test Double with Struct and Array DOUBLE --> DOUBLE") {
    // Values use the full double mantissa precision (close to Double.MaxValue's
    // digit count), so no narrower adaptive encoding can apply; the raw DOUBLE
    // encoding must preserve every digit across a major compaction.
    sql("Drop table if exists adaptive")
    sql(
      "create table adaptive(roll int, student struct<id:double,name:string," +
      "marks:array<double>>) STORED AS carbondata")
    sql("insert into adaptive values(1,named_struct('id', 1.797693134862315, 'name', 'abc', 'marks', array(2.2,30.3,1.797693134862315)))")
    sql("insert into adaptive values(2,named_struct('id', 1.797693134862316, 'name', 'abc', 'marks', array(2.2,30.3,1.797693134862316)))")
    sql("insert into adaptive values(3,named_struct('id', 1.797693134862317, 'name', 'abc', 'marks', array(2.2,30.3,1.797693134862317)))")
    sql("insert into adaptive values(4,named_struct('id', 1.797693134862318, 'name', 'abc', 'marks', array(2.2,30.3,1.797693134862318)))")
    sql("alter table adaptive compact 'major' ")
    checkAnswer(sql("select * from adaptive"),
      Seq(Row(1,
        Row(1.797693134862315,
          "abc",
          mutable.WrappedArray.make(Array(2.2, 30.3, 1.797693134862315)))),
        Row(2,
          Row(1.797693134862316,
            "abc",
            mutable.WrappedArray.make(Array(2.2, 30.3, 1.797693134862316)))),
        Row(3,
          Row(1.797693134862317,
            "abc",
            mutable.WrappedArray.make(Array(2.2, 30.3, 1.797693134862317)))),
        Row(4,
          Row(1.797693134862318,
            "abc",
            mutable.WrappedArray.make(Array(2.2, 30.3, 1.797693134862318))))
      ))
  }
test("test Decimal with Struct") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:decimal(3,2),name:string>)" +
"STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('id', 3.2, 'name', 'abc'))")
sql("select * from adaptive").collect()
}
test("test Decimal with Array") {
sql("Drop table if exists adaptive")
sql("create table adaptive(roll int, student struct<name:string," +
"marks:array<decimal>>) STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('name', 'abc', 'marks', array(20.2,30.3,40.4)))")
sql("select * from adaptive").collect()
}
  test("test Timestamp with Struct") {
    sql("Drop table if exists adaptive")
    // NOTE(review): the timestamp format is set to "yyyy/MM/dd" but the SQL
    // inserts below use "yyyy-MM-dd HH:mm:ss" values — presumably the property
    // only affects file loads, not SQL-literal parsing; confirm.
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
    sql(
      "create table adaptive(roll int, student struct<id:timestamp,name:string>) " +
      "STORED AS carbondata")
    sql("insert into adaptive values(1,named_struct('id', '2017-01-01 00:00:00', 'name', 'abc'))")
    sql("insert into adaptive values(2,named_struct('id', '2017-01-02 00:00:00', 'name', 'abc'))")
    sql("insert into adaptive values(3,named_struct('id', '2017-01-03 00:00:00', 'name', 'abc'))")
    sql("insert into adaptive values(4,named_struct('id', '2017-01-04 00:00:00', 'name', 'abc'))")
    // Timestamps inside a struct must survive the merge of the four segments.
    sql("alter table adaptive compact 'major' ")
    checkAnswer(sql("select * from adaptive"),
      Seq(Row(1, Row(Timestamp.valueOf("2017-01-01 00:00:00.0"), "abc")),
        Row(2, Row(Timestamp.valueOf("2017-01-02 00:00:00.0"), "abc")),
        Row(3, Row(Timestamp.valueOf("2017-01-03 00:00:00.0"), "abc")),
        Row(4, Row(Timestamp.valueOf("2017-01-04 00:00:00.0"), "abc"))
      ))
  }
  test("test Timestamp with Array") {
    sql("Drop table if exists adaptive")
    // See note in "test Timestamp with Struct": format property vs. SQL
    // literal format differ; presumably intentional — confirm.
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
    sql(
      "create table adaptive(roll int, student struct<name:string," +
      "marks:array<timestamp>>) STORED AS carbondata")
    sql("insert into adaptive values(1,named_struct('name', 'abc1', 'marks', array('2017-01-01 00:00:00.0','2018-01-01 00:00:00.0')))")
    sql("insert into adaptive values(2,named_struct('name', 'abc2', 'marks', array('2017-01-02 00:00:00.0','2018-01-03 00:00:00.0')))")
    sql("insert into adaptive values(3,named_struct('name', 'abc3', 'marks', array('2017-01-04 00:00:00.0','2018-01-05 00:00:00.0')))")
    sql("insert into adaptive values(4,named_struct('name', 'abc4', 'marks', array('2017-01-06 00:00:00.0','2018-01-07 00:00:00.0')))")
    // Arrays of timestamps must come back intact after major compaction.
    sql("alter table adaptive compact 'major' ")
    checkAnswer(sql("select * from adaptive"),
      Seq(Row(1,
        Row("abc1",
          mutable.WrappedArray
            .make(Array(Timestamp.valueOf("2017-01-01 00:00:00.0"),
              Timestamp.valueOf("2018-01-01 00:00:00.0"))))),
        Row(2,
          Row("abc2",
            mutable.WrappedArray
              .make(Array(Timestamp.valueOf("2017-01-02 00:00:00.0"),
                Timestamp.valueOf("2018-01-03 00:00:00.0"))))),
        Row(3,
          Row("abc3",
            mutable.WrappedArray
              .make(Array(Timestamp.valueOf("2017-01-04 00:00:00.0"),
                Timestamp.valueOf("2018-01-05 00:00:00.0"))))),
        Row(4,
          Row("abc4",
            mutable.WrappedArray
              .make(Array(Timestamp.valueOf("2017-01-06 00:00:00.0"),
                Timestamp.valueOf("2018-01-07 00:00:00.0")))))
      ))
  }
test("test DATE with Array") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<name:string," +
"marks:array<date>>) STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('name', 'abc', 'marks', array('2017-01-01')))")
sql("select * from adaptive").collect()
}
test("test LONG with Array and Struct Encoding LONG --> BYTE") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:long,name:string,marks:array<long>>) " +
"STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('id', 11111, 'name', 'abc', 'marks', array(20,30,40)))")
sql("insert into adaptive values(2,named_struct('id', 11111, 'name', 'abc', 'marks', array(55,65,75)))")
sql("insert into adaptive values(3,named_struct('id', 11111, 'name', 'abc', 'marks', array(88,98,8)))")
sql("insert into adaptive values(4,named_struct('id', 11111, 'name', 'abc', 'marks', array(99,9,19)))")
sql("alter table adaptive compact 'major' ")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(11111, "abc", mutable.WrappedArray.make(Array(20, 30, 40)))),
Row(2, Row(11111, "abc", mutable.WrappedArray.make(Array(55, 65, 75)))),
Row(3, Row(11111, "abc", mutable.WrappedArray.make(Array(88, 98, 8)))),
Row(4, Row(11111, "abc", mutable.WrappedArray.make(Array(99, 9, 19))))
))
}
test("test LONG with Array and Struct Encoding LONG --> SHORT") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:long,name:string,marks:array<long>>) " +
"STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('id', 11111, 'name', 'abc', 'marks', array(200,300,400)))")
sql("insert into adaptive values(2,named_struct('id', 11111, 'name', 'abc', 'marks', array(201,301,401)))")
sql("insert into adaptive values(3,named_struct('id', 11111, 'name', 'abc', 'marks', array(202,302,402)))")
sql("insert into adaptive values(4,named_struct('id', 11111, 'name', 'abc', 'marks', array(203,303,403)))")
sql("alter table adaptive compact 'major' ")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(11111, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(2, Row(11111, "abc", mutable.WrappedArray.make(Array(201, 301, 401)))),
Row(3, Row(11111, "abc", mutable.WrappedArray.make(Array(202, 302, 402)))),
Row(4, Row(11111, "abc", mutable.WrappedArray.make(Array(203, 303, 403))))
))
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:LONG,name:string,marks:array<LONG>>) " +
"STORED AS carbondata")
(0 to 3).foreach { i =>
sql(s"load data inpath '$resourcesPath/adap_int1.csv' into table adaptive " +
"options('delimiter'=',','quotechar'='\\"','fileheader'='roll,student'," +
"'complex_delimiter_level_1'='$','complex_delimiter_level_2'=':')")
}
sql("alter table adaptive compact 'major' ")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(2, Row(700, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(3, Row(800, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(2, Row(700, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(3, Row(800, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(2, Row(700, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(3, Row(800, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(2, Row(700, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(3, Row(800, "abc", mutable.WrappedArray.make(Array(200, 300, 400))))
))
}
  test("test LONG with struct and array, Encoding LONG-->SHORT INT") {
    // Values in the millions fit in a 3-byte ("short int") adaptive encoding;
    // first exercise the CSV-load path, then the SQL-insert path.
    sql("Drop table if exists adaptive")
    sql(
      "create table adaptive(roll int, student struct<id:LONG,name:string,marks:array<LONG>>) " +
      "STORED AS carbondata")
    sql(s"load data inpath '$resourcesPath/adap_int2.csv' into table adaptive " +
      "options('delimiter'=',','quotechar'='\"','fileheader'='roll,student'," +
      "'complex_delimiter_level_1'='$','complex_delimiter_level_2'=':')")
    checkAnswer(sql("select * from adaptive"),
      Seq(Row(1, Row(50000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
        Row(2, Row(70000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
        Row(3, Row(100000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000))))))
    // Same data written through SQL inserts instead of a CSV load.
    sql("Drop table if exists adaptive")
    sql(
      "create table adaptive(roll int, student struct<id:LONG,name:string,marks:array<LONG>>) " +
      "STORED AS carbondata")
    sql("insert into adaptive values(1,named_struct('id', 50000, 'name', 'abc', 'marks', array(2000000,3000000,4000000)))")
    sql("insert into adaptive values(2,named_struct('id', 70000, 'name', 'abc', 'marks', array(2000000,3000000,4000000)))")
    sql("insert into adaptive values(3,named_struct('id', 100000, 'name', 'abc', 'marks', array(2000000,3000000,4000000)))")
    checkAnswer(sql("select * from adaptive"),
      Seq(Row(1, Row(50000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
        Row(2, Row(70000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000)))),
        Row(3, Row(100000, "abc", mutable.WrappedArray.make(Array(2000000, 3000000, 4000000))))))
  }
  test("test LONG with struct and array, Encoding LONG-->INT") {
    // Values in the tens of millions exceed the 3-byte range but fit in INT;
    // first exercise the CSV-load path, then the SQL-insert path.
    sql("Drop table if exists adaptive")
    sql(
      "create table adaptive(roll int, student struct<id:LONG,name:string,marks:array<LONG>>) " +
      "STORED AS carbondata")
    sql(s"load data inpath '$resourcesPath/adap_int3.csv' into table adaptive " +
      "options('delimiter'=',','quotechar'='\"','fileheader'='roll,student'," +
      "'complex_delimiter_level_1'='$','complex_delimiter_level_2'=':')")
    checkAnswer(sql("select * from adaptive"),
      Seq(Row(1, Row(500000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
        Row(2, Row(7000000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
        Row(3, Row(10000000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000))))))
    // Insert-driven variant; note the second row uses 700000 here (vs.
    // 7000000 in the CSV fixture above).
    sql("Drop table if exists adaptive")
    sql(
      "create table adaptive(roll int, student struct<id:LONG,name:string,marks:array<LONG>>) " +
      "STORED AS carbondata")
    sql("insert into adaptive values(1,named_struct('id', 500000, 'name', 'abc', 'marks', array(200,300,52000000)))")
    sql("insert into adaptive values(2,named_struct('id', 700000, 'name', 'abc', 'marks', array(200,300,52000000)))")
    sql("insert into adaptive values(3,named_struct('id', 10000000, 'name', 'abc', 'marks', array(200,300,52000000)))")
    checkAnswer(sql("select * from adaptive"),
      Seq(Row(1, Row(500000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
        Row(2, Row(700000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000)))),
        Row(3, Row(10000000, "abc", mutable.WrappedArray.make(Array(200, 300, 52000000))))))
  }
test("test LONG with struct and array, Encoding LONG-->LONG") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:LONG,name:string,marks:array<LONG>>) " +
"STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('id', 500000, 'name', 'abc', 'marks', array(200,300,52000000000)))")
sql("insert into adaptive values(2,named_struct('id', 700000, 'name', 'abc', 'marks', array(200,300,52000000000)))")
sql("insert into adaptive values(3,named_struct('id', 10000000, 'name', 'abc', 'marks', array(200,300,52000000000)))")
sql("select * from adaptive").collect()
}
test("test SHORT with Array and Struct Encoding SHORT -->BYTE") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:short,name:string,marks:array<short>>) " +
"STORED AS carbondata")
sql("insert into adaptive values(" +
"1,named_struct('id', 11, 'name', 'abc', 'marks', array(20,30,40)))")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(11, "abc", mutable.WrappedArray.make(Array(20, 30, 40))))))
}
test("test SHORT with Array and Struct Encoding SHORT --> SHORT") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:SHORT,name:string,marks:array<SHORT>>) " +
"STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('id', 11111, 'name', 'abc', 'marks', array(200,300,400)))")
sql("insert into adaptive values(1,named_struct('id', 11111, 'name', 'abc', 'marks', array(200,300,401)))")
sql("insert into adaptive values(1,named_struct('id', 11111, 'name', 'abc', 'marks', array(200,300,402)))")
sql("insert into adaptive values(1,named_struct('id', 11111, 'name', 'abc', 'marks', array(200,300,403)))")
sql("alter table adaptive compact 'major' ")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(11111, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(1, Row(11111, "abc", mutable.WrappedArray.make(Array(200, 300, 401)))),
Row(1, Row(11111, "abc", mutable.WrappedArray.make(Array(200, 300, 402)))),
Row(1, Row(11111, "abc", mutable.WrappedArray.make(Array(200, 300, 403))))
))
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:SHORT,name:string,marks:array<SHORT>>) " +
"STORED AS carbondata")
(0 to 3).foreach { i =>
sql(s"load data inpath '$resourcesPath/adap_int1.csv' into table adaptive " +
"options('delimiter'=',','quotechar'='\\"','fileheader'='roll,student'," +
"'complex_delimiter_level_1'='$','complex_delimiter_level_2'=':')")
}
sql("alter table adaptive compact 'major' ")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(2, Row(700, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(3, Row(800, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(2, Row(700, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(3, Row(800, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(2, Row(700, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(3, Row(800, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(1, Row(500, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(2, Row(700, "abc", mutable.WrappedArray.make(Array(200, 300, 400)))),
Row(3, Row(800, "abc", mutable.WrappedArray.make(Array(200, 300, 400))))
))
}
test("test Boolean with Struct and Array") {
sql("Drop table if exists adaptive")
sql(
"create table adaptive(roll int, student struct<id:boolean,name:string," +
"marks:array<boolean>>) " +
"STORED AS carbondata")
sql("insert into adaptive values(1,named_struct('id', true, 'name', 'abc', 'marks', array(false,true,false)))")
sql("insert into adaptive values(1,named_struct('id', true, 'name', 'abc', 'marks', array(false,true,true)))")
sql("insert into adaptive values(1,named_struct('id', true, 'name', 'abc', 'marks', array(false,true,true)))")
sql("insert into adaptive values(1,named_struct('id', true, 'name', 'abc', 'marks', array(false,true,false)))")
sql("alter table adaptive compact 'major' ")
checkAnswer(sql("select * from adaptive"),
Seq(Row(1, Row(true, "abc", mutable.WrappedArray.make(Array(false, true, false)))),
Row(1, Row(true, "abc", mutable.WrappedArray.make(Array(false, true, true)))),
Row(1, Row(true, "abc", mutable.WrappedArray.make(Array(false, true, true)))),
Row(1, Row(true, "abc", mutable.WrappedArray.make(Array(false, true, false))))
))
}
  test("complex type compaction") {
    // End-to-end check: a table with deeply nested types (struct, array of
    // struct, struct containing an array) loaded twice, then minor-compacted.
    sql("drop table if exists complexcarbontable")
    sql("create table complexcarbontable(deviceInformationId int, channelsId string," +
        "ROMSize string, purchasedate string, mobile struct<imei:string, imsi:string>," +
        "MAC array<string>, locationinfo array<struct<ActiveAreaId:int, ActiveCountry:string, " +
        "ActiveProvince:string, Activecity:string, ActiveDistrict:string, ActiveStreet:string>>," +
        "proddate struct<productionDate:string,activeDeactivedate:array<string>>, gamePointId " +
        "double,contractNumber double) " +
        "STORED AS carbondata"
    )
    sql(
      s"LOAD DATA local inpath '$resourcesPath/complexdata.csv' INTO table " +
      "complexcarbontable " +
      "OPTIONS('DELIMITER'=',', 'QUOTECHAR'='\"', 'FILEHEADER'='deviceInformationId,channelsId," +
      "ROMSize,purchasedate,mobile,MAC,locationinfo,proddate,gamePointId,contractNumber'," +
      "'COMPLEX_DELIMITER_LEVEL_1'='$', 'COMPLEX_DELIMITER_LEVEL_2'=':')"
    )
    // Second identical load creates the second segment that minor compaction
    // will merge with the first.
    sql(
      s"LOAD DATA local inpath '$resourcesPath/complexdata.csv' INTO table " +
      "complexcarbontable " +
      "OPTIONS('DELIMITER'=',', 'QUOTECHAR'='\"', 'FILEHEADER'='deviceInformationId,channelsId," +
      "ROMSize,purchasedate,mobile,MAC,locationinfo,proddate,gamePointId,contractNumber'," +
      "'COMPLEX_DELIMITER_LEVEL_1'='$', 'COMPLEX_DELIMITER_LEVEL_2'=':')"
    )
    sql("alter table complexcarbontable compact 'minor'")
    // NOTE(review): this collect() appears redundant with the checkAnswer
    // immediately below — presumably a warm-up/smoke run; confirm.
    sql(
      "select locationinfo,proddate from complexcarbontable where deviceInformationId=1 limit 1")
      .collect()
    checkAnswer(sql(
      "select locationinfo,proddate from complexcarbontable where deviceInformationId=1 limit 1"),
      Seq(Row(mutable
        .WrappedArray
        .make(Array(Row(7, "Chinese", "Hubei Province", "yichang", "yichang", "yichang"),
          Row(7, "India", "New Delhi", "delhi", "delhi", "delhi"))),
        Row("29-11-2015", mutable
          .WrappedArray.make(Array("29-11-2015", "29-11-2015"))))))
    sql("drop table if exists complexcarbontable")
  }
  test("test minor compaction with all complex types") {
    // Covers struct, array and map columns together under minor compaction.
    sql("Drop table if exists adaptive")
    sql(
      "create table adaptive(roll int, student struct<id:SHORT,name:string,marks:array<SHORT>>, " +
      "mapField map<int, string>) " +
      "STORED AS carbondata")
    sql("insert into adaptive values(1,named_struct('id', 11111, 'name', 'abc', 'marks', array(200,300,400)),map(1, 'Nalla', 2, 'Singh', 3, 'Gupta', 4, 'Kumar'))")
    sql("insert into adaptive values(1,named_struct('id', 11111, 'name', 'abc', 'marks', array(200,300,401)),map(11, 'Nalla', 12, 'Singh', 13, 'Gupta', 14, 'Kumar'))")
    sql("insert into adaptive values(1,named_struct('id', 11111, 'name', 'abc', 'marks', array(200,300,402)),map(21, 'Nalla', 22, 'Singh', 23, 'Gupta', 24, 'Kumar'))")
    sql("insert into adaptive values(1,named_struct('id', 11111, 'name', 'abc', 'marks', array(200,300,403)),map(31, 'Nalla', 32, 'Singh', 33, 'Gupta', 34, 'Kumar'))")
    sql("alter table adaptive compact 'minor' ")
    // Each row's map keys differ, so key/value pairing must survive the merge.
    checkAnswer(sql("select * from adaptive"),
      Seq(Row(1, Row(11111, "abc", mutable.WrappedArray.make(Array(200, 300, 400))),
        Map(1 -> "Nalla", 2 -> "Singh", 3 -> "Gupta", 4 -> "Kumar")),
        Row(1, Row(11111, "abc", mutable.WrappedArray.make(Array(200, 300, 401))),
          Map(11 -> "Nalla", 12 -> "Singh", 13 -> "Gupta", 14 -> "Kumar")),
        Row(1, Row(11111, "abc", mutable.WrappedArray.make(Array(200, 300, 402))),
          Map(21 -> "Nalla", 22 -> "Singh", 23 -> "Gupta", 24 -> "Kumar")),
        Row(1, Row(11111, "abc", mutable.WrappedArray.make(Array(200, 300, 403))),
          Map(31 -> "Nalla", 32 -> "Singh", 33 -> "Gupta", 34 -> "Kumar"))
      ))
    sql("Drop table if exists adaptive")
  }
  test("Test major compaction with dictionary include for struct of array type") {
    sql("DROP TABLE IF EXISTS compactComplex")
    sql("CREATE TABLE compactComplex(CUST_ID string,YEAR int, MONTH int, AGE int, " +
      "GENDER string,EDUCATED string,IS_MARRIED string," +
      "STRUCT_OF_ARRAY struct<ID:int,CHECK_DATE:string,SNo:array<int>,sal1:array<double>," +
      "state:array<string>,date1:array<string>>,CARD_COUNT int,DEBIT_COUNT int," +
      "CREDIT_COUNT int, DEPOSIT double, HQ_DEPOSIT double) STORED AS carbondata")
    // Three loads of the same 21-row fixture create three segments.
    (0 to 2).foreach { _ =>
      // NOTE(review): QUOTECHAR here is written as '\' (escaped quote), unlike
      // the other loads in this suite which use '\"' — confirm intended.
      sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/structofarray.csv' INTO TABLE compactComplex " +
        "OPTIONS('DELIMITER'=',','QUOTECHAR'='\'," +
        "'FILEHEADER'='CUST_ID,YEAR,MONTH,AGE, GENDER,EDUCATED,IS_MARRIED,STRUCT_OF_ARRAY," +
        "CARD_COUNT,DEBIT_COUNT,CREDIT_COUNT, DEPOSIT,HQ_DEPOSIT'," +
        "'COMPLEX_DELIMITER_LEVEL_1'='$', 'COMPLEX_DELIMITER_LEVEL_2'='&')")
    }
    sql("ALTER TABLE compactComplex COMPACT 'major'")
    // 3 loads x 21 rows = 63 rows expected after the major compaction.
    checkAnswer(sql("Select count(*) from compactComplex"), Row(63))
  }
test("Test Compaction for complex types with table restructured") {
sql("drop table if exists compactComplex")
sql(
"""
| create table compactComplex (
| name string,
| age int,
| number string,
| structfield struct<a:array<int> ,b:int>
| )
| stored as carbondata
""".stripMargin)
sql("INSERT into compactComplex values('man',25,'222',named_struct('a', array(1000,2000), 'b', 1))")
sql("INSERT into compactComplex values('can',24,'333',named_struct('a', array(1000,2000), 'b', 2))")
sql("INSERT into compactComplex values('dan',25,'222',named_struct('a', array(1000,2000), 'b', 3))")
sql("ALTER TABLE compactComplex drop columns(age)")
sql("ALTER TABLE compactComplex COMPACT 'major'")
checkAnswer(sql("SELECT * FROM compactComplex"),
Seq(Row("man", "222", Row(mutable.WrappedArray.make(Array(1000, 2000)), 1)),
Row("can", "333", Row(mutable.WrappedArray.make(Array(1000, 2000)), 2)),
Row("dan", "222", Row(mutable.WrappedArray.make(Array(1000, 2000)), 3))
))
}
// scalastyle:on lineLength
}
| zzcclp/carbondata | integration/spark/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestCompactionComplexType.scala | Scala | apache-2.0 | 62,965 |
package com.seanshubin.learn.spark.core
import org.apache.spark.rdd.RDD
/**
  * Test stub that serves canned lines instead of reading from storage:
  * each path pattern maps to a fixed sequence of lines, which are
  * parallelized into an RDD on demand.
  */
class ResilientDistributedDatasetLoaderStub(results: Map[String, Seq[String]]) extends ResilientDistributedDatasetLoader {
  val sparkContext = SparkContextForIntegrationTests.sparkContext

  /** Looks up the canned lines for `pathPattern` and parallelizes them. */
  override def loadFromPathPattern(pathPattern: String): RDD[String] =
    sparkContext.parallelize(results(pathPattern))
}
| SeanShubin/learn-spark | core/src/test/scala/com/seanshubin/learn/spark/core/ResilientDistributedDatasetLoaderStub.scala | Scala | unlicense | 442 |
/*
* Copyright (c) 2016. Fengguo (Hugo) Wei and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Detailed contributors are listed in the CONTRIBUTOR.md
*/
package org.argus.cit.intellij.jawa.lang.psi.stubs.elements.wrappers
import java.io.IOException
import com.intellij.psi.PsiElement
import com.intellij.psi.stubs.{IStubElementType, StubElement, StubInputStream}
import org.argus.cit.intellij.jawa.lang.JawaFileType
/**
* @author <a href="mailto:fgwei521@gmail.com">Fengguo Wei</a>
*/
/**
  * Base stub element type for the Jawa language: wires the Jawa language into
  * IntelliJ's [[IStubElementType]] and splits creation/deserialization into
  * template methods that subclasses implement.
  */
abstract class IStubElementTypeWrapper[StubT <: StubElement[PsiT], PsiT <: PsiElement](debugName: String)
  extends IStubElementType[StubT, PsiT](debugName, JawaFileType.JAWA_LANGUAGE) {

  /** Delegates stub creation to the implementation hook [[createStubImpl]]. */
  def createStub(psi: PsiT, parentStub: StubElement[_ <: PsiElement]): StubT =
    createStubImpl(psi, parentStub)

  /** Implemented by subclasses to build the concrete stub for `psi`. */
  def createStubImpl[ParentPsi <: PsiElement](psi: PsiT, parentStub: StubElement[ParentPsi]): StubT

  /** Delegates deserialization to the implementation hook [[deserializeImpl]]. */
  @throws[IOException]
  def deserialize(dataStream: StubInputStream, parentStub: StubElement[_ <: PsiElement]): StubT =
    deserializeImpl(dataStream, parentStub)

  /** Implemented by subclasses to read a stub back from `dataStream`. */
  def deserializeImpl(dataStream: StubInputStream, parentStub: Any): StubT
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan
import org.apache.flink.api.scala._
import org.apache.flink.table.api.scala._
import org.apache.flink.table.utils.TableTestBase
import org.apache.flink.table.utils.TableTestUtil._
import org.junit.Test
/**
 * Plan tests verifying that correlated scalar subqueries are decorrelated
 * into joins against aggregates, by comparing the optimized DataSet plan
 * string against a hand-built expectation tree.
 */
class QueryDecorrelationTest extends TableTestBase {

  @Test
  def testCorrelationScalarAggAndFilter(): Unit = {
    val util = batchTestUtil()
    val table = util.addTable[(Int, String, String, Int, Int)](
      "emp",
      'empno,
      'ename,
      'job,
      'salary,
      'deptno)
    val table1 = util.addTable[(Int, String)]("dept", 'deptno, 'name)
    // Correlated scalar aggregate in the WHERE clause, plus filters on both
    // join inputs; the optimizer should push the filters below the join and
    // rewrite the subquery as a grouped aggregate joined on empno.
    val sql = "SELECT e1.empno\n" +
      "FROM emp e1, dept d1 where e1.deptno = d1.deptno\n" +
      "and e1.deptno < 10 and d1.deptno < 15\n" +
      "and e1.salary > (select avg(salary) from emp e2 where e1.empno = e2.empno)"
    // Expected plan: (emp filtered < 10) join (dept filtered < 15), then
    // joined with AVG(salary) grouped by empno from the decorrelated subquery.
    val expectedQuery = unaryNode(
      "DataSetCalc",
      binaryNode(
        "DataSetJoin",
        unaryNode(
          "DataSetCalc",
          binaryNode(
            "DataSetJoin",
            unaryNode(
              "DataSetCalc",
              batchTableNode(table),
              term("select", "empno", "salary", "deptno"),
              term("where", "<(deptno, 10)")
            ),
            unaryNode(
              "DataSetCalc",
              batchTableNode(table1),
              term("select", "deptno"),
              term("where", "<(deptno, 15)")
            ),
            term("where", "=(deptno, deptno0)"),
            term("join", "empno", "salary", "deptno", "deptno0"),
            term("joinType", "InnerJoin")
          ),
          term("select", "empno", "salary")
        ),
        unaryNode(
          "DataSetAggregate",
          unaryNode(
            "DataSetCalc",
            batchTableNode(table),
            term("select", "empno", "salary"),
            term("where", "IS NOT NULL(empno)")
          ),
          term("groupBy", "empno"),
          term("select", "empno", "AVG(salary) AS EXPR$0")
        ),
        term("where", "AND(=(empno, empno0), >(salary, EXPR$0))"),
        term("join", "empno", "salary", "empno0", "EXPR$0"),
        term("joinType", "InnerJoin")
      ),
      term("select", "empno")
    )
    util.verifySql(sql, expectedQuery)
  }

  @Test
  def testDecorrelateWithMultiAggregate(): Unit = {
    val util = batchTestUtil()
    val table = util.addTable[(Int, String, String, Int, Int)](
      "emp",
      'empno,
      'ename,
      'job,
      'salary,
      'deptno)
    val table1 = util.addTable[(Int, String)]("dept", 'deptno, 'name)
    // Correlated subquery referencing the outer dept table; the outer query
    // itself aggregates (SUM), so decorrelation has to nest two aggregates.
    val sql = "select sum(e1.empno) from emp e1, dept d1 " +
      "where e1.deptno = d1.deptno " +
      "and e1.salary > (" +
      "    select avg(e2.salary) from emp e2 where e2.deptno = d1.deptno" +
      ")"
    // Expected plan: emp join dept, then joined with AVG(salary) grouped by
    // deptno (the decorrelated subquery), topped by the SUM aggregate.
    val expectedQuery = unaryNode(
      "DataSetAggregate",
      unaryNode(
        "DataSetCalc",
        binaryNode(
          "DataSetJoin",
          unaryNode(
            "DataSetCalc",
            binaryNode(
              "DataSetJoin",
              unaryNode(
                "DataSetCalc",
                batchTableNode(table),
                term("select", "empno", "salary", "deptno")
              ),
              unaryNode(
                "DataSetCalc",
                batchTableNode(table1),
                term("select", "deptno")
              ),
              term("where", "=(deptno, deptno0)"),
              term("join", "empno", "salary", "deptno", "deptno0"),
              term("joinType", "InnerJoin")
            ),
            term("select", "empno", "salary", "deptno0")
          ),
          unaryNode(
            "DataSetAggregate",
            unaryNode(
              "DataSetCalc",
              batchTableNode(table),
              term("select", "deptno", "salary"),
              term("where", "IS NOT NULL(deptno)")
            ),
            term("groupBy", "deptno"),
            term("select", "deptno", "AVG(salary) AS EXPR$0")
          ),
          term("where", "AND(=(deptno0, deptno), >(salary, EXPR$0))"),
          term("join", "empno", "salary", "deptno0", "deptno", "EXPR$0"),
          term("joinType", "InnerJoin")
        ),
        term("select", "empno")
      ),
      term("select", "SUM(empno) AS EXPR$0")
    )
    util.verifySql(sql, expectedQuery)
  }
}
| hequn8128/flink | flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/plan/QueryDecorrelationTest.scala | Scala | apache-2.0 | 5,121 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.connector.catalog
import java.net.URI
import scala.collection.JavaConverters._
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.catalyst.analysis.{EmptyFunctionRegistry, FakeV2SessionCatalog, NoSuchNamespaceException}
import org.apache.spark.sql.catalyst.catalog.{CatalogDatabase, InMemoryCatalog, SessionCatalog}
import org.apache.spark.sql.catalyst.plans.SQLHelper
import org.apache.spark.sql.connector.InMemoryTableCatalog
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.util.CaseInsensitiveStringMap
/**
 * Tests for `CatalogManager`'s tracking of the current catalog and current
 * namespace: how they react to the default-catalog SQL conf, to explicit
 * `setCurrentCatalog` calls, and to namespace switches against both the
 * v1 session catalog and v2 catalog plugins.
 */
class CatalogManagerSuite extends SparkFunSuite with SQLHelper {

  /**
   * Builds a v1 `SessionCatalog` backed by an in-memory catalog that is
   * pre-populated with the "default" database.
   */
  private def createSessionCatalog(): SessionCatalog = {
    val catalog = new InMemoryCatalog()
    catalog.createDatabase(
      CatalogDatabase(SessionCatalog.DEFAULT_DATABASE, "", new URI("fake"), Map.empty),
      ignoreIfExists = true)
    new SessionCatalog(catalog, EmptyFunctionRegistry)
  }

  test("CatalogManager should reflect the changes of default catalog") {
    val catalogManager = new CatalogManager(FakeV2SessionCatalog, createSessionCatalog())
    // With no conf set, the session catalog and its "default" db are current.
    assert(catalogManager.currentCatalog.name() == CatalogManager.SESSION_CATALOG_NAME)
    assert(catalogManager.currentNamespace.sameElements(Array("default")))
    withSQLConf("spark.sql.catalog.dummy" -> classOf[DummyCatalog].getName,
      SQLConf.DEFAULT_CATALOG.key -> "dummy") {
      // The current catalog should be changed if the default catalog is set.
      assert(catalogManager.currentCatalog.name() == "dummy")
      // The namespace follows DummyCatalog.defaultNamespace(), i.e. ["a", "b"].
      assert(catalogManager.currentNamespace.sameElements(Array("a", "b")))
    }
  }

  test("CatalogManager should keep the current catalog once set") {
    val catalogManager = new CatalogManager(FakeV2SessionCatalog, createSessionCatalog())
    assert(catalogManager.currentCatalog.name() == CatalogManager.SESSION_CATALOG_NAME)
    withSQLConf("spark.sql.catalog.dummy" -> classOf[DummyCatalog].getName) {
      catalogManager.setCurrentCatalog("dummy")
      assert(catalogManager.currentCatalog.name() == "dummy")
      assert(catalogManager.currentNamespace.sameElements(Array("a", "b")))
      withSQLConf("spark.sql.catalog.dummy2" -> classOf[DummyCatalog].getName,
        SQLConf.DEFAULT_CATALOG.key -> "dummy2") {
        // The current catalog shouldn't be changed if it's set before.
        assert(catalogManager.currentCatalog.name() == "dummy")
      }
    }
  }

  test("current namespace should be updated when switching current catalog") {
    val catalogManager = new CatalogManager(FakeV2SessionCatalog, createSessionCatalog())
    withSQLConf("spark.sql.catalog.dummy" -> classOf[DummyCatalog].getName) {
      catalogManager.setCurrentCatalog("dummy")
      assert(catalogManager.currentNamespace.sameElements(Array("a", "b")))
      catalogManager.setCurrentNamespace(Array("a"))
      assert(catalogManager.currentNamespace.sameElements(Array("a")))
      // If we set current catalog to the same catalog, current namespace should stay the same.
      catalogManager.setCurrentCatalog("dummy")
      assert(catalogManager.currentNamespace.sameElements(Array("a")))
      // If we switch to a different catalog, current namespace should be reset.
      withSQLConf("spark.sql.catalog.dummy2" -> classOf[DummyCatalog].getName) {
        catalogManager.setCurrentCatalog("dummy2")
        assert(catalogManager.currentNamespace.sameElements(Array("a", "b")))
      }
    }
  }

  test("set current namespace") {
    val v1SessionCatalog = createSessionCatalog()
    // Add a second database so the namespace switch below has a target.
    v1SessionCatalog.createDatabase(
      CatalogDatabase(
        "test", "", v1SessionCatalog.getDefaultDBPath("test"), Map.empty),
      ignoreIfExists = false)
    val catalogManager = new CatalogManager(FakeV2SessionCatalog, v1SessionCatalog)
    // If the current catalog is session catalog, setting current namespace actually sets
    // `SessionCatalog.currentDb`.
    catalogManager.setCurrentNamespace(Array("test"))
    assert(catalogManager.currentNamespace.sameElements(Array("test")))
    assert(v1SessionCatalog.getCurrentDatabase == "test")
    // A multi-part namespace is rejected for the v1 session catalog.
    intercept[NoSuchNamespaceException] {
      catalogManager.setCurrentNamespace(Array("ns1", "ns2"))
    }
    // when switching current catalog, `SessionCatalog.currentDb` should be reset.
    withSQLConf("spark.sql.catalog.dummy" -> classOf[DummyCatalog].getName) {
      catalogManager.setCurrentCatalog("dummy")
      assert(v1SessionCatalog.getCurrentDatabase == "default")
      // DummyCatalog does not validate namespaces, so this succeeds and must
      // not touch the v1 catalog's current database.
      catalogManager.setCurrentNamespace(Array("test2"))
      assert(v1SessionCatalog.getCurrentDatabase == "default")
      // Check namespace existence if currentCatalog implements SupportsNamespaces.
      withSQLConf("spark.sql.catalog.testCatalog" -> classOf[InMemoryTableCatalog].getName) {
        catalogManager.setCurrentCatalog("testCatalog")
        catalogManager.currentCatalog.asInstanceOf[InMemoryTableCatalog]
          .createNamespace(Array("test3"), Map.empty[String, String].asJava)
        assert(v1SessionCatalog.getCurrentDatabase == "default")
        catalogManager.setCurrentNamespace(Array("test3"))
        assert(v1SessionCatalog.getCurrentDatabase == "default")
        // Nonexistent namespaces are rejected by a namespace-aware catalog.
        intercept[NoSuchNamespaceException] {
          catalogManager.setCurrentNamespace(Array("ns1", "ns2"))
        }
      }
    }
  }
}
/**
 * Minimal `CatalogPlugin` stub for the tests above: records the name it is
 * initialized with and always reports `["a", "b"]` as its default namespace.
 */
class DummyCatalog extends CatalogPlugin {
  // Remains null until initialize() is invoked by the CatalogManager.
  private var catalogName: String = null

  override def initialize(name: String, options: CaseInsensitiveStringMap): Unit = {
    catalogName = name
  }

  override def name(): String = catalogName

  override def defaultNamespace(): Array[String] = Array("a", "b")
}
| witgo/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/CatalogManagerSuite.scala | Scala | apache-2.0 | 6,401 |
package dpla.ingestion3.harvesters.api
import java.net.URL
import com.databricks.spark.avro._
import dpla.ingestion3.confs.i3Conf
import dpla.ingestion3.utils.HttpUtils
import org.apache.http.client.utils.URIBuilder
import org.apache.log4j.Logger
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.json4s.DefaultFormats
import org.json4s.JsonAST.JValue
import org.json4s.jackson.JsonMethods.{compact, parse, render}
import scala.util.{Failure, Success, Try}
/**
 * Harvester for the Internet Archive Scrape API.
 *
 * For each collection named in the harvest config's setlist, pages through
 * the Scrape API using its cursor, persists each page's records via
 * `saveOutRecords`, and finally loads the accumulated Avro output as a
 * DataFrame.
 */
class IaHarvester (spark: SparkSession,
                   shortName: String,
                   conf: i3Conf,
                   harvestLogger: Logger)
  extends ApiHarvester(spark, shortName, conf, harvestLogger) {

  /** MIME-type label recorded for harvested records. */
  def mimeType: String = "application_json"

  /**
   * Base query parameters taken from the harvest configuration.
   * Entries whose configured value is None are dropped.
   */
  override protected val queryParams: Map[String, String] = Map(
    "q" -> conf.harvest.query
  ).collect{ case (key, Some(value)) => key -> value } // remove None values

  /**
   * Harvests every configured collection and returns the combined result.
   *
   * @return DataFrame of all harvested records, read back from the
   *         temporary Avro output written by `saveOutRecords`.
   */
  override def localHarvest(): DataFrame = {
    implicit val formats: DefaultFormats.type = DefaultFormats

    // Collections to harvest come from the comma-separated setlist config.
    val iaCollections = conf.harvest.setlist.getOrElse("").split(",")

    iaCollections.foreach(collection => {
      // Mutable vars for controlling harvest loop
      var continueHarvest = true
      var cursor = ""

      // NOTE: the per-collection "q" parameter is applied inside getSinglePage.
      // (A previous `queryParams.updated(...)` call here discarded its result
      // on an immutable Map — a no-op — and has been removed.)
      while(continueHarvest) getSinglePage(cursor, collection) match {
        // Handle errors
        case error: ApiError with ApiResponse =>
          harvestLogger.error("Error returned by request %s\\n%s\\n%s".format(
            error.errorSource.url.getOrElse("Undefined url"),
            error.errorSource.queryParams,
            error.message
          ))
          continueHarvest = false
        // Handle a successful response
        case src: ApiSource with ApiResponse =>
          src.text match {
            case Some(docs) =>
              Try { parse(docs) } match {
                case Success(json: JValue) => {
                  // Each item with a non-empty identifier becomes an ApiRecord;
                  // items without one are logged and dropped by the collect.
                  val iaRecords = (json \\\\ "items").children.map(doc => {
                    val identifier = (doc \\\\ "identifier").toString
                    if (identifier.nonEmpty)
                      ApiRecord(identifier, compact(render(doc)))
                    else
                      harvestLogger.error(
                        s"""No identifier in original record
                           |URL: ${src.url.getOrElse("Not set")}
                           |Params: ${src.queryParams}
                           |Body: $doc
                           |""".stripMargin)
                  }).collect{case a: ApiRecord => a }

                  // @see ApiHarvester
                  saveOutRecords(iaRecords)

                  // Loop control: an empty cursor means the final page was reached.
                  cursor = (json \\\\ "cursor").extractOrElse[String]("")
                  if (cursor.isEmpty)
                    continueHarvest = false
                }
                case Failure(f) => harvestLogger.error(s"Unable to parse response\\n" +
                  s"URL: ${src.url.getOrElse("Not set")}\\n" +
                  s"Params: ${src.queryParams}\\n" +
                  s"Body: $docs\\n" +
                  s"Error: ${f.getMessage}")
              }
            // Handle unknown case
            case _ =>
              harvestLogger.error(s"Response body is empty.\\n" +
                s"URL: ${src.url.getOrElse("!!! URL not set !!!")}\\n" +
                s"Params: ${src.queryParams}\\n" +
                s"Body: ${src.text}")
              continueHarvest = false
          }
      }
    })

    // Read back everything written by saveOutRecords.
    spark.read.avro(tmpOutStr)
  }

  /**
   * Get a single-page, un-parsed response from the IA Scrape API, or an error if
   * one occurs.
   *
   * @param cursor Uses cursor and not start/offset to paginate. Used to work around Solr
   *               deep-paging performance issues.
   * @param collection IA collection identifier, scoped into the "q" parameter.
   * @return ApiSource or ApiError
   */
  private def getSinglePage(cursor: String, collection: String): ApiResponse = {
    val url = buildUrl(
      queryParams
        .updated("cursor", cursor)
        .updated("q", s"collection:$collection")
        .filter{ case (k: String, v: String) => v.nonEmpty}
    )
    harvestLogger.info(s"Requesting ${url.toString}")

    HttpUtils.makeGetRequest(url) match {
      case Failure(e) =>
        ApiError(e.toString, ApiSource(queryParams, Some(url.toString)))
      case Success(response) => if (response.isEmpty) {
        ApiError("Response body is empty", ApiSource(queryParams, Some(url.toString)))
      } else {
        ApiSource(queryParams, Some(url.toString), Some(response))
      }
    }
  }

  /**
   * Constructs the URL for IA Scrape API requests.
   *
   * @param params URL parameters ("q" defaults to *:* when absent)
   * @return fully-built request URL
   */
  def buildUrl(params: Map[String, String]): URL = {
    val uriBuilder = new URIBuilder()
    uriBuilder
      .setScheme("https")
      .setHost("archive.org")
      .setPath("/services/search/v1/scrape")
      .setParameter("q", params.getOrElse("q", "*:*"))
      .setParameter("fields", "collection,contributor,creator,date,description,identifier,language,licenseurl,mediatype,publisher,rights,subject,title,volume")

    // A blank or empty cursor valid is not allowed
    if (params.get("cursor").isDefined)
      uriBuilder.setParameter("cursor", params.getOrElse("cursor", ""))

    uriBuilder.build().toURL
  }
}
package com.themillhousegroup.edn
import us.bpsm.edn.parser._
import us.bpsm.edn.parser.Parsers._
import us.bpsm.edn.Keyword._
import us.bpsm.edn.parser.Parser.Config
import us.bpsm.edn.Keyword
import org.slf4j.LoggerFactory
import com.themillhousegroup.sausagefactory._
import com.themillhousegroup.sausagefactory.reflection._
import com.themillhousegroup.sausagefactory.CaseClassConverter.FieldConverter
/**
 * Factory for [[ScalaEDNParser]] plus helpers for converting EDN map keys
 * into Java/Scala-legal field names.
 */
object EDNParser {
  // Logger used only for key-conversion debug output.
  private[this] val logger = LoggerFactory.getLogger(getClass)

  /** Creates a parser using the default edn-java configuration. */
  def apply() = {
    new ScalaEDNParser(defaultConfiguration)
  }

  /** Creates a parser using a caller-supplied edn-java configuration. */
  def apply(config: Config) = {
    new ScalaEDNParser(config)
  }

  // Implicit bridges between Scala strings/symbols and edn-java Keywords.
  implicit def str2keyword(s: String): Keyword = newKeyword(s)
  implicit def sym2keyword(s: Symbol): Keyword = newKeyword(s.name)
  implicit def keyword2str(k: Keyword): String = k.getName

  /**
   * Recursively convert all keys in this map into Java-legal field names
   * @param map
   * @return
   */
  def ensureLegalKeys(map: Map[String, Any]): Map[String, Any] = {
    map.map {
      // NOTE(review): the Map[String, Any] match is unchecked due to type
      // erasure — any Map value is recursed into, whatever its key type.
      case (k, v: Map[String, Any]) => legalizeKey(k) -> ensureLegalKeys(v)
      case (k, v) => legalizeKey(k) -> v
    }.toMap
  }

  /**
   * EDN keys can have dashes and ?s in them (which are illegal for Scala/Java field names)
   * If the map is going to end up needing to be Scala-legal, instances of these can be
   * converted here into camelCase as Gosling intended :-)
   */
  def legalizeKey(s: String) = {
    import com.google.common.base.CaseFormat._
    // Strip '?' first, then convert lower-hyphen to lowerCamel via Guava.
    val removedQuestionMarks = removeIllegalCharacters(s)
    val fixedDashes = LOWER_HYPHEN.to(LOWER_CAMEL, removedQuestionMarks)
    logger.debug(s"Checking/converting $s to $fixedDashes")
    fixedDashes
  }

  /** Removes '?' characters, which are legal in EDN keys but not in field names. */
  def removeIllegalCharacters(s: String) = s.replaceAll("[?]", "")
}
/**
 * Scala-friendly wrapper around the edn-java parser: exposes EDN content as
 * Options, Streams, immutable Maps, or populated case classes.
 */
class ScalaEDNParser(config: Config) extends ReflectionHelpers {
  val javaParser = Parsers.newParser(config)

  // The underlying Java parser signals exhaustion with a sentinel object.
  private def isFinished(o: AnyRef) = Parser.END_OF_INPUT.equals(o)

  // Forces a (possibly lazy) collection of pairs into a strict immutable Map.
  private def immutableMap[K, V](m: Traversable[(K, V)]) = {
    Map[K, V]() ++ m
  }

  /**
   * A simple wrapper around the basic Java interface,
   * this method keeps returning Some(T) until all values are exhausted
   * @since 1.0.0
   */
  def nextValue[T](pbr: Parseable): Option[T] = {
    val v = javaParser.nextValue(pbr)
    if (isFinished(v)) {
      None
    } else {
      // Option(null) -> None; the cast to Option[T] is unchecked.
      Option(v).asInstanceOf[Option[T]]
    }
  }

  /**
   * We recurse here wherever there is a chance
   * that a nested object could be another
   * collection that needs processing.
   * The termination case is when we find a simple object.
   */
  private def handleCollections(a: AnyRef)(mapHandler: Map[AnyRef, AnyRef] => Traversable[(String, AnyRef)])(traversableHandler: Traversable[AnyRef] => Traversable[AnyRef]): AnyRef = {
    import scala.collection.JavaConverters._
    // NOTE: these matches are unchecked due to erasure; non-collection
    // values fall through the final case untouched.
    a match {
      case m: java.util.Map[AnyRef, AnyRef] => { mapHandler(immutableMap(m.asScala)) }
      case m: java.util.List[AnyRef] => { traversableHandler(m.asScala.toList) }
      case m: java.util.Set[AnyRef] => { traversableHandler(m.asScala.toSet) }
      case o => o
    }
  }

  // Lazily converts nested Java collections into Streams (used by asStream).
  private def streamCollection(a: AnyRef): AnyRef = {
    handleCollections(a) {
      _.toStream.map {
        case (k: Keyword, v) => k.getName -> streamCollection(v)
        case (s: String, v) => s -> streamCollection(v)
      }
    }(_.toStream.map(convertCollection))
  }

  // Strictly converts nested Java collections into immutable ones (used by asMap).
  private def convertCollection(a: AnyRef): AnyRef = {
    handleCollections(a) {
      _.map {
        case (k: Keyword, v) => k.getName -> convertCollection(v)
        case (s: String, v) => s -> convertCollection(v)
      }
    }(_.map(convertCollection))
  }

  // Pulls values until END_OF_INPUT, pairing them up as (key, mapped value);
  // a non-key first element yields the "" key (the anonymous root container).
  private def processParseable(pbr: Parseable)(valueMapper: AnyRef => AnyRef): Iterator[(String, AnyRef)] = {
    Stream.continually(
      javaParser.nextValue(pbr))
      .takeWhile(!isFinished(_)).sliding(2, 2).map { pair =>
        pair.head match {
          case k: Keyword => k.getName -> valueMapper(pair(1))
          case s: String => s -> valueMapper(pair(1))
          case _ => "" -> valueMapper(pair.head)
        }
      }
  }

  /**
   * Treat an EDN block as if it was a Stream of key-value tuples.
   * This may be suitable if you are dealing with an extremely large
   * Parseable instance and are worried about memory usage.
   * Nested collections will appear as a (String, Stream) tuple
   * within the parent stream.
   * If the entire EDN block is contained within {}, then
   * it will be treated as a Stream with one tuple, "" -> (the content)
   * @since 1.0.0
   */
  def asStream(pbr: Parseable): Stream[(String, AnyRef)] = {
    processParseable(pbr)(streamCollection).toStream
  }

  /**
   * Treat an EDN block as if it was an immutable Map.
   *
   * Simple key-value pairs will have appropriate value types.
   * Otherwise, there can be nested Map[String, AnyRef],
   * Set[AnyRef] or Seq[AnyRef] collections nested inside.
   *
   * If the entire EDN block is contained within {}, then
   * this "single-entry Map" will be dereferenced for convenience;
   * so for example:
   * { :x 1 :y 2 :z 3 } will result in a Map of size 3, rather
   * than a Map with one entry of "" -> (Map of size 3)
   * @since 1.0.0
   */
  def asMap(pbr: Parseable): Map[String, AnyRef] = {
    val m = immutableMap(processParseable(pbr)(convertCollection).toTraversable)
    // Special case for the "root" map (if it exists)
    if (m.size == 1 && (m.forall {
      case (s, a) =>
        s.isEmpty && a.isInstanceOf[Map[String, AnyRef]]
    })) {
      m.head._2.asInstanceOf[Map[String, AnyRef]]
    } else m
  }

  import scala.reflect.runtime.universe._
  import scala.reflect._

  /**
   * Reduces the amount of casting required when treating EDN files
   * as a Map[String, AnyRef]. This function will attempt to coerce
   * the contents of the provided Parseable into an instance of the
   * given case class (all case classes extend Product, hence the signature).
   *
   * Fields in the EDN not found in the target class will be ignored.
   * Fields in the target class MUST be present in the EDN, unless they
   * are Option types, in which case they will be set to None.
   *
   * Case classes of arbitrary complexity (e.g. with lists, sets, maps,
   * options, and other case classes nested inside) are supported.
   *
   * This functionality is a thin wrapper around
   * https://github.com/themillhousegroup/sausagefactory
   *
   * @since 2.0.0
   */
  def readInto[T <: Product: TypeTag](pbr: Parseable): scala.util.Try[T] = {
    import com.themillhousegroup.sausagefactory.CaseClassConverter
    val ednMap = asMap(pbr)
    val legalMap = EDNParser.ensureLegalKeys(ednMap)
    // (A leftover debug println of legalMap was removed here — library code
    // must not write to stdout on every call.)
    CaseClassConverter[T](legalMap, AlwaysMakeJavaLongsIntoInts)
  }

  // EDN integers arrive as java.lang.Long; narrow them when the target
  // case-class field is an Int.
  val AlwaysMakeJavaLongsIntoInts: FieldConverter = {
    case(t: Type, v: Any) if (isInt(t) && isJLong(v.getClass)) => {
      v.asInstanceOf[Long].toInt
    }
  }
}
| themillhousegroup/edn-scala | src/main/scala/com/themillhousegroup/edn/EDNParser.scala | Scala | gpl-2.0 | 6,927 |
package com.twitter.finagle.netty4
package buoyant
import com.twitter.finagle.netty4.channel.{ConnectPromiseDelayListeners, BufferingChannelOutboundHandler}
import io.netty.channel._
import io.netty.handler.proxy.ProxyHandler
import java.net.SocketAddress
/**
 * A duplex channel handler that delays and buffers outbound writes until the
 * outward CONNECT operation completes, then drains them in order.
 *
 * Modified from com.twitter.finagle.netty4 (see BufferingChannelOutboundHandler
 * and ConnectPromiseDelayListeners, which this handler composes).
 */
private[finagle] class BufferingConnectDelay
  extends ChannelDuplexHandler
  with BufferingChannelOutboundHandler { self =>
  import ConnectPromiseDelayListeners._

  /*
   * We manage two promises:
   * - `inp` informs the inward pipeline when the channel is
   *   connected and initialized properly.
   * - `outp` is satisfied when the outward pipeline is connected.
   *
   * Cancellations are propagated outward--failures, inward.
   *
   * Outbound writes are buffered until `outp` is satisfied. If the
   * outward channel was connected successfully, the inward promise
   * is satisfied and buffered outbound writes are written. If the
   * outward channel fails to connect, the inward promise fails and
   * the promise for each buffered request is satisfied with a
   * connection (requeueable) connection exception.
   */
  override def connect(
    ctx: ChannelHandlerContext,
    remote: SocketAddress,
    local: SocketAddress,
    inp: ChannelPromise
  ): Unit = {
    val outp = ctx.newPromise()
    // Wire the two promises together per the scheme described above.
    inp.addListener(proxyCancellationsTo(outp, ctx))
    outp.addListener(proxyFailuresTo(inp))
    outp.addListener(proxyActiveTo(inp, ctx))
    val _ = ctx.connect(remote, local, outp)
  }

  // On successful outward connect, install the drainer that completes `inp`
  // once the channel goes active; on failure, fail `inp` and all buffered writes.
  private[this] def proxyActiveTo(inp: ChannelPromise, ctx: ChannelHandlerContext) =
    new ChannelFutureListener {
      override def operationComplete(future: ChannelFuture): Unit =
        if (future.isSuccess) {
          val _ = ctx.pipeline.addLast(handleActiveTo(inp))
        } else if (inp.tryFailure(future.cause)) {
          failPendingWrites(future.cause)
        }
    }

  // Inbound handler that, on channelActive, satisfies `inp`, removes itself,
  // and removes `self` (removal drains the buffered writes). An exception
  // before activation fails `inp` and the buffered writes instead.
  private[this] def handleActiveTo(inp: ChannelPromise): ChannelHandler =
    new ChannelInboundHandlerAdapter { drainer =>
      override def channelActive(ctx: ChannelHandlerContext): Unit =
        if (inp.trySuccess()) {
          ctx.pipeline.remove(drainer)
          val _ = ctx.pipeline.remove(self) // drains pending writes when removed
        }

      override def exceptionCaught(ctx: ChannelHandlerContext, exn: Throwable): Unit =
        if (inp.tryFailure(exn)) failPendingWrites(exn)
    }
}
| linkerd/linkerd | finagle/buoyant/src/main/scala/com/twitter/finagle/netty4/buoyant/BufferingConnectDelay.scala | Scala | apache-2.0 | 2,422 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.admin
import junit.framework.Assert._
import org.junit.Test
import java.util.Properties
import kafka.utils._
import kafka.log._
import kafka.zk.ZooKeeperTestHarness
import kafka.utils.{Logging, ZkUtils, TestUtils}
import kafka.common.{InvalidTopicException, TopicExistsException, TopicAndPartition}
import kafka.server.{ConfigType, KafkaServer, KafkaConfig}
import java.io.File
import TestUtils._
class AdminTest extends ZooKeeperTestHarness with Logging {
@Test
def testReplicaAssignment() {
val brokerList = List(0, 1, 2, 3, 4)
// test 0 replication factor
intercept[AdminOperationException] {
AdminUtils.assignReplicasToBrokers(brokerList, 10, 0)
}
// test wrong replication factor
intercept[AdminOperationException] {
AdminUtils.assignReplicasToBrokers(brokerList, 10, 6)
}
// correct assignment
val expectedAssignment = Map(
0 -> List(0, 1, 2),
1 -> List(1, 2, 3),
2 -> List(2, 3, 4),
3 -> List(3, 4, 0),
4 -> List(4, 0, 1),
5 -> List(0, 2, 3),
6 -> List(1, 3, 4),
7 -> List(2, 4, 0),
8 -> List(3, 0, 1),
9 -> List(4, 1, 2))
val actualAssignment = AdminUtils.assignReplicasToBrokers(brokerList, 10, 3, 0)
val e = (expectedAssignment.toList == actualAssignment.toList)
assertTrue(expectedAssignment.toList == actualAssignment.toList)
}
  // Verifies validation of manually-supplied replica assignments written to ZK:
  // duplicates and inconsistent replication factors are rejected; a valid
  // assignment round-trips through ZK unchanged.
  @Test
  def testManualReplicaAssignment() {
    val brokers = List(0, 1, 2, 3, 4)
    TestUtils.createBrokersInZk(zkUtils, brokers)

    // duplicate brokers
    intercept[IllegalArgumentException] {
      AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, "test", Map(0->Seq(0,0)))
    }

    // inconsistent replication factor
    intercept[IllegalArgumentException] {
      AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, "test", Map(0->Seq(0,1), 1->Seq(0)))
    }

    // good assignment
    val assignment = Map(0 -> List(0, 1, 2),
                         1 -> List(1, 2, 3))
    AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, "test", assignment)
    // The assignment read back from ZK must match what was written.
    val found = zkUtils.getPartitionAssignmentForTopics(Seq("test"))
    assertEquals(assignment, found("test"))
  }
  // Creates a 12-partition topic (RF 3) over 5 brokers, elects a leader per
  // partition, and checks the replica lists read back from ZK match the
  // requested assignment. Re-creating the same topic must fail.
  @Test
  def testTopicCreationInZK() {
    val expectedReplicaAssignment = Map(
      0 -> List(0, 1, 2),
      1 -> List(1, 2, 3),
      2 -> List(2, 3, 4),
      3 -> List(3, 4, 0),
      4 -> List(4, 0, 1),
      5 -> List(0, 2, 3),
      6 -> List(1, 3, 4),
      7 -> List(2, 4, 0),
      8 -> List(3, 0, 1),
      9 -> List(4, 1, 2),
      10 -> List(1, 2, 3),
      11 -> List(1, 3, 4)
    )
    // Leader chosen for each partition when leaders are created below.
    val leaderForPartitionMap = Map(
      0 -> 0,
      1 -> 1,
      2 -> 2,
      3 -> 3,
      4 -> 4,
      5 -> 0,
      6 -> 1,
      7 -> 2,
      8 -> 3,
      9 -> 4,
      10 -> 1,
      11 -> 1
    )
    val topic = "test"
    TestUtils.createBrokersInZk(zkUtils, List(0, 1, 2, 3, 4))
    // create the topic
    AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, topic, expectedReplicaAssignment)
    // create leaders for all partitions
    TestUtils.makeLeaderForPartition(zkUtils, topic, leaderForPartitionMap, 1)
    val actualReplicaList = leaderForPartitionMap.keys.toArray.map(p => (p -> zkUtils.getReplicasForPartition(topic, p))).toMap
    assertEquals(expectedReplicaAssignment.size, actualReplicaList.size)
    for(i <- 0 until actualReplicaList.size)
      assertEquals(expectedReplicaAssignment.get(i).get, actualReplicaList(i))

    intercept[TopicExistsException] {
      // shouldn't be able to create a topic that already exists
      AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, topic, expectedReplicaAssignment)
    }
  }
  // Verifies that a topic name differing only by '.' vs '_' from an existing
  // topic is rejected with InvalidTopicException.
  @Test
  def testTopicCreationWithCollision() {
    val topic = "test.topic"
    val collidingTopic = "test_topic"
    TestUtils.createBrokersInZk(zkUtils, List(0, 1, 2, 3, 4))
    // create the topic
    AdminUtils.createTopic(zkUtils, topic, 3, 1)

    intercept[InvalidTopicException] {
      // shouldn't be able to create a topic that collides
      AdminUtils.createTopic(zkUtils, collidingTopic, 3, 1)
    }
  }
private def getBrokersWithPartitionDir(servers: Iterable[KafkaServer], topic: String, partitionId: Int): Set[Int] = {
servers.filter(server => new File(server.config.logDirs.head, topic + "-" + partitionId).exists)
.map(_.config.brokerId)
.toSet
}
  // Reassigns partition 0 from (0,1,2) to (0,2,3) — the leader (0) remains in
  // the new replica set — and checks the reassignment completes, ISR is clean,
  // and the partition directories end up on exactly the new replicas.
  @Test
  def testPartitionReassignmentWithLeaderInNewReplicas() {
    val expectedReplicaAssignment = Map(0  -> List(0, 1, 2))
    val topic = "test"
    // create brokers
    val servers = TestUtils.createBrokerConfigs(4, zkConnect, false).map(b => TestUtils.createServer(KafkaConfig.fromProps(b)))
    // create the topic
    AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, topic, expectedReplicaAssignment)
    // reassign partition 0
    val newReplicas = Seq(0, 2, 3)
    val partitionToBeReassigned = 0
    val topicAndPartition = TopicAndPartition(topic, partitionToBeReassigned)
    val reassignPartitionsCommand = new ReassignPartitionsCommand(zkUtils, Map(topicAndPartition -> newReplicas))
    assertTrue("Partition reassignment attempt failed for [test, 0]", reassignPartitionsCommand.reassignPartitions())
    // wait until reassignment is completed
    TestUtils.waitUntilTrue(() => {
        val partitionsBeingReassigned = zkUtils.getPartitionsBeingReassigned().mapValues(_.newReplicas);
        ReassignPartitionsCommand.checkIfPartitionReassignmentSucceeded(zkUtils, topicAndPartition, newReplicas,
        Map(topicAndPartition -> newReplicas), partitionsBeingReassigned) == ReassignmentCompleted;
      },
      "Partition reassignment should complete")
    val assignedReplicas = zkUtils.getReplicasForPartition(topic, partitionToBeReassigned)
    // in sync replicas should not have any replica that is not in the new assigned replicas
    checkForPhantomInSyncReplicas(zkUtils, topic, partitionToBeReassigned, assignedReplicas)
    assertEquals("Partition should have been reassigned to 0, 2, 3", newReplicas, assignedReplicas)
    ensureNoUnderReplicatedPartitions(zkUtils, topic, partitionToBeReassigned, assignedReplicas, servers)
    TestUtils.waitUntilTrue(() => getBrokersWithPartitionDir(servers, topic, 0) == newReplicas.toSet,
                            "New replicas should exist on brokers")
    servers.foreach(_.shutdown())
  }
  // Reassigns partition 0 from (0,1,2) to (1,2,3) — the original leader 0 is
  // NOT in the new replica set — and checks the reassignment still completes
  // with a clean ISR and correct on-disk placement.
  @Test
  def testPartitionReassignmentWithLeaderNotInNewReplicas() {
    val expectedReplicaAssignment = Map(0  -> List(0, 1, 2))
    val topic = "test"
    // create brokers
    val servers = TestUtils.createBrokerConfigs(4, zkConnect, false).map(b => TestUtils.createServer(KafkaConfig.fromProps(b)))
    // create the topic
    AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, topic, expectedReplicaAssignment)
    // reassign partition 0
    val newReplicas = Seq(1, 2, 3)
    val partitionToBeReassigned = 0
    val topicAndPartition = TopicAndPartition(topic, partitionToBeReassigned)
    val reassignPartitionsCommand = new ReassignPartitionsCommand(zkUtils, Map(topicAndPartition -> newReplicas))
    assertTrue("Partition reassignment failed for test, 0", reassignPartitionsCommand.reassignPartitions())
    // wait until reassignment is completed
    TestUtils.waitUntilTrue(() => {
        val partitionsBeingReassigned = zkUtils.getPartitionsBeingReassigned().mapValues(_.newReplicas);
        ReassignPartitionsCommand.checkIfPartitionReassignmentSucceeded(zkUtils, topicAndPartition, newReplicas,
        Map(topicAndPartition -> newReplicas), partitionsBeingReassigned) == ReassignmentCompleted;
      },
      "Partition reassignment should complete")
    val assignedReplicas = zkUtils.getReplicasForPartition(topic, partitionToBeReassigned)
    assertEquals("Partition should have been reassigned to 0, 2, 3", newReplicas, assignedReplicas)
    checkForPhantomInSyncReplicas(zkUtils, topic, partitionToBeReassigned, assignedReplicas)
    ensureNoUnderReplicatedPartitions(zkUtils, topic, partitionToBeReassigned, assignedReplicas, servers)
    TestUtils.waitUntilTrue(() => getBrokersWithPartitionDir(servers, topic, 0) == newReplicas.toSet,
                            "New replicas should exist on brokers")
    servers.foreach(_.shutdown())
  }
  // Reassigns partition 0 from (0,1) to a completely disjoint replica set
  // (2,3) and checks completion, ISR cleanliness, and on-disk placement.
  @Test
  def testPartitionReassignmentNonOverlappingReplicas() {
    val expectedReplicaAssignment = Map(0  -> List(0, 1))
    val topic = "test"
    // create brokers
    val servers = TestUtils.createBrokerConfigs(4, zkConnect, false).map(b => TestUtils.createServer(KafkaConfig.fromProps(b)))
    // create the topic
    AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, topic, expectedReplicaAssignment)
    // reassign partition 0
    val newReplicas = Seq(2, 3)
    val partitionToBeReassigned = 0
    val topicAndPartition = TopicAndPartition(topic, partitionToBeReassigned)
    val reassignPartitionsCommand = new ReassignPartitionsCommand(zkUtils, Map(topicAndPartition -> newReplicas))
    assertTrue("Partition reassignment failed for test, 0", reassignPartitionsCommand.reassignPartitions())
    // wait until reassignment is completed
    TestUtils.waitUntilTrue(() => {
        val partitionsBeingReassigned = zkUtils.getPartitionsBeingReassigned().mapValues(_.newReplicas);
        ReassignPartitionsCommand.checkIfPartitionReassignmentSucceeded(zkUtils, topicAndPartition, newReplicas,
        Map(topicAndPartition -> newReplicas), partitionsBeingReassigned) == ReassignmentCompleted;
      },
      "Partition reassignment should complete")
    val assignedReplicas = zkUtils.getReplicasForPartition(topic, partitionToBeReassigned)
    assertEquals("Partition should have been reassigned to 2, 3", newReplicas, assignedReplicas)
    checkForPhantomInSyncReplicas(zkUtils, topic, partitionToBeReassigned, assignedReplicas)
    ensureNoUnderReplicatedPartitions(zkUtils, topic, partitionToBeReassigned, assignedReplicas, servers)
    TestUtils.waitUntilTrue(() => getBrokersWithPartitionDir(servers, topic, 0) == newReplicas.toSet,
                            "New replicas should exist on brokers")
    servers.foreach(_.shutdown())
  }
  // Attempting to reassign a partition of a topic that was never created must
  // fail up front and leave nothing in the reassignment path.
  @Test
  def testReassigningNonExistingPartition() {
    val topic = "test"
    // create brokers
    val servers = TestUtils.createBrokerConfigs(4, zkConnect, false).map(b => TestUtils.createServer(KafkaConfig.fromProps(b)))
    // reassign partition 0
    val newReplicas = Seq(2, 3)
    val partitionToBeReassigned = 0
    val topicAndPartition = TopicAndPartition(topic, partitionToBeReassigned)
    val reassignPartitionsCommand = new ReassignPartitionsCommand(zkUtils, Map(topicAndPartition -> newReplicas))
    assertFalse("Partition reassignment failed for test, 0", reassignPartitionsCommand.reassignPartitions())
    val reassignedPartitions = zkUtils.getPartitionsBeingReassigned()
    assertFalse("Partition should not be reassigned", reassignedPartitions.contains(topicAndPartition))
    servers.foreach(_.shutdown())
  }
  // Writes a reassignment (to the partition's current replicas) into ZK before
  // any broker is up; once brokers start, the controller must notice the
  // "reassignment" is already satisfied and clear the reassignment path.
  @Test
  def testResumePartitionReassignmentThatWasCompleted() {
    val expectedReplicaAssignment = Map(0  -> List(0, 1))
    val topic = "test"
    // create the topic
    AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, topic, expectedReplicaAssignment)
    // put the partition in the reassigned path as well
    // reassign partition 0
    val newReplicas = Seq(0, 1)
    val partitionToBeReassigned = 0
    val topicAndPartition = TopicAndPartition(topic, partitionToBeReassigned)
    val reassignPartitionsCommand = new ReassignPartitionsCommand(zkUtils, Map(topicAndPartition -> newReplicas))
    reassignPartitionsCommand.reassignPartitions
    // create brokers
    val servers = TestUtils.createBrokerConfigs(2, zkConnect, false).map(b => TestUtils.createServer(KafkaConfig.fromProps(b)))

    // wait until reassignment completes
    TestUtils.waitUntilTrue(() => !checkIfReassignPartitionPathExists(zkUtils),
                            "Partition reassignment should complete")
    val assignedReplicas = zkUtils.getReplicasForPartition(topic, partitionToBeReassigned)
    assertEquals("Partition should have been reassigned to 0, 1", newReplicas, assignedReplicas)
    checkForPhantomInSyncReplicas(zkUtils, topic, partitionToBeReassigned, assignedReplicas)
    // ensure that there are no under replicated partitions
    ensureNoUnderReplicatedPartitions(zkUtils, topic, partitionToBeReassigned, assignedReplicas, servers)
    TestUtils.waitUntilTrue(() => getBrokersWithPartitionDir(servers, topic, 0) == newReplicas.toSet,
                            "New replicas should exist on brokers")
    servers.foreach(_.shutdown())
  }
  // Round-trips the preferred-replica-election partition set through its
  // ZK JSON representation (write, read back, parse, compare).
  @Test
  def testPreferredReplicaJsonData() {
    // write preferred replica json data to zk path
    val partitionsForPreferredReplicaElection = Set(TopicAndPartition("test", 1), TopicAndPartition("test2", 1))
    PreferredReplicaLeaderElectionCommand.writePreferredReplicaElectionData(zkUtils, partitionsForPreferredReplicaElection)
    // try to read it back and compare with what was written
    val preferredReplicaElectionZkData = zkUtils.readData(ZkUtils.PreferredReplicaLeaderElectionPath)._1
    val partitionsUndergoingPreferredReplicaElection =
      PreferredReplicaLeaderElectionCommand.parsePreferredReplicaElectionData(preferredReplicaElectionZkData)
    assertEquals("Preferred replica election ser-de failed", partitionsForPreferredReplicaElection,
      partitionsUndergoingPreferredReplicaElection)
  }
  // Starts brokers so that the preferred replica (0) is not the initial
  // leader, triggers a preferred-replica election, and verifies leadership
  // moves to replica 0.
  @Test
  def testBasicPreferredReplicaElection() {
    val expectedReplicaAssignment = Map(1  -> List(0, 1, 2))
    val topic = "test"
    val partition = 1
    val preferredReplica = 0
    // create brokers
    val serverConfigs = TestUtils.createBrokerConfigs(3, zkConnect, false).map(KafkaConfig.fromProps)
    // create the topic
    AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, topic, expectedReplicaAssignment)
    // reverseMap starts broker 2 first, so it wins the initial election.
    val servers = serverConfigs.reverseMap(s => TestUtils.createServer(s))
    // broker 2 should be the leader since it was started first
    val currentLeader = TestUtils.waitUntilLeaderIsElectedOrChanged(zkUtils, topic, partition, oldLeaderOpt = None).get
    // trigger preferred replica election
    val preferredReplicaElection = new PreferredReplicaLeaderElectionCommand(zkUtils, Set(TopicAndPartition(topic, partition)))
    preferredReplicaElection.moveLeaderToPreferredReplica()
    val newLeader = TestUtils.waitUntilLeaderIsElectedOrChanged(zkUtils, topic, partition, oldLeaderOpt = Some(currentLeader)).get
    assertEquals("Preferred replica election failed", preferredReplica, newLeader)
    servers.foreach(_.shutdown())
  }
  @Test
  def testShutdownBroker() {
    val expectedReplicaAssignment = Map(1 -> List(0, 1, 2))
    val topic = "test"
    val partition = 1
    // create brokers
    val serverConfigs = TestUtils.createBrokerConfigs(3, zkConnect, false).map(KafkaConfig.fromProps)
    val servers = serverConfigs.reverseMap(s => TestUtils.createServer(s))
    // create the topic
    TestUtils.createTopic(zkUtils, topic, partitionReplicaAssignment = expectedReplicaAssignment, servers = servers)
    // locate the elected controller; controlled shutdown is driven through it
    val controllerId = zkUtils.getController()
    val controller = servers.find(p => p.config.brokerId == controllerId).get.kafkaController
    // shut down broker 2 first; with the other two replicas alive, leadership can move away cleanly
    var partitionsRemaining = controller.shutdownBroker(2)
    var activeServers = servers.filter(s => s.config.brokerId != 2)
    try {
      // wait for the update metadata request to trickle to the brokers
      TestUtils.waitUntilTrue(() =>
        activeServers.forall(_.apis.metadataCache.getPartitionInfo(topic,partition).get.leaderIsrAndControllerEpoch.leaderAndIsr.isr.size != 3),
        "Topic test not created after timeout")
      // no partitions should still be led by broker 2 after its controlled shutdown
      assertEquals(0, partitionsRemaining.size)
      var partitionStateInfo = activeServers.head.apis.metadataCache.getPartitionInfo(topic,partition).get
      var leaderAfterShutdown = partitionStateInfo.leaderIsrAndControllerEpoch.leaderAndIsr.leader
      // leadership moved to broker 0 and the ISR shrank to the two surviving replicas
      assertEquals(0, leaderAfterShutdown)
      assertEquals(2, partitionStateInfo.leaderIsrAndControllerEpoch.leaderAndIsr.isr.size)
      assertEquals(List(0,1), partitionStateInfo.leaderIsrAndControllerEpoch.leaderAndIsr.isr)
      // now shut down broker 1 as well; broker 0 stays leader as the only live replica
      partitionsRemaining = controller.shutdownBroker(1)
      assertEquals(0, partitionsRemaining.size)
      activeServers = servers.filter(s => s.config.brokerId == 0)
      partitionStateInfo = activeServers.head.apis.metadataCache.getPartitionInfo(topic,partition).get
      leaderAfterShutdown = partitionStateInfo.leaderIsrAndControllerEpoch.leaderAndIsr.leader
      assertEquals(0, leaderAfterShutdown)
      assertTrue(servers.forall(_.apis.metadataCache.getPartitionInfo(topic,partition).get.leaderIsrAndControllerEpoch.leaderAndIsr.leader == 0))
      // shutting down the last replica cannot relocate leadership, so one partition remains on broker 0
      partitionsRemaining = controller.shutdownBroker(0)
      assertEquals(1, partitionsRemaining.size)
      // leader doesn't change since all the replicas are shut down
      assertTrue(servers.forall(_.apis.metadataCache.getPartitionInfo(topic,partition).get.leaderIsrAndControllerEpoch.leaderAndIsr.leader == 0))
    }
    finally {
      servers.foreach(_.shutdown())
    }
  }
/**
* This test creates a topic with a few config overrides and checks that the configs are applied to the new topic
* then changes the config and checks that the new values take effect.
*/
@Test
def testTopicConfigChange() {
val partitions = 3
val topic = "my-topic"
val server = TestUtils.createServer(KafkaConfig.fromProps(TestUtils.createBrokerConfig(0, zkConnect)))
def makeConfig(messageSize: Int, retentionMs: Long) = {
var props = new Properties()
props.setProperty(LogConfig.MaxMessageBytesProp, messageSize.toString)
props.setProperty(LogConfig.RetentionMsProp, retentionMs.toString)
props
}
def checkConfig(messageSize: Int, retentionMs: Long) {
TestUtils.retry(10000) {
for(part <- 0 until partitions) {
val logOpt = server.logManager.getLog(TopicAndPartition(topic, part))
assertTrue(logOpt.isDefined)
assertEquals(retentionMs, logOpt.get.config.retentionMs)
assertEquals(messageSize, logOpt.get.config.maxMessageSize)
}
}
}
try {
// create a topic with a few config overrides and check that they are applied
val maxMessageSize = 1024
val retentionMs = 1000*1000
AdminUtils.createTopic(server.zkUtils, topic, partitions, 1, makeConfig(maxMessageSize, retentionMs))
checkConfig(maxMessageSize, retentionMs)
// now double the config values for the topic and check that it is applied
val newConfig: Properties = makeConfig(2*maxMessageSize, 2 * retentionMs)
AdminUtils.changeTopicConfig(server.zkUtils, topic, makeConfig(2*maxMessageSize, 2 * retentionMs))
checkConfig(2*maxMessageSize, 2 * retentionMs)
// Verify that the same config can be read from ZK
val configInZk = AdminUtils.fetchEntityConfig(server.zkUtils, ConfigType.Topic, topic)
assertEquals(newConfig, configInZk)
} finally {
server.shutdown()
server.config.logDirs.foreach(CoreUtils.rm(_))
}
}
}
| vkroz/kafka | core/src/test/scala/unit/kafka/admin/AdminTest.scala | Scala | apache-2.0 | 20,004 |
package io.scalajs.nodejs
import io.scalajs.nodejs.http.{RequestOptions, ServerResponse}
import io.scalajs.util.PromiseHelper._
import io.scalajs.{RawOptions, nodejs}
import scala.concurrent.Future
import scala.scalajs.js.|
/**
* https package object
* @author lawrence.daniels@gmail.com
*/
package object https {

  /**
   * Enriches the [[Https]] module with `Future`-returning variants of its
   * callback-based methods.
   * @author lawrence.daniels@gmail.com
   */
  final implicit class HttpExtensions(val https: Https) extends AnyVal {

    /**
     * Like http.get() but for HTTPS; resolves with the server's response.
     */
    @inline
    def getFuture(options: RequestOptions | RawOptions): Future[ServerResponse] =
      promiseCallback1[ServerResponse](callback => https.get(options, callback))

    /**
     * Like http.get() but for HTTPS; accepts a raw URL string.
     */
    @inline
    def getFuture(url: String): Future[ServerResponse] =
      promiseCallback1[ServerResponse](callback => https.get(url, callback))

    /**
     * Makes a request to a secure web server, resolving with the response.
     */
    @inline
    def requestFuture(options: RequestOptions | RawOptions): Future[ServerResponse] =
      promiseWithError1[nodejs.Error, ServerResponse](callback => https.request(options, callback))

    /**
     * Makes a request to a secure web server using a raw URL string.
     */
    @inline
    def requestFuture(url: String): Future[ServerResponse] =
      promiseWithError1[nodejs.Error, ServerResponse](callback => https.request(url, callback))
  }

}
| scalajs-io/nodejs | app/common/src/main/scala/io/scalajs/nodejs/https/package.scala | Scala | apache-2.0 | 1,372 |
package mesosphere.marathon.upgrade
import akka.testkit.{ TestKit, TestActorRef }
import akka.actor.{ Props, ActorSystem }
import mesosphere.marathon.tasks.TaskTracker
import org.scalatest.{ BeforeAndAfter, BeforeAndAfterAll, Matchers, FunSuiteLike }
import org.apache.mesos.SchedulerDriver
import org.scalatest.mock.MockitoSugar
import mesosphere.marathon.Protos.MarathonTask
import scala.collection.mutable
import scala.concurrent.{ Await, Promise }
import scala.concurrent.duration._
import mesosphere.marathon.event.MesosStatusUpdateEvent
import org.mockito.Mockito._
import org.apache.mesos.Protos.TaskID
import mesosphere.marathon.TaskUpgradeCanceledException
import mesosphere.marathon.state.{ AppDefinition, PathId }
/**
 * Tests for `TaskKillActor`: it should ask the scheduler driver to kill the
 * given tasks, complete its promise once matching TASK_KILLED status updates
 * are published, fail the promise when stopped early, and terminate when the
 * task tracker reports the tasks as already gone.
 */
class TaskKillActorTest
    extends TestKit(ActorSystem("System"))
    with FunSuiteLike
    with Matchers
    with BeforeAndAfterAll
    with BeforeAndAfter
    with MockitoSugar {
  // Fresh mocks are installed before every test case (see `before` below).
  var taskTracker: TaskTracker = _
  var driver: SchedulerDriver = _
  before {
    taskTracker = mock[TaskTracker]
    driver = mock[SchedulerDriver]
  }
  override def afterAll(): Unit = {
    super.afterAll()
    // Tear down the shared actor system once the whole suite has finished.
    system.shutdown()
  }
  test("Kill tasks") {
    val taskA = MarathonTask.newBuilder().setId("taskA_id").build()
    val taskB = MarathonTask.newBuilder().setId("taskB_id").build()
    val tasks = Set(taskA, taskB)
    val promise = Promise[Unit]()
    val ref = TestActorRef(Props(classOf[TaskKillActor], driver, PathId("/test"), taskTracker, system.eventStream, tasks, promise))
    watch(ref)
    // Simulate Mesos acknowledging both kills via status update events.
    system.eventStream.publish(MesosStatusUpdateEvent("", taskA.getId, "TASK_KILLED", PathId.empty, "", Nil, ""))
    system.eventStream.publish(MesosStatusUpdateEvent("", taskB.getId, "TASK_KILLED", PathId.empty, "", Nil, ""))
    Await.result(promise.future, 5.seconds) should be(())
    // The actor must have issued a driver kill for each task, then stopped itself.
    verify(driver).killTask(TaskID.newBuilder().setValue(taskA.getId).build())
    verify(driver).killTask(TaskID.newBuilder().setValue(taskB.getId).build())
    expectTerminated(ref)
  }
  test("Kill tasks with empty task list") {
    val tasks = Set[MarathonTask]()
    val promise = Promise[Unit]()
    val ref = TestActorRef(Props(classOf[TaskKillActor], driver, PathId("/test"), taskTracker, system.eventStream, tasks, promise))
    watch(ref)
    // Nothing to kill: the promise completes immediately and the driver is never touched.
    Await.result(promise.future, 5.seconds) should be(())
    verifyZeroInteractions(driver)
    expectTerminated(ref)
  }
  test("Cancelled") {
    val taskA = MarathonTask.newBuilder().setId("taskA_id").build()
    val taskB = MarathonTask.newBuilder().setId("taskB_id").build()
    val tasks = Set(taskA, taskB)
    val promise = Promise[Unit]()
    val ref = system.actorOf(Props(classOf[TaskKillActor], driver, PathId("/test"), taskTracker, system.eventStream, tasks, promise))
    watch(ref)
    // Stopping the actor before any TASK_KILLED event arrives must fail the promise.
    system.stop(ref)
    intercept[TaskUpgradeCanceledException] {
      Await.result(promise.future, 5.seconds)
    }.getMessage should equal("The operation has been cancelled")
    expectTerminated(ref)
  }
  test("Task synchronization") {
    val app = AppDefinition(id = PathId("/app"), instances = 2)
    val promise = Promise[Unit]()
    val taskA = MarathonTask.newBuilder().setId("taskA_id").build()
    val taskB = MarathonTask.newBuilder().setId("taskB_id").build()
    val tasks = mutable.Set(taskA, taskB)
    // The tracker reports no tasks for the app; presumably the actor reconciles
    // against it and completes without waiting for kill events — TODO confirm.
    when(taskTracker.get(app.id))
      .thenReturn(Set.empty[MarathonTask])
    val ref = system.actorOf(Props(classOf[TaskKillActor], driver, app.id, taskTracker, system.eventStream, tasks.toSet, promise))
    watch(ref)
    Await.result(promise.future, 10.seconds) should be(())
    expectTerminated(ref)
  }
}
| tnachen/marathon | src/test/scala/mesosphere/marathon/upgrade/TaskKillActorTest.scala | Scala | apache-2.0 | 3,613 |
package io.estatico.effect
package instances
import java.util.concurrent.{ExecutorService, ScheduledExecutorService}
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}
import scalaz.{Nondeterminism, \\/}
import scalaz.concurrent.{Strategy, Task}
/** Companion object exposing the instances without requiring a mix-in. */
object ScalazTaskInstances extends ScalazTaskInstances
/**
 * Type-class instances (Async, FromFuture, Recoverable, Sync) for `scalaz.concurrent.Task`.
 */
trait ScalazTaskInstances {
  // Note that all of the exported names contain 'Scalaz' to avoid ambiguity with
  // other Task implementations.
  /** Default Async[Task] instance which uses a cache for the default ExecutorService. */
  implicit def asyncScalazTask(
    implicit
    pool: ExecutorService = Strategy.DefaultExecutorService,
    scheduler: ScheduledExecutorService = Strategy.DefaultTimeoutScheduler
  ): Async[Task] = {
    // Reuse the cached instance when the caller kept both scalaz default pools
    // (comparison is against the exact Strategy default objects).
    if (pool == Strategy.DefaultExecutorService && scheduler == Strategy.DefaultTimeoutScheduler) {
      defaultAsyncScalazTask
    } else {
      newAsyncScalazTask(pool, scheduler)
    }
  }
  /** Cached instance for the default Task pool. */
  private val defaultAsyncScalazTask: Async[Task]
    = newAsyncScalazTask(Strategy.DefaultExecutorService, Strategy.DefaultTimeoutScheduler)
  /** Constructs an Async[Task] instance from an ExecutorService. */
  def newAsyncScalazTask(
    implicit
    pool: ExecutorService,
    scheduler: ScheduledExecutorService
  ): Async[Task] = new Async[Task] {
    // Passing the pool here to explicitly demonstrate why we need an implicit pool to begin with.
    override def async[A](a: => A): Task[A] = Task(a)(pool)
    override def asyncBoth[A, B](fa: Task[A], fb: Task[B]): Task[(A, B)]
      = Nondeterminism[Task].both(fa, fb)
    // `choose` yields the winner plus the loser's residual; fold the scalaz tuple into Either.
    override def race[A, B](fa: Task[A], fb: Task[B]): Task[Either[A, B]]
      = Nondeterminism[Task].choose(fa, fb).map(_.fold(t => Left(t._1), t => Right(t._2)))
    // NOTE(review): races an already-completed Task.now(()) against `fa` and discards
    // the residual — confirm `choose` leaves `fa` running in the background as intended.
    override def background[A](fa: Task[A]): Task[Unit]
      = Nondeterminism[Task].choose(Task.now(()), fa).map(_ => ())
    override def timeoutMillis[A](fa: Task[A])(millis: Long): Task[A]
      = fa.timed(millis)
  }
  /** Default FromFuture[Task] instance which uses a cache for the default ExecutionContext. */
  implicit def fromFutureScalazTask(implicit ec: ExecutionContext): FromFuture[Task]
    = if (ec == ExecutionContext.global) defaultFromFutureScalazTask else newFromFutureScalazTask(ec)
  /** Cached instance for the default ExecutionContext. */
  private val defaultFromFutureScalazTask: FromFuture[Task] = newFromFutureScalazTask(ExecutionContext.global)
  /** Constructs a FromFuture[Task] instance from an ExecutionContext. */
  def newFromFutureScalazTask(implicit ec: ExecutionContext): FromFuture[Task] = new FromFuture[Task] {
    override def fromFuture[A](fa: Future[A]): Task[A] = {
      // Bridge the Future's completion into Task's callback-based async constructor.
      Task.async { register =>
        fa.onComplete {
          case Failure(e) => register(\\/.left(e))
          case Success(x) => register(\\/.right(x))
        }
      }
    }
  }
  /** Default instance for Recoverable[Task] */
  implicit val recoverableScalazTask: Recoverable[Task] = new Recoverable[Task] {
    override def fromEither[A](either: Either[Throwable, A]): Task[A] = either.fold(Task.fail, Task.now)
    override def fail[A](e: Throwable): Task[A] = Task.fail(e)
    // Task.attempt surfaces failures as a disjunction; convert it to a std Either.
    override def attempt[A](fa: Task[A]): Task[Either[Throwable, A]]
      = fa.attempt.map(_.toEither)
    override def attemptFold[A, B](fa: Task[A])(f: (Throwable) => B, g: A => B): Task[B]
      = fa.attempt.map(_.fold(f, g))
    override def attemptFoldWith[A, B](fa: Task[A])(f: Throwable => Task[B], g: A => Task[B]): Task[B]
      = fa.attempt.flatMap(_.fold(f, g))
    override def handle[A](fa: Task[A])(f: PartialFunction[Throwable, A]): Task[A]
      = fa.handle(f)
    override def handleWith[A](fa: Task[A])(f: PartialFunction[Throwable, Task[A]]): Task[A]
      = fa.handleWith(f)
    // Maps both channels: failures through `f`, successes through `g`.
    override def transform[A, B](fa: Task[A])(f: Throwable => Throwable, g: A => B): Task[B]
      = fa.attempt.flatMap(x => Task.fromDisjunction(x.bimap(f, g)))
    override def failMap[A](fa: Task[A])(f: Throwable => Throwable): Task[A]
      = fa.handleWith { case e => Task.fail(f(e)) }
    override def mergeEither[A](fa: Task[Either[Throwable, A]]): Task[A]
      = fa.flatMap(_.fold(Task.fail, Task.now))
  }
  /** Default instance for Sync[Task] */
  implicit val syncScalazTask: Sync[Task] = new Sync[Task] {
    // `a` is a by-value parameter, so it is evaluated before Task.now wraps it.
    override def sync[A](a: A): Task[A] = Task.now(a)
  }
}
| estatico/scala-effect | scalaz72/src/main/scala/io/estatico/effect/instances/ScalazTaskInstances.scala | Scala | apache-2.0 | 4,407 |
package com.bstek.designer.core.execution
/**
 * Producer for "update config rules" run configurations.
 *
 * NOTE(review): the class body is currently empty — this appears to be an
 * IDE-generated placeholder awaiting implementation; confirm its intended
 * role before extending or relying on it.
 */
class UpdateConfigRulesConfigurationProducer{
}
| OuYuBin/IDEADorado | dorado-core/src/com/bstek/designer/core/execution/UpdateConfigRulesConfigurationProducer.scala | Scala | apache-2.0 | 251 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.analysis
import java.util.Locale
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.dsl.plans._
import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference, MonotonicallyIncreasingID, NamedExpression}
import org.apache.spark.sql.catalyst.expressions.aggregate.Count
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical.{FlatMapGroupsWithState, _}
import org.apache.spark.sql.catalyst.streaming.InternalOutputModes._
import org.apache.spark.sql.streaming.OutputMode
import org.apache.spark.sql.types.{IntegerType, LongType, MetadataBuilder}
import org.apache.spark.unsafe.types.CalendarInterval
/**
 * A dummy command for testing unsupported operations: the suite below feeds it
 * to the streaming checker to verify that arbitrary `Command` nodes are rejected.
 */
case class DummyCommand() extends Command
class UnsupportedOperationsSuite extends SparkFunSuite {
val attribute = AttributeReference("a", IntegerType, nullable = true)()
val watermarkMetadata = new MetadataBuilder()
.withMetadata(attribute.metadata)
.putLong(EventTimeWatermark.delayKey, 1000L)
.build()
val attributeWithWatermark = attribute.withMetadata(watermarkMetadata)
val batchRelation = LocalRelation(attribute)
val streamRelation = new TestStreamingRelation(attribute)
/*
=======================================================================================
BATCH QUERIES
=======================================================================================
*/
assertSupportedInBatchPlan("local relation", batchRelation)
assertNotSupportedInBatchPlan(
"streaming source",
streamRelation,
Seq("with streaming source", "start"))
assertNotSupportedInBatchPlan(
"select on streaming source",
streamRelation.select($"count(*)"),
Seq("with streaming source", "start"))
/*
=======================================================================================
STREAMING QUERIES
=======================================================================================
*/
// Batch plan in streaming query
testError(
"streaming plan - no streaming source",
Seq("without streaming source", "start")) {
UnsupportedOperationChecker.checkForStreaming(batchRelation.select($"count(*)"), Append)
}
// Commands
assertNotSupportedInStreamingPlan(
"commmands",
DummyCommand(),
outputMode = Append,
expectedMsgs = "commands" :: Nil)
// Aggregation: Multiple streaming aggregations not supported
def aggExprs(name: String): Seq[NamedExpression] = Seq(Count("*").as(name))
assertSupportedInStreamingPlan(
"aggregate - multiple batch aggregations",
Aggregate(Nil, aggExprs("c"), Aggregate(Nil, aggExprs("d"), batchRelation)),
Append)
assertSupportedInStreamingPlan(
"aggregate - multiple aggregations but only one streaming aggregation",
Aggregate(Nil, aggExprs("c"), batchRelation).join(
Aggregate(Nil, aggExprs("d"), streamRelation), joinType = Inner),
Update)
assertNotSupportedInStreamingPlan(
"aggregate - multiple streaming aggregations",
Aggregate(Nil, aggExprs("c"), Aggregate(Nil, aggExprs("d"), streamRelation)),
outputMode = Update,
expectedMsgs = Seq("multiple streaming aggregations"))
assertSupportedInStreamingPlan(
"aggregate - streaming aggregations in update mode",
Aggregate(Nil, aggExprs("d"), streamRelation),
outputMode = Update)
assertSupportedInStreamingPlan(
"aggregate - streaming aggregations in complete mode",
Aggregate(Nil, aggExprs("d"), streamRelation),
outputMode = Complete)
assertSupportedInStreamingPlan(
"aggregate - streaming aggregations with watermark in append mode",
Aggregate(Seq(attributeWithWatermark), aggExprs("d"), streamRelation),
outputMode = Append)
assertNotSupportedInStreamingPlan(
"aggregate - streaming aggregations without watermark in append mode",
Aggregate(Nil, aggExprs("d"), streamRelation),
outputMode = Append,
expectedMsgs = Seq("streaming aggregations", "without watermark"))
// Aggregation: Distinct aggregates not supported on streaming relation
val distinctAggExprs = Seq(Count("*").toAggregateExpression(isDistinct = true).as("c"))
assertSupportedInStreamingPlan(
"distinct aggregate - aggregate on batch relation",
Aggregate(Nil, distinctAggExprs, batchRelation),
outputMode = Append)
assertNotSupportedInStreamingPlan(
"distinct aggregate - aggregate on streaming relation",
Aggregate(Nil, distinctAggExprs, streamRelation),
outputMode = Complete,
expectedMsgs = Seq("distinct aggregation"))
val att = new AttributeReference(name = "a", dataType = LongType)()
// FlatMapGroupsWithState: Both function modes equivalent and supported in batch.
for (funcMode <- Seq(Append, Update)) {
assertSupportedInBatchPlan(
s"flatMapGroupsWithState - flatMapGroupsWithState($funcMode) on batch relation",
FlatMapGroupsWithState(
null, att, att, Seq(att), Seq(att), att, null, funcMode, isMapGroupsWithState = false, null,
batchRelation))
assertSupportedInBatchPlan(
s"flatMapGroupsWithState - multiple flatMapGroupsWithState($funcMode)s on batch relation",
FlatMapGroupsWithState(
null, att, att, Seq(att), Seq(att), att, null, funcMode, isMapGroupsWithState = false, null,
FlatMapGroupsWithState(
null, att, att, Seq(att), Seq(att), att, null, funcMode, isMapGroupsWithState = false,
null, batchRelation)))
}
// FlatMapGroupsWithState(Update) in streaming without aggregation
assertSupportedInStreamingPlan(
"flatMapGroupsWithState - flatMapGroupsWithState(Update) " +
"on streaming relation without aggregation in update mode",
FlatMapGroupsWithState(
null, att, att, Seq(att), Seq(att), att, null, Update, isMapGroupsWithState = false, null,
streamRelation),
outputMode = Update)
assertNotSupportedInStreamingPlan(
"flatMapGroupsWithState - flatMapGroupsWithState(Update) " +
"on streaming relation without aggregation in append mode",
FlatMapGroupsWithState(
null, att, att, Seq(att), Seq(att), att, null, Update, isMapGroupsWithState = false, null,
streamRelation),
outputMode = Append,
expectedMsgs = Seq("flatMapGroupsWithState in update mode", "Append"))
assertNotSupportedInStreamingPlan(
"flatMapGroupsWithState - flatMapGroupsWithState(Update) " +
"on streaming relation without aggregation in complete mode",
FlatMapGroupsWithState(
null, att, att, Seq(att), Seq(att), att, null, Update, isMapGroupsWithState = false, null,
streamRelation),
outputMode = Complete,
// Disallowed by the aggregation check but let's still keep this test in case it's broken in
// future.
expectedMsgs = Seq("Complete"))
// FlatMapGroupsWithState(Update) in streaming with aggregation
for (outputMode <- Seq(Append, Update, Complete)) {
assertNotSupportedInStreamingPlan(
"flatMapGroupsWithState - flatMapGroupsWithState(Update) on streaming relation " +
s"with aggregation in $outputMode mode",
FlatMapGroupsWithState(
null, att, att, Seq(att), Seq(att), att, null, Update, isMapGroupsWithState = false, null,
Aggregate(Seq(attributeWithWatermark), aggExprs("c"), streamRelation)),
outputMode = outputMode,
expectedMsgs = Seq("flatMapGroupsWithState in update mode", "with aggregation"))
}
// FlatMapGroupsWithState(Append) in streaming without aggregation
assertSupportedInStreamingPlan(
"flatMapGroupsWithState - flatMapGroupsWithState(Append) " +
"on streaming relation without aggregation in append mode",
FlatMapGroupsWithState(
null, att, att, Seq(att), Seq(att), att, null, Append, isMapGroupsWithState = false, null,
streamRelation),
outputMode = Append)
assertNotSupportedInStreamingPlan(
"flatMapGroupsWithState - flatMapGroupsWithState(Append) " +
"on streaming relation without aggregation in update mode",
FlatMapGroupsWithState(
null, att, att, Seq(att), Seq(att), att, null, Append, isMapGroupsWithState = false, null,
streamRelation),
outputMode = Update,
expectedMsgs = Seq("flatMapGroupsWithState in append mode", "update"))
// FlatMapGroupsWithState(Append) in streaming with aggregation
for (outputMode <- Seq(Append, Update, Complete)) {
assertSupportedInStreamingPlan(
"flatMapGroupsWithState - flatMapGroupsWithState(Append) " +
s"on streaming relation before aggregation in $outputMode mode",
Aggregate(
Seq(attributeWithWatermark),
aggExprs("c"),
FlatMapGroupsWithState(
null, att, att, Seq(att), Seq(att), att, null, Append, isMapGroupsWithState = false, null,
streamRelation)),
outputMode = outputMode)
}
for (outputMode <- Seq(Append, Update)) {
assertNotSupportedInStreamingPlan(
"flatMapGroupsWithState - flatMapGroupsWithState(Append) " +
s"on streaming relation after aggregation in $outputMode mode",
FlatMapGroupsWithState(null, att, att, Seq(att), Seq(att), att, null, Append,
isMapGroupsWithState = false, null,
Aggregate(Seq(attributeWithWatermark), aggExprs("c"), streamRelation)),
outputMode = outputMode,
expectedMsgs = Seq("flatMapGroupsWithState", "after aggregation"))
}
assertNotSupportedInStreamingPlan(
"flatMapGroupsWithState - " +
"flatMapGroupsWithState(Update) on streaming relation in complete mode",
FlatMapGroupsWithState(
null, att, att, Seq(att), Seq(att), att, null, Append, isMapGroupsWithState = false, null,
streamRelation),
outputMode = Complete,
// Disallowed by the aggregation check but let's still keep this test in case it's broken in
// future.
expectedMsgs = Seq("Complete"))
// FlatMapGroupsWithState inside batch relation should always be allowed
for (funcMode <- Seq(Append, Update)) {
for (outputMode <- Seq(Append, Update)) { // Complete is not supported without aggregation
assertSupportedInStreamingPlan(
s"flatMapGroupsWithState - flatMapGroupsWithState($funcMode) on batch relation inside " +
s"streaming relation in $outputMode output mode",
FlatMapGroupsWithState(
null, att, att, Seq(att), Seq(att), att, null, funcMode, isMapGroupsWithState = false,
null, batchRelation),
outputMode = outputMode
)
}
}
// multiple FlatMapGroupsWithStates
assertSupportedInStreamingPlan(
"flatMapGroupsWithState - multiple flatMapGroupsWithStates on streaming relation and all are " +
"in append mode",
FlatMapGroupsWithState(null, att, att, Seq(att), Seq(att), att, null, Append,
isMapGroupsWithState = false, null,
FlatMapGroupsWithState(null, att, att, Seq(att), Seq(att), att, null, Append,
isMapGroupsWithState = false, null, streamRelation)),
outputMode = Append)
assertNotSupportedInStreamingPlan(
"flatMapGroupsWithState - multiple flatMapGroupsWithStates on s streaming relation but some" +
" are not in append mode",
FlatMapGroupsWithState(
null, att, att, Seq(att), Seq(att), att, null, Update, isMapGroupsWithState = false, null,
FlatMapGroupsWithState(
null, att, att, Seq(att), Seq(att), att, null, Append, isMapGroupsWithState = false, null,
streamRelation)),
outputMode = Append,
expectedMsgs = Seq("multiple flatMapGroupsWithState", "append"))
// mapGroupsWithState
assertNotSupportedInStreamingPlan(
"mapGroupsWithState - mapGroupsWithState " +
"on streaming relation without aggregation in append mode",
FlatMapGroupsWithState(
null, att, att, Seq(att), Seq(att), att, null, Update, isMapGroupsWithState = true, null,
streamRelation),
outputMode = Append,
// Disallowed by the aggregation check but let's still keep this test in case it's broken in
// future.
expectedMsgs = Seq("mapGroupsWithState", "append"))
assertNotSupportedInStreamingPlan(
"mapGroupsWithState - mapGroupsWithState " +
"on streaming relation without aggregation in complete mode",
FlatMapGroupsWithState(
null, att, att, Seq(att), Seq(att), att, null, Update, isMapGroupsWithState = true, null,
streamRelation),
outputMode = Complete,
// Disallowed by the aggregation check but let's still keep this test in case it's broken in
// future.
expectedMsgs = Seq("Complete"))
for (outputMode <- Seq(Append, Update, Complete)) {
assertNotSupportedInStreamingPlan(
"mapGroupsWithState - mapGroupsWithState on streaming relation " +
s"with aggregation in $outputMode mode",
FlatMapGroupsWithState(null, att, att, Seq(att), Seq(att), att, null, Update,
isMapGroupsWithState = true, null,
Aggregate(Seq(attributeWithWatermark), aggExprs("c"), streamRelation)),
outputMode = outputMode,
expectedMsgs = Seq("mapGroupsWithState", "with aggregation"))
}
// multiple mapGroupsWithStates
assertNotSupportedInStreamingPlan(
"mapGroupsWithState - multiple mapGroupsWithStates on streaming relation and all are " +
"in append mode",
FlatMapGroupsWithState(
null, att, att, Seq(att), Seq(att), att, null, Update, isMapGroupsWithState = true, null,
FlatMapGroupsWithState(
null, att, att, Seq(att), Seq(att), att, null, Update, isMapGroupsWithState = true, null,
streamRelation)),
outputMode = Append,
expectedMsgs = Seq("multiple mapGroupsWithStates"))
// mixing mapGroupsWithStates and flatMapGroupsWithStates
assertNotSupportedInStreamingPlan(
"mapGroupsWithState - " +
"mixing mapGroupsWithStates and flatMapGroupsWithStates on streaming relation",
FlatMapGroupsWithState(
null, att, att, Seq(att), Seq(att), att, null, Update, isMapGroupsWithState = true, null,
FlatMapGroupsWithState(
null, att, att, Seq(att), Seq(att), att, null, Update, isMapGroupsWithState = false, null,
streamRelation)
),
outputMode = Append,
expectedMsgs = Seq("Mixing mapGroupsWithStates and flatMapGroupsWithStates"))
// mapGroupsWithState with event time timeout + watermark
assertNotSupportedInStreamingPlan(
"mapGroupsWithState - mapGroupsWithState with event time timeout without watermark",
FlatMapGroupsWithState(
null, att, att, Seq(att), Seq(att), att, null, Update, isMapGroupsWithState = true,
EventTimeTimeout, streamRelation),
outputMode = Update,
expectedMsgs = Seq("watermark"))
assertSupportedInStreamingPlan(
"mapGroupsWithState - mapGroupsWithState with event time timeout with watermark",
FlatMapGroupsWithState(
null, att, att, Seq(att), Seq(att), att, null, Update, isMapGroupsWithState = true,
EventTimeTimeout, new TestStreamingRelation(attributeWithWatermark)),
outputMode = Update)
// Deduplicate
assertSupportedInStreamingPlan(
"Deduplicate - Deduplicate on streaming relation before aggregation",
Aggregate(
Seq(attributeWithWatermark),
aggExprs("c"),
Deduplicate(Seq(att), streamRelation)),
outputMode = Append)
assertNotSupportedInStreamingPlan(
"Deduplicate - Deduplicate on streaming relation after aggregation",
Deduplicate(Seq(att), Aggregate(Nil, aggExprs("c"), streamRelation)),
outputMode = Complete,
expectedMsgs = Seq("dropDuplicates"))
assertSupportedInStreamingPlan(
"Deduplicate - Deduplicate on batch relation inside a streaming query",
Deduplicate(Seq(att), batchRelation),
outputMode = Append
)
// Inner joins: Multiple stream-stream joins supported only in append mode
testBinaryOperationInStreamingPlan(
"single inner join in append mode",
_.join(_, joinType = Inner),
outputMode = Append,
streamStreamSupported = true)
testBinaryOperationInStreamingPlan(
"multiple inner joins in append mode",
(x: LogicalPlan, y: LogicalPlan) => {
x.join(y, joinType = Inner).join(streamRelation, joinType = Inner)
},
outputMode = Append,
streamStreamSupported = true)
testBinaryOperationInStreamingPlan(
"inner join in update mode",
_.join(_, joinType = Inner),
outputMode = Update,
streamStreamSupported = false,
expectedMsg = "inner join")
// Full outer joins: only batch-batch is allowed
testBinaryOperationInStreamingPlan(
"full outer join",
_.join(_, joinType = FullOuter),
streamStreamSupported = false,
batchStreamSupported = false,
streamBatchSupported = false)
// Left outer joins: *-stream not allowed
testBinaryOperationInStreamingPlan(
"left outer join",
_.join(_, joinType = LeftOuter),
batchStreamSupported = false,
streamStreamSupported = false,
expectedMsg = "outer join")
// Left outer joins: stream-stream allowed with join on watermark attribute
// Note that the attribute need not be watermarked on both sides.
assertSupportedInStreamingPlan(
s"left outer join with stream-stream relations and join on attribute with left watermark",
streamRelation.join(streamRelation, joinType = LeftOuter,
condition = Some(attributeWithWatermark === attribute)),
OutputMode.Append())
assertSupportedInStreamingPlan(
s"left outer join with stream-stream relations and join on attribute with right watermark",
streamRelation.join(streamRelation, joinType = LeftOuter,
condition = Some(attribute === attributeWithWatermark)),
OutputMode.Append())
assertNotSupportedInStreamingPlan(
s"left outer join with stream-stream relations and join on non-watermarked attribute",
streamRelation.join(streamRelation, joinType = LeftOuter,
condition = Some(attribute === attribute)),
OutputMode.Append(),
Seq("watermark in the join keys"))
// Left outer joins: stream-stream allowed with range condition yielding state value watermark
assertSupportedInStreamingPlan(
s"left outer join with stream-stream relations and state value watermark", {
val leftRelation = streamRelation
val rightTimeWithWatermark =
AttributeReference("b", IntegerType)().withMetadata(watermarkMetadata)
val rightRelation = new TestStreamingRelation(rightTimeWithWatermark)
leftRelation.join(
rightRelation,
joinType = LeftOuter,
condition = Some(attribute > rightTimeWithWatermark + 10))
},
OutputMode.Append())
// Left outer joins: stream-stream not allowed with insufficient range condition
assertNotSupportedInStreamingPlan(
s"left outer join with stream-stream relations and state value watermark", {
val leftRelation = streamRelation
val rightTimeWithWatermark =
AttributeReference("b", IntegerType)().withMetadata(watermarkMetadata)
val rightRelation = new TestStreamingRelation(rightTimeWithWatermark)
leftRelation.join(
rightRelation,
joinType = LeftOuter,
condition = Some(attribute < rightTimeWithWatermark + 10))
},
OutputMode.Append(),
Seq("appropriate range condition"))
// Left semi joins: stream-* not allowed
testBinaryOperationInStreamingPlan(
"left semi join",
_.join(_, joinType = LeftSemi),
streamStreamSupported = false,
batchStreamSupported = false,
expectedMsg = "left semi/anti joins")
// Left anti joins: stream-* not allowed
testBinaryOperationInStreamingPlan(
"left anti join",
_.join(_, joinType = LeftAnti),
streamStreamSupported = false,
batchStreamSupported = false,
expectedMsg = "left semi/anti joins")
// Right outer joins: stream-* not allowed
testBinaryOperationInStreamingPlan(
"right outer join",
_.join(_, joinType = RightOuter),
streamBatchSupported = false,
streamStreamSupported = false,
expectedMsg = "outer join")
// Right outer joins: stream-stream allowed with join on watermark attribute
// Note that the attribute need not be watermarked on both sides.
assertSupportedInStreamingPlan(
s"right outer join with stream-stream relations and join on attribute with left watermark",
streamRelation.join(streamRelation, joinType = RightOuter,
condition = Some(attributeWithWatermark === attribute)),
OutputMode.Append())
assertSupportedInStreamingPlan(
s"right outer join with stream-stream relations and join on attribute with right watermark",
streamRelation.join(streamRelation, joinType = RightOuter,
condition = Some(attribute === attributeWithWatermark)),
OutputMode.Append())
assertNotSupportedInStreamingPlan(
s"right outer join with stream-stream relations and join on non-watermarked attribute",
streamRelation.join(streamRelation, joinType = RightOuter,
condition = Some(attribute === attribute)),
OutputMode.Append(),
Seq("watermark in the join keys"))
// Right outer joins: stream-stream allowed with range condition yielding state value watermark
assertSupportedInStreamingPlan(
s"right outer join with stream-stream relations and state value watermark", {
val leftTimeWithWatermark =
AttributeReference("b", IntegerType)().withMetadata(watermarkMetadata)
val leftRelation = new TestStreamingRelation(leftTimeWithWatermark)
val rightRelation = streamRelation
leftRelation.join(
rightRelation,
joinType = RightOuter,
condition = Some(leftTimeWithWatermark + 10 < attribute))
},
OutputMode.Append())
// Right outer joins: stream-stream not allowed with insufficient range condition
assertNotSupportedInStreamingPlan(
s"right outer join with stream-stream relations and state value watermark", {
val leftTimeWithWatermark =
AttributeReference("b", IntegerType)().withMetadata(watermarkMetadata)
val leftRelation = new TestStreamingRelation(leftTimeWithWatermark)
val rightRelation = streamRelation
leftRelation.join(
rightRelation,
joinType = RightOuter,
condition = Some(leftTimeWithWatermark + 10 > attribute))
},
OutputMode.Append(),
Seq("appropriate range condition"))
// Cogroup: only batch-batch is allowed
testBinaryOperationInStreamingPlan(
"cogroup",
genCogroup,
streamStreamSupported = false,
batchStreamSupported = false,
streamBatchSupported = false)
  /**
   * Builds a CoGroup logical plan over the two child plans, used only to exercise the
   * unsupported-operation checker. The grouping key is the identity function on Int and
   * the cogroup function discards its input, since the plan is never executed.
   */
  def genCogroup(left: LogicalPlan, right: LogicalPlan): LogicalPlan = {
    // No-op cogroup function: results are irrelevant for the support check.
    def func(k: Int, left: Iterator[Int], right: Iterator[Int]): Iterator[Int] = {
      Iterator.empty
    }
    implicit val intEncoder = ExpressionEncoder[Int]
    left.cogroup[Int, Int, Int, Int](
      right,
      func,
      AppendColumns[Int, Int]((x: Int) => x, left).newColumns,
      AppendColumns[Int, Int]((x: Int) => x, right).newColumns,
      left.output,
      right.output)
  }
// Union: Mixing between stream and batch not supported
testBinaryOperationInStreamingPlan(
"union",
_.union(_),
streamBatchSupported = false,
batchStreamSupported = false)
// Except: *-stream not supported
testBinaryOperationInStreamingPlan(
"except",
_.except(_),
streamStreamSupported = false,
batchStreamSupported = false)
// Intersect: stream-stream not supported
testBinaryOperationInStreamingPlan(
"intersect",
_.intersect(_),
streamStreamSupported = false)
// Sort: supported only on batch subplans and after aggregation on streaming plan + complete mode
testUnaryOperatorInStreamingPlan("sort", Sort(Nil, true, _))
assertSupportedInStreamingPlan(
"sort - sort after aggregation in Complete output mode",
streamRelation.groupBy()(Count("*")).sortBy(),
Complete)
assertNotSupportedInStreamingPlan(
"sort - sort before aggregation in Complete output mode",
streamRelation.sortBy().groupBy()(Count("*")),
Complete,
Seq("sort", "aggregat", "complete"))
assertNotSupportedInStreamingPlan(
"sort - sort over aggregated data in Update output mode",
streamRelation.groupBy()(Count("*")).sortBy(),
Update,
Seq("sort", "aggregat", "complete")) // sort on aggregations is supported on Complete mode only
// Other unary operations
testUnaryOperatorInStreamingPlan(
"sample", Sample(0.1, 1, true, 1L, _), expectedMsg = "sampling")
testUnaryOperatorInStreamingPlan(
"window", Window(Nil, Nil, Nil, _), expectedMsg = "non-time-based windows")
// Output modes with aggregation and non-aggregation plans
testOutputMode(Append, shouldSupportAggregation = false, shouldSupportNonAggregation = true)
testOutputMode(Update, shouldSupportAggregation = true, shouldSupportNonAggregation = true)
testOutputMode(Complete, shouldSupportAggregation = true, shouldSupportNonAggregation = false)
// Unsupported expressions in streaming plan
assertNotSupportedInStreamingPlan(
"MonotonicallyIncreasingID",
streamRelation.select(MonotonicallyIncreasingID()),
outputMode = Append,
expectedMsgs = Seq("monotonically_increasing_id"))
/*
=======================================================================================
TESTING FUNCTIONS
=======================================================================================
*/
/**
* Test that an unary operator correctly fails support check when it has a streaming child plan,
* but not when it has batch child plan. There can be batch sub-plans inside a streaming plan,
* so it is valid for the operator to have a batch child plan.
*
* This test wraps the logical plan in a fake operator that makes the whole plan look like
* a streaming plan even if the child plan is a batch plan. This is to test that the operator
* supports having a batch child plan, forming a batch subplan inside a streaming plan.
*/
def testUnaryOperatorInStreamingPlan(
operationName: String,
logicalPlanGenerator: LogicalPlan => LogicalPlan,
outputMode: OutputMode = Append,
expectedMsg: String = ""): Unit = {
val expectedMsgs = if (expectedMsg.isEmpty) Seq(operationName) else Seq(expectedMsg)
assertNotSupportedInStreamingPlan(
s"$operationName with stream relation",
wrapInStreaming(logicalPlanGenerator(streamRelation)),
outputMode,
expectedMsgs)
assertSupportedInStreamingPlan(
s"$operationName with batch relation",
wrapInStreaming(logicalPlanGenerator(batchRelation)),
outputMode)
}
/**
* Test that a binary operator correctly fails support check when it has combinations of
* streaming and batch child plans. There can be batch sub-plans inside a streaming plan,
* so it is valid for the operator to have a batch child plan.
*/
def testBinaryOperationInStreamingPlan(
operationName: String,
planGenerator: (LogicalPlan, LogicalPlan) => LogicalPlan,
outputMode: OutputMode = Append,
streamStreamSupported: Boolean = true,
streamBatchSupported: Boolean = true,
batchStreamSupported: Boolean = true,
expectedMsg: String = ""): Unit = {
val expectedMsgs = if (expectedMsg.isEmpty) Seq(operationName) else Seq(expectedMsg)
if (streamStreamSupported) {
assertSupportedInStreamingPlan(
s"$operationName with stream-stream relations",
planGenerator(streamRelation, streamRelation),
outputMode)
} else {
assertNotSupportedInStreamingPlan(
s"$operationName with stream-stream relations",
planGenerator(streamRelation, streamRelation),
outputMode,
expectedMsgs)
}
if (streamBatchSupported) {
assertSupportedInStreamingPlan(
s"$operationName with stream-batch relations",
planGenerator(streamRelation, batchRelation),
outputMode)
} else {
assertNotSupportedInStreamingPlan(
s"$operationName with stream-batch relations",
planGenerator(streamRelation, batchRelation),
outputMode,
expectedMsgs)
}
if (batchStreamSupported) {
assertSupportedInStreamingPlan(
s"$operationName with batch-stream relations",
planGenerator(batchRelation, streamRelation),
outputMode)
} else {
assertNotSupportedInStreamingPlan(
s"$operationName with batch-stream relations",
planGenerator(batchRelation, streamRelation),
outputMode,
expectedMsgs)
}
assertSupportedInStreamingPlan(
s"$operationName with batch-batch relations",
planGenerator(batchRelation, batchRelation),
outputMode)
}
/** Test output mode with and without aggregation in the streaming plan */
def testOutputMode(
outputMode: OutputMode,
shouldSupportAggregation: Boolean,
shouldSupportNonAggregation: Boolean): Unit = {
// aggregation
if (shouldSupportAggregation) {
assertSupportedInStreamingPlan(
s"$outputMode output mode - aggregation",
streamRelation.groupBy("a")("count(*)"),
outputMode = outputMode)
} else {
assertNotSupportedInStreamingPlan(
s"$outputMode output mode - aggregation",
streamRelation.groupBy("a")("count(*)"),
outputMode = outputMode,
Seq("aggregation", s"$outputMode output mode"))
}
// non aggregation
if (shouldSupportNonAggregation) {
assertSupportedInStreamingPlan(
s"$outputMode output mode - no aggregation",
streamRelation.where($"a" > 1),
outputMode = outputMode)
} else {
assertNotSupportedInStreamingPlan(
s"$outputMode output mode - no aggregation",
streamRelation.where($"a" > 1),
outputMode = outputMode,
Seq("aggregation", s"$outputMode output mode"))
}
}
/**
* Assert that the logical plan is supported as subplan insider a streaming plan.
*
* To test this correctly, the given logical plan is wrapped in a fake operator that makes the
* whole plan look like a streaming plan. Otherwise, a batch plan may throw not supported
* exception simply for not being a streaming plan, even though that plan could exists as batch
* subplan inside some streaming plan.
*/
def assertSupportedInStreamingPlan(
name: String,
plan: LogicalPlan,
outputMode: OutputMode): Unit = {
test(s"streaming plan - $name: supported") {
UnsupportedOperationChecker.checkForStreaming(wrapInStreaming(plan), outputMode)
}
}
/**
* Assert that the logical plan is not supported inside a streaming plan.
*
* To test this correctly, the given logical plan is wrapped in a fake operator that makes the
* whole plan look like a streaming plan. Otherwise, a batch plan may throw not supported
* exception simply for not being a streaming plan, even though that plan could exists as batch
* subplan inside some streaming plan.
*/
def assertNotSupportedInStreamingPlan(
name: String,
plan: LogicalPlan,
outputMode: OutputMode,
expectedMsgs: Seq[String]): Unit = {
testError(
s"streaming plan - $name: not supported",
expectedMsgs :+ "streaming" :+ "DataFrame" :+ "Dataset" :+ "not supported") {
UnsupportedOperationChecker.checkForStreaming(wrapInStreaming(plan), outputMode)
}
}
/** Assert that the logical plan is supported as a batch plan */
def assertSupportedInBatchPlan(name: String, plan: LogicalPlan): Unit = {
test(s"batch plan - $name: supported") {
UnsupportedOperationChecker.checkForBatch(plan)
}
}
/** Assert that the logical plan is not supported as a batch plan */
def assertNotSupportedInBatchPlan(
name: String,
plan: LogicalPlan,
expectedMsgs: Seq[String]): Unit = {
testError(s"batch plan - $name: not supported", expectedMsgs) {
UnsupportedOperationChecker.checkForBatch(plan)
}
}
/**
* Test whether the body of code will fail. If it does fail, then check if it has expected
* messages.
*/
def testError(testName: String, expectedMsgs: Seq[String])(testBody: => Unit): Unit = {
test(testName) {
val e = intercept[AnalysisException] {
testBody
}
expectedMsgs.foreach { m =>
if (!e.getMessage.toLowerCase(Locale.ROOT).contains(m.toLowerCase(Locale.ROOT))) {
fail(s"Exception message should contain: '$m', " +
s"actual exception message:\\n\\t'${e.getMessage}'")
}
}
}
}
def wrapInStreaming(plan: LogicalPlan): LogicalPlan = {
new StreamingPlanWrapper(plan)
}
  /**
   * Pass-through unary node that reports `isStreaming = true`, making any wrapped plan
   * (including a pure batch plan) look like part of a streaming plan to the checker.
   */
  case class StreamingPlanWrapper(child: LogicalPlan) extends UnaryNode {
    override def output: Seq[Attribute] = child.output
    override def isStreaming: Boolean = true
  }
  /** Leaf relation that reports itself as streaming, used to build test plans. */
  case class TestStreamingRelation(output: Seq[Attribute]) extends LeafNode {
    // Convenience constructor for a relation with a single attribute.
    def this(attribute: Attribute) = this(Seq(attribute))
    override def isStreaming: Boolean = true
  }
}
| ddna1021/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/UnsupportedOperationsSuite.scala | Scala | apache-2.0 | 33,947 |
package org.scalaide.ui.internal.preferences
import net.miginfocom.layout.AC
import net.miginfocom.layout.CC
import net.miginfocom.layout.LC
import net.miginfocom.swt.MigLayout
import org.eclipse.core.resources.IProject
import org.eclipse.core.runtime.preferences.AbstractPreferenceInitializer
import org.eclipse.core.runtime.preferences.DefaultScope
import org.eclipse.core.runtime.preferences.InstanceScope
import org.eclipse.jdt.core.IJavaProject
import org.eclipse.jdt.internal.ui.preferences.PreferencesMessages
import org.eclipse.jface.dialogs.IInputValidator
import org.eclipse.jface.dialogs.InputDialog
import org.eclipse.jface.preference.IPreferenceStore
import org.eclipse.jface.preference.ListEditor
import org.eclipse.jface.window.Window
import org.eclipse.swt.events.SelectionEvent
import org.eclipse.swt.widgets.Button
import org.eclipse.swt.widgets.Composite
import org.eclipse.swt.widgets.Control
import org.eclipse.swt.widgets.Display
import org.eclipse.swt.widgets.Label
import org.eclipse.swt.widgets.Link
import org.eclipse.swt.SWT
import org.eclipse.ui.dialogs.PreferencesUtil
import org.eclipse.ui.dialogs.PropertyPage
import org.eclipse.ui.IWorkbench
import org.eclipse.ui.IWorkbenchPreferencePage
import org.scalaide.core.IScalaPlugin
import org.eclipse.jface.preference.RadioGroupFieldEditor
import org.eclipse.jface.preference.FieldEditor
import org.eclipse.jface.preference.BooleanFieldEditor
import org.eclipse.core.resources.ProjectScope
import org.scalaide.core.SdtConstants
/**
 * Preference page for the "organize imports" feature: group ordering, wildcard
 * imports, expand/collapse strategy, and the `scala.` prefix option.
 */
class OrganizeImportsPreferencesPage extends FieldEditors {
  import OrganizeImportsPreferences._

  /** Creates the page controls, backed by the plugin-wide preference store. */
  override def createContents(parent: Composite): Control = {
    initUnderlyingPreferenceStore(SdtConstants.PluginId, IScalaPlugin().getPreferenceStore)
    mkMainControl(parent)(createEditors)
  }

  /**
   * Opens a modal dialog asking the user for a single string value.
   *
   * Returns the entered value, or `null` when the dialog is cancelled. `null` is the
   * value `ListEditor.getNewInputObject` expects when no item should be added.
   */
  private def promptForNewEntry(prompt: String): String = {
    val dlg = new InputDialog(
      Display.getCurrent().getActiveShell(),
      "",
      prompt,
      "",
      new IInputValidator { override def isValid(text: String) = null }) // accept any input
    if (dlg.open() == Window.OK) {
      dlg.getValue()
    } else {
      null
    }
  }

  /** Builds the field editors shown on this page. */
  def createEditors(control: Composite): Unit = {
    // Ordering of import groups; the list is persisted as a '$'-separated string.
    fieldEditors += addNewFieldEditorWrappedInComposite(parent = control) { parent =>
      new ListEditor(groupsKey, "Define the sorting order of import statements.", parent) {
        allEnableDisableControls += getListControl(parent)
        allEnableDisableControls += getButtonBoxControl(parent)
        override def createList(items: Array[String]) = items.mkString("$")
        override def parseString(stringList: String) = stringList.split("\\\\$")
        override def getNewInputObject(): String = promptForNewEntry("Enter a package name:")
      }
    }

    // Strategy for multiple imports from the same package or type.
    fieldEditors += addNewFieldEditorWrappedInComposite(parent = control) { parent =>
      val options = Array(
        Array("one import statement per importee", ExpandImports.toString),
        Array("collapse into single import statement", CollapseImports.toString),
        Array("preserve existing groups", PreserveExistingGroups.toString)
      )
      new RadioGroupFieldEditor(expandCollapseKey, "Multiple imports from the same package or type:", 1, options, parent, true) {
        allEnableDisableControls += getRadioBoxControl(parent)
        allEnableDisableControls ++= getRadioBoxControl(parent).getChildren
      }
    }

    // Packages and objects that should always be imported with a wildcard.
    // (Label typo "wilcard" fixed to "wildcard".)
    fieldEditors += addNewFieldEditorWrappedInComposite(parent = control) { parent =>
      new ListEditor(wildcardsKey, "Always use wildcard imports when importing from these packages and objects:", parent) {
        // Ordering is irrelevant for wildcard imports, so hide the move buttons.
        getDownButton.setVisible(false)
        getUpButton.setVisible(false)
        allEnableDisableControls += getListControl(parent)
        allEnableDisableControls += getButtonBoxControl(parent)
        override def createList(items: Array[String]) = items.mkString("$")
        override def parseString(stringList: String) = stringList.split("\\\\$")
        override def getNewInputObject(): String =
          promptForNewEntry("Enter a fully qualified package or type name:")
      }
    }

    // Whether to drop the `scala.` package prefix.
    fieldEditors += addNewFieldEditorWrappedInComposite(parent = control) { parent =>
      new BooleanFieldEditor(omitScalaPackage, "Omit the scala package prefix", parent) {
        allEnableDisableControls += getChangeControl(parent)
      }
    }
  }

  // NOTE: "Specifc" is a typo in the inherited abstract member; it must be kept to override it.
  override def useProjectSpecifcSettingsKey = UseProjectSpecificSettingsKey

  override def pageId = PageId
}
/**
 * Preference keys and typed accessors for the "organize imports" settings.
 *
 * This object extends `Enumeration`: the declaration order of the `Value`s below
 * fixes their ids, so they should not be reordered.
 */
object OrganizeImportsPreferences extends Enumeration {
  // Key toggling between workspace-wide and per-project settings.
  val UseProjectSpecificSettingsKey = "organizeimports.useProjectSpecificSettings"
  val PageId = "org.scalaide.ui.preferences.editor.organizeImports"
  // Strategies for handling multiple imports from the same package or type.
  val ExpandImports = Value("expand")
  val CollapseImports = Value("collapse")
  val PreserveExistingGroups = Value("preserve")
  // Raw preference-store keys; list-valued preferences are stored '$'-separated.
  val groupsKey = "organizeimports.groups"
  val wildcardsKey = "organizeimports.wildcards"
  val expandCollapseKey = "organizeimports.expandcollapse"
  val omitScalaPackage = "organizeimports.scalapackage"
  // Returns the project-scoped store when project-specific settings are enabled,
  // otherwise the workspace-wide store.
  private def getPreferenceStore(project: IProject): IPreferenceStore = {
    val workspaceStore = IScalaPlugin().getPreferenceStore()
    val projectStore = new PropertyStore(new ProjectScope(project), SdtConstants.PluginId)
    val useProjectSettings = projectStore.getBoolean(UseProjectSpecificSettingsKey)
    val prefStore = if (useProjectSettings) projectStore else workspaceStore
    prefStore
  }
  /** The configured import-group ordering for the given project. */
  def getGroupsForProject(project: IProject) = {
    getPreferenceStore(project).getString(groupsKey).split("\\\\$")
  }
  /** Whether the `scala.` package prefix should be omitted for the given project. */
  def shouldOmitScalaPackage(project: IProject) = {
    getPreferenceStore(project).getBoolean(omitScalaPackage)
  }
  /** Packages and objects that should always be imported via wildcard. */
  def getWildcardImportsForProject(project: IProject) = {
    getPreferenceStore(project).getString(wildcardsKey).split("\\\\$")
  }
  // NOTE(review): this match is not exhaustive — an unknown stored value throws a
  // MatchError. The preference initializer always seeds a valid default, so this
  // should only surface with a corrupted store; confirm before relying on it.
  def getOrganizeImportStrategy(project: IProject) = {
    getPreferenceStore(project).getString(expandCollapseKey) match {
      case x if x == ExpandImports.toString => ExpandImports
      case x if x == CollapseImports.toString => CollapseImports
      case x if x == PreserveExistingGroups.toString => PreserveExistingGroups
    }
  }
}
/** Seeds the workspace-level defaults for all organize-imports preferences. */
class OrganizeImportsPreferencesInitializer extends AbstractPreferenceInitializer {

  /** Writes the default value of every organize-imports preference into the default scope. */
  override def initializeDefaultPreferences(): Unit = {
    import OrganizeImportsPreferences._
    val defaults = DefaultScope.INSTANCE.getNode(SdtConstants.PluginId)
    defaults.put(omitScalaPackage, "false")
    defaults.put(groupsKey, "java$scala$org$com")
    defaults.put(wildcardsKey, "scalaz$scalaz.Scalaz")
    defaults.put(expandCollapseKey, ExpandImports.toString)
  }
}
| romanowski/scala-ide | org.scala-ide.sdt.core/src/org/scalaide/ui/internal/preferences/OrganizeImportsPreferences.scala | Scala | bsd-3-clause | 7,068 |
package korolev.server
import java.nio.ByteBuffer
import korolev.Router
/**
 * An incoming HTTP request.
 *
 * @param path the parsed request path (see [[Router.Path]])
 * @param params request parameters as name/value pairs
 * @param cookie looks up a cookie value by name; `None` when the cookie is absent
 * @param headers request headers as (name, value) pairs; the same name may appear more than once
 * @param body the raw request body
 * @author Aleksey Fomkin <aleksey.fomkin@gmail.com>
 */
case class Request(
  path: Router.Path,
  params: Map[String, String],
  cookie: String => Option[String],
  headers: Seq[(String, String)],
  body: ByteBuffer
)
| PhilAndrew/JumpMicro | JMCloner/src/main/scala/korolev/server/Request.scala | Scala | mit | 301 |
/*
* MIT License
*
* Copyright (c) 2016 mbr targeting GmbH
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package eu.m6r.druid.client.granularities
import org.scalatest._
/**
 * Verifies that `QueryGranularity` values convert to and from their string names
 * symmetrically: `toString` yields the symbolic name, and `fromString` parses each
 * supported name back to a granularity whose `toString` matches.
 */
class QueryGranularitySpec extends FlatSpec with Matchers {
  // Every granularity name expected to be supported.
  val validGranularities =
    Seq("NONE",
        "SECOND",
        "MINUTE",
        "FIVE_MINUTE",
        "TEN_MINUTE",
        "FIFTEEN_MINUTE",
        "HOUR",
        "SIX_HOUR",
        "DAY",
        "WEEK",
        "MONTH",
        "YEAR")
  // Each granularity object's toString must equal its symbolic name.
  "Query Granularities" should "return object name from toString" in {
    QueryGranularity.NONE.toString should be("NONE")
    QueryGranularity.SECOND.toString should be("SECOND")
    QueryGranularity.MINUTE.toString should be("MINUTE")
    QueryGranularity.FIVE_MINUTE.toString should be("FIVE_MINUTE")
    QueryGranularity.TEN_MINUTE.toString should be("TEN_MINUTE")
    QueryGranularity.FIFTEEN_MINUTE.toString should be("FIFTEEN_MINUTE")
    QueryGranularity.HOUR.toString should be("HOUR")
    QueryGranularity.SIX_HOUR.toString should be("SIX_HOUR")
    QueryGranularity.DAY.toString should be("DAY")
    QueryGranularity.WEEK.toString should be("WEEK")
    QueryGranularity.MONTH.toString should be("MONTH")
    QueryGranularity.YEAR.toString should be("YEAR")
  }
  // fromString must be the inverse of toString for every supported name.
  it should "return object from string" in {
    validGranularities
      .map(s => (QueryGranularity.fromString(s), s))
      .foreach(t => t._1.toString should be(t._2))
  }
}
| mbrtargeting/druid-client | src/test/scala/eu/m6r/druid/client/granularities/QueryGranularitySpec.scala | Scala | mit | 2,455 |
// These are meant to be typed into the REPL. You can also run
// scala -Xnojline < repl-session.scala to run them all at once.
import java.awt.event._
import javax.swing._
// Mutable counter shared by all of the button's listeners.
var counter = 0
val button = new JButton("Increment")
// Java-style registration: an explicit anonymous ActionListener instance.
button.addActionListener(new ActionListener {
  override def actionPerformed(event: ActionEvent) {
    counter += 1
  }
})
// Implicit conversion that wraps a plain function into an ActionListener,
// so functions can be passed directly to addActionListener below.
implicit def makeAction(action: (ActionEvent) => Unit) =
  new ActionListener {
    override def actionPerformed(event: ActionEvent) { action(event) }
  }
// Thanks to the conversion, listeners can now be registered as function literals.
button.addActionListener((event: ActionEvent) => counter += 1)
button.addActionListener((event: ActionEvent) => println(counter))
// Exit once the counter has passed 9 (i.e. after more than ten clicks in total).
button.addActionListener((event: ActionEvent) => if (counter > 9) System.exit(0))
// Show the button in a window.
val frame = new JFrame
frame.add(button)
frame.pack()
frame.setVisible(true)
| P7h/ScalaPlayground | Scala for the Impatient/examples/ch12/sec07/repl-session.scala | Scala | apache-2.0 | 802 |
/*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2015-2021 Andre White.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.truthencode.toad.verticle
import io.vertx.core.http.HttpServerOptions
import io.vertx.core.net._
import io.vertx.core.{AbstractVerticle, AsyncResult, Handler, Promise}
import io.vertx.ext.auth.AuthProvider
import io.vertx.ext.auth.shiro.{ShiroAuth, ShiroAuthOptions, ShiroAuthRealmType}
import io.vertx.ext.web.Router
import io.vertx.ext.web.handler.StaticHandler
import io.vertx.ext.web.handler.sockjs.{SockJSBridgeOptions, SockJSHandler}
import org.slf4j.{Logger, LoggerFactory}
import sun.security.tools.keytool.CertAndKeyGen
import sun.security.x509.X500Name
import java.io.{FileOutputStream, IOException}
import java.security._
import java.security.cert.{Certificate, CertificateException}
import java.util.Date
/**
 * A [[io.vertx.core.Verticle]] which starts up the HTTP server for the web application UI. Based on the given
 * configuration, the web server may be configured for SSL using a self-generated SSL cert or a provided SSL certificate
 * file. The application accepts P12, PEM, and JKS files.
 *
 * The web server also configures handlers for the Auth Service and the
 * [[io.vertx.ext.web.handler.sockjs.SockJSHandler]] event bus bridge.
 *
 * @author
 *   <a href="https://github.com/InfoSec812">Deven Phillips</a>
 */
class WebSSLCapableServerVerticle extends AbstractVerticle with DefaultBridgeOptions {

  val logger: Logger = LoggerFactory.getLogger(getClass.getSimpleName)

  /**
   * Start this [[io.vertx.core.Verticle]] asynchronously and notify the deploying verticle on success or failure
   *
   * @param startFuture
   *   A promise with which to notify the deploying [[io.vertx.core.Verticle]] about success or failure
   * @throws Exception
   *   If there is an uncaught error.
   */
  @throws[Exception]
  // scalastyle:off
  override def start(startFuture: Promise[Void]): Unit = {
    val config = context.config().getJsonObject("webserver")
    val authCfg = context.config().getJsonObject("authentication")
    // Fail fast when the "webserver" configuration section is missing entirely.
    Option(config) match {
      case Some(_) => logger.info("SSL Config is not null")
      case None =>
        val ex = new IllegalArgumentException(
          "Missing required SSL Configuration parameters in io.vertx.core.Context.config()")
        logger.error(
          "Authorization configuration was null, please supply a valid JsonObject via DeployOptions.setConfig when deploying this Verticle",
          ex)
        throw ex
    }
    val bindAddress = config.getString("bind-address", "0.0.0.0")
    val bindPort = config.getInteger("bind-port", 8080)
    val webRoot: String = config.getString("webroot", "webroot/")
    logger.info(s"Using webroot => $webRoot")
    val router = Router.router(vertx)
    // SockJS bridge exposing the event bus to browser clients under /eventbus/*.
    val sockjs = SockJSHandler.create(vertx)
    val opts: SockJSBridgeOptions = bridgeOptions
    sockjs.bridge(opts)
    // Optional authentication provider; currently only the Shiro LDAP realm is wired up.
    var authProvider: AuthProvider = null
    if (authCfg.containsKey("auth-provider")) {
      authCfg.getString("auth-provider") match {
        case "ldap" =>
          val opts = new ShiroAuthOptions().setType(ShiroAuthRealmType.LDAP).setConfig(authCfg)
          authProvider = ShiroAuth.create(vertx, opts);
        case "jdbc" => // TODO: JDBC auth provider is not implemented yet
        case _ =>
      }
    }
    router.route("/eventbus/*").handler(sockjs)
    router.route().handler(StaticHandler.create(webRoot).setIndexPage("index.html"))
    // If SSL is requested, prepare the SSL configuration off of the event bus to prevent blocking.
    if (config.containsKey("ssl") && config.getBoolean("ssl")) {
      // Builds the HttpServerOptions (keystore loading / cert generation is blocking work).
      val fut = new Handler[Promise[HttpServerOptions]] {
        override def handle(future: Promise[HttpServerOptions]): Unit = {
          val httpOpts = new HttpServerOptions()
          if (config.containsKey("certificate-path")) {
            val certPath = config.getString("certificate-path")
            // Use a Java Keystore File
            if (certPath.toLowerCase().endsWith("jks") && config.getString("certificate-password") != null) {
              httpOpts.setKeyStoreOptions(
                new JksOptions()
                  .setPassword(config.getString("certificate-password"))
                  .setPath(certPath))
              httpOpts.setSsl(true)
              // Use a PKCS12 keystore
            } else if (config.getString("certificate-password") != null &&
              certPath.matches("^.*\\\\.(pfx|p12|PFX|P12)$")) {
              httpOpts.setPfxKeyCertOptions(
                new PfxOptions()
                  .setPassword(config.getString("certificate-password"))
                  .setPath(certPath))
              httpOpts.setSsl(true)
              // Use a PEM key/cert pair
            } else if (certPath.matches("^.*\\\\.(pem|PEM)$")) {
              httpOpts.setPemKeyCertOptions(
                new PemKeyCertOptions()
                  .setCertPath(certPath)
                  .setKeyPath(certPath))
              httpOpts.setSsl(true)
            } else {
              startFuture.fail("A certificate file was provided, but a password for that file was not.")
            }
          } else
            try {
              // Generate a self-signed key pair and certificate.
              // NOTE(review): CertAndKeyGen/X500Name are JDK-internal APIs and the
              // 1024-bit RSA key is weak by current standards; consider replacing
              // with a supported library and >= 2048-bit keys.
              logger.info("Attempting self-signed SSL")
              val store = KeyStore.getInstance("JKS")
              store.load(null, null)
              val keypair = new CertAndKeyGen("RSA", "SHA256WithRSA", null)
              val x500Name = new X500Name("localhost", "IT", "unknown", "unknown", "unknown", "unknown")
              keypair.generate(1024)
              val privKey = keypair.getPrivateKey
              val chain = new Array[Certificate](1)
              val cert: Certificate = keypair.getSelfCertificate(x500Name, new Date(), 365 * 24 * 60 * 60)
              chain(0) = cert
              store.setKeyEntry("selfsigned", privKey, "changeit".toCharArray, chain)
              store.store(new FileOutputStream(".keystore"), "changeit".toCharArray)
              httpOpts.setKeyStoreOptions(new JksOptions().setPath(".keystore").setPassword("changeit"))
              httpOpts.setSsl(true)
            } catch {
              case ex @ (_: KeyStoreException | _: IOException | _: NoSuchAlgorithmException | _: CertificateException |
                  _: NoSuchProviderException | _: InvalidKeyException | _: SignatureException) =>
                logger.error("Failed to generate a self-signed cert and other SSL configuration methods failed.", ex)
                startFuture.fail(ex)
            }
          future.complete(httpOpts)
        }
      }
      // Starts the server once the options have been prepared.
      val result = new Handler[AsyncResult[HttpServerOptions]] {
        override def handle(event: AsyncResult[HttpServerOptions]): Unit = {
          if (event.succeeded()) {
            vertx.createHttpServer(event.result()).requestHandler(router).listen(bindPort, bindAddress)
            logger.info(s"SSL Web server now listening on @ $bindAddress:$bindPort")
            // BUGFIX: the deployment promise was previously never completed on the SSL
            // path, leaving the deployment hanging. The `try*` variants are used because
            // the blocking configuration step may already have failed the promise.
            startFuture.tryComplete()
          } else {
            startFuture.tryFail(event.cause())
          }
        }
      }
      vertx.executeBlocking(fut, result)
    } else {
      // No SSL requested, start a non-SSL HTTP server.
      vertx.createHttpServer().requestHandler(router).listen(bindPort, bindAddress)
      logger.info("(Non-SSL) Web server now listening")
      startFuture.complete()
    }
  } // scalastyle:on
}
| adarro/ddo-calc | incubating/toad-api/src/main/scala/io/truthencode/toad/verticle/WebSSLCapableServerVerticle.scala | Scala | apache-2.0 | 7,958 |
The simplest possible `Applicative` we can use is `Id`:
type Id[A] = A
We already know this forms a `Monad`, so it's also an applicative functor:
val idMonad = new Monad[Id] {
def unit[A](a: => A) = a
override def flatMap[A,B](a: A)(f: A => B): B = f(a)
}
We can now implement `map` by calling `traverse`, picking `Id` as the `Applicative`:
def map[A,B](fa: F[A])(f: A => B): F[B] =
  traverse[Id, A, B](fa)(f)(idMonad)
This implementation is suggestive of laws for `traverse`, since we expect this implementation to obey the usual functor laws. See the chapter notes for discussion of the laws for `Traverse`.
Note that we can define `traverse` in terms of `sequence` and `map`, which means that a valid `Traverse` instance may define `sequence` and `map`, or just `traverse`:
trait Traverse[F[_]] extends Functor[F] {
def traverse[G[_]:Applicative,A,B](fa: F[A])(f: A => G[B]): G[F[B]] =
sequence(map(fa)(f))
def sequence[G[_]:Applicative,A](fma: F[G[A]]): G[F[A]] =
traverse(fma)(ma => ma)
def map[A,B](fa: F[A])(f: A => B): F[B] =
traverse[Id, A, B](fa)(f)(idMonad)
} | willcodejavaforfood/fpinscala | answerkey/applicative/13.answer.scala | Scala | mit | 1,162 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.codegen
import org.apache.flink.api.common.functions.{MapFunction, RichMapFunction}
import org.apache.flink.configuration.Configuration
import org.apache.flink.metrics.MetricGroup
import org.apache.flink.table.api.{TableConfig, TableException}
import org.apache.flink.table.calcite.FlinkTypeFactory
import org.apache.flink.table.codegen.FunctionCodeGenerator.generateFunction
import org.apache.flink.table.dataformat.BinaryStringUtil.safeToString
import org.apache.flink.table.dataformat.{BinaryString, BinaryStringUtil, Decimal, GenericRow}
import org.apache.flink.table.functions.{FunctionContext, UserDefinedFunction}
import org.apache.flink.table.types.logical.RowType
import org.apache.calcite.avatica.util.ByteString
import org.apache.calcite.rex.{RexBuilder, RexExecutor, RexNode}
import org.apache.calcite.sql.`type`.SqlTypeName
import org.apache.commons.lang3.StringEscapeUtils
import java.io.File
import scala.collection.JavaConverters._
/**
* Evaluates constant expressions with code generator.
*
* @param allowChangeNullability If the reduced expr's nullability can be changed, e.g. a null
* literal is definitely nullable and the other literals are
* not null.
*/
class ExpressionReducer(
    config: TableConfig,
    allowChangeNullability: Boolean = false)
  extends RexExecutor {

  // The generated MapFunction takes no real input — every expression being
  // reduced is a constant — so an empty row type/row serve as placeholders.
  private val EMPTY_ROW_TYPE = RowType.of()
  private val EMPTY_ROW = new GenericRow(0)

  override def reduce(
      rexBuilder: RexBuilder,
      constExprs: java.util.List[RexNode],
      reducedValues: java.util.List[RexNode]): Unit = {

    // Partition the input: expressions whose result type cannot be expressed
    // as a RexLiteral are dropped here and passed through unreduced below.
    val literals = constExprs.asScala.map(e => (e.getType.getSqlTypeName, e)).flatMap {
      // we don't support object literals yet, we skip those constant expressions
      case (SqlTypeName.ANY, _) |
           (SqlTypeName.ROW, _) |
           (SqlTypeName.ARRAY, _) |
           (SqlTypeName.MAP, _) |
           (SqlTypeName.MULTISET, _) => None
      case (_, e) => Some(e)
    }

    val literalTypes = literals.map(e => FlinkTypeFactory.toLogicalType(e.getType))
    val resultType = RowType.of(literalTypes: _*)

    // generate a MapFunction that evaluates all reducible expressions into
    // one output row (field i holds the value of the i-th reduced expression)
    val ctx = new ConstantCodeGeneratorContext(config)

    val exprGenerator = new ExprCodeGenerator(ctx, false)
      .bindInput(EMPTY_ROW_TYPE)

    val literalExprs = literals.map(exprGenerator.generateExpression)
    val result = exprGenerator.generateResultExpression(
      literalExprs, resultType, classOf[GenericRow])

    val generatedFunction = generateFunction[MapFunction[GenericRow, GenericRow]](
      ctx,
      "ExpressionReducer",
      classOf[MapFunction[GenericRow, GenericRow]],
      s"""
         |${result.code}
         |return ${result.resultTerm};
         |""".stripMargin,
      resultType,
      EMPTY_ROW_TYPE)

    val function = generatedFunction.newInstance(getClass.getClassLoader)
    val richMapFunction = function match {
      case r: RichMapFunction[GenericRow, GenericRow] => r
      case _ => throw new TableException("RichMapFunction[GenericRow, GenericRow] required here")
    }

    val parameters = if (config.getConf != null) config.getConf else new Configuration()
    val reduced = try {
      richMapFunction.open(parameters)
      // execute
      richMapFunction.map(EMPTY_ROW)
    } finally {
      // always release resources acquired in open(), even if evaluation failed
      richMapFunction.close()
    }

    // add the reduced results or keep them unreduced
    // `i` walks over all input expressions, `reducedIdx` only over the ones
    // that were actually evaluated into the result row.
    var i = 0
    var reducedIdx = 0
    while (i < constExprs.size()) {
      val unreduced = constExprs.get(i)
      unreduced.getType.getSqlTypeName match {
        // we insert the original expression for object literals
        case SqlTypeName.ANY |
             SqlTypeName.ROW |
             SqlTypeName.ARRAY |
             SqlTypeName.MAP |
             SqlTypeName.MULTISET =>
          reducedValues.add(unreduced)
        case SqlTypeName.VARCHAR | SqlTypeName.CHAR =>
          // escape the string so special characters survive being embedded
          // in the resulting literal
          val escapeVarchar = StringEscapeUtils
            .escapeJava(safeToString(reduced.getField(reducedIdx).asInstanceOf[BinaryString]))
          reducedValues.add(maySkipNullLiteralReduce(rexBuilder, escapeVarchar, unreduced))
          reducedIdx += 1
        case SqlTypeName.VARBINARY | SqlTypeName.BINARY =>
          // wrap non-null byte arrays into Calcite's ByteString representation
          val reducedValue = reduced.getField(reducedIdx)
          val value = if (null != reducedValue) {
            new ByteString(reduced.getField(reducedIdx).asInstanceOf[Array[Byte]])
          } else {
            reducedValue
          }
          reducedValues.add(maySkipNullLiteralReduce(rexBuilder, value, unreduced))
          reducedIdx += 1
        case SqlTypeName.DECIMAL =>
          // convert Flink's internal Decimal back to java.math.BigDecimal
          val reducedValue = reduced.getField(reducedIdx)
          val value = if (reducedValue != null) {
            reducedValue.asInstanceOf[Decimal].toBigDecimal
          } else {
            reducedValue
          }
          reducedValues.add(maySkipNullLiteralReduce(rexBuilder, value, unreduced))
          reducedIdx += 1
        case _ =>
          val reducedValue = reduced.getField(reducedIdx)
          // RexBuilder handle double literal incorrectly, convert it into BigDecimal manually
          val value = if (reducedValue != null &&
            unreduced.getType.getSqlTypeName == SqlTypeName.DOUBLE) {
            new java.math.BigDecimal(reducedValue.asInstanceOf[Number].doubleValue())
          } else {
            reducedValue
          }
          reducedValues.add(maySkipNullLiteralReduce(rexBuilder, value, unreduced))
          reducedIdx += 1
      }
      i += 1
    }
  }

  // We may skip the reduce if the original constant is invalid and casted as a null literal,
  // because reducing it would change the nullability of this RexNode and of its parent node.
  def maySkipNullLiteralReduce(
      rexBuilder: RexBuilder,
      value: Object,
      unreduced: RexNode): RexNode = {
    if (!allowChangeNullability
      && value == null
      && !unreduced.getType.isNullable) {
      return unreduced
    }

    // used for table api to '+' of two strings.
    val valueArg = if (SqlTypeName.CHAR_TYPES.contains(unreduced.getType.getSqlTypeName) &&
      value != null) {
      value.toString
    } else {
      value
    }

    // if allowChangeNullability is allowed, we can reduce the outer abstract cast if the unreduced
    // expr type is nullable.
    val targetType = if (allowChangeNullability && unreduced.getType.isNullable) {
      rexBuilder.getTypeFactory.createTypeWithNullability(unreduced.getType, false)
    } else {
      unreduced.getType
    }

    rexBuilder.makeLiteral(
      valueArg,
      targetType,
      true)
  }
}
/**
* A [[ConstantFunctionContext]] allows to obtain user-defined configuration information set
* in [[TableConfig]].
*
* @param parameters User-defined configuration set in [[TableConfig]].
*/
private class ConstantFunctionContext(parameters: Configuration) extends FunctionContext(null) {

  // Constant reduction happens at planning time, outside any running task,
  // so runtime-only services (metrics, distributed cache) are unavailable.
  override def getMetricGroup: MetricGroup = {
    throw new UnsupportedOperationException(
      "getMetricGroup is not supported when reducing expression")
  }

  override def getCachedFile(name: String): File = {
    throw new UnsupportedOperationException(
      "getCachedFile is not supported when reducing expression")
  }

  /**
   * Gets the user-defined configuration value associated with the given key as a string.
   *
   * @param key key pointing to the associated value
   * @param defaultValue default value which is returned in case user-defined configuration
   *                     value is null or there is no value associated with the given key
   * @return (default) value associated with the given key
   */
  override def getJobParameter(key: String, defaultValue: String): String = {
    parameters.getString(key, defaultValue)
  }
}
/**
* Constant expression code generator context.
*/
private class ConstantCodeGeneratorContext(tableConfig: TableConfig)
  extends CodeGeneratorContext(tableConfig) {
  // Always force a ConstantFunctionContext backed by the "parameters" term:
  // the `functionContextClass` and `runtimeContextTerm` arguments are
  // deliberately ignored, since no runtime context exists while reducing
  // constant expressions.
  override def addReusableFunction(
      function: UserDefinedFunction,
      functionContextClass: Class[_ <: FunctionContext] = classOf[FunctionContext],
      runtimeContextTerm: String = null): String = {
    super.addReusableFunction(function, classOf[ConstantFunctionContext], "parameters")
  }
}
| shaoxuan-wang/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/codegen/ExpressionReducer.scala | Scala | apache-2.0 | 9,069 |
/**
* Swaggy Jenkins
* Jenkins API clients generated from Swagger / Open API specification
*
* The version of the OpenAPI document: 1.1.2-pre.0
* Contact: blah@cliffano.com
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
* Do not edit the class manually.
*/
package io.swagger.client.model
import play.api.libs.json._
// Generated model of a Jenkins free-style project (see file header: generated
// by OpenAPI Generator, do not edit manually). Every field is Optional, so
// any subset of properties may be absent in the JSON payload.
case class FreeStyleProject (
  `class`: Option[String],
  name: Option[String],
  url: Option[String],
  color: Option[String],
  actions: Option[Seq[FreeStyleProjectactions]],
  description: Option[String],
  displayName: Option[String],
  displayNameOrNull: Option[String],
  fullDisplayName: Option[String],
  fullName: Option[String],
  buildable: Option[Boolean],
  builds: Option[Seq[FreeStyleBuild]],
  firstBuild: Option[FreeStyleBuild],
  healthReport: Option[Seq[FreeStyleProjecthealthReport]],
  inQueue: Option[Boolean],
  keepDependencies: Option[Boolean],
  lastBuild: Option[FreeStyleBuild],
  lastCompletedBuild: Option[FreeStyleBuild],
  lastFailedBuild: Option[String],
  lastStableBuild: Option[FreeStyleBuild],
  lastSuccessfulBuild: Option[FreeStyleBuild],
  lastUnstableBuild: Option[String],
  lastUnsuccessfulBuild: Option[String],
  nextBuildNumber: Option[Int],
  queueItem: Option[String],
  concurrentBuild: Option[Boolean],
  scm: Option[NullSCM]
)

object FreeStyleProject {
  // Play JSON reader/writer derived from the case class fields.
  implicit val format: Format[FreeStyleProject] = Json.format
}
| cliffano/swaggy-jenkins | clients/scala-lagom-server/generated/src/main/scala/io/swagger/client/model/FreeStyleProject.scala | Scala | mit | 1,910 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.rdd
import java.util.concurrent.ExecutorService
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.execution.command.CompactionModel
import org.apache.carbondata.events.OperationContext
import org.apache.carbondata.processing.loading.model.CarbonLoadModel
object CompactionFactory {

  /**
   * Builds the [[Compactor]] used to run a compaction. Currently a
   * [[CarbonTableCompactor]] is always returned.
   */
  def getCompactor(carbonLoadModel: CarbonLoadModel,
      compactionModel: CompactionModel,
      executor: ExecutorService,
      sqlContext: SQLContext,
      storeLocation: String,
      mergedLoads: java.util.List[String],
      operationContext: OperationContext): Compactor = {
    val tableCompactor = new CarbonTableCompactor(
      carbonLoadModel,
      compactionModel,
      executor,
      sqlContext,
      storeLocation,
      mergedLoads,
      operationContext)
    tableCompactor
  }
}
| jackylk/incubator-carbondata | integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CompactionFactory.scala | Scala | apache-2.0 | 1,665 |
package freeslick.profile.utils
import com.typesafe.config.{ ConfigException, Config }
trait TableSpaceConfig {

  /** Optional connection-level configuration; `None` when not available. */
  def connectionConfig: Option[Config] = None

  /** Tablespace for CREATE TABLE statements, if configured. */
  protected lazy val tableTableSpace: Option[String] = configuredString("tableTableSpace")

  /** Tablespace for CREATE INDEX statements, if configured. */
  protected lazy val indexTableSpace: Option[String] = configuredString("indexTableSpace")

  /**
   * Reads `key` from the connection config, treating a missing path as
   * absent. Extracted to remove the duplicated try/catch previously repeated
   * for each setting. Other ConfigExceptions (e.g. wrong type) still
   * propagate, as before.
   */
  private def configuredString(key: String): Option[String] =
    try {
      connectionConfig.map(_.getString(key))
    } catch {
      case _: ConfigException.Missing => None
    }
}
| fommil/freeslick | src/main/scala/freeslick/profile/utils/TableSpaceConfig.scala | Scala | lgpl-3.0 | 488 |
/*
* Copyright (C) 2015 Red Bull Media House GmbH <http://www.redbullmediahouse.com> - all rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.rbmhtechnology.eventuate.chaos
import java.net.InetAddress
import akka.actor._
import com.rbmhtechnology.eventuate._
import com.rbmhtechnology.eventuate.log.cassandra._
import com.typesafe.config.ConfigFactory
import scala.io.StdIn
import scala.util._
import scala.concurrent.duration._
object ChaosActor extends App with ChaosCommands {

  // Remote actor system bound to 127.0.0.1:2552 with a Cassandra-backed
  // event log ("chaos"); `seed` is used as the Cassandra contact point.
  def defaultConfig(seed: InetAddress) = ConfigFactory.parseString(
    s"""
      |akka.actor.provider = "akka.remote.RemoteActorRefProvider"
      |akka.remote.enabled-transports = ["akka.remote.netty.tcp"]
      |akka.remote.netty.tcp.hostname = "127.0.0.1"
      |akka.remote.netty.tcp.port = 2552
      |akka.test.single-expect-default = 10s
      |akka.loglevel = "ERROR"
      |
      |eventuate.log.cassandra.contact-points = ["${seed.getHostName}"]
      |eventuate.log.cassandra.replication-factor = 3
    """.stripMargin)

  // Starts the event log and the chaos actor, then blocks until the user
  // presses enter; afterwards only the actor is stopped (the system keeps
  // running until the JVM exits).
  def runChaosActor(seed: InetAddress): Unit = {
    val system = ActorSystem("chaos", defaultConfig(seed))
    val log = system.actorOf(CassandraEventLog.props("chaos"))
    val actor = system.actorOf(Props(new ChaosActor(log)))
    StdIn.readLine()
    system.stop(actor)
  }

  // seedAddress() is provided by ChaosCommands; print the stack trace and
  // do nothing if the seed node cannot be determined.
  seedAddress() match {
    case Failure(err) => err.printStackTrace()
    case Success(seed) => runChaosActor(seed)
  }
}
// Event-sourced actor that persists an integer increment every two seconds
// and logs persistence failures, exercising the event log under chaos.
class ChaosActor(val eventLog: ActorRef) extends EventsourcedActor {
  val id = "chaos"

  // persistent state: sum of all persisted increments (rebuilt on recovery)
  var state: Int = 0

  // transient state: number of failed persist attempts (not recovered)
  var failures: Int = 0

  override def onCommand: Receive = {
    // Persist each received increment; whether it succeeds or fails, the
    // next command is scheduled so the stream of attempts never stops.
    case i: Int => persist(i) {
      case Success(i) =>
        onEvent(i)
        scheduleCommand()
      case Failure(e) =>
        failures += 1
        println(s"persist failure $failures: ${e.getMessage}")
        scheduleCommand()
    }
  }

  override def onEvent: Receive = {
    case i: Int =>
      state += i
      println(s"state = $state (recovery = $recovering)")
  }

  import context.dispatcher

  // Kick off the periodic command loop once the actor is started.
  override def preStart(): Unit = {
    super.preStart()
    scheduleCommand()
  }

  // Cancel any pending tick so no message arrives after the actor stops.
  override def postStop(): Unit = {
    schedule.foreach(_.cancel())
    super.postStop()
  }

  private def scheduleCommand(): Unit =
    schedule = Some(context.system.scheduler.scheduleOnce(2.seconds, self, 1))

  private var schedule: Option[Cancellable] = None
}
| linearregression/eventuate-chaos | src/test/scala/com/rbmhtechnology/eventuate/chaos/ChaosActor.scala | Scala | apache-2.0 | 2,954 |
package org.jetbrains.plugins.scala.failed.resolve
import org.jetbrains.plugins.scala.PerfCycleTests
import org.junit.experimental.categories.Category
/**
* Created by kate on 3/29/16.
*/
@Category(Array(classOf[PerfCycleTests]))
class Scalaz extends FailedResolveTest("scalaz"){
  // The cases below resolve against the scalaz library, so it must be on
  // the test classpath.
  override protected def additionalLibraries(): Array[String] = Array("scalaz")

  // Each test is named after the issue id it reproduces (SCL-NNNN);
  // doTest() is inherited from FailedResolveTest.
  def testSCL5842A(): Unit = doTest()

  def testSCL5842B(): Unit = doTest()

  def testSCL9752(): Unit = doTest()

  def testSCL7213(): Unit = doTest()

  def testSCL10087(): Unit = doTest()

  def testSCL7227(): Unit = doTest()
}
| whorbowicz/intellij-scala | test/org/jetbrains/plugins/scala/failed/resolve/Scalaz.scala | Scala | apache-2.0 | 600 |
case class Simple(id: Double, name: String) | raghavgautam/js2Code | src/test/resources/output/scala/Simple.scala | Scala | apache-2.0 | 43 |
package com.twitter.finagle.mdns
import com.twitter.finagle.{Announcer, Announcement, Group, Resolver}
import com.twitter.util.{Future, Return, Throw, Try}
import java.lang.management.ManagementFactory
import java.net.{InetSocketAddress, SocketAddress}
import scala.collection.mutable
// Thrown when an MDNS address string does not have the expected
// "name.app.prot.domain" shape (see MDNS.parse).
class MDNSAddressException(addr: String)
  extends Exception("Invalid MDNS address \"%s\"".format(addr))
// A single discovered MDNS service registration: instance name,
// registration type (e.g. "app.prot" as produced by MDNS.parse), the
// domain, and the resolved socket address.
private case class MdnsRecord(
  name: String,
  regType: String,
  domain: String,
  addr: InetSocketAddress)
// Implementation-agnostic announcement interface; backed by either the
// DNSSD- or the JmDNS-based announcer (see MDNSAnnouncer).
private trait MDNSAnnouncerIface {
  def announce(
    addr: InetSocketAddress,
    name: String,
    regType: String,
    domain: String): Future[Announcement]
}
// Implementation-agnostic resolution interface; returns the group of MDNS
// records for a registration type within a domain (see MDNSResolver).
private trait MDNSResolverIface {
  def resolve(regType: String, domain: String): Try[Group[MdnsRecord]]
}
private object MDNS {
  /** Process id of the current JVM, or "unknown" when it cannot be parsed. */
  lazy val pid: String = {
    val runtimeName = ManagementFactory.getRuntimeMXBean.getName
    val parts = runtimeName.split("@")
    if (parts.length == 2) parts(0) else "unknown"
  }

  /** Joins the given components with '/' to build a unique service name. */
  def mkName(ps: Any*): String = ps.mkString("/")

  /** Splits "name.app.prot.domain" into (name, "app.prot", domain). */
  def parse(addr: String): (String, String, String) = {
    val segments = addr.split("\\.")
    if (segments.length == 4) (segments(0), segments(1) + "." + segments(2), segments(3))
    else throw new MDNSAddressException(addr)
  }
}
class MDNSAnnouncer extends Announcer {
  import MDNS._

  val scheme = "mdns"

  // Prefer the native DNSSD backend; fall back to JmDNS when the DNSSD
  // classes are not on the classpath. Any other error propagates unchanged.
  // (The previous `case e => throw e` clause caught every Throwable only to
  // rethrow it — a dead catch-all — and has been removed.)
  private[this] val announcer: MDNSAnnouncerIface = try {
    new DNSSDAnnouncer
  } catch {
    case _: ClassNotFoundException => new JmDNSAnnouncer
  }

  /**
   * Announce an address via MDNS.
   *
   * The addr must be in the style of `[name]._[group]._tcp.local.`
   * (e.g. myservice._twitter._tcp.local.). In order to ensure uniqueness
   * the final name will be [name]/[port]/[pid].
   */
  def announce(ia: InetSocketAddress, addr: String): Future[Announcement] = {
    val (name, regType, domain) = parse(addr)
    val serviceName = mkName(name, ia.getPort, pid)
    announcer.announce(ia, serviceName, regType, domain)
  }
}
class MDNSResolver extends Resolver {
  import MDNS._

  val scheme = "mdns"

  // Prefer the native DNSSD backend; fall back to JmDNS when the DNSSD
  // classes are not on the classpath. Any other error propagates unchanged.
  // (The previous `case e => throw e` clause caught every Throwable only to
  // rethrow it — a dead catch-all — and has been removed.)
  private[this] val resolver: MDNSResolverIface = try {
    new DNSSDResolver
  } catch {
    case _: ClassNotFoundException => new JmDNSResolver
  }

  /**
   * Resolve a service via mdns
   *
   * The address must be in the style of `[name]._[group]._tcp.local.`
   * (e.g. "myservice._twitter._tcp.local.").
   */
  def resolve(addr: String): Try[Group[SocketAddress]] = {
    val (name, regType, domain) = parse(addr)
    resolver.resolve(regType, domain) map { group =>
      // keep only records announced for the requested service name
      group collect {
        case record if record.name.startsWith(name) => record.addr
      }
    }
  }
}
| firebase/finagle | finagle-mdns/src/main/scala/com/twitter/finagle/mdns/MDNS.scala | Scala | apache-2.0 | 2,592 |
package scala.c.engine
class SignTest extends StandardTest {
"unsigned test 1" should "print the correct results" in {
val code = """
void main() {
unsigned int x = 2147483647;
printf("%d\\n", x);
}"""
checkResults(code)
}
"a char signed corner case" should "print the correct results" in {
val code = """
#include <stdio.h>
#include <stddef.h>
int my_read_char(const char *buffer, size_t *offs) {
if (buffer[*offs] != '\\0') {
return buffer[*offs++]; /* here's the trap */
} else {
return 255;
}
}
int my_read_char2(const char *buffer, size_t *offs) {
if (buffer[*offs] != '\\0') {
return (unsigned char) buffer[*offs++];
} else {
return 255;
}
}
void main() {
char blah[10] = {1, 5, 10, 100, 200, 0};
size_t offset = 1;
int i = 0;
for (i = 0; i < 5; i++) {
printf("%d\\n", my_read_char(blah, &offset));
printf("%d\\n", my_read_char2(blah, &offset));
offset += 1;
}
}"""
checkResults(code)
}
"unsigned test involving negatives" should "print the correct results" in {
val code = """
void main() {
unsigned int x = -10;
unsigned int y = 10 + x;
printf("%d\\n", y);
}"""
checkResults(code)
}
}
| bdwashbu/AstViewer | tests/scala/c/engine/SignTest.scala | Scala | gpl-3.0 | 1,409 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.