| code (stringlengths 5-1M) | repo_name (stringlengths 5-109) | path (stringlengths 6-208) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 5-1M) |
|---|---|---|---|---|---|
/**
* Copyright 2016, deepsense.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.deepsense.deeplang.doperations.readwritedataframe
import io.deepsense.deeplang.exceptions.DeepLangException
sealed abstract class FileScheme(val scheme: String) {
def pathPrefix: String = scheme + "://"
}
object FileScheme {
case object HTTP extends FileScheme("http")
case object HTTPS extends FileScheme("https")
case object FTP extends FileScheme("ftp")
case object HDFS extends FileScheme("hdfs")
case object File extends FileScheme("file")
case object Library extends FileScheme("library")
// TODO: Auto-derive values. There is a macro library for extracting sealed case objects.
val values = Seq(HTTP, HTTPS, FTP, HDFS, File, Library)
val supportedByParquet = Seq(HDFS)
def fromPath(path: String): FileScheme = {
val matchingFileSchema = values.find(schema => path.startsWith(schema.pathPrefix))
matchingFileSchema.getOrElse(throw UnknownFileSchemaForPath(path))
}
}
case class FilePath(fileScheme: FileScheme, pathWithoutScheme: String) {
def fullPath: String = fileScheme.pathPrefix + pathWithoutScheme
def verifyScheme(assertedFileScheme: FileScheme): Unit = assert(fileScheme == assertedFileScheme)
}
object FilePath {
def apply(fullPath: String): FilePath = {
val schema = FileScheme.fromPath(fullPath)
val pathWithoutSchema = fullPath.substring(schema.pathPrefix.length)
FilePath(schema, pathWithoutSchema)
}
def unapply(fullPath: String): Option[(FileScheme, String)] = unapply(FilePath(fullPath))
}
case class UnknownFileSchemaForPath(path: String) extends DeepLangException({
val allSchemes = FileScheme.values.map(_.scheme).mkString("(", ", ", ")")
s"Unknown file scheme for path $path. Known file schemes: $allSchemes"
})
| deepsense-io/seahorse-workflow-executor | deeplang/src/main/scala/io/deepsense/deeplang/doperations/readwritedataframe/FileScheme.scala | Scala | apache-2.0 | 2,315 |
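A minimal usage sketch for the FileScheme/FilePath pair above; the paths and values are illustrative only, and the constructor and extractor used are the ones defined in FileScheme.scala.

import io.deepsense.deeplang.doperations.readwritedataframe.{FilePath, FileScheme}

// Pattern match a raw path into its scheme and the remainder.
val FilePath(scheme, rest) = "hdfs://cluster/data/input.parquet"
// scheme == FileScheme.HDFS, rest == "cluster/data/input.parquet"

// Rebuild the full path from its parts.
val rebuilt = FilePath(FileScheme.HDFS, rest).fullPath
// rebuilt == "hdfs://cluster/data/input.parquet"

// A path with an unrecognized scheme fails fast:
// FilePath("s3://bucket/key") throws UnknownFileSchemaForPath, since "s3" is not in FileScheme.values.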
package swallow.core
/**
* Created by zhouqihua on 2017/6/30.
*/
object KMActorMessages {
final case class TestFlow(flow: KMFlow)
final case class ClusterSuperviseFlow(flow: KMFlow)
final case class MasterSubmitNewFlow(flow: KMFlow)
final case class MasterDispatchNewFlow(flow: KMFlow)
final case class MasterAggregateFlow(flow: KMFlow)
final case class SenderTransmitFlow(flow: KMFlow)
final case class SenderCompleteFlow(flow: KMFlow)
final case class ReceiverGetFlow(flow: KMFlow)
}
| kimihe/Swallow | swallow/src/main/scala/swallow/core/KMActorMessages.scala | Scala | apache-2.0 | 515 |
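A hedged sketch of how these messages might be handled, assuming an Akka classic Actor is on the classpath; the KMMasterSketch actor and its behaviour are hypothetical, only the message types and KMFlow come from the swallow.core package above.

package swallow.core

import akka.actor.Actor

// Hypothetical actor illustrating pattern matching on the messages in KMActorMessages.
class KMMasterSketch extends Actor {
  override def receive: Receive = {
    case MasterSubmitNewFlow(flow) =>
      // e.g. validate the submitted flow, then hand it off for dispatching
      sender() ! MasterDispatchNewFlow(flow)
    case MasterAggregateFlow(_) =>
      // e.g. merge partial results reported back by workers
      ()
    case other =>
      unhandled(other)
  }
}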
package org.jetbrains.plugins.scala.lang.resolve2
/**
* Pavel.Fatin, 02.02.2010
*/
class QualifierSourceMediateTest extends ResolveTestBase {
override def folderPath: String = {
super.folderPath + "qualifier/source/mediate/"
}
def testCaseClass() = doTest()
def testCaseClassObject() = doTest()
//TODO
// def testCaseClassObjectSyntetic = doTest
def testCaseObject() = doTest()
//TODO
// def testCaseObjectSyntetic = doTest
def testClass() = doTest()
def testObject() = doTest()
def testTrait() = doTest()
}
| ilinum/intellij-scala | test/org/jetbrains/plugins/scala/lang/resolve2/QualifierSourceMediateTest.scala | Scala | apache-2.0 | 539 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2
import org.apache.hadoop.mapreduce.RecordReader
import org.apache.spark.sql.sources.v2.reader.PartitionReader
class PartitionRecordReader[T](
private[this] var rowReader: RecordReader[_, T]) extends PartitionReader[T] {
override def next(): Boolean = rowReader.nextKeyValue()
override def get(): T = rowReader.getCurrentValue
override def close(): Unit = rowReader.close()
}
class PartitionRecordReaderWithProject[X, T](
private[this] var rowReader: RecordReader[_, X],
project: X => T) extends PartitionReader[T] {
override def next(): Boolean = rowReader.nextKeyValue()
override def get(): T = project(rowReader.getCurrentValue)
override def close(): Unit = rowReader.close()
}
| WindCanDie/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/PartitionRecordReader.scala | Scala | apache-2.0 | 1,568 |
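A small sketch showing how PartitionRecordReaderWithProject can wrap an in-memory Hadoop RecordReader and project its values; the SeqRecordReader helper and the demo object are hypothetical additions for illustration only.

import org.apache.hadoop.mapreduce.{InputSplit, RecordReader, TaskAttemptContext}
import org.apache.spark.sql.execution.datasources.v2.PartitionRecordReaderWithProject

// Hypothetical in-memory RecordReader used only to exercise the wrappers above.
class SeqRecordReader[T](values: Seq[T]) extends RecordReader[Void, T] {
  private var index = -1
  override def initialize(split: InputSplit, context: TaskAttemptContext): Unit = ()
  override def nextKeyValue(): Boolean = { index += 1; index < values.length }
  override def getCurrentKey(): Void = null
  override def getCurrentValue(): T = values(index)
  override def getProgress(): Float =
    if (values.isEmpty) 1.0f else (index + 1).toFloat / values.length
  override def close(): Unit = ()
}

object PartitionRecordReaderSketch {
  def main(args: Array[String]): Unit = {
    // Project raw strings to their lengths while iterating the "partition".
    val reader = new PartitionRecordReaderWithProject[String, Int](
      new SeqRecordReader(Seq("a", "bb", "ccc")), (s: String) => s.length)
    while (reader.next()) println(reader.get()) // prints 1, 2, 3
    reader.close()
  }
}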
package wakfutcp.protocol.messages.server
import wakfutcp.protocol.{Codec, ServerMessage}
final case class ClientPublicKeyMessage(
salt: Long,
publicKey: Array[Byte]
) extends ServerMessage {
override val id = 1034
}
object ClientPublicKeyMessage {
import Codec._
import cats.syntax.apply._
implicit val codec: Codec[ClientPublicKeyMessage] =
(long, remainingBytes)
.imapN(apply)(Function.unlift(unapply))
}
| OpenWakfu/wakfutcp | protocol/src/main/scala/wakfutcp/protocol/messages/server/ClientPublicKeyMessage.scala | Scala | mit | 434 |
package codesample.scala.coursera.course1.week2
import scala.annotation.tailrec
object HighOrderFuncsAndCurrying {
def sum(intFunc: Int => Int, left: Int, right: Int) : Int = {
if (left > right) 0
else intFunc(left) + sum(intFunc, left + 1, right)
}
def sumTail(f: Int => Int, left: Int, right: Int): Int = {
@tailrec
def loop(left: Int, acc: Int): Int = {
if (left > right) acc
else loop(left + 1, acc + f(left))
}
loop(left, 0)
}
def sumCurrying(intFunc: Int => Int) : (Int, Int) => Int = {
def sumCurryingF(left: Int, right: Int): Int = {
if (left > right) 0
else intFunc(left) + sumCurryingF(left + 1, right)
}
sumCurryingF
}
def sumScalaCurrying(intFunc: Int => Int)(left: Int, right: Int): Int = {
if (left > right) 0
else intFunc(left) + sumScalaCurrying(intFunc)(left + 1, right)
}
def mapAndReduce(intFunc: Int => Int)(left: Int, right: Int)(identity: Int, collect: (Int, Int) => Int) : Int = {
if (left > right) identity
else collect(intFunc(left), mapAndReduce(intFunc)(left + 1, right)(identity, collect))
}
def easierMapAndReduce(intFunc: Int => Int, combine: (Int, Int) => Int, identity: Int)(left: Int, right: Int): Int = {
if (left > right) identity
else combine(intFunc(left), mapAndReduceTailed(intFunc, combine, identity)(left+1, right))
}
def mapAndReduceTailed(intFunc: Int => Int, combine: (Int, Int) => Int, identity: Int)(left: Int, right: Int): Int = {
@tailrec
def loop(left: Int, accum: Int) : Int = {
if (left > right) accum
else {
loop(left + 1, combine(accum, intFunc(left)))
}
}
loop(left, identity)
}
def sumWithMapReduce(intFunc: Int => Int)(left: Int, right: Int): Int = {
easierMapAndReduce(intFunc, (x, y) => x+y, 0)(left, right)
}
def sumWithMapReduceTailed(intFunc: Int => Int)(left: Int, right: Int): Int = {
mapAndReduceTailed(intFunc, (x, y) => x+y, 0)(left, right)
}
def productOfSquaredWithMapReduceTailed(left: Int, right: Int): Int = {
mapAndReduceTailed(x => x * x, (x, y) => x*y, 1)(left, right)
}
def product(intFunc: Int => Int)(left: Int, right: Int): Int = {
if (left > right) 1
else intFunc(left) * product(intFunc)(left + 1, right)
}
def main(args: Array[String]): Unit = {
def id (element: Int) = element
def square(element: Int) = element * element
println(s"sum from 0 to 10 is: ${sum(id, 0, 10)}")
println(s"sum of squares from 0 to 10 is ${sum(square, 0, 10)}")
println(s"sum of cubes from 0 to 10 is ${sum(x => x * x * x, 0, 10)}")
println(s"tailrec: sum from 0 to 10 is: ${sumTail(x => x, 0, 10)}")
println(s"tailrec: sum of squares from 0 to 10 is ${sumTail(x => x * x, 0, 10)}")
println(s"tailrec: sum of cubes from 0 to 10 is ${sumTail(x => x * x * x, 0, 10)}")
println(s"sumCurrying: sum from 0 to 10 is: ${sumCurrying(id) (0, 10)}")
println(s"sumCurrying: sum of squares from 0 to 10 is ${sumCurrying(square) (0, 10)}")
println(s"sumCurrying: sum of cubes from 0 to 10 is ${sumCurrying(x => x * x * x) (0, 10)}")
println(s"sumScalaCurrying: sum from 0 to 10 is: ${sumScalaCurrying(id) (0, 10)}")
println(s"sumScalaCurrying: sum of squares from 0 to 10 is ${sumScalaCurrying(square) (0, 10)}")
println(s"sumScalaCurrying: sum of cubes from 0 to 10 is ${sumScalaCurrying(x => x * x * x) (0, 10)}")
println(s"mappedAndCollected: sum from 0 to 10 is: ${mapAndReduce(x => x) (0,10) (0, (x, y) => x+y) }")
println(s"mapAndReduceTailed: sum from 0 to 10 is: ${mapAndReduceTailed(x => x, (x, y) => x+y, 0) (0,10)}")
println(s"sumWithMapReduce: sum from 0 to 10 is: ${sumWithMapReduce(x => x) (0,10)}")
println(s"productOfSquaredWithMapReduceTailed: sum from 0 to 10 is: ${productOfSquaredWithMapReduceTailed(1,3)}")
}
}
| aquatir/remember_java_api | code-sample-scala/scala-coursera/src/main/scala/codesample/scala/coursera/course1/week2/HighOrderFuncsAndCurrying.scala | Scala | mit | 3,869 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package pagespecs.pages
import langswitch.Languages.{English, Welsh}
import langswitch.{Language, Languages}
import org.openqa.selenium.WebDriver
import org.scalatestplus.selenium.WebBrowser.xpath
import testsupport.RichMatchers._
class YouNeedToRequestAccessToSelfAssessmentPage(baseUrl: BaseUrl)(implicit webDriver: WebDriver) extends BasePage(baseUrl) {
import org.scalatestplus.selenium.WebBrowser._
override def path: String = "/pay-what-you-owe-in-instalments/eligibility/access-your-self-assessment-online"
override def assertPageIsDisplayed(implicit lang: Language): Unit = probing {
readPath() shouldBe path
readGlobalHeaderText().stripSpaces shouldBe Expected.GlobalHeaderText().stripSpaces
val expectedLines = Expected.MainText().stripSpaces().split("\\n")
assertContentMatchesExpectedLines(expectedLines)
}
def expectedHeadingContent(language: Language): String = language match {
case Languages.English => "You need to request access to Self Assessment"
case Languages.Welsh => "Mae’n rhaid i chi wneud cais i gael at eich cyfrif Hunanasesiad"
}
def clickTheButton() = {
val button = xpath("//*[@id=\"start\"]/div/button")
click on button
}
object Expected {
object GlobalHeaderText {
def apply()(implicit language: Language): String = language match {
case English => globalHeaderTextEnglish
case Welsh => globalHeaderTextWelsh
}
private val globalHeaderTextEnglish = """Set up a Self Assessment payment plan"""
private val globalHeaderTextWelsh = """Trefnu cynllun talu"""
}
object MainText {
def apply()(implicit language: Language): String = language match {
case English => mainTextEnglish
case Welsh => mainTextWelsh
}
private val mainTextEnglish =
"""You have not yet requested access to your Self Assessment online.
|You need to do this before you can continue to set up your payment plan.
|Request access to Self Assessment
|"""
.stripMargin
private val mainTextWelsh =
"""Nid ydych wedi gwneud cais i gael at eich cyfrif Hunanasesiad ar-lein hyd yn hyn.
|Mae angen i chi wneud hyn cyn y gallwch fynd yn eich blaen i drefnu’ch cynllun talu.
|Gwneud cais i gael mynediad at Hunanasesiad
|"""
.stripMargin
}
}
}
| hmrc/self-service-time-to-pay-frontend | test/pagespecs/pages/YouNeedToRequestAccessToSelfAssessmentPage.scala | Scala | apache-2.0 | 3,009 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.tree
import java.io.IOException
import scala.collection.mutable
import scala.collection.JavaConverters._
import org.apache.spark.Logging
import org.apache.spark.annotation.{Experimental, Since}
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.tree.configuration.Strategy
import org.apache.spark.mllib.tree.configuration.Algo._
import org.apache.spark.mllib.tree.configuration.QuantileStrategy._
import org.apache.spark.mllib.tree.impl.{BaggedPoint, DecisionTreeMetadata, NodeIdCache,
TimeTracker, TreePoint}
import org.apache.spark.mllib.tree.impurity.Impurities
import org.apache.spark.mllib.tree.model._
import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel
import org.apache.spark.util.Utils
import org.apache.spark.util.random.SamplingUtils
/**
* :: Experimental ::
* A class that implements a [[http://en.wikipedia.org/wiki/Random_forest Random Forest]]
* learning algorithm for classification and regression.
* It supports both continuous and categorical features.
*
* The settings for featureSubsetStrategy are based on the following references:
* - log2: tested in Breiman (2001)
* - sqrt: recommended by Breiman manual for random forests
* - The defaults of sqrt (classification) and onethird (regression) match the R randomForest
* package.
* @see [[http://www.stat.berkeley.edu/~breiman/randomforest2001.pdf Breiman (2001)]]
* @see [[http://www.stat.berkeley.edu/~breiman/Using_random_forests_V3.1.pdf Breiman manual for
* random forests]]
*
* @param strategy The configuration parameters for the random forest algorithm which specify
* the type of algorithm (classification, regression, etc.), feature type
* (continuous, categorical), depth of the tree, quantile calculation strategy,
* etc.
* @param numTrees If 1, then no bootstrapping is used. If > 1, then bootstrapping is done.
* @param featureSubsetStrategy Number of features to consider for splits at each node.
* Supported: "auto", "all", "sqrt", "log2", "onethird".
* If "auto" is set, this parameter is set based on numTrees:
* if numTrees == 1, set to "all";
* if numTrees > 1 (forest) set to "sqrt" for classification and
* to "onethird" for regression.
* @param seed Random seed for bootstrapping and choosing feature subsets.
*/
@Experimental
private class RandomForest (
private val strategy: Strategy,
private val numTrees: Int,
featureSubsetStrategy: String,
private val seed: Int)
extends Serializable with Logging {
/*
ALGORITHM
This is a sketch of the algorithm to help new developers.
The algorithm partitions data by instances (rows).
On each iteration, the algorithm splits a set of nodes. In order to choose the best split
for a given node, sufficient statistics are collected from the distributed data.
For each node, the statistics are collected to some worker node, and that worker selects
the best split.
This setup requires discretization of continuous features. This binning is done in the
findSplitsBins() method during initialization, after which each continuous feature becomes
an ordered discretized feature with at most maxBins possible values.
The main loop in the algorithm operates on a queue of nodes (nodeQueue). These nodes
lie at the periphery of the tree being trained. If multiple trees are being trained at once,
then this queue contains nodes from all of them. Each iteration works roughly as follows:
On the master node:
- Some number of nodes are pulled off of the queue (based on the amount of memory
required for their sufficient statistics).
- For random forests, if featureSubsetStrategy is not "all," then a subset of candidate
features are chosen for each node. See method selectNodesToSplit().
On worker nodes, via method findBestSplits():
- The worker makes one pass over its subset of instances.
- For each (tree, node, feature, split) tuple, the worker collects statistics about
splitting. Note that the set of (tree, node) pairs is limited to the nodes selected
from the queue for this iteration. The set of features considered can also be limited
based on featureSubsetStrategy.
- For each node, the statistics for that node are aggregated to a particular worker
via reduceByKey(). The designated worker chooses the best (feature, split) pair,
or chooses to stop splitting if the stopping criteria are met.
On the master node:
- The master collects all decisions about splitting nodes and updates the model.
- The updated model is passed to the workers on the next iteration.
This process continues until the node queue is empty.
Most of the methods in this implementation support the statistics aggregation, which is
the heaviest part of the computation. In general, this implementation is bound by either
the cost of statistics computation on workers or by communicating the sufficient statistics.
*/
strategy.assertValid()
require(numTrees > 0, s"RandomForest requires numTrees > 0, but was given numTrees = $numTrees.")
require(RandomForest.supportedFeatureSubsetStrategies.contains(featureSubsetStrategy),
s"RandomForest given invalid featureSubsetStrategy: $featureSubsetStrategy." +
s" Supported values: ${RandomForest.supportedFeatureSubsetStrategies.mkString(", ")}.")
/**
* Method to train a decision tree model over an RDD
* @param input Training data: RDD of [[org.apache.spark.mllib.regression.LabeledPoint]]
* @return a random forest model that can be used for prediction
*/
def run(input: RDD[LabeledPoint]): RandomForestModel = {
val timer = new TimeTracker()
timer.start("total")
timer.start("init")
val retaggedInput = input.retag(classOf[LabeledPoint])
val metadata =
DecisionTreeMetadata.buildMetadata(retaggedInput, strategy, numTrees, featureSubsetStrategy)
logDebug("algo = " + strategy.algo)
logDebug("numTrees = " + numTrees)
logDebug("seed = " + seed)
logDebug("maxBins = " + metadata.maxBins)
logDebug("featureSubsetStrategy = " + featureSubsetStrategy)
logDebug("numFeaturesPerNode = " + metadata.numFeaturesPerNode)
logDebug("subsamplingRate = " + strategy.subsamplingRate)
// Find the splits and the corresponding bins (interval between the splits) using a sample
// of the input data.
timer.start("findSplitsBins")
val (splits, bins) = DecisionTree.findSplitsBins(retaggedInput, metadata)
timer.stop("findSplitsBins")
logDebug("numBins: feature: number of bins")
logDebug(Range(0, metadata.numFeatures).map { featureIndex =>
s"\\t$featureIndex\\t${metadata.numBins(featureIndex)}"
}.mkString("\\n"))
// Bin feature values (TreePoint representation).
// Cache input RDD for speedup during multiple passes.
val treeInput = TreePoint.convertToTreeRDD(retaggedInput, bins, metadata)
val withReplacement = if (numTrees > 1) true else false
val baggedInput
= BaggedPoint.convertToBaggedRDD(treeInput,
strategy.subsamplingRate, numTrees,
withReplacement, seed).persist(StorageLevel.MEMORY_AND_DISK)
// depth of the decision tree
val maxDepth = strategy.maxDepth
require(maxDepth <= 30,
s"DecisionTree currently only supports maxDepth <= 30, but was given maxDepth = $maxDepth.")
// Max memory usage for aggregates
// TODO: Calculate memory usage more precisely.
val maxMemoryUsage: Long = strategy.maxMemoryInMB * 1024L * 1024L
logDebug("max memory usage for aggregates = " + maxMemoryUsage + " bytes.")
val maxMemoryPerNode = {
val featureSubset: Option[Array[Int]] = if (metadata.subsamplingFeatures) {
// Find numFeaturesPerNode largest bins to get an upper bound on memory usage.
Some(metadata.numBins.zipWithIndex.sortBy(- _._1)
.take(metadata.numFeaturesPerNode).map(_._2))
} else {
None
}
RandomForest.aggregateSizeForNode(metadata, featureSubset) * 8L
}
require(maxMemoryPerNode <= maxMemoryUsage,
s"RandomForest/DecisionTree given maxMemoryInMB = ${strategy.maxMemoryInMB}," +
" which is too small for the given features." +
s" Minimum value = ${maxMemoryPerNode / (1024L * 1024L)}")
timer.stop("init")
/*
* The main idea here is to perform group-wise training of the decision tree nodes thus
* reducing the passes over the data from (# nodes) to (# nodes / maxNumberOfNodesPerGroup).
* Each data sample is handled by a particular node (or it reaches a leaf and is not used
* in lower levels).
*/
// Create an RDD of node Id cache.
// At first, all the rows belong to the root nodes (node Id == 1).
val nodeIdCache = if (strategy.useNodeIdCache) {
Some(NodeIdCache.init(
data = baggedInput,
numTrees = numTrees,
checkpointInterval = strategy.checkpointInterval,
initVal = 1))
} else {
None
}
// FIFO queue of nodes to train: (treeIndex, node)
val nodeQueue = new mutable.Queue[(Int, Node)]()
val rng = new scala.util.Random()
rng.setSeed(seed)
// Allocate and queue root nodes.
val topNodes: Array[Node] = Array.fill[Node](numTrees)(Node.emptyNode(nodeIndex = 1))
Range(0, numTrees).foreach(treeIndex => nodeQueue.enqueue((treeIndex, topNodes(treeIndex))))
while (nodeQueue.nonEmpty) {
// Collect some nodes to split, and choose features for each node (if subsampling).
// Each group of nodes may come from one or multiple trees, and at multiple levels.
val (nodesForGroup, treeToNodeToIndexInfo) =
RandomForest.selectNodesToSplit(nodeQueue, maxMemoryUsage, metadata, rng)
// Sanity check (should never occur):
assert(nodesForGroup.size > 0,
s"RandomForest selected empty nodesForGroup. Error for unknown reason.")
// Choose node splits, and enqueue new nodes as needed.
timer.start("findBestSplits")
DecisionTree.findBestSplits(baggedInput, metadata, topNodes, nodesForGroup,
treeToNodeToIndexInfo, splits, bins, nodeQueue, timer, nodeIdCache = nodeIdCache)
timer.stop("findBestSplits")
}
baggedInput.unpersist()
timer.stop("total")
logInfo("Internal timing for DecisionTree:")
logInfo(s"$timer")
// Delete any remaining checkpoints used for node Id cache.
if (nodeIdCache.nonEmpty) {
try {
nodeIdCache.get.deleteAllCheckpoints()
} catch {
case e: IOException =>
logWarning(s"delete all checkpoints failed. Error reason: ${e.getMessage}")
}
}
val trees = topNodes.map(topNode => new DecisionTreeModel(topNode, strategy.algo))
new RandomForestModel(strategy.algo, trees)
}
}
@Since("1.2.0")
object RandomForest extends Serializable with Logging {
/**
* Method to train a decision tree model for binary or multiclass classification.
*
* @param input Training dataset: RDD of [[org.apache.spark.mllib.regression.LabeledPoint]].
* Labels should take values {0, 1, ..., numClasses-1}.
* @param strategy Parameters for training each tree in the forest.
* @param numTrees Number of trees in the random forest.
* @param featureSubsetStrategy Number of features to consider for splits at each node.
* Supported: "auto", "all", "sqrt", "log2", "onethird".
* If "auto" is set, this parameter is set based on numTrees:
* if numTrees == 1, set to "all";
* if numTrees > 1 (forest) set to "sqrt".
* @param seed Random seed for bootstrapping and choosing feature subsets.
* @return a random forest model that can be used for prediction
*/
@Since("1.2.0")
def trainClassifier(
input: RDD[LabeledPoint],
strategy: Strategy,
numTrees: Int,
featureSubsetStrategy: String,
seed: Int): RandomForestModel = {
require(strategy.algo == Classification,
s"RandomForest.trainClassifier given Strategy with invalid algo: ${strategy.algo}")
val rf = new RandomForest(strategy, numTrees, featureSubsetStrategy, seed)
rf.run(input)
}
/**
* Method to train a decision tree model for binary or multiclass classification.
*
* @param input Training dataset: RDD of [[org.apache.spark.mllib.regression.LabeledPoint]].
* Labels should take values {0, 1, ..., numClasses-1}.
* @param numClasses number of classes for classification.
* @param categoricalFeaturesInfo Map storing arity of categorical features.
* E.g., an entry (n -> k) indicates that feature n is categorical
* with k categories indexed from 0: {0, 1, ..., k-1}.
* @param numTrees Number of trees in the random forest.
* @param featureSubsetStrategy Number of features to consider for splits at each node.
* Supported: "auto", "all", "sqrt", "log2", "onethird".
* If "auto" is set, this parameter is set based on numTrees:
* if numTrees == 1, set to "all";
* if numTrees > 1 (forest) set to "sqrt".
* @param impurity Criterion used for information gain calculation.
* Supported values: "gini" (recommended) or "entropy".
* @param maxDepth Maximum depth of the tree.
* E.g., depth 0 means 1 leaf node; depth 1 means 1 internal node + 2 leaf nodes.
* (suggested value: 4)
* @param maxBins maximum number of bins used for splitting features
* (suggested value: 100)
* @param seed Random seed for bootstrapping and choosing feature subsets.
* @return a random forest model that can be used for prediction
*/
@Since("1.2.0")
def trainClassifier(
input: RDD[LabeledPoint],
numClasses: Int,
categoricalFeaturesInfo: Map[Int, Int],
numTrees: Int,
featureSubsetStrategy: String,
impurity: String,
maxDepth: Int,
maxBins: Int,
seed: Int = Utils.random.nextInt()): RandomForestModel = {
val impurityType = Impurities.fromString(impurity)
val strategy = new Strategy(Classification, impurityType, maxDepth,
numClasses, maxBins, Sort, categoricalFeaturesInfo)
trainClassifier(input, strategy, numTrees, featureSubsetStrategy, seed)
}
/**
* Java-friendly API for [[org.apache.spark.mllib.tree.RandomForest$#trainClassifier]]
*/
@Since("1.2.0")
def trainClassifier(
input: JavaRDD[LabeledPoint],
numClasses: Int,
categoricalFeaturesInfo: java.util.Map[java.lang.Integer, java.lang.Integer],
numTrees: Int,
featureSubsetStrategy: String,
impurity: String,
maxDepth: Int,
maxBins: Int,
seed: Int): RandomForestModel = {
trainClassifier(input.rdd, numClasses,
categoricalFeaturesInfo.asInstanceOf[java.util.Map[Int, Int]].asScala.toMap,
numTrees, featureSubsetStrategy, impurity, maxDepth, maxBins, seed)
}
/**
* Method to train a decision tree model for regression.
*
* @param input Training dataset: RDD of [[org.apache.spark.mllib.regression.LabeledPoint]].
* Labels are real numbers.
* @param strategy Parameters for training each tree in the forest.
* @param numTrees Number of trees in the random forest.
* @param featureSubsetStrategy Number of features to consider for splits at each node.
* Supported: "auto", "all", "sqrt", "log2", "onethird".
* If "auto" is set, this parameter is set based on numTrees:
* if numTrees == 1, set to "all";
* if numTrees > 1 (forest) set to "onethird".
* @param seed Random seed for bootstrapping and choosing feature subsets.
* @return a random forest model that can be used for prediction
*/
@Since("1.2.0")
def trainRegressor(
input: RDD[LabeledPoint],
strategy: Strategy,
numTrees: Int,
featureSubsetStrategy: String,
seed: Int): RandomForestModel = {
require(strategy.algo == Regression,
s"RandomForest.trainRegressor given Strategy with invalid algo: ${strategy.algo}")
val rf = new RandomForest(strategy, numTrees, featureSubsetStrategy, seed)
rf.run(input)
}
/**
* Method to train a decision tree model for regression.
*
* @param input Training dataset: RDD of [[org.apache.spark.mllib.regression.LabeledPoint]].
* Labels are real numbers.
* @param categoricalFeaturesInfo Map storing arity of categorical features.
* E.g., an entry (n -> k) indicates that feature n is categorical
* with k categories indexed from 0: {0, 1, ..., k-1}.
* @param numTrees Number of trees in the random forest.
* @param featureSubsetStrategy Number of features to consider for splits at each node.
* Supported: "auto", "all", "sqrt", "log2", "onethird".
* If "auto" is set, this parameter is set based on numTrees:
* if numTrees == 1, set to "all";
* if numTrees > 1 (forest) set to "onethird".
* @param impurity Criterion used for information gain calculation.
* Supported values: "variance".
* @param maxDepth Maximum depth of the tree.
* E.g., depth 0 means 1 leaf node; depth 1 means 1 internal node + 2 leaf nodes.
* (suggested value: 4)
* @param maxBins maximum number of bins used for splitting features
* (suggested value: 100)
* @param seed Random seed for bootstrapping and choosing feature subsets.
* @return a random forest model that can be used for prediction
*/
@Since("1.2.0")
def trainRegressor(
input: RDD[LabeledPoint],
categoricalFeaturesInfo: Map[Int, Int],
numTrees: Int,
featureSubsetStrategy: String,
impurity: String,
maxDepth: Int,
maxBins: Int,
seed: Int = Utils.random.nextInt()): RandomForestModel = {
val impurityType = Impurities.fromString(impurity)
val strategy = new Strategy(Regression, impurityType, maxDepth,
0, maxBins, Sort, categoricalFeaturesInfo)
trainRegressor(input, strategy, numTrees, featureSubsetStrategy, seed)
}
/**
* Java-friendly API for [[org.apache.spark.mllib.tree.RandomForest$#trainRegressor]]
*/
@Since("1.2.0")
def trainRegressor(
input: JavaRDD[LabeledPoint],
categoricalFeaturesInfo: java.util.Map[java.lang.Integer, java.lang.Integer],
numTrees: Int,
featureSubsetStrategy: String,
impurity: String,
maxDepth: Int,
maxBins: Int,
seed: Int): RandomForestModel = {
trainRegressor(input.rdd,
categoricalFeaturesInfo.asInstanceOf[java.util.Map[Int, Int]].asScala.toMap,
numTrees, featureSubsetStrategy, impurity, maxDepth, maxBins, seed)
}
/**
* List of supported feature subset sampling strategies.
*/
@Since("1.2.0")
val supportedFeatureSubsetStrategies: Array[String] =
Array("auto", "all", "sqrt", "log2", "onethird")
private[tree] class NodeIndexInfo(
val nodeIndexInGroup: Int,
val featureSubset: Option[Array[Int]]) extends Serializable
/**
* Pull nodes off of the queue, and collect a group of nodes to be split on this iteration.
* This tracks the memory usage for aggregates and stops adding nodes when too much memory
* will be needed; this allows an adaptive number of nodes since different nodes may require
* different amounts of memory (if featureSubsetStrategy is not "all").
*
* @param nodeQueue Queue of nodes to split.
* @param maxMemoryUsage Bound on size of aggregate statistics.
* @return (nodesForGroup, treeToNodeToIndexInfo).
* nodesForGroup holds the nodes to split: treeIndex --> nodes in tree.
*
* treeToNodeToIndexInfo holds indices selected features for each node:
* treeIndex --> (global) node index --> (node index in group, feature indices).
* The (global) node index is the index in the tree; the node index in group is the
* index in [0, numNodesInGroup) of the node in this group.
* The feature indices are None if not subsampling features.
*/
private[tree] def selectNodesToSplit(
nodeQueue: mutable.Queue[(Int, Node)],
maxMemoryUsage: Long,
metadata: DecisionTreeMetadata,
rng: scala.util.Random): (Map[Int, Array[Node]], Map[Int, Map[Int, NodeIndexInfo]]) = {
// Collect some nodes to split:
// nodesForGroup(treeIndex) = nodes to split
val mutableNodesForGroup = new mutable.HashMap[Int, mutable.ArrayBuffer[Node]]()
val mutableTreeToNodeToIndexInfo =
new mutable.HashMap[Int, mutable.HashMap[Int, NodeIndexInfo]]()
var memUsage: Long = 0L
var numNodesInGroup = 0
while (nodeQueue.nonEmpty && memUsage < maxMemoryUsage) {
val (treeIndex, node) = nodeQueue.head
// Choose subset of features for node (if subsampling).
val featureSubset: Option[Array[Int]] = if (metadata.subsamplingFeatures) {
Some(SamplingUtils.reservoirSampleAndCount(Range(0,
metadata.numFeatures).iterator, metadata.numFeaturesPerNode, rng.nextLong)._1)
} else {
None
}
// Check if enough memory remains to add this node to the group.
val nodeMemUsage = RandomForest.aggregateSizeForNode(metadata, featureSubset) * 8L
if (memUsage + nodeMemUsage <= maxMemoryUsage) {
nodeQueue.dequeue()
mutableNodesForGroup.getOrElseUpdate(treeIndex, new mutable.ArrayBuffer[Node]()) += node
mutableTreeToNodeToIndexInfo
.getOrElseUpdate(treeIndex, new mutable.HashMap[Int, NodeIndexInfo]())(node.id)
= new NodeIndexInfo(numNodesInGroup, featureSubset)
}
numNodesInGroup += 1
memUsage += nodeMemUsage
}
// Convert mutable maps to immutable ones.
val nodesForGroup: Map[Int, Array[Node]] = mutableNodesForGroup.mapValues(_.toArray).toMap
val treeToNodeToIndexInfo = mutableTreeToNodeToIndexInfo.mapValues(_.toMap).toMap
(nodesForGroup, treeToNodeToIndexInfo)
}
/**
* Get the number of values to be stored for this node in the bin aggregates.
* @param featureSubset Indices of features which may be split at this node.
* If None, then use all features.
*/
private[tree] def aggregateSizeForNode(
metadata: DecisionTreeMetadata,
featureSubset: Option[Array[Int]]): Long = {
val totalBins = if (featureSubset.nonEmpty) {
featureSubset.get.map(featureIndex => metadata.numBins(featureIndex).toLong).sum
} else {
metadata.numBins.map(_.toLong).sum
}
if (metadata.isClassification) {
metadata.numClasses * totalBins
} else {
3 * totalBins
}
}
}
| practice-vishnoi/dev-spark-1 | mllib/src/main/scala/org/apache/spark/mllib/tree/RandomForest.scala | Scala | apache-2.0 | 24,380 |
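A short, hedged example of calling the RandomForest.trainClassifier overload documented above on a toy dataset; the two-point RDD and local SparkContext are illustrative only.

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.tree.RandomForest

object RandomForestSketch {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("rf-sketch").setMaster("local[*]"))
    // A toy two-class dataset; real training data would come from a larger source.
    val data = sc.parallelize(Seq(
      LabeledPoint(0.0, Vectors.dense(0.0, 1.0)),
      LabeledPoint(1.0, Vectors.dense(1.0, 0.0))))
    // Arguments: input, numClasses, categoricalFeaturesInfo, numTrees,
    // featureSubsetStrategy, impurity, maxDepth, maxBins, seed.
    val model = RandomForest.trainClassifier(
      data, 2, Map.empty[Int, Int], 10, "auto", "gini", 4, 32, 12345)
    println(model.predict(Vectors.dense(0.5, 0.5)))
    sc.stop()
  }
}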
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.json
import java.io.{ByteArrayOutputStream, CharConversionException}
import java.nio.charset.MalformedInputException
import scala.collection.mutable.ArrayBuffer
import scala.util.control.NonFatal
import com.fasterxml.jackson.core._
import org.apache.spark.SparkUpgradeException
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.{InternalRow, NoopFilters, StructFilters}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.catalyst.util.LegacyDateFormats.FAST_DATE_FORMAT
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.sources.Filter
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.{CalendarInterval, UTF8String}
import org.apache.spark.util.Utils
/**
* Constructs a parser for a given schema that translates a json string to an [[InternalRow]].
*/
class JacksonParser(
schema: DataType,
val options: JSONOptions,
allowArrayAsStructs: Boolean,
filters: Seq[Filter] = Seq.empty) extends Logging {
import JacksonUtils._
import com.fasterxml.jackson.core.JsonToken._
// A `ValueConverter` is responsible for converting a value from `JsonParser`
// to a value in a field for `InternalRow`.
private type ValueConverter = JsonParser => AnyRef
// `ValueConverter`s for the root schema for all fields in the schema
private val rootConverter = makeRootConverter(schema)
private val factory = options.buildJsonFactory()
private lazy val timestampFormatter = TimestampFormatter(
options.timestampFormat,
options.zoneId,
options.locale,
legacyFormat = FAST_DATE_FORMAT,
isParsing = true)
private lazy val dateFormatter = DateFormatter(
options.dateFormat,
options.zoneId,
options.locale,
legacyFormat = FAST_DATE_FORMAT,
isParsing = true)
/**
* Create a converter which converts the JSON documents held by the `JsonParser`
* to a value according to a desired schema. This is a wrapper for the method
* `makeConverter()` to handle a row wrapped with an array.
*/
private def makeRootConverter(dt: DataType): JsonParser => Iterable[InternalRow] = {
dt match {
case st: StructType => makeStructRootConverter(st)
case mt: MapType => makeMapRootConverter(mt)
case at: ArrayType => makeArrayRootConverter(at)
}
}
private def makeStructRootConverter(st: StructType): JsonParser => Iterable[InternalRow] = {
val elementConverter = makeConverter(st)
val fieldConverters = st.map(_.dataType).map(makeConverter).toArray
val jsonFilters = if (SQLConf.get.jsonFilterPushDown) {
new JsonFilters(filters, st)
} else {
new NoopFilters
}
(parser: JsonParser) => parseJsonToken[Iterable[InternalRow]](parser, st) {
case START_OBJECT => convertObject(parser, st, fieldConverters, jsonFilters, isRoot = true)
// SPARK-3308: support reading top level JSON arrays and take every element
// in such an array as a row
//
// For example, we support, the JSON data as below:
//
// [{"a":"str_a_1"}]
// [{"a":"str_a_2"}, {"b":"str_b_3"}]
//
// resulting in:
//
// List([str_a_1,null])
// List([str_a_2,null], [null,str_b_3])
//
case START_ARRAY if allowArrayAsStructs =>
val array = convertArray(parser, elementConverter)
// Here, as we support reading top level JSON arrays and take every element
// in such an array as a row, this case is possible.
if (array.numElements() == 0) {
Array.empty[InternalRow]
} else {
array.toArray[InternalRow](schema)
}
case START_ARRAY =>
throw new RuntimeException("Parsing JSON arrays as structs is forbidden.")
}
}
private def makeMapRootConverter(mt: MapType): JsonParser => Iterable[InternalRow] = {
val fieldConverter = makeConverter(mt.valueType)
(parser: JsonParser) => parseJsonToken[Iterable[InternalRow]](parser, mt) {
case START_OBJECT => Some(InternalRow(convertMap(parser, fieldConverter)))
}
}
private def makeArrayRootConverter(at: ArrayType): JsonParser => Iterable[InternalRow] = {
val elemConverter = makeConverter(at.elementType)
(parser: JsonParser) => parseJsonToken[Iterable[InternalRow]](parser, at) {
case START_ARRAY => Some(InternalRow(convertArray(parser, elemConverter)))
case START_OBJECT if at.elementType.isInstanceOf[StructType] =>
// This handles the case when an input JSON object is a structure but
// the specified schema is an array of structures. In that case, the input JSON is
// considered as an array of only one element of struct type.
// This behavior was introduced by changes for SPARK-19595.
//
// For example, if the specified schema is ArrayType(new StructType().add("i", IntegerType))
// and JSON input as below:
//
// [{"i": 1}, {"i": 2}]
// [{"i": 3}]
// {"i": 4}
//
// The last row is considered as an array with one element, and result of conversion:
//
// Seq(Row(1), Row(2))
// Seq(Row(3))
// Seq(Row(4))
//
val st = at.elementType.asInstanceOf[StructType]
val fieldConverters = st.map(_.dataType).map(makeConverter).toArray
Some(InternalRow(new GenericArrayData(convertObject(parser, st, fieldConverters).toArray)))
}
}
private val decimalParser = ExprUtils.getDecimalParser(options.locale)
/**
* Create a converter which converts the JSON documents held by the `JsonParser`
* to a value according to a desired schema.
*/
def makeConverter(dataType: DataType): ValueConverter = dataType match {
case BooleanType =>
(parser: JsonParser) => parseJsonToken[java.lang.Boolean](parser, dataType) {
case VALUE_TRUE => true
case VALUE_FALSE => false
}
case ByteType =>
(parser: JsonParser) => parseJsonToken[java.lang.Byte](parser, dataType) {
case VALUE_NUMBER_INT => parser.getByteValue
}
case ShortType =>
(parser: JsonParser) => parseJsonToken[java.lang.Short](parser, dataType) {
case VALUE_NUMBER_INT => parser.getShortValue
}
case IntegerType =>
(parser: JsonParser) => parseJsonToken[java.lang.Integer](parser, dataType) {
case VALUE_NUMBER_INT => parser.getIntValue
}
case LongType =>
(parser: JsonParser) => parseJsonToken[java.lang.Long](parser, dataType) {
case VALUE_NUMBER_INT => parser.getLongValue
}
case FloatType =>
(parser: JsonParser) => parseJsonToken[java.lang.Float](parser, dataType) {
case VALUE_NUMBER_INT | VALUE_NUMBER_FLOAT =>
parser.getFloatValue
case VALUE_STRING if parser.getTextLength >= 1 =>
// Special case handling for NaN and Infinity.
parser.getText match {
case "NaN" => Float.NaN
case "Infinity" => Float.PositiveInfinity
case "-Infinity" => Float.NegativeInfinity
case other => throw new RuntimeException(
s"Cannot parse $other as ${FloatType.catalogString}.")
}
}
case DoubleType =>
(parser: JsonParser) => parseJsonToken[java.lang.Double](parser, dataType) {
case VALUE_NUMBER_INT | VALUE_NUMBER_FLOAT =>
parser.getDoubleValue
case VALUE_STRING if parser.getTextLength >= 1 =>
// Special case handling for NaN and Infinity.
parser.getText match {
case "NaN" => Double.NaN
case "Infinity" => Double.PositiveInfinity
case "-Infinity" => Double.NegativeInfinity
case other =>
throw new RuntimeException(s"Cannot parse $other as ${DoubleType.catalogString}.")
}
}
case StringType =>
(parser: JsonParser) => parseJsonToken[UTF8String](parser, dataType) {
case VALUE_STRING =>
UTF8String.fromString(parser.getText)
case _ =>
// Note that it always tries to convert the data as string without the case of failure.
val writer = new ByteArrayOutputStream()
Utils.tryWithResource(factory.createGenerator(writer, JsonEncoding.UTF8)) {
generator => generator.copyCurrentStructure(parser)
}
UTF8String.fromBytes(writer.toByteArray)
}
case TimestampType =>
(parser: JsonParser) => parseJsonToken[java.lang.Long](parser, dataType) {
case VALUE_STRING if parser.getTextLength >= 1 =>
try {
timestampFormatter.parse(parser.getText)
} catch {
case NonFatal(e) =>
// If fails to parse, then tries the way used in 2.0 and 1.x for backwards
// compatibility.
val str = DateTimeUtils.cleanLegacyTimestampStr(UTF8String.fromString(parser.getText))
DateTimeUtils.stringToTimestamp(str, options.zoneId).getOrElse(throw e)
}
case VALUE_NUMBER_INT =>
parser.getLongValue * 1000000L
}
case DateType =>
(parser: JsonParser) => parseJsonToken[java.lang.Integer](parser, dataType) {
case VALUE_STRING if parser.getTextLength >= 1 =>
try {
dateFormatter.parse(parser.getText)
} catch {
case NonFatal(e) =>
// If fails to parse, then tries the way used in 2.0 and 1.x for backwards
// compatibility.
val str = DateTimeUtils.cleanLegacyTimestampStr(UTF8String.fromString(parser.getText))
DateTimeUtils.stringToDate(str, options.zoneId).getOrElse {
// In Spark 1.5.0, we store the data as number of days since epoch in string.
// So, we just convert it to Int.
try {
RebaseDateTime.rebaseJulianToGregorianDays(parser.getText.toInt)
} catch {
case _: NumberFormatException => throw e
}
}.asInstanceOf[Integer]
}
}
case BinaryType =>
(parser: JsonParser) => parseJsonToken[Array[Byte]](parser, dataType) {
case VALUE_STRING => parser.getBinaryValue
}
case dt: DecimalType =>
(parser: JsonParser) => parseJsonToken[Decimal](parser, dataType) {
case (VALUE_NUMBER_INT | VALUE_NUMBER_FLOAT) =>
Decimal(parser.getDecimalValue, dt.precision, dt.scale)
case VALUE_STRING if parser.getTextLength >= 1 =>
val bigDecimal = decimalParser(parser.getText)
Decimal(bigDecimal, dt.precision, dt.scale)
}
case CalendarIntervalType => (parser: JsonParser) =>
parseJsonToken[CalendarInterval](parser, dataType) {
case VALUE_STRING =>
IntervalUtils.safeStringToInterval(UTF8String.fromString(parser.getText))
}
case st: StructType =>
val fieldConverters = st.map(_.dataType).map(makeConverter).toArray
(parser: JsonParser) => parseJsonToken[InternalRow](parser, dataType) {
case START_OBJECT => convertObject(parser, st, fieldConverters).get
}
case at: ArrayType =>
val elementConverter = makeConverter(at.elementType)
(parser: JsonParser) => parseJsonToken[ArrayData](parser, dataType) {
case START_ARRAY => convertArray(parser, elementConverter)
}
case mt: MapType =>
val valueConverter = makeConverter(mt.valueType)
(parser: JsonParser) => parseJsonToken[MapData](parser, dataType) {
case START_OBJECT => convertMap(parser, valueConverter)
}
case udt: UserDefinedType[_] =>
makeConverter(udt.sqlType)
case _ =>
(parser: JsonParser) =>
// Here, we pass empty `PartialFunction` so that this case can be
// handled as a failed conversion. It will throw an exception as
// long as the value is not null.
parseJsonToken[AnyRef](parser, dataType)(PartialFunction.empty[JsonToken, AnyRef])
}
/**
* This method skips `FIELD_NAME`s at the beginning, and handles nulls ahead before trying
* to parse the JSON token using given function `f`. If the `f` failed to parse and convert the
* token, call `failedConversion` to handle the token.
*/
private def parseJsonToken[R >: Null](
parser: JsonParser,
dataType: DataType)(f: PartialFunction[JsonToken, R]): R = {
parser.getCurrentToken match {
case FIELD_NAME =>
// There are useless FIELD_NAMEs between START_OBJECT and END_OBJECT tokens
parser.nextToken()
parseJsonToken[R](parser, dataType)(f)
case null | VALUE_NULL => null
case other => f.applyOrElse(other, failedConversion(parser, dataType))
}
}
private val allowEmptyString = SQLConf.get.getConf(SQLConf.LEGACY_ALLOW_EMPTY_STRING_IN_JSON)
/**
* This function throws an exception for failed conversion. For empty string on data types
* except for string and binary types, this also throws an exception.
*/
private def failedConversion[R >: Null](
parser: JsonParser,
dataType: DataType): PartialFunction[JsonToken, R] = {
// SPARK-25040: Disallows empty strings for data types except for string and binary types.
// But treats empty strings as null for certain types if the legacy config is enabled.
case VALUE_STRING if parser.getTextLength < 1 && allowEmptyString =>
dataType match {
case FloatType | DoubleType | TimestampType | DateType =>
throw new RuntimeException(
s"Failed to parse an empty string for data type ${dataType.catalogString}")
case _ => null
}
case VALUE_STRING if parser.getTextLength < 1 =>
throw new RuntimeException(
s"Failed to parse an empty string for data type ${dataType.catalogString}")
case token =>
// We cannot parse this token based on the given data type. So, we throw a
// RuntimeException and this exception will be caught by `parse` method.
throw new RuntimeException(
s"Failed to parse a value for data type ${dataType.catalogString} (current token: $token).")
}
/**
* Parse an object from the token stream into a new Row representing the schema.
* Fields in the json that are not defined in the requested schema will be dropped.
*/
private def convertObject(
parser: JsonParser,
schema: StructType,
fieldConverters: Array[ValueConverter],
structFilters: StructFilters = new NoopFilters(),
isRoot: Boolean = false): Option[InternalRow] = {
val row = new GenericInternalRow(schema.length)
var badRecordException: Option[Throwable] = None
var skipRow = false
structFilters.reset()
while (!skipRow && nextUntil(parser, JsonToken.END_OBJECT)) {
schema.getFieldIndex(parser.getCurrentName) match {
case Some(index) =>
try {
row.update(index, fieldConverters(index).apply(parser))
skipRow = structFilters.skipRow(row, index)
} catch {
case e: SparkUpgradeException => throw e
case NonFatal(e) if isRoot =>
badRecordException = badRecordException.orElse(Some(e))
parser.skipChildren()
}
case None =>
parser.skipChildren()
}
}
if (skipRow) {
None
} else if (badRecordException.isEmpty) {
Some(row)
} else {
throw PartialResultException(row, badRecordException.get)
}
}
/**
* Parse an object as a Map, preserving all fields.
*/
private def convertMap(
parser: JsonParser,
fieldConverter: ValueConverter): MapData = {
val keys = ArrayBuffer.empty[UTF8String]
val values = ArrayBuffer.empty[Any]
while (nextUntil(parser, JsonToken.END_OBJECT)) {
keys += UTF8String.fromString(parser.getCurrentName)
values += fieldConverter.apply(parser)
}
// The JSON map will never have null or duplicated map keys, it's safe to create a
// ArrayBasedMapData directly here.
ArrayBasedMapData(keys.toArray, values.toArray)
}
/**
* Parse an object as a Array.
*/
private def convertArray(
parser: JsonParser,
fieldConverter: ValueConverter): ArrayData = {
val values = ArrayBuffer.empty[Any]
while (nextUntil(parser, JsonToken.END_ARRAY)) {
values += fieldConverter.apply(parser)
}
new GenericArrayData(values.toArray)
}
/**
* Parse the JSON input to the set of [[InternalRow]]s.
*
* @param recordLiteral an optional function that will be used to generate
* the corrupt record text instead of record.toString
*/
def parse[T](
record: T,
createParser: (JsonFactory, T) => JsonParser,
recordLiteral: T => UTF8String): Iterable[InternalRow] = {
try {
Utils.tryWithResource(createParser(factory, record)) { parser =>
// a null first token is equivalent to testing for input.trim.isEmpty
// but it works on any token stream and not just strings
parser.nextToken() match {
case null => None
case _ => rootConverter.apply(parser) match {
case null => throw new RuntimeException("Root converter returned null")
case rows => rows.toSeq
}
}
}
} catch {
case e: SparkUpgradeException => throw e
case e @ (_: RuntimeException | _: JsonProcessingException | _: MalformedInputException) =>
// JSON parser currently doesn't support partial results for corrupted records.
// For such records, all fields other than the field configured by
// `columnNameOfCorruptRecord` are set to `null`.
throw BadRecordException(() => recordLiteral(record), () => None, e)
case e: CharConversionException if options.encoding.isEmpty =>
val msg =
"""JSON parser cannot handle a character in its input.
|Specifying encoding as an input option explicitly might help to resolve the issue.
|""".stripMargin + e.getMessage
val wrappedCharException = new CharConversionException(msg)
wrappedCharException.initCause(e)
throw BadRecordException(() => recordLiteral(record), () => None, wrappedCharException)
case PartialResultException(row, cause) =>
throw BadRecordException(
record = () => recordLiteral(record),
partialResult = () => Some(row),
cause)
}
}
}
| witgo/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/json/JacksonParser.scala | Scala | apache-2.0 | 19,373 |
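A rough sketch of driving the JacksonParser above directly; it assumes the snippet is compiled inside Spark's own source tree (JSONOptions is package-private to org.apache.spark.sql) and uses CreateJacksonParser.string from the same package. The schema and JSON input are illustrative only.

package org.apache.spark.sql.catalyst.json

import org.apache.spark.sql.types.{IntegerType, StringType, StructType}
import org.apache.spark.unsafe.types.UTF8String

object JacksonParserSketch {
  def main(args: Array[String]): Unit = {
    val schema = new StructType().add("name", StringType).add("age", IntegerType)
    // Empty options, UTC session time zone, no corrupt-record column.
    val options = new JSONOptions(Map.empty[String, String], "UTC", "")
    val parser = new JacksonParser(schema, options, allowArrayAsStructs = true)

    // parse() needs a way to build a JsonParser from the record and a way to
    // render the record for error reporting.
    val rows = parser.parse(
      """{"name": "a", "age": 1}""",
      CreateJacksonParser.string,
      UTF8String.fromString)
    rows.foreach(println) // a single InternalRow carrying the two fields
  }
}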
/* Copyright 2009-2018 EPFL, Lausanne */
package inox
package ast
import scala.collection.mutable.{Map => MutableMap, Set => MutableSet}
trait Types { self: Trees =>
trait Typed extends Printable {
def getType(implicit s: Symbols): Type
def isTyped(implicit s: Symbols): Boolean = getType != Untyped
}
protected trait CachingTyped extends Typed {
private[this] var cache: (Symbols, Type) = (null, null)
final def getType(implicit s: Symbols): Type = {
val (symbols, tpe) = cache
if (s eq symbols) tpe else {
val tpe = computeType
cache = s -> tpe
tpe
}
}
protected def computeType(implicit s: Symbols): Type
}
protected def unveilUntyped(tpe: Type): Type = {
val NAryType(tps, _) = tpe
if (tps exists (_ == Untyped)) Untyped else tpe
}
abstract class Type extends Tree with Typed {
private[this] var simple: Boolean = false
private[this] var cache: (Symbols, Type) = (null, null)
private def setSimple(): this.type = { simple = true; this }
final def getType(implicit s: Symbols): Type = {
if (simple) this else {
val (symbols, tpe) = cache
if (s eq symbols) tpe else {
val tpe = computeType
cache = s -> tpe.setSimple()
tpe
}
}
}
protected def computeType(implicit s: Symbols): Type = {
val NAryType(tps, recons) = this
unveilUntyped(recons(tps.map(_.getType)))
}
}
case object Untyped extends Type
case class BooleanType() extends Type
case class UnitType() extends Type
case class CharType() extends Type
case class IntegerType() extends Type
case class RealType() extends Type
case class StringType() extends Type
sealed case class BVType(signed: Boolean, size: Int) extends Type
abstract class BVTypeExtractor(signed: Boolean, size: Int) {
def apply(): BVType = BVType(signed, size)
def unapply(tpe: BVType): Boolean = tpe.signed == signed && tpe.size == size
}
object Int8Type extends BVTypeExtractor(true, 8)
object Int16Type extends BVTypeExtractor(true, 16)
object Int32Type extends BVTypeExtractor(true, 32)
object Int64Type extends BVTypeExtractor(true, 64)
sealed case class TypeParameter(id: Identifier, flags: Seq[Flag]) extends Type {
def freshen = TypeParameter(id.freshen, flags).copiedFrom(this)
override def equals(that: Any) = that match {
case tp: TypeParameter => id == tp.id
case _ => false
}
override def hashCode = id.hashCode
}
object TypeParameter {
def fresh(name: String, flags: Seq[Flag] = Seq.empty) =
TypeParameter(FreshIdentifier(name), flags)
}
/*
* If you are not sure about the requirement,
* you should use tupleTypeWrap in purescala.Constructors
*/
sealed case class TupleType(bases: Seq[Type]) extends Type {
val dimension: Int = bases.length
require(dimension >= 2)
}
sealed case class SetType(base: Type) extends Type
sealed case class BagType(base: Type) extends Type
sealed case class MapType(from: Type, to: Type) extends Type
sealed case class FunctionType(from: Seq[Type], to: Type) extends Type
sealed case class ADTType(id: Identifier, tps: Seq[Type]) extends Type {
def lookupSort(implicit s: Symbols): Option[TypedADTSort] = s.lookupSort(id, tps)
def getSort(implicit s: Symbols): TypedADTSort = s.getSort(id, tps)
def getField(selector: Identifier)(implicit s: Symbols): Option[ValDef] = lookupSort match {
case Some(tsort: TypedADTSort) =>
tsort.constructors.flatMap(_.fields).collectFirst {
case vd @ ValDef(`selector`, _, _) => vd
}
case _ => None
}
}
/* Dependent Types */
private object TypeNormalization {
private class TypeNormalizer extends SelfTreeTransformer {
private val subst: MutableMap[Variable, Variable] = MutableMap.empty
private var counter: Int = 0
override def transform(expr: Expr): Expr = expr match {
case v: Variable => subst.getOrElse(v, v)
case _ => super.transform(expr)
}
override def transform(vd: ValDef): ValDef = {
val nid = new Identifier("x", counter, counter, false)
counter += 1
val newVd = ValDef(nid, transform(vd.tpe), vd.flags map transform).copiedFrom(vd)
subst(vd.toVariable) = newVd.toVariable
newVd
}
}
def apply[T <: Type](tpe: T): T = (new TypeNormalizer).transform(tpe).asInstanceOf[T]
}
protected sealed trait TypeNormalization { self: Type with Product =>
@inline
private final def elements: List[Any] = _elements.get
private[this] val _elements: utils.Lazy[List[Any]] = utils.Lazy({
// @nv: note that we can't compare `normalized` directly as we are
// overriding the `equals` method and this would lead to non-termination.
val normalized = TypeNormalization(this)
normalized.productIterator.toList
})
protected final def same(that: TypeNormalization): Boolean = elements == that.elements
private[this] val _code: utils.Lazy[Int] = utils.Lazy(elements.hashCode)
protected final def code: Int = _code.get
}
sealed case class PiType(params: Seq[ValDef], to: Type) extends Type with TypeNormalization {
require(params.nonEmpty)
override protected def computeType(implicit s: Symbols): Type =
unveilUntyped(FunctionType(params.map(_.getType), to.getType))
override def hashCode: Int = 31 * code
override def equals(that: Any): Boolean = that match {
case pi: PiType => this same pi
case _ => false
}
}
sealed case class SigmaType(params: Seq[ValDef], to: Type) extends Type with TypeNormalization {
require(params.nonEmpty)
override protected def computeType(implicit s: Symbols): Type =
unveilUntyped(TupleType(params.map(_.getType) :+ to.getType))
override def hashCode: Int = 53 * code
override def equals(that: Any): Boolean = that match {
case sigma: SigmaType => this same sigma
case _ => false
}
}
sealed case class RefinementType(vd: ValDef, prop: Expr) extends Type with TypeNormalization {
override protected def computeType(implicit s: Symbols): Type =
checkParamType(prop, BooleanType(), vd.getType)
override def hashCode: Int = 79 * code
override def equals(that: Any): Boolean = that match {
case ref: RefinementType => this same ref
case _ => false
}
}
/* Utility methods for type checking */
protected final def checkParamType(real: Typed, formal: Typed, result: => Type)(implicit s: Symbols) = {
if (s.isSubtypeOf(real.getType, formal.getType)) result.getType else Untyped
}
protected final def checkParamTypes(real: Seq[Typed], formal: Seq[Typed], result: => Type)(implicit s: Symbols) = {
if (
real.size == formal.size &&
(real zip formal forall (p => s.isSubtypeOf(p._1.getType, p._2.getType)))
) result.getType else Untyped
}
protected final def checkAllTypes(real: Seq[Typed], formal: Typed, result: => Type)(implicit s: Symbols) = {
checkParamTypes(real, List.fill(real.size)(formal), result)
}
protected implicit class TypeWrapper(tpe: Type) {
def orElse(other: => Type): Type = if (tpe == Untyped) other else tpe
}
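  // Illustrative sketch, not part of the original source: `orElse` lets typing helpers try one
  // candidate and fall back to another only when the first evaluates to Untyped. Assuming an
  // implicit Symbols instance and two Typed values `e1` and `e2` are in scope:
  //   getBVType(e1, e2) orElse getIntegerType(e1, e2)
  // yields the common bitvector type when both operands are bitvectors, and otherwise retries
  // the check against IntegerType.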
/* Override points for supporting more complex types */
protected final def getIntegerType(tpe: Typed, tpes: Typed*)(implicit s: Symbols): Type =
checkAllTypes(tpe +: tpes, IntegerType(), IntegerType())
protected final def getRealType(tpe: Typed, tpes: Typed*)(implicit s: Symbols): Type =
checkAllTypes(tpe +: tpes, RealType(), RealType())
protected def getBVType(tpe: Typed, tpes: Typed*)(implicit s: Symbols): Type = tpe.getType match {
case bv: BVType => checkAllTypes(tpes, bv, bv)
case _ => Untyped
}
protected final def getCharType(tpe: Typed, tpes: Typed*)(implicit s: Symbols): Type =
checkAllTypes(tpe +: tpes, CharType(), CharType())
protected def getADTType(tpe: Typed, tpes: Typed*)(implicit s: Symbols): Type = tpe.getType match {
case adt: ADTType => checkAllTypes(tpes, adt, adt)
case _ => Untyped
}
protected def getTupleType(tpe: Typed, tpes: Typed*)(implicit s: Symbols): Type = tpe.getType match {
case tt: TupleType => checkAllTypes(tpes, tt, tt)
case _ => Untyped
}
protected def getSetType(tpe: Typed, tpes: Typed*)(implicit s: Symbols): Type = tpe.getType match {
case st: SetType => checkAllTypes(tpes, st, st)
case _ => Untyped
}
protected def getBagType(tpe: Typed, tpes: Typed*)(implicit s: Symbols): Type = tpe.getType match {
case bt: BagType => checkAllTypes(tpes, bt, bt)
case _ => Untyped
}
protected def getMapType(tpe: Typed, tpes: Typed*)(implicit s: Symbols): Type = tpe.getType match {
case mt: MapType => checkAllTypes(tpes, mt, mt)
case _ => Untyped
}
/** NAryType extractor to extract any Type in a consistent way.
*
* @see [[Deconstructors.Operator]] about why we can't have nice(r) things
*/
object NAryType extends {
protected val s: self.type = self
protected val t: self.type = self
} with TreeExtractor {
type Source = Type
type Target = Type
def unapply(t: Type): Option[(Seq[Type], Seq[Type] => Type)] = {
val (ids, vs, es, tps, flags, recons) = deconstructor.deconstruct(t)
Some((tps, tps => recons(ids, vs, es, tps, flags)))
}
}
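  // Illustrative sketch, not part of the original source: the extractor supports generic
  // one-level rewrites over types without matching on every concrete constructor. A
  // hypothetical helper using it could look like:
  //   def mapSubTypes(tpe: Type, f: Type => Type): Type = tpe match {
  //     case NAryType(tps, recons) => recons(tps map f)
  //   }
  // where `recons` rebuilds the same kind of type from the transformed sub-types.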
object typeOps extends {
protected val sourceTrees: self.type = self
protected val targetTrees: self.type = self
} with GenTreeOps {
type Source = self.Type
type Target = self.Type
lazy val Deconstructor = NAryType
// Helper for typeParamsOf
class TypeCollector extends SelfTreeTraverser {
private[this] val typeParams: MutableSet[TypeParameter] = MutableSet.empty
def getResult: Set[TypeParameter] = typeParams.toSet
override def traverse(tpe: Type): Unit = tpe match {
case tp: TypeParameter => typeParams += tp
case _ => super.traverse(tpe)
}
}
def typeParamsOf(e: Expr): Set[TypeParameter] = {
val collector = new TypeCollector
collector.traverse(e)
collector.getResult
}
def typeParamsOf(t: Type): Set[TypeParameter] = {
val collector = new TypeCollector
collector.traverse(t)
collector.getResult
}
// Helpers for instantiateType
class TypeInstantiator(tps: Map[TypeParameter, Type]) extends SelfTreeTransformer {
override def transform(tpe: Type): Type = tpe match {
case tp: TypeParameter => tps.getOrElse(tp, super.transform(tpe))
case _ => super.transform(tpe)
}
}
def instantiateType(tpe: Type, tps: Map[TypeParameter, Type]): Type = {
if (tps.isEmpty) {
tpe
} else {
new TypeInstantiator(tps).transform(tpe)
}
}
def instantiateType(e: Expr, tps: Map[TypeParameter, Type]): Expr = {
if (tps.isEmpty) {
e
} else {
new TypeInstantiator(tps).transform(e)
}
}
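    // Illustrative sketch, not part of the original source: assuming `tp: TypeParameter` is
    // the single type parameter occurring in some type `fnTpe`, then
    //   instantiateType(fnTpe, Map(tp -> IntegerType()))
    // returns `fnTpe` with every occurrence of `tp` replaced by IntegerType(); with an empty
    // substitution map the original type is returned unchanged.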
    def isParametricType(tpe: Type): Boolean = tpe match {
      case _: TypeParameter => true
      case NAryType(tps, _) => tps.exists(isParametricType)
    }
def replaceFromSymbols[V <: VariableSymbol](subst: Map[V, Expr], tpe: Type)
(implicit ev: VariableConverter[V]): Type = {
new SelfTreeTransformer {
override def transform(expr: Expr): Expr = expr match {
case v: Variable => subst.getOrElse(v.to[V], v)
case _ => super.transform(expr)
}
}.transform(tpe)
}
def variablesOf(tpe: Type): Set[Variable] = tpe match {
case PiType(params, to) =>
params.foldRight(variablesOf(to)) {
case (vd, vars) => vars - vd.toVariable ++ variablesOf(vd.tpe)
}
case SigmaType(params, to) =>
params.foldRight(variablesOf(to)) {
case (vd, vars) => vars - vd.toVariable ++ variablesOf(vd.tpe)
}
case RefinementType(vd, pred) =>
exprOps.variablesOf(pred) - vd.toVariable ++ variablesOf(vd.tpe)
case NAryType(tpes, _) => tpes.flatMap(variablesOf).toSet
}
class TypeSimplifier(implicit symbols: Symbols) extends SelfTreeTransformer {
override def transform(tpe: Type): Type = tpe match {
case (_: PiType | _: SigmaType | _: FunctionType) => tpe.getType
case _ => super.transform(tpe)
}
}
def simplify(expr: Expr)(implicit symbols: Symbols): Expr = new TypeSimplifier().transform(expr)
}
}
|
romac/inox
|
src/main/scala/inox/ast/Types.scala
|
Scala
|
apache-2.0
| 12,598
|
package com.snowplowanalytics.snowplow.storage.kinesis.redshift.limiter
/**
* Created by denismo on 18/09/15.
*/
trait FlushLimiter {
def isFlushRequired: Boolean
def flushed(writeStart: Long, writeEnd: Long, flushCount: Long)
def onRecord(values: Array[String])
}
|
jramos/kinesis-redshift-sink
|
src/main/scala/com.snowplowanalytics.snowplow.storage.kinesis/redshift/limiter/FlushLimiter.scala
|
Scala
|
apache-2.0
| 274
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import kafka.admin.AdminUtils
import kafka.api._
import kafka.message._
import kafka.network._
import kafka.log._
import kafka.utils.ZKGroupTopicDirs
import scala.collection._
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic._
import kafka.metrics.KafkaMetricsGroup
import org.I0Itec.zkclient.ZkClient
import kafka.common._
import kafka.utils.{ZkUtils, Pool, SystemTime, Logging}
import kafka.network.RequestChannel.Response
import kafka.cluster.Broker
import kafka.controller.KafkaController
/**
* Logic to handle the various Kafka requests
*/
class KafkaApis(val requestChannel: RequestChannel,
val replicaManager: ReplicaManager,
val zkClient: ZkClient,
val brokerId: Int,
val config: KafkaConfig,
val controller: KafkaController) extends Logging {
private val producerRequestPurgatory =
new ProducerRequestPurgatory(replicaManager.config.producerPurgatoryPurgeIntervalRequests)
private val fetchRequestPurgatory =
new FetchRequestPurgatory(requestChannel, replicaManager.config.fetchPurgatoryPurgeIntervalRequests)
private val delayedRequestMetrics = new DelayedRequestMetrics
  /* The following 3 data structures are updated by the update metadata request
   * and are queried by the topic metadata request. */
var metadataCache: mutable.Map[TopicAndPartition, PartitionStateInfo] =
new mutable.HashMap[TopicAndPartition, PartitionStateInfo]()
private val aliveBrokers: mutable.Map[Int, Broker] = new mutable.HashMap[Int, Broker]()
private val partitionMetadataLock = new Object
this.logIdent = "[KafkaApi-%d] ".format(brokerId)
/**
* Top-level method that handles all requests and multiplexes to the right api
*/
def handle(request: RequestChannel.Request) {
try{
trace("Handling request: " + request.requestObj + " from client: " + request.remoteAddress)
request.requestId match {
case RequestKeys.ProduceKey => handleProducerRequest(request)
case RequestKeys.FetchKey => handleFetchRequest(request)
case RequestKeys.OffsetsKey => handleOffsetRequest(request)
case RequestKeys.MetadataKey => handleTopicMetadataRequest(request)
case RequestKeys.LeaderAndIsrKey => handleLeaderAndIsrRequest(request)
case RequestKeys.StopReplicaKey => handleStopReplicaRequest(request)
case RequestKeys.UpdateMetadataKey => handleUpdateMetadataRequest(request)
case RequestKeys.ControlledShutdownKey => handleControlledShutdownRequest(request)
case RequestKeys.OffsetCommitKey => handleOffsetCommitRequest(request)
case RequestKeys.OffsetFetchKey => handleOffsetFetchRequest(request)
case requestId => throw new KafkaException("Unknown api code " + requestId)
}
} catch {
case e: Throwable =>
request.requestObj.handleError(e, requestChannel, request)
error("error when handling request %s".format(request.requestObj), e)
} finally
request.apiLocalCompleteTimeMs = SystemTime.milliseconds
}
// ensureTopicExists is only for client facing requests
private def ensureTopicExists(topic: String) = {
if(!metadataCache.exists { case(topicAndPartition, partitionStateInfo) => topicAndPartition.topic.equals(topic)} )
throw new UnknownTopicOrPartitionException("Topic " + topic + " either doesn't exist or is in the process of being deleted")
}
def handleLeaderAndIsrRequest(request: RequestChannel.Request) {
// ensureTopicExists is only for client facing requests
// We can't have the ensureTopicExists check here since the controller sends it as an advisory to all brokers so they
// stop serving data to clients for the topic being deleted
val leaderAndIsrRequest = request.requestObj.asInstanceOf[LeaderAndIsrRequest]
try {
val (response, error) = replicaManager.becomeLeaderOrFollower(leaderAndIsrRequest)
val leaderAndIsrResponse = new LeaderAndIsrResponse(leaderAndIsrRequest.correlationId, response, error)
requestChannel.sendResponse(new Response(request, new BoundedByteBufferSend(leaderAndIsrResponse)))
} catch {
case e: KafkaStorageException =>
fatal("Disk error during leadership change.", e)
Runtime.getRuntime.halt(1)
}
}
def handleStopReplicaRequest(request: RequestChannel.Request) {
// ensureTopicExists is only for client facing requests
// We can't have the ensureTopicExists check here since the controller sends it as an advisory to all brokers so they
// stop serving data to clients for the topic being deleted
val stopReplicaRequest = request.requestObj.asInstanceOf[StopReplicaRequest]
val (response, error) = replicaManager.stopReplicas(stopReplicaRequest)
val stopReplicaResponse = new StopReplicaResponse(stopReplicaRequest.correlationId, response.toMap, error)
requestChannel.sendResponse(new Response(request, new BoundedByteBufferSend(stopReplicaResponse)))
replicaManager.replicaFetcherManager.shutdownIdleFetcherThreads()
}
def handleUpdateMetadataRequest(request: RequestChannel.Request) {
val updateMetadataRequest = request.requestObj.asInstanceOf[UpdateMetadataRequest]
// ensureTopicExists is only for client facing requests
// We can't have the ensureTopicExists check here since the controller sends it as an advisory to all brokers so they
// stop serving data to clients for the topic being deleted
val stateChangeLogger = replicaManager.stateChangeLogger
if(updateMetadataRequest.controllerEpoch < replicaManager.controllerEpoch) {
val stateControllerEpochErrorMessage = ("Broker %d received update metadata request with correlation id %d from an " +
"old controller %d with epoch %d. Latest known controller epoch is %d").format(brokerId,
updateMetadataRequest.correlationId, updateMetadataRequest.controllerId, updateMetadataRequest.controllerEpoch,
replicaManager.controllerEpoch)
stateChangeLogger.warn(stateControllerEpochErrorMessage)
throw new ControllerMovedException(stateControllerEpochErrorMessage)
}
partitionMetadataLock synchronized {
replicaManager.controllerEpoch = updateMetadataRequest.controllerEpoch
// cache the list of alive brokers in the cluster
updateMetadataRequest.aliveBrokers.foreach(b => aliveBrokers.put(b.id, b))
updateMetadataRequest.partitionStateInfos.foreach { partitionState =>
metadataCache.put(partitionState._1, partitionState._2)
stateChangeLogger.trace(("Broker %d cached leader info %s for partition %s in response to UpdateMetadata request " +
"sent by controller %d epoch %d with correlation id %d").format(brokerId, partitionState._2, partitionState._1,
updateMetadataRequest.controllerId, updateMetadataRequest.controllerEpoch, updateMetadataRequest.correlationId))
}
// remove the topics that don't exist in the UpdateMetadata request since those are the topics that are
// currently being deleted by the controller
val topicsKnownToThisBroker = metadataCache.map {
case(topicAndPartition, partitionStateInfo) => topicAndPartition.topic }.toSet
val topicsKnownToTheController = updateMetadataRequest.partitionStateInfos.map {
case(topicAndPartition, partitionStateInfo) => topicAndPartition.topic }.toSet
val deletedTopics = topicsKnownToThisBroker -- topicsKnownToTheController
val partitionsToBeDeleted = metadataCache.filter {
case(topicAndPartition, partitionStateInfo) => deletedTopics.contains(topicAndPartition.topic)
}.keySet
partitionsToBeDeleted.foreach { partition =>
metadataCache.remove(partition)
stateChangeLogger.trace(("Broker %d deleted partition %s from metadata cache in response to UpdateMetadata request " +
"sent by controller %d epoch %d with correlation id %d").format(brokerId, partition,
updateMetadataRequest.controllerId, updateMetadataRequest.controllerEpoch, updateMetadataRequest.correlationId))
}
}
val updateMetadataResponse = new UpdateMetadataResponse(updateMetadataRequest.correlationId)
requestChannel.sendResponse(new Response(request, new BoundedByteBufferSend(updateMetadataResponse)))
}
def handleControlledShutdownRequest(request: RequestChannel.Request) {
// ensureTopicExists is only for client facing requests
// We can't have the ensureTopicExists check here since the controller sends it as an advisory to all brokers so they
// stop serving data to clients for the topic being deleted
val controlledShutdownRequest = request.requestObj.asInstanceOf[ControlledShutdownRequest]
val partitionsRemaining = controller.shutdownBroker(controlledShutdownRequest.brokerId)
val controlledShutdownResponse = new ControlledShutdownResponse(controlledShutdownRequest.correlationId,
ErrorMapping.NoError, partitionsRemaining)
requestChannel.sendResponse(new Response(request, new BoundedByteBufferSend(controlledShutdownResponse)))
}
/**
* Check if a partitionData from a produce request can unblock any
* DelayedFetch requests.
*/
def maybeUnblockDelayedFetchRequests(topic: String, partition: Int, messageSizeInBytes: Int) {
val satisfied = fetchRequestPurgatory.update(RequestKey(topic, partition), messageSizeInBytes)
trace("Producer request to (%s-%d) unblocked %d fetch requests.".format(topic, partition, satisfied.size))
// send any newly unblocked responses
for(fetchReq <- satisfied) {
val topicData = readMessageSets(fetchReq.fetch)
val response = FetchResponse(fetchReq.fetch.correlationId, topicData)
requestChannel.sendResponse(new RequestChannel.Response(fetchReq.request, new FetchResponseSend(response)))
}
}
/**
* Handle a produce request
*/
def handleProducerRequest(request: RequestChannel.Request) {
val produceRequest = request.requestObj.asInstanceOf[ProducerRequest]
val sTime = SystemTime.milliseconds
val localProduceResults = appendToLocalLog(produceRequest)
debug("Produce to local log in %d ms".format(SystemTime.milliseconds - sTime))
val numPartitionsInError = localProduceResults.count(_.error.isDefined)
produceRequest.data.foreach(partitionAndData =>
maybeUnblockDelayedFetchRequests(partitionAndData._1.topic, partitionAndData._1.partition, partitionAndData._2.sizeInBytes))
val allPartitionHaveReplicationFactorOne =
!produceRequest.data.keySet.exists(
m => replicaManager.getReplicationFactorForPartition(m.topic, m.partition) != 1)
if(produceRequest.requiredAcks == 0) {
// no operation needed if producer request.required.acks = 0; however, if there is any exception in handling the request, since
// no response is expected by the producer the handler will send a close connection response to the socket server
// to close the socket so that the producer client will know that some exception has happened and will refresh its metadata
if (numPartitionsInError != 0) {
info(("Send the close connection response due to error handling produce request " +
"[clientId = %s, correlationId = %s, topicAndPartition = %s] with Ack=0")
.format(produceRequest.clientId, produceRequest.correlationId, produceRequest.topicPartitionMessageSizeMap.keySet.mkString(",")))
requestChannel.closeConnection(request.processor, request)
} else {
requestChannel.noOperation(request.processor, request)
}
} else if (produceRequest.requiredAcks == 1 ||
produceRequest.numPartitions <= 0 ||
allPartitionHaveReplicationFactorOne ||
numPartitionsInError == produceRequest.numPartitions) {
val statuses = localProduceResults.map(r => r.key -> ProducerResponseStatus(r.errorCode, r.start)).toMap
val response = ProducerResponse(produceRequest.correlationId, statuses)
requestChannel.sendResponse(new RequestChannel.Response(request, new BoundedByteBufferSend(response)))
} else {
// create a list of (topic, partition) pairs to use as keys for this delayed request
val producerRequestKeys = produceRequest.data.keys.map(
topicAndPartition => new RequestKey(topicAndPartition)).toSeq
val statuses = localProduceResults.map(r => r.key -> ProducerResponseStatus(r.errorCode, r.end + 1)).toMap
val delayedProduce = new DelayedProduce(producerRequestKeys,
request,
statuses,
produceRequest,
produceRequest.ackTimeoutMs.toLong)
producerRequestPurgatory.watch(delayedProduce)
/*
* Replica fetch requests may have arrived (and potentially satisfied)
* delayedProduce requests while they were being added to the purgatory.
* Here, we explicitly check if any of them can be satisfied.
*/
var satisfiedProduceRequests = new mutable.ArrayBuffer[DelayedProduce]
producerRequestKeys.foreach(key =>
satisfiedProduceRequests ++=
producerRequestPurgatory.update(key, key))
debug(satisfiedProduceRequests.size +
" producer requests unblocked during produce to local log.")
satisfiedProduceRequests.foreach(_.respond())
// we do not need the data anymore
produceRequest.emptyData()
}
}
case class ProduceResult(key: TopicAndPartition, start: Long, end: Long, error: Option[Throwable] = None) {
def this(key: TopicAndPartition, throwable: Throwable) =
this(key, -1L, -1L, Some(throwable))
def errorCode = error match {
case None => ErrorMapping.NoError
case Some(error) => ErrorMapping.codeFor(error.getClass.asInstanceOf[Class[Throwable]])
}
}
/**
* Helper method for handling a parsed producer request
*/
private def appendToLocalLog(producerRequest: ProducerRequest): Iterable[ProduceResult] = {
val partitionAndData: Map[TopicAndPartition, MessageSet] = producerRequest.data
trace("Append [%s] to local log ".format(partitionAndData.toString))
partitionAndData.map {case (topicAndPartition, messages) =>
// update stats for incoming bytes rate
BrokerTopicStats.getBrokerTopicStats(topicAndPartition.topic).bytesInRate.mark(messages.sizeInBytes)
BrokerTopicStats.getBrokerAllTopicsStats.bytesInRate.mark(messages.sizeInBytes)
try {
ensureTopicExists(topicAndPartition.topic)
val partitionOpt = replicaManager.getPartition(topicAndPartition.topic, topicAndPartition.partition)
val info =
partitionOpt match {
case Some(partition) => partition.appendMessagesToLeader(messages.asInstanceOf[ByteBufferMessageSet])
case None => throw new UnknownTopicOrPartitionException("Partition %s doesn't exist on %d"
.format(topicAndPartition, brokerId))
}
val numAppendedMessages = if (info.firstOffset == -1L || info.lastOffset == -1L) 0 else (info.lastOffset - info.firstOffset + 1)
// update stats for successfully appended messages
BrokerTopicStats.getBrokerTopicStats(topicAndPartition.topic).logBytesAppendRate.mark(messages.sizeInBytes)
BrokerTopicStats.getBrokerAllTopicsStats.logBytesAppendRate.mark(messages.sizeInBytes)
BrokerTopicStats.getBrokerTopicStats(topicAndPartition.topic).messagesInRate.mark(numAppendedMessages)
BrokerTopicStats.getBrokerAllTopicsStats.messagesInRate.mark(numAppendedMessages)
trace("%d bytes written to log %s-%d beginning at offset %d and ending at offset %d"
.format(messages.size, topicAndPartition.topic, topicAndPartition.partition, info.firstOffset, info.lastOffset))
ProduceResult(topicAndPartition, info.firstOffset, info.lastOffset)
} catch {
        // NOTE: The failed-produce-requests metric is not incremented for UnknownTopicOrPartitionException and NotLeaderForPartitionException,
        // since that metric is supposed to indicate a failure of the broker in handling a produce request
        // for a partition it is the leader for
case e: KafkaStorageException =>
fatal("Halting due to unrecoverable I/O error while handling produce request: ", e)
Runtime.getRuntime.halt(1)
null
case utpe: UnknownTopicOrPartitionException =>
warn("Produce request with correlation id %d from client %s on partition %s failed due to %s".format(
producerRequest.correlationId, producerRequest.clientId, topicAndPartition, utpe.getMessage))
new ProduceResult(topicAndPartition, utpe)
case nle: NotLeaderForPartitionException =>
warn("Produce request with correlation id %d from client %s on partition %s failed due to %s".format(
producerRequest.correlationId, producerRequest.clientId, topicAndPartition, nle.getMessage))
new ProduceResult(topicAndPartition, nle)
case e: Throwable =>
BrokerTopicStats.getBrokerTopicStats(topicAndPartition.topic).failedProduceRequestRate.mark()
BrokerTopicStats.getBrokerAllTopicsStats.failedProduceRequestRate.mark()
error("Error processing ProducerRequest with correlation id %d from client %s on partition %s"
.format(producerRequest.correlationId, producerRequest.clientId, topicAndPartition), e)
new ProduceResult(topicAndPartition, e)
}
}
}
/**
* Handle a fetch request
*/
def handleFetchRequest(request: RequestChannel.Request) {
val fetchRequest = request.requestObj.asInstanceOf[FetchRequest]
if(fetchRequest.isFromFollower) {
maybeUpdatePartitionHw(fetchRequest)
// after updating HW, some delayed produce requests may be unblocked
var satisfiedProduceRequests = new mutable.ArrayBuffer[DelayedProduce]
fetchRequest.requestInfo.foreach {
case (topicAndPartition, _) =>
val key = new RequestKey(topicAndPartition)
satisfiedProduceRequests ++= producerRequestPurgatory.update(key, key)
}
debug("Replica %d fetch unblocked %d producer requests."
.format(fetchRequest.replicaId, satisfiedProduceRequests.size))
satisfiedProduceRequests.foreach(_.respond())
}
val dataRead = readMessageSets(fetchRequest)
val bytesReadable = dataRead.values.map(_.messages.sizeInBytes).sum
if(fetchRequest.maxWait <= 0 ||
bytesReadable >= fetchRequest.minBytes ||
fetchRequest.numPartitions <= 0) {
debug("Returning fetch response %s for fetch request with correlation id %d to client %s"
.format(dataRead.values.map(_.error).mkString(","), fetchRequest.correlationId, fetchRequest.clientId))
val response = new FetchResponse(fetchRequest.correlationId, dataRead)
requestChannel.sendResponse(new RequestChannel.Response(request, new FetchResponseSend(response)))
} else {
debug("Putting fetch request with correlation id %d from client %s into purgatory".format(fetchRequest.correlationId,
fetchRequest.clientId))
// create a list of (topic, partition) pairs to use as keys for this delayed request
val delayedFetchKeys = fetchRequest.requestInfo.keys.toSeq.map(new RequestKey(_))
val delayedFetch = new DelayedFetch(delayedFetchKeys, request, fetchRequest, fetchRequest.maxWait, bytesReadable)
fetchRequestPurgatory.watch(delayedFetch)
}
}
private def maybeUpdatePartitionHw(fetchRequest: FetchRequest) {
debug("Maybe update partition HW due to fetch request: %s ".format(fetchRequest))
fetchRequest.requestInfo.foreach(info => {
val (topic, partition, offset) = (info._1.topic, info._1.partition, info._2.offset)
replicaManager.recordFollowerPosition(topic, partition, fetchRequest.replicaId, offset)
})
}
/**
* Read from all the offset details given and return a map of
* (topic, partition) -> PartitionData
*/
private def readMessageSets(fetchRequest: FetchRequest) = {
val isFetchFromFollower = fetchRequest.isFromFollower
    fetchRequest.requestInfo.map {
case (TopicAndPartition(topic, partition), PartitionFetchInfo(offset, fetchSize)) =>
val partitionData =
try {
ensureTopicExists(topic)
val (messages, highWatermark) = readMessageSet(topic, partition, offset, fetchSize, fetchRequest.replicaId)
BrokerTopicStats.getBrokerTopicStats(topic).bytesOutRate.mark(messages.sizeInBytes)
BrokerTopicStats.getBrokerAllTopicsStats.bytesOutRate.mark(messages.sizeInBytes)
if (!isFetchFromFollower) {
new FetchResponsePartitionData(ErrorMapping.NoError, highWatermark, messages)
} else {
debug("Leader %d for partition [%s,%d] received fetch request from follower %d"
.format(brokerId, topic, partition, fetchRequest.replicaId))
new FetchResponsePartitionData(ErrorMapping.NoError, highWatermark, messages)
}
} catch {
            // NOTE: The failed-fetch-requests metric is not incremented for UnknownTopicOrPartitionException and NotLeaderForPartitionException,
            // since that metric is supposed to indicate a failure of the broker in handling a fetch request
            // for a partition it is the leader for
case utpe: UnknownTopicOrPartitionException =>
warn("Fetch request with correlation id %d from client %s on partition [%s,%d] failed due to %s".format(
fetchRequest.correlationId, fetchRequest.clientId, topic, partition, utpe.getMessage))
new FetchResponsePartitionData(ErrorMapping.codeFor(utpe.getClass.asInstanceOf[Class[Throwable]]), -1L, MessageSet.Empty)
case nle: NotLeaderForPartitionException =>
warn("Fetch request with correlation id %d from client %s on partition [%s,%d] failed due to %s".format(
fetchRequest.correlationId, fetchRequest.clientId, topic, partition, nle.getMessage))
new FetchResponsePartitionData(ErrorMapping.codeFor(nle.getClass.asInstanceOf[Class[Throwable]]), -1L, MessageSet.Empty)
case t: Throwable =>
BrokerTopicStats.getBrokerTopicStats(topic).failedFetchRequestRate.mark()
BrokerTopicStats.getBrokerAllTopicsStats.failedFetchRequestRate.mark()
error("Error when processing fetch request for partition [%s,%d] offset %d from %s with correlation id %d"
.format(topic, partition, offset, if (isFetchFromFollower) "follower" else "consumer", fetchRequest.correlationId), t)
new FetchResponsePartitionData(ErrorMapping.codeFor(t.getClass.asInstanceOf[Class[Throwable]]), -1L, MessageSet.Empty)
}
(TopicAndPartition(topic, partition), partitionData)
}
}
/**
   * Read from a single topic/partition at the given offset up to maxSize bytes
*/
private def readMessageSet(topic: String,
partition: Int,
offset: Long,
maxSize: Int,
fromReplicaId: Int): (MessageSet, Long) = {
    // check if the current broker is the leader for the partition
val localReplica = if(fromReplicaId == Request.DebuggingConsumerId)
replicaManager.getReplicaOrException(topic, partition)
else
replicaManager.getLeaderReplicaIfLocal(topic, partition)
trace("Fetching log segment for topic, partition, offset, size = " + (topic, partition, offset, maxSize))
val maxOffsetOpt =
if (Request.isReplicaIdFromFollower(fromReplicaId))
None
else
Some(localReplica.highWatermark)
val messages = localReplica.log match {
case Some(log) =>
log.read(offset, maxSize, maxOffsetOpt)
case None =>
error("Leader for partition [%s,%d] on broker %d does not have a local log".format(topic, partition, brokerId))
MessageSet.Empty
}
(messages, localReplica.highWatermark)
}
/**
* Service the offset request API
*/
def handleOffsetRequest(request: RequestChannel.Request) {
val offsetRequest = request.requestObj.asInstanceOf[OffsetRequest]
val responseMap = offsetRequest.requestInfo.map(elem => {
val (topicAndPartition, partitionOffsetRequestInfo) = elem
try {
ensureTopicExists(topicAndPartition.topic)
// ensure leader exists
val localReplica = if(!offsetRequest.isFromDebuggingClient)
replicaManager.getLeaderReplicaIfLocal(topicAndPartition.topic, topicAndPartition.partition)
else
replicaManager.getReplicaOrException(topicAndPartition.topic, topicAndPartition.partition)
val offsets = {
val allOffsets = fetchOffsets(replicaManager.logManager,
topicAndPartition,
partitionOffsetRequestInfo.time,
partitionOffsetRequestInfo.maxNumOffsets)
if (!offsetRequest.isFromOrdinaryClient) {
allOffsets
} else {
val hw = localReplica.highWatermark
if (allOffsets.exists(_ > hw))
hw +: allOffsets.dropWhile(_ > hw)
else
allOffsets
}
}
(topicAndPartition, PartitionOffsetsResponse(ErrorMapping.NoError, offsets))
} catch {
        // NOTE: UnknownTopicOrPartitionException and NotLeaderForPartitionException are special-cased since these errors
        // are typically transient and there is no value in logging their entire stack traces
case utpe: UnknownTopicOrPartitionException =>
warn("Offset request with correlation id %d from client %s on partition %s failed due to %s".format(
offsetRequest.correlationId, offsetRequest.clientId, topicAndPartition, utpe.getMessage))
(topicAndPartition, PartitionOffsetsResponse(ErrorMapping.codeFor(utpe.getClass.asInstanceOf[Class[Throwable]]), Nil) )
case nle: NotLeaderForPartitionException =>
warn("Offset request with correlation id %d from client %s on partition %s failed due to %s".format(
            offsetRequest.correlationId, offsetRequest.clientId, topicAndPartition, nle.getMessage))
(topicAndPartition, PartitionOffsetsResponse(ErrorMapping.codeFor(nle.getClass.asInstanceOf[Class[Throwable]]), Nil) )
case e: Throwable =>
warn("Error while responding to offset request", e)
(topicAndPartition, PartitionOffsetsResponse(ErrorMapping.codeFor(e.getClass.asInstanceOf[Class[Throwable]]), Nil) )
}
})
val response = OffsetResponse(offsetRequest.correlationId, responseMap)
requestChannel.sendResponse(new RequestChannel.Response(request, new BoundedByteBufferSend(response)))
}
def fetchOffsets(logManager: LogManager, topicAndPartition: TopicAndPartition, timestamp: Long, maxNumOffsets: Int): Seq[Long] = {
logManager.getLog(topicAndPartition) match {
case Some(log) =>
fetchOffsetsBefore(log, timestamp, maxNumOffsets)
case None =>
if (timestamp == OffsetRequest.LatestTime || timestamp == OffsetRequest.EarliestTime)
Seq(0L)
else
Nil
}
}
def fetchOffsetsBefore(log: Log, timestamp: Long, maxNumOffsets: Int): Seq[Long] = {
val segsArray = log.logSegments.toArray
var offsetTimeArray: Array[(Long, Long)] = null
if(segsArray.last.size > 0)
offsetTimeArray = new Array[(Long, Long)](segsArray.length + 1)
else
offsetTimeArray = new Array[(Long, Long)](segsArray.length)
for(i <- 0 until segsArray.length)
offsetTimeArray(i) = (segsArray(i).baseOffset, segsArray(i).lastModified)
if(segsArray.last.size > 0)
offsetTimeArray(segsArray.length) = (log.logEndOffset, SystemTime.milliseconds)
var startIndex = -1
timestamp match {
case OffsetRequest.LatestTime =>
startIndex = offsetTimeArray.length - 1
case OffsetRequest.EarliestTime =>
startIndex = 0
case _ =>
var isFound = false
debug("Offset time array = " + offsetTimeArray.foreach(o => "%d, %d".format(o._1, o._2)))
startIndex = offsetTimeArray.length - 1
while (startIndex >= 0 && !isFound) {
if (offsetTimeArray(startIndex)._2 <= timestamp)
isFound = true
else
startIndex -=1
}
}
val retSize = maxNumOffsets.min(startIndex + 1)
val ret = new Array[Long](retSize)
for(j <- 0 until retSize) {
ret(j) = offsetTimeArray(startIndex)._1
startIndex -= 1
}
// ensure that the returned seq is in descending order of offsets
ret.toSeq.sortBy(- _)
}
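  // Illustrative sketch, not part of the original source: for a log with segments starting at
  // offsets 0, 100 and 200 and a non-empty last segment with log end offset 250, a request
  // with timestamp = OffsetRequest.LatestTime and maxNumOffsets = 3 returns Seq(250, 200, 100)
  // (candidate offsets in descending order), while OffsetRequest.EarliestTime returns Seq(0).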
/**
* Service the topic metadata request API
*/
def handleTopicMetadataRequest(request: RequestChannel.Request) {
val metadataRequest = request.requestObj.asInstanceOf[TopicMetadataRequest]
val topicsMetadata = new mutable.ArrayBuffer[TopicMetadata]()
val config = replicaManager.config
    val uniqueTopics = {
if(metadataRequest.topics.size > 0)
metadataRequest.topics.toSet
else {
partitionMetadataLock synchronized {
metadataCache.keySet.map(_.topic)
}
}
}
val topicMetadataList =
partitionMetadataLock synchronized {
uniqueTopics.map { topic =>
if(metadataCache.keySet.map(_.topic).contains(topic)) {
debug("Topic %s exists in metadata cache on broker %d".format(topic, config.brokerId))
val partitionStateInfo = metadataCache.filter(p => p._1.topic.equals(topic))
val sortedPartitions = partitionStateInfo.toList.sortWith((m1,m2) => m1._1.partition < m2._1.partition)
val partitionMetadata = sortedPartitions.map { case(topicAndPartition, partitionState) =>
val replicas = metadataCache(topicAndPartition).allReplicas
var replicaInfo: Seq[Broker] = replicas.map(aliveBrokers.getOrElse(_, null)).filter(_ != null).toSeq
var leaderInfo: Option[Broker] = None
var isrInfo: Seq[Broker] = Nil
val leaderIsrAndEpoch = partitionState.leaderIsrAndControllerEpoch
val leader = leaderIsrAndEpoch.leaderAndIsr.leader
val isr = leaderIsrAndEpoch.leaderAndIsr.isr
debug("%s".format(topicAndPartition) + ";replicas = " + replicas + ", in sync replicas = " + isr + ", leader = " + leader)
try {
if(aliveBrokers.keySet.contains(leader))
leaderInfo = Some(aliveBrokers(leader))
else throw new LeaderNotAvailableException("Leader not available for partition %s".format(topicAndPartition))
isrInfo = isr.map(aliveBrokers.getOrElse(_, null)).filter(_ != null)
if(replicaInfo.size < replicas.size)
throw new ReplicaNotAvailableException("Replica information not available for following brokers: " +
replicas.filterNot(replicaInfo.map(_.id).contains(_)).mkString(","))
if(isrInfo.size < isr.size)
throw new ReplicaNotAvailableException("In Sync Replica information not available for following brokers: " +
isr.filterNot(isrInfo.map(_.id).contains(_)).mkString(","))
new PartitionMetadata(topicAndPartition.partition, leaderInfo, replicaInfo, isrInfo, ErrorMapping.NoError)
} catch {
case e: Throwable =>
error("Error while fetching metadata for partition %s".format(topicAndPartition), e)
new PartitionMetadata(topicAndPartition.partition, leaderInfo, replicaInfo, isrInfo,
ErrorMapping.codeFor(e.getClass.asInstanceOf[Class[Throwable]]))
}
}
new TopicMetadata(topic, partitionMetadata)
} else {
debug("Topic %s does not exist in metadata cache on broker %d".format(topic, config.brokerId))
// topic doesn't exist, send appropriate error code
new TopicMetadata(topic, Seq.empty[PartitionMetadata], ErrorMapping.UnknownTopicOrPartitionCode)
}
}
}
// handle auto create topics
topicMetadataList.foreach { topicMetadata =>
topicMetadata.errorCode match {
case ErrorMapping.NoError => topicsMetadata += topicMetadata
case ErrorMapping.UnknownTopicOrPartitionCode =>
if (config.autoCreateTopicsEnable) {
try {
AdminUtils.createTopic(zkClient, topicMetadata.topic, config.numPartitions, config.defaultReplicationFactor)
info("Auto creation of topic %s with %d partitions and replication factor %d is successful!"
.format(topicMetadata.topic, config.numPartitions, config.defaultReplicationFactor))
} catch {
case e: TopicExistsException => // let it go, possibly another broker created this topic
}
topicsMetadata += new TopicMetadata(topicMetadata.topic, topicMetadata.partitionsMetadata, ErrorMapping.LeaderNotAvailableCode)
} else {
debug("Auto create topic skipped for %s".format(topicMetadata.topic))
topicsMetadata += topicMetadata
}
case _ =>
debug("Error while fetching topic metadata for topic %s due to %s ".format(topicMetadata.topic,
ErrorMapping.exceptionFor(topicMetadata.errorCode).getClass.getName))
topicsMetadata += topicMetadata
}
}
trace("Sending topic metadata %s for correlation id %d to client %s".format(topicsMetadata.mkString(","), metadataRequest.correlationId, metadataRequest.clientId))
val response = new TopicMetadataResponse(topicsMetadata.toSeq, metadataRequest.correlationId)
requestChannel.sendResponse(new RequestChannel.Response(request, new BoundedByteBufferSend(response)))
}
/*
* Service the Offset commit API
*/
def handleOffsetCommitRequest(request: RequestChannel.Request) {
val offsetCommitRequest = request.requestObj.asInstanceOf[OffsetCommitRequest]
val responseInfo = offsetCommitRequest.requestInfo.map{
case (topicAndPartition, metaAndError) => {
val topicDirs = new ZKGroupTopicDirs(offsetCommitRequest.groupId, topicAndPartition.topic)
try {
ensureTopicExists(topicAndPartition.topic)
if(metaAndError.metadata != null && metaAndError.metadata.length > config.offsetMetadataMaxSize) {
(topicAndPartition, ErrorMapping.OffsetMetadataTooLargeCode)
} else {
ZkUtils.updatePersistentPath(zkClient, topicDirs.consumerOffsetDir + "/" +
topicAndPartition.partition, metaAndError.offset.toString)
(topicAndPartition, ErrorMapping.NoError)
}
} catch {
case e: Throwable => (topicAndPartition, ErrorMapping.codeFor(e.getClass.asInstanceOf[Class[Throwable]]))
}
}
}
val response = new OffsetCommitResponse(responseInfo,
offsetCommitRequest.correlationId)
requestChannel.sendResponse(new RequestChannel.Response(request, new BoundedByteBufferSend(response)))
}
/*
* Service the Offset fetch API
*/
def handleOffsetFetchRequest(request: RequestChannel.Request) {
val offsetFetchRequest = request.requestObj.asInstanceOf[OffsetFetchRequest]
val responseInfo = offsetFetchRequest.requestInfo.map( t => {
val topicDirs = new ZKGroupTopicDirs(offsetFetchRequest.groupId, t.topic)
try {
ensureTopicExists(t.topic)
val payloadOpt = ZkUtils.readDataMaybeNull(zkClient, topicDirs.consumerOffsetDir + "/" + t.partition)._1
payloadOpt match {
case Some(payload) => {
(t, OffsetMetadataAndError(offset=payload.toLong, error=ErrorMapping.NoError))
}
case None => (t, OffsetMetadataAndError(OffsetMetadataAndError.InvalidOffset, OffsetMetadataAndError.NoMetadata,
ErrorMapping.UnknownTopicOrPartitionCode))
}
} catch {
case e: Throwable =>
(t, OffsetMetadataAndError(OffsetMetadataAndError.InvalidOffset, OffsetMetadataAndError.NoMetadata,
ErrorMapping.codeFor(e.getClass.asInstanceOf[Class[Throwable]])))
}
})
val response = new OffsetFetchResponse(collection.immutable.Map(responseInfo: _*),
offsetFetchRequest.correlationId)
requestChannel.sendResponse(new RequestChannel.Response(request, new BoundedByteBufferSend(response)))
}
def close() {
debug("Shutting down.")
fetchRequestPurgatory.shutdown()
producerRequestPurgatory.shutdown()
debug("Shut down complete.")
}
private [kafka] trait MetricKey {
def keyLabel: String
}
private [kafka] object MetricKey {
val globalLabel = "All"
}
private [kafka] case class RequestKey(topic: String, partition: Int)
extends MetricKey {
def this(topicAndPartition: TopicAndPartition) = this(topicAndPartition.topic, topicAndPartition.partition)
def topicAndPartition = TopicAndPartition(topic, partition)
override def keyLabel = "%s-%d".format(topic, partition)
}
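  // Illustrative example, not part of the original source: RequestKey("events", 3).keyLabel
  // evaluates to "events-3", the per-partition label used when recording purgatory metrics,
  // and RequestKey("events", 3).topicAndPartition rebuilds the corresponding TopicAndPartition.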
/**
* A delayed fetch request
*/
class DelayedFetch(keys: Seq[RequestKey], request: RequestChannel.Request, val fetch: FetchRequest, delayMs: Long, initialSize: Long)
extends DelayedRequest(keys, request, delayMs) {
val bytesAccumulated = new AtomicLong(initialSize)
}
/**
* A holding pen for fetch requests waiting to be satisfied
*/
class FetchRequestPurgatory(requestChannel: RequestChannel, purgeInterval: Int)
extends RequestPurgatory[DelayedFetch, Int](brokerId, purgeInterval) {
this.logIdent = "[FetchRequestPurgatory-%d] ".format(brokerId)
/**
* A fetch request is satisfied when it has accumulated enough data to meet the min_bytes field
*/
def checkSatisfied(messageSizeInBytes: Int, delayedFetch: DelayedFetch): Boolean = {
val accumulatedSize = delayedFetch.bytesAccumulated.addAndGet(messageSizeInBytes)
accumulatedSize >= delayedFetch.fetch.minBytes
}
/**
* When a request expires just answer it with whatever data is present
*/
def expire(delayed: DelayedFetch) {
debug("Expiring fetch request %s.".format(delayed.fetch))
try {
val topicData = readMessageSets(delayed.fetch)
val response = FetchResponse(delayed.fetch.correlationId, topicData)
val fromFollower = delayed.fetch.isFromFollower
delayedRequestMetrics.recordDelayedFetchExpired(fromFollower)
requestChannel.sendResponse(new RequestChannel.Response(delayed.request, new FetchResponseSend(response)))
}
catch {
case e1: LeaderNotAvailableException =>
debug("Leader changed before fetch request %s expired.".format(delayed.fetch))
case e2: UnknownTopicOrPartitionException =>
debug("Replica went offline before fetch request %s expired.".format(delayed.fetch))
}
}
}
class DelayedProduce(keys: Seq[RequestKey],
request: RequestChannel.Request,
initialErrorsAndOffsets: Map[TopicAndPartition, ProducerResponseStatus],
val produce: ProducerRequest,
delayMs: Long)
extends DelayedRequest(keys, request, delayMs) with Logging {
/**
* Map of (topic, partition) -> partition status
* The values in this map don't need to be synchronized since updates to the
* values are effectively synchronized by the ProducerRequestPurgatory's
* update method
*/
private [kafka] val partitionStatus = keys.map(requestKey => {
val producerResponseStatus = initialErrorsAndOffsets(TopicAndPartition(requestKey.topic, requestKey.partition))
// if there was an error in writing to the local replica's log, then don't
// wait for acks on this partition
val (acksPending, error, nextOffset) =
if (producerResponseStatus.error == ErrorMapping.NoError) {
// Timeout error state will be cleared when requiredAcks are received
(true, ErrorMapping.RequestTimedOutCode, producerResponseStatus.offset)
}
else (false, producerResponseStatus.error, producerResponseStatus.offset)
val initialStatus = PartitionStatus(acksPending, error, nextOffset)
trace("Initial partition status for %s = %s".format(requestKey.keyLabel, initialStatus))
(requestKey, initialStatus)
}).toMap
def respond() {
val finalErrorsAndOffsets = initialErrorsAndOffsets.map(
status => {
val pstat = partitionStatus(new RequestKey(status._1))
(status._1, ProducerResponseStatus(pstat.error, pstat.requiredOffset))
})
val response = ProducerResponse(produce.correlationId, finalErrorsAndOffsets)
requestChannel.sendResponse(new RequestChannel.Response(
request, new BoundedByteBufferSend(response)))
}
/**
* Returns true if this delayed produce request is satisfied (or more
* accurately, unblocked) -- this is the case if for every partition:
* Case A: This broker is not the leader: unblock - should return error.
* Case B: This broker is the leader:
* B.1 - If there was a localError (when writing to the local log): unblock - should return error
* B.2 - else, at least requiredAcks replicas should be caught up to this request.
*
* As partitions become acknowledged, we may be able to unblock
* DelayedFetchRequests that are pending on those partitions.
*/
def isSatisfied(followerFetchRequestKey: RequestKey) = {
val topic = followerFetchRequestKey.topic
val partitionId = followerFetchRequestKey.partition
val key = RequestKey(topic, partitionId)
val fetchPartitionStatus = partitionStatus(key)
trace("Checking producer request satisfaction for %s-%d, acksPending = %b"
.format(topic, partitionId, fetchPartitionStatus.acksPending))
if (fetchPartitionStatus.acksPending) {
val partitionOpt = replicaManager.getPartition(topic, partitionId)
val (hasEnough, errorCode) = partitionOpt match {
case Some(partition) =>
partition.checkEnoughReplicasReachOffset(fetchPartitionStatus.requiredOffset, produce.requiredAcks)
case None =>
(false, ErrorMapping.UnknownTopicOrPartitionCode)
}
if (errorCode != ErrorMapping.NoError) {
fetchPartitionStatus.acksPending = false
fetchPartitionStatus.error = errorCode
} else if (hasEnough) {
fetchPartitionStatus.acksPending = false
fetchPartitionStatus.error = ErrorMapping.NoError
}
if (!fetchPartitionStatus.acksPending) {
val messageSizeInBytes = produce.topicPartitionMessageSizeMap(followerFetchRequestKey.topicAndPartition)
maybeUnblockDelayedFetchRequests(topic, partitionId, messageSizeInBytes)
}
}
// unblocked if there are no partitions with pending acks
val satisfied = ! partitionStatus.exists(p => p._2.acksPending)
trace("Producer request satisfaction for %s-%d = %b".format(topic, partitionId, satisfied))
satisfied
}
case class PartitionStatus(var acksPending: Boolean,
var error: Short,
requiredOffset: Long) {
def setThisBrokerNotLeader() {
error = ErrorMapping.NotLeaderForPartitionCode
acksPending = false
}
override def toString =
"acksPending:%b, error: %d, requiredOffset: %d".format(
acksPending, error, requiredOffset
)
}
}
/**
* A holding pen for produce requests waiting to be satisfied.
*/
private [kafka] class ProducerRequestPurgatory(purgeInterval: Int)
extends RequestPurgatory[DelayedProduce, RequestKey](brokerId, purgeInterval) {
this.logIdent = "[ProducerRequestPurgatory-%d] ".format(brokerId)
protected def checkSatisfied(followerFetchRequestKey: RequestKey,
delayedProduce: DelayedProduce) =
delayedProduce.isSatisfied(followerFetchRequestKey)
/**
* Handle an expired delayed request
*/
protected def expire(delayedProduce: DelayedProduce) {
for (partitionStatus <- delayedProduce.partitionStatus if partitionStatus._2.acksPending)
delayedRequestMetrics.recordDelayedProducerKeyExpired(partitionStatus._1)
delayedProduce.respond()
}
}
private class DelayedRequestMetrics {
private class DelayedProducerRequestMetrics(keyLabel: String = MetricKey.globalLabel) extends KafkaMetricsGroup {
val expiredRequestMeter = newMeter(keyLabel + "ExpiresPerSecond", "requests", TimeUnit.SECONDS)
}
private class DelayedFetchRequestMetrics(forFollower: Boolean) extends KafkaMetricsGroup {
private val metricPrefix = if (forFollower) "Follower" else "Consumer"
val expiredRequestMeter = newMeter(metricPrefix + "ExpiresPerSecond", "requests", TimeUnit.SECONDS)
}
private val producerRequestMetricsForKey = {
val valueFactory = (k: MetricKey) => new DelayedProducerRequestMetrics(k.keyLabel + "-")
new Pool[MetricKey, DelayedProducerRequestMetrics](Some(valueFactory))
}
private val aggregateProduceRequestMetrics = new DelayedProducerRequestMetrics
private val aggregateFollowerFetchRequestMetrics = new DelayedFetchRequestMetrics(forFollower = true)
private val aggregateNonFollowerFetchRequestMetrics = new DelayedFetchRequestMetrics(forFollower = false)
def recordDelayedProducerKeyExpired(key: MetricKey) {
val keyMetrics = producerRequestMetricsForKey.getAndMaybePut(key)
List(keyMetrics, aggregateProduceRequestMetrics).foreach(_.expiredRequestMeter.mark())
}
def recordDelayedFetchExpired(forFollower: Boolean) {
val metrics = if (forFollower) aggregateFollowerFetchRequestMetrics
else aggregateNonFollowerFetchRequestMetrics
metrics.expiredRequestMeter.mark()
}
}
}
|
harshach/kafka
|
core/src/main/scala/kafka/server/KafkaApis.scala
|
Scala
|
apache-2.0
| 47,329
|
package com.anteoy.coreScala
/**
* Created by zhoudazhuang
* Date: 17-1-3
 * Time: 3:18 PM. Topic: functional programming -- functions, parameters, return values, callbacks.
 * Description: The program below implements a simple timer. The function responsible for the timing is oncePerSecond; it takes a callback function as a parameter, and the type of that callback is written () => Unit, denoting any function with no parameters and no return value (Unit is similar to void in C/C++). The main method calls the timer function, passing timeFlies as the callback, which simply prints a line to the terminal. So the program's actual behaviour is: print the message "time flies like an arrow" to the screen once per second.
*/
object Timer {
def oncePerSecond(callback: () => Unit) {
while (true) {
callback(); Thread sleep 1000
}
}
def timeFlies() {
println("time flies like an arrow...")
}
def main(args: Array[String]) {
oncePerSecond(timeFlies)
}
}
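// Illustrative variant, not part of the original source: the same callback can be supplied
// inline as an anonymous function literal, which is the idiom the comment above describes.
object TimerAnonymous {
  def main(args: Array[String]) {
    Timer.oncePerSecond(() => println("time flies like an arrow..."))
  }
}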
|
Anteoy/jottings
|
src/main/scala/com/anteoy/coreScala/Timer.scala
|
Scala
|
apache-2.0
| 945
|
/*
Stratagem is a model checker for transition systems described using rewriting
rules and strategies.
Copyright (C) 2013 - SMV@Geneva University.
Program written by Edmundo Lopez Bobeda <edmundo [at] lopezbobeda.net>.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
package ch.unige.cui.smv.stratagem.ts
/**
* Represents the Sequence strategy.
* @param S1 strategy to be used as parameter.
* @param S2 strategy to be used as parameter.
* @author mundacho
*
*/
case class Sequence(S1: Strategy, S2: Strategy) extends NonVariableStrategy
|
didierbuchs/oldstratagem
|
src/main/scala/ch/unige/cui/smv/stratagem/ts/Sequence.scala
|
Scala
|
gpl-2.0
| 1,175
|
object Problem08 {
def myCompress[A](list: List[A]): List[A] = {
list match {
case x :: y :: xs => if (x == y) myCompress(List(x) ::: xs)
else List(x) ::: myCompress(List(y) ::: xs)
case x => x
}
}
}
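// Illustrative usage, not part of the original source: `Problem08Demo` is a hypothetical
// driver added only to show the expected behaviour of myCompress.
object Problem08Demo {
  def main(args: Array[String]): Unit = {
    // Consecutive duplicates are collapsed while the original order is preserved.
    assert(Problem08.myCompress(List('a', 'a', 'a', 'b', 'c', 'c')) == List('a', 'b', 'c'))
    // The empty list is returned unchanged.
    assert(Problem08.myCompress(List.empty[Int]) == Nil)
    println("Problem08.myCompress behaves as expected")
  }
}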
|
leonardi-tutorials/tutorial-scala
|
src/main/scala/Problem08.scala
|
Scala
|
apache-2.0
| 258
|
package scalapb
import com.google.protobuf.ExtensionRegistry
import com.google.protobuf.compiler.PluginProtos.{CodeGeneratorRequest, CodeGeneratorResponse}
import com.trueaccord.scalapb.Scalapb
import com.trueaccord.scalapb.compiler.ProtobufGenerator
import protocbridge.{ProtocCodeGenerator, Artifact}
object ScalaPbCodeGenerator extends ProtocCodeGenerator {
override def name: String = "scala"
override def run(req: CodeGeneratorRequest): CodeGeneratorResponse = {
ProtobufGenerator.handleCodeGeneratorRequest(req)
}
override def registerExtensions(registry: ExtensionRegistry): Unit = {
Scalapb.registerAllExtensions(registry)
}
override def suggestedDependencies: Seq[Artifact] = Seq(
Artifact("com.google.protobuf", "protobuf-java", "3.0.0-beta-2"),
Artifact("com.trueaccord.scalapb", "scalapb-runtime",
com.trueaccord.scalapb.compiler.Version.scalapbVersion, crossVersion = true)
)
}
|
eiennohito/ScalaPB
|
compiler-plugin/src/main/scala/scalapb/ScalaPbCodeGenerator.scala
|
Scala
|
apache-2.0
| 933
|
//object Format {
// import com.typesafe.sbt.SbtScalariform._
//
// lazy val settings = defaultScalariformSettings ++ Seq(
// ScalariformKeys.preferences := formattingPreferences
// )
//
// lazy val formattingPreferences = {
// import scalariform.formatter.preferences._
// FormattingPreferences().
// setPreference( DoubleIndentClassDeclaration, true ).
// setPreference( MultilineScaladocCommentsStartOnFirstLine, true ).
// setPreference( PlaceScaladocAsterisksBeneathSecondAsterisk, true )
// }
//}
|
dmrolfs/demesne
|
project/Format.scala
|
Scala
|
apache-2.0
| 532
|
package org.homermultitext.edmodel
/** All possible categories for alternate readings
* are enumerated by case objects extending this trait
*
* Used by [[org.homermultitext.edmodel.AlternateReading]] and therefore also by [[org.homermultitext.edmodel.HmtToken]] and [[org.homermultitext.edmodel.TeiReader]]
*/
sealed trait AlternateCategory {def name : String}
/** restored by a modern editor
*
* This should only apply to editorial expansions of abbreviations.
*/
case object Restoration extends AlternateCategory {val name = "editorial restoration or completion"}
/** alternate reading offered by scribe */
case object Multiform extends AlternateCategory {val name = "scribally recorded multiform"}
/** scribal correction of text */
case object Correction extends AlternateCategory {val name = "scribal correction"}
/** scribal deletion of text */
case object Deletion extends AlternateCategory {val name = "scribal deletion"}
/** an alternate reading for a token
*
* The `name` member must be implemented with an English description of the editorial status
*
* @param alternateCategory category of alternate reading
* @param reading all [[org.homermultitext.edmodel.Reading]]s for this alternate reading
*/
case class AlternateReading (alternateCategory: AlternateCategory, reading: Vector[Reading] ) {
def leidenize: String = {
Reading.leidenize(reading) + " (" + alternateCategory + ")"
}
def simpleString: String = {
alternateCategory match {
case Deletion => ""
case _ => reading.map(_.reading).mkString
}
}
}
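// Illustrative note, not part of the original source: `simpleString` suppresses deleted text,
// so an AlternateReading with category Deletion always renders as the empty string, while any
// other category concatenates the plain `reading` values of its Reading vector; `leidenize`
// additionally appends the category in parentheses.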
/** string formatting function
*/
object AlternateReading {
/** format all [[org.homermultitext.edmodel.Reading]]s in a single string*/
def alternative (alt: AlternateReading): String = {
alt.reading.map(rdg => rdg.typedText).mkString(" + ")
}
}
|
homermultitext/edmodel
|
src/main/scala/org/homermultitext/edmodel/AlternateReading.scala
|
Scala
|
gpl-3.0
| 1,813
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import org.apache.spark.SparkConf
import org.apache.spark.sql.catalyst.expressions.Hex
import org.apache.spark.sql.connector.catalog.InMemoryPartitionTableCatalog
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.{SharedSparkSession, SQLTestUtils}
import org.apache.spark.unsafe.types.UTF8String
/**
* The base trait for SQL INSERT.
*/
trait SQLInsertTestSuite extends QueryTest with SQLTestUtils {
import testImplicits._
def format: String
protected def createTable(
table: String,
cols: Seq[String],
colTypes: Seq[String],
partCols: Seq[String] = Nil): Unit = {
val values = cols.zip(colTypes).map(tuple => tuple._1 + " " + tuple._2).mkString("(", ", ", ")")
val partitionSpec = if (partCols.nonEmpty) {
partCols.mkString("PARTITIONED BY (", ",", ")")
} else ""
sql(s"CREATE TABLE $table$values USING $format $partitionSpec")
}
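  // Illustrative sketch, not part of the original source: with format = "parquet",
  //   createTable("t1", Seq("c1", "c2"), Seq("int", "string"), Seq("c2"))
  // issues roughly
  //   CREATE TABLE t1(c1 int, c2 string) USING parquet PARTITIONED BY (c2)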
protected def processInsert(
tableName: String,
input: DataFrame,
cols: Seq[String] = Nil,
partitionExprs: Seq[String] = Nil,
overwrite: Boolean): Unit = {
val tmpView = "tmp_view"
val columnList = if (cols.nonEmpty) cols.mkString("(", ",", ")") else ""
val partitionList = if (partitionExprs.nonEmpty) {
partitionExprs.mkString("PARTITION (", ",", ")")
} else ""
withTempView(tmpView) {
input.createOrReplaceTempView(tmpView)
val overwriteStr = if (overwrite) "OVERWRITE" else "INTO"
sql(
s"INSERT $overwriteStr TABLE $tableName $partitionList $columnList SELECT * FROM $tmpView")
}
}
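  // Illustrative sketch, not part of the original source:
  //   processInsert("t1", df, Seq("c1", "c2"), Seq("c3=3"), overwrite = true)
  // registers `df` under a temporary view and then runs roughly
  //   INSERT OVERWRITE TABLE t1 PARTITION (c3=3) (c1,c2) SELECT * FROM tmp_view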
protected def verifyTable(tableName: String, expected: DataFrame): Unit = {
checkAnswer(spark.table(tableName), expected)
}
test("insert with column list - follow table output order") {
withTable("t1") {
val df = Seq((1, 2L, "3")).toDF()
val cols = Seq("c1", "c2", "c3")
createTable("t1", cols, Seq("int", "long", "string"))
Seq(false, true).foreach { m =>
processInsert("t1", df, cols, overwrite = m)
verifyTable("t1", df)
}
}
}
test("insert with column list - follow table output order + partitioned table") {
val cols = Seq("c1", "c2", "c3", "c4")
val df = Seq((1, 2, 3, 4)).toDF(cols: _*)
withTable("t1") {
createTable("t1", cols, Seq("int", "int", "int", "int"), cols.takeRight(2))
Seq(false, true).foreach { m =>
processInsert("t1", df, cols, overwrite = m)
verifyTable("t1", df)
}
}
withTable("t1") {
createTable("t1", cols, Seq("int", "int", "int", "int"), cols.takeRight(2))
Seq(false, true).foreach { m =>
processInsert(
"t1", df.selectExpr("c1", "c2"), cols.take(2), Seq("c3=3", "c4=4"), overwrite = m)
verifyTable("t1", df)
}
}
withTable("t1") {
createTable("t1", cols, Seq("int", "int", "int", "int"), cols.takeRight(2))
Seq(false, true).foreach { m =>
processInsert("t1", df.selectExpr("c1", "c2", "c4"),
cols.filterNot(_ == "c3"), Seq("c3=3", "c4"), overwrite = m)
verifyTable("t1", df)
}
}
}
test("insert with column list - table output reorder") {
withTable("t1") {
val cols = Seq("c1", "c2", "c3")
val df = Seq((1, 2, 3)).toDF(cols: _*)
createTable("t1", cols, Seq("int", "int", "int"))
Seq(false, true).foreach { m =>
processInsert("t1", df, cols.reverse, overwrite = m)
verifyTable("t1", df.selectExpr(cols.reverse: _*))
}
}
}
test("insert with column list - table output reorder + partitioned table") {
val cols = Seq("c1", "c2", "c3", "c4")
val df = Seq((1, 2, 3, 4)).toDF(cols: _*)
withTable("t1") {
createTable("t1", cols, Seq("int", "int", "int", "int"), cols.takeRight(2))
Seq(false, true).foreach { m =>
processInsert("t1", df, cols.reverse, overwrite = m)
verifyTable("t1", df.selectExpr(cols.reverse: _*))
}
}
withTable("t1") {
createTable("t1", cols, Seq("int", "int", "int", "int"), cols.takeRight(2))
Seq(false, true).foreach { m =>
processInsert(
"t1", df.selectExpr("c1", "c2"), cols.take(2).reverse, Seq("c3=3", "c4=4"), overwrite = m)
verifyTable("t1", df.selectExpr("c2", "c1", "c3", "c4"))
}
}
withTable("t1") {
createTable("t1", cols, Seq("int", "int", "int", "int"), cols.takeRight(2))
Seq(false, true).foreach { m =>
processInsert("t1",
df.selectExpr("c1", "c2", "c4"), Seq("c4", "c2", "c1"), Seq("c3=3", "c4"), overwrite = m)
verifyTable("t1", df.selectExpr("c4", "c2", "c3", "c1"))
}
}
}
test("insert with column list - duplicated columns") {
withTable("t1") {
val cols = Seq("c1", "c2", "c3")
createTable("t1", cols, Seq("int", "long", "string"))
val e1 = intercept[AnalysisException](sql(s"INSERT INTO t1 (c1, c2, c2) values(1, 2, 3)"))
assert(e1.getMessage.contains("Found duplicate column(s) in the column list: `c2`"))
}
}
test("insert with column list - invalid columns") {
withTable("t1") {
val cols = Seq("c1", "c2", "c3")
createTable("t1", cols, Seq("int", "long", "string"))
val e1 = intercept[AnalysisException](sql(s"INSERT INTO t1 (c1, c2, c4) values(1, 2, 3)"))
assert(e1.getMessage.contains("Cannot resolve column name c4"))
}
}
test("insert with column list - mismatched column list size") {
val msg = "Cannot write to table due to mismatched user specified column size"
withTable("t1") {
val cols = Seq("c1", "c2", "c3")
createTable("t1", cols, Seq("int", "long", "string"))
val e1 = intercept[AnalysisException](sql(s"INSERT INTO t1 (c1, c2) values(1, 2, 3)"))
assert(e1.getMessage.contains(msg))
val e2 = intercept[AnalysisException](sql(s"INSERT INTO t1 (c1, c2, c3) values(1, 2)"))
assert(e2.getMessage.contains(msg))
}
}
test("insert with column list - mismatched target table out size after rewritten query") {
val v2Msg = "Cannot write to 'testcat.t1', not enough data columns:"
val cols = Seq("c1", "c2", "c3", "c4")
withTable("t1") {
createTable("t1", cols, Seq.fill(4)("int"))
val e1 = intercept[AnalysisException](sql(s"INSERT INTO t1 (c1) values(1)"))
assert(e1.getMessage.contains("target table has 4 column(s) but the inserted data has 1") ||
e1.getMessage.contains(v2Msg))
}
withTable("t1") {
createTable("t1", cols, Seq.fill(4)("int"), cols.takeRight(2))
val e1 = intercept[AnalysisException] {
sql(s"INSERT INTO t1 partition(c3=3, c4=4) (c1) values(1)")
}
assert(e1.getMessage.contains("target table has 4 column(s) but the inserted data has 3") ||
e1.getMessage.contains(v2Msg))
}
}
test("SPARK-34223: static partition with null raise NPE") {
withTable("t") {
sql(s"CREATE TABLE t(i STRING, c string) USING PARQUET PARTITIONED BY (c)")
sql("INSERT OVERWRITE t PARTITION (c=null) VALUES ('1')")
checkAnswer(spark.table("t"), Row("1", null))
}
}
test("SPARK-33474: Support typed literals as partition spec values") {
withTable("t1") {
val binaryStr = "Spark SQL"
val binaryHexStr = Hex.hex(UTF8String.fromString(binaryStr).getBytes).toString
sql(
"""
| CREATE TABLE t1(name STRING, part1 DATE, part2 TIMESTAMP, part3 BINARY,
| part4 STRING, part5 STRING, part6 STRING, part7 STRING)
| USING PARQUET PARTITIONED BY (part1, part2, part3, part4, part5, part6, part7)
""".stripMargin)
sql(
s"""
| INSERT OVERWRITE t1 PARTITION(
| part1 = date'2019-01-01',
| part2 = timestamp'2019-01-01 11:11:11',
| part3 = X'$binaryHexStr',
| part4 = 'p1',
| part5 = date'2019-01-01',
| part6 = timestamp'2019-01-01 11:11:11',
| part7 = X'$binaryHexStr'
| ) VALUES('a')
""".stripMargin)
checkAnswer(sql(
"""
| SELECT
| name,
| CAST(part1 AS STRING),
| CAST(part2 as STRING),
| CAST(part3 as STRING),
| part4,
| part5,
| part6,
| part7
| FROM t1
""".stripMargin),
Row("a", "2019-01-01", "2019-01-01 11:11:11", "Spark SQL", "p1",
"2019-01-01", "2019-01-01 11:11:11", "Spark SQL"))
val e = intercept[AnalysisException] {
sql("CREATE TABLE t2(name STRING, part INTERVAL) USING PARQUET PARTITIONED BY (part)")
}.getMessage
assert(e.contains("Cannot use interval"))
}
}
test("SPARK-34556: " +
"checking duplicate static partition columns should respect case sensitive conf") {
withTable("t") {
sql(s"CREATE TABLE t(i STRING, c string) USING PARQUET PARTITIONED BY (c)")
val e = intercept[AnalysisException] {
sql("INSERT OVERWRITE t PARTITION (c='2', C='3') VALUES (1)")
}
assert(e.getMessage.contains("Found duplicate keys 'c'"))
}
    // The following code is skipped for Hive because columns stored in the Hive Metastore are
    // always case insensitive and we cannot create such a table in the Hive Metastore.
if (!format.startsWith("hive")) {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
withTable("t") {
sql(s"CREATE TABLE t(i int, c string, C string) USING PARQUET PARTITIONED BY (c, C)")
sql("INSERT OVERWRITE t PARTITION (c='2', C='3') VALUES (1)")
checkAnswer(spark.table("t"), Row(1, "2", "3"))
}
}
}
}
test("SPARK-30844: static partition should also follow StoreAssignmentPolicy") {
val testingPolicies = if (format == "foo") {
// DS v2 doesn't support the legacy policy
Seq(SQLConf.StoreAssignmentPolicy.ANSI, SQLConf.StoreAssignmentPolicy.STRICT)
} else {
SQLConf.StoreAssignmentPolicy.values
}
def shouldThrowException(policy: SQLConf.StoreAssignmentPolicy.Value): Boolean = policy match {
case SQLConf.StoreAssignmentPolicy.ANSI | SQLConf.StoreAssignmentPolicy.STRICT =>
true
case SQLConf.StoreAssignmentPolicy.LEGACY =>
false
}
testingPolicies.foreach { policy =>
withSQLConf(SQLConf.STORE_ASSIGNMENT_POLICY.key -> policy.toString) {
withTable("t") {
sql("create table t(a int, b string) using parquet partitioned by (a)")
if (shouldThrowException(policy)) {
val errorMsg = intercept[NumberFormatException] {
sql("insert into t partition(a='ansi') values('ansi')")
}.getMessage
assert(errorMsg.contains("invalid input syntax for type numeric: ansi"))
} else {
sql("insert into t partition(a='ansi') values('ansi')")
checkAnswer(sql("select * from t"), Row("ansi", null) :: Nil)
}
}
}
}
}
test("SPARK-38228: legacy store assignment should not fail on error under ANSI mode") {
// DS v2 doesn't support the legacy policy
if (format != "foo") {
Seq(true, false).foreach { ansiEnabled =>
withSQLConf(
SQLConf.STORE_ASSIGNMENT_POLICY.key -> SQLConf.StoreAssignmentPolicy.LEGACY.toString,
SQLConf.ANSI_ENABLED.key -> ansiEnabled.toString) {
withTable("t") {
sql("create table t(a int) using parquet")
sql("insert into t values('ansi')")
checkAnswer(spark.table("t"), Row(null))
}
}
}
}
}
}
class FileSourceSQLInsertTestSuite extends SQLInsertTestSuite with SharedSparkSession {
override def format: String = "parquet"
override protected def sparkConf: SparkConf = {
super.sparkConf.set(SQLConf.USE_V1_SOURCE_LIST, format)
}
}
class DSV2SQLInsertTestSuite extends SQLInsertTestSuite with SharedSparkSession {
override def format: String = "foo"
protected override def sparkConf: SparkConf = {
super.sparkConf
.set("spark.sql.catalog.testcat", classOf[InMemoryPartitionTableCatalog].getName)
.set(SQLConf.DEFAULT_CATALOG.key, "testcat")
}
}
|
ueshin/apache-spark
|
sql/core/src/test/scala/org/apache/spark/sql/SQLInsertTestSuite.scala
|
Scala
|
apache-2.0
| 13,009
|
package com.wavesplatform.lang.v1
import cats.Id
import cats.syntax.applicative._
import com.wavesplatform.lang.v1.compiler.Terms._
import com.wavesplatform.lang.v1.task.TaskM
import com.wavesplatform.lang.v1.task.imports._
import scala.annotation.tailrec
import scala.collection.mutable.Queue
package object compiler {
type CompileM[A] = TaskM[CompilerContext, CompilationError, A]
implicit class EiExt[A](ei: Either[CompilationError, A]) {
def toCompileM: CompileM[A] =
ei.fold(
raiseError[Id, CompilerContext, CompilationError, A],
_.pure[CompileM]
)
}
def containsBlockV2(e: EXPR): Boolean = {
@tailrec
def horTraversal(queue: Queue[EXPR]): Boolean = {
queue.headOption match {
case Some(expr) =>
expr match {
case BLOCK(_, _) => true
case GETTER(expr1, _) => horTraversal(queue.tail += expr1)
case LET_BLOCK(let, body) => horTraversal(queue.tail ++ Queue(let.value, body))
case IF(expr1, expr2, expr3) => horTraversal(queue.tail ++ Queue(expr1, expr2, expr3))
case FUNCTION_CALL(_, exprList) => horTraversal(queue.tail ++ exprList)
case _ => false
}
case None => false
}
}
horTraversal(Queue(e))
}
def containsArray(e: EXPR): Boolean = {
@tailrec
def horTraversal(queue: Queue[EXPR]): Boolean = {
queue.headOption match {
case Some(expr) =>
expr match {
case ARR(_) => true
case BLOCK(let: LET, body) => horTraversal(queue.tail ++ Queue(let.value, body))
case BLOCK(func: FUNC, body) => horTraversal(queue.tail ++ Queue(func.body, body))
case LET_BLOCK(let, body) => horTraversal(queue.tail ++ Queue(let.value, body))
case GETTER(expr1, _) => horTraversal(queue.tail += expr1)
case IF(expr1, expr2, expr3) => horTraversal(queue.tail ++ Queue(expr1, expr2, expr3))
case FUNCTION_CALL(_, exprList) => horTraversal(queue.tail ++ exprList)
case _ => false
}
case None => false
}
}
horTraversal(Queue(e))
}
/**
* (a1,...,an),...,(z1,...,zk)
* ||
 * \/
* (a1,...,z1),...,(a1,...,zk),...,(an,...,zk)
*
* regroup(
* List(
* List(1, 2),
* List("a", "b", "c")
* )
* ) =
* List(
* List(1, "a"),
* List(2, "a"),
* List(1, "b"),
* List(2, "b"),
* List(1, "c"),
* List(2, "c")
* )
*/
def regroup[A](listOfLists: Seq[Seq[A]]): Seq[Seq[A]] = {
def combine(acc: Seq[Seq[A]], next: Seq[A]): Seq[Seq[A]] =
if (acc.isEmpty)
next.map(Seq(_))
else
for {
a <- acc
b <- next
} yield a :+ b
listOfLists.foldLeft(Seq.empty[Seq[A]])(combine)
}
}
|
wavesplatform/Waves
|
lang/shared/src/main/scala/com/wavesplatform/lang/v1/compiler/package.scala
|
Scala
|
mit
| 3,020
|
package sma.eventsourcing
import akka.actor.{Actor, ActorLogging, PoisonPill}
trait Particle extends Actor with ActorLogging {
def logStarting = log.info(s"--> [${self.path.name}] actor is starting")
def logStopped = log.info(s"--> [${self.path.name}] actor stopped")
def logReceiving(text: String) = log.info(s"--> [${self.path.name}] actor received ${text}")
def logUnknown = log.error(s"--> [${self.path.name}] actor received an unknown message")
def suicide = {
log.error(s"--> [${self.path.name}] stopping the actor")
self ! PoisonPill
}
override def unhandled(message: Any): Unit = {
logUnknown
suicide
}
override def postStop(): Unit = logStopped
}
|
eduardo-lago-aguilar/sma
|
src/main/scala/sma/eventsourcing/Particle.scala
|
Scala
|
mit
| 700
|
package com.shalloui.tblite.demo.data
/**
* Created by a.reisberg on 8/30/2016.
*/
sealed trait Company
case class Department(name: String, employeeIds: List[String]) extends Company
case class Employee(_id: String, name: String, age: Int, address: Address) extends Company
case class Address(city: String, zip: String)
|
a-reisberg/typebase-lite
|
tbljavademo/src/main/scala/com/shalloui/tblite/demo/data/Company.scala
|
Scala
|
apache-2.0
| 327
|
/* Copyright (C) 2016 Tomáš Janoušek
* This file is a part of locus-rflkt-addon.
* See the COPYING and LICENSE files in the project root directory.
*/
package cz.nomi.locusRflktAddon
import android.content.SharedPreferences
import android.preference._
import android.support.v7.app.AppCompatActivity
import android.widget.{LinearLayout, TextView}
import Log._
import display.Pages.{ConfPage, ConfPageNav, ConfPageNotif,
ConfPage1x3, ConfPage2x2, Conf1x3, Conf2x2}
class Settings extends AppCompatActivity
with RActivity with BackToParentActivity
{
import macroid._
onCreate {
logger.info("Settings: onCreate")
setContentView {
import scala.language.postfixOps
import macroid.FullDsl._
import macroid.contrib.LpTweaks.matchWidth
Ui.get {
l[LinearLayout](
w[TextView] <~ text("(need reconnect to take effect)") <~ matchWidth <~ center <~ padding(all = 3 dp),
f[SettingsFragment].framed(Gen.Id.settings, Gen.Tag.settings)
) <~ vertical
}
}
}
private def center: Tweak[TextView] = {
import android.view.Gravity
Tweak[TextView](_.setGravity(Gravity.CENTER_HORIZONTAL))
}
}
class SettingsFragment extends PreferenceFragment with RFragment {
onCreate {
val root = getPreferenceManager().createPreferenceScreen(getActivity())
setPreferenceScreen(root)
ButtonSettings.addToGroup(this, root); ()
PageSettings.addToGroup(this, root); ()
}
}
object ButtonSettings extends SettingCategory with Setting2x2 {
lazy val prefix = "allPages.buttons"
lazy val title = "RFLKT button functions"
import display.Const.{Function => F}
lazy val entries = Seq(
"Previous page" -> F.hwPageLeft,
"Next page" -> F.hwPageRight,
"Start/pause track recording" -> F.startStopWorkout,
"Backlight for 5 seconds" -> F.backlight
)
lazy val northWestDef = F.startStopWorkout
lazy val northEastDef = F.backlight
lazy val southWestDef = F.hwPageLeft
lazy val southEastDef = F.hwPageRight
}
object PageSettings extends SettingCategory with SettingValue[Seq[ConfPage]] {
import display.Const.{Page => P}
lazy val title = "RFLKT pages"
lazy val pages =
(1 to 4).map(new SettingWidgetPage(_)) :+
new SettingNavPage :+
new SettingNotifPage
override def addPreferences(pf: PreferenceFragment,
group: PreferenceGroup): Seq[Preference] =
super.addPreferences(pf, group) ++
pages.map(_.addToGroup(pf, group))
def getValue(pref: SharedPreferences): Seq[ConfPage] =
pages.map(_.getValue(pref)).flatten
}
class SettingNavPage extends SettingScreen with SettingValue[Option[ConfPageNav]] {
lazy val title = "Navigation page"
lazy val enabled =
SwitchPref("navigationPage.enabled", "Enabled",
"(loading pages faster if disabled)", true)
lazy val notReduced =
SwitchPref("navigationPage.notReduced", "Full icons",
"(loading pages faster if disabled)", false)
lazy val autoSwitch =
SwitchPref("navigationPage.autoSwitch", "Autoswitch",
"(show navigation 100 meters before turn)", true)
override def addPreferences(pf: PreferenceFragment,
group: PreferenceGroup): Seq[Preference] = {
val sup = super.addPreferences(pf, group)
val switch = enabled.addToGroup(pf, group)
val other = Seq(
notReduced.addToGroup(pf, group),
autoSwitch.addToGroup(pf, group)
)
switch.setDisableDependentsState(false)
other.foreach(_.setDependency(switch.getKey()))
sup ++: switch +: other
}
def getValue(pref: SharedPreferences): Option[ConfPageNav] =
if (enabled.getValue(pref))
Some(new ConfPageNav(
reduced = !notReduced.getValue(pref),
autoSwitch = autoSwitch.getValue(pref)
))
else
None
}
class SettingNotifPage extends Setting with SettingValue[Option[ConfPageNotif]] {
lazy val enabled =
SwitchPref("notificationPage.enabled", "Notification page",
"(loading pages faster if disabled)", true)
def addToGroup(pf: PreferenceFragment, root: PreferenceGroup): Preference =
enabled.addToGroup(pf, root)
def getValue(pref: SharedPreferences): Option[ConfPageNotif] =
if (enabled.getValue(pref)) Some(new ConfPageNotif()) else None
}
class SettingWidgetPage(number: Int) extends SettingScreen with SettingValue[Option[ConfPage]] {
lazy val title = s"Page $number"
lazy val enabled =
if (number == 1)
ConstPref(true)
else
SwitchPref(s"pages.$number.enabled", "Enabled", null, false)
lazy val templateEntries = Seq(
"top and 2 × 2 widgets" -> "2x2",
"top and 1 × 3 widgets" -> "1x3"
)
lazy val templateDef = "2x2"
lazy val template = ListPref(s"pages.$number.template",
"Template", templateEntries, templateDef)
def widgets(t: String) = t match {
case "2x2" => new SettingPage2x2(number)
case "1x3" => new SettingPage1x3(number)
case _ => ???
}
override def addPreferences(pf: PreferenceFragment, group: PreferenceGroup): Seq[Preference] = {
val sup = super.addPreferences(pf, group)
val switch = enabled.addToGroup(pf, group)
val templ = template.addToGroup(pf, group)
val t = template.getValue(templ.getSharedPreferences())
val widgetGroup = widgets(t).addToGroup(pf, group)
onPrefChange(templ) { newTemplate: String =>
widgetGroup.removeAll()
widgets(newTemplate).addPreferences(pf, widgetGroup)
}
if (switch == null) { // first page
sup :+ templ :+ widgetGroup
} else {
switch.setDisableDependentsState(false)
templ.setDependency(switch.getKey())
widgetGroup.setDependency(switch.getKey())
sup :+ switch :+ templ :+ widgetGroup
}
}
override def getValue(pref: SharedPreferences) =
if (enabled.getValue(pref))
Some(widgets(template.getValue(pref)).getValue(pref))
else
None
private def onPrefChange[T](pref: Preference)(f: T => Unit) =
pref.setOnPreferenceChangeListener {
new Preference.OnPreferenceChangeListener {
def onPreferenceChange(pref: Preference, newValue: Any): Boolean = {
f(newValue.asInstanceOf[T])
true
}
}
}
}
class SettingPage1x3(number: Int) extends SettingPageWidgets(number)
with SettingNorth with Setting1x3
{
import display.Const.{Widget => W}
lazy val entries =
display.Pages.unitWidgets.map(w => w.description -> w.key)
lazy val line1Def = W.speedCurrent
lazy val line2Def = W.averageSpeedWorkout
lazy val line3Def = W.averageMovingSpeedWorkout
override def getValue(pref: SharedPreferences) =
new ConfPage1x3(
s"PAGE$number",
north.getValue(pref),
super.getValue(pref))
}
class SettingPage2x2(number: Int) extends SettingPageWidgets(number)
with SettingNorth with Setting2x2
{
import display.Const.{Widget => W}
lazy val entries =
display.Pages.unitWidgets.map(w => w.description -> w.key)
lazy val northWestDef = W.speedCurrent
lazy val northEastDef = W.distanceWorkout
lazy val southWestDef = W.cadenceCurrent
lazy val southEastDef = W.heartRateCurrent
override def getValue(pref: SharedPreferences) =
new ConfPage2x2(
s"PAGE$number",
north.getValue(pref),
super.getValue(pref))
}
trait SettingNorth extends SettingPageWidgets {
import display.Const.{Widget => W}
lazy val northEntries = Seq(
"Clock" -> W.clock,
"Time – total (workout)" -> W.timeWorkout,
"Time – moving (workout)" -> W.timeMovingWorkout
)
lazy val northDef = W.clock
lazy val north =
ListPref(s"$prefix.north", "top", northEntries, northDef)
override def addPreferences(pf: PreferenceFragment, group: PreferenceGroup): Seq[Preference] =
super.addPreferences(pf, group) :+
north.addToGroup(pf, group)
}
trait Setting1x3 extends SettingGroup with SettingValue[Conf1x3] {
def prefix: String
def entries: Seq[(String, String)]
def line1Def: String
def line2Def: String
def line3Def: String
private lazy val line1 =
ListPref(s"$prefix.line1", "line 1", entries, line1Def)
private lazy val line2 =
ListPref(s"$prefix.line2", "line 2", entries, line2Def)
private lazy val line3 =
ListPref(s"$prefix.line3", "line 3", entries, line3Def)
override def addPreferences(pf: PreferenceFragment,
group: PreferenceGroup): Seq[Preference] =
super.addPreferences(pf, group) ++
Seq(
line1.addToGroup(pf, group),
line2.addToGroup(pf, group),
line3.addToGroup(pf, group)
)
def getValue(pref: SharedPreferences) =
new Conf1x3(
line1.getValue(pref),
line2.getValue(pref),
line3.getValue(pref)
)
}
trait Setting2x2 extends SettingGroup with SettingValue[Conf2x2] {
def prefix: String
def entries: Seq[(String, String)]
def northWestDef: String
def northEastDef: String
def southWestDef: String
def southEastDef: String
private lazy val northWest =
ListPref(s"$prefix.northWest", "top left", entries, northWestDef)
private lazy val northEast =
ListPref(s"$prefix.northEast", "top right", entries, northEastDef)
private lazy val southWest =
ListPref(s"$prefix.southWest", "bottom left", entries, southWestDef)
private lazy val southEast =
ListPref(s"$prefix.southEast", "bottom right", entries, southEastDef)
override def addPreferences(pf: PreferenceFragment,
group: PreferenceGroup): Seq[Preference] =
super.addPreferences(pf, group) ++
Seq(
northWest.addToGroup(pf, group),
northEast.addToGroup(pf, group),
southWest.addToGroup(pf, group),
southEast.addToGroup(pf, group)
)
def getValue(pref: SharedPreferences) =
new Conf2x2(
northWest.getValue(pref),
northEast.getValue(pref),
southWest.getValue(pref),
southEast.getValue(pref)
)
}
abstract class SettingPageWidgets(number: Int) extends SettingCategory {
lazy val prefix = s"pages.$number.widgets"
lazy val title = "Widgets"
}
abstract class SettingCategory extends SettingGroup {
def createGroup(pf: PreferenceFragment): PreferenceGroup = {
val cat = new PreferenceCategory(pf.getActivity())
cat.setTitle(title)
cat
}
}
abstract class SettingScreen extends SettingGroup {
def createGroup(pf: PreferenceFragment): PreferenceGroup = {
val screen = pf.getPreferenceManager().createPreferenceScreen(pf.getActivity())
screen.setTitle(title)
screen
}
}
abstract class SettingGroup extends Setting {
def title: String
def createGroup(pf: PreferenceFragment): PreferenceGroup
def addPreferences(pf: PreferenceFragment,
group: PreferenceGroup): Seq[Preference] = Seq()
def addToGroup(pf: PreferenceFragment, root: PreferenceGroup): PreferenceGroup = {
val group = createGroup(pf)
root.addPreference(group)
addPreferences(pf, group)
group
}
}
case class ListPref(key: String, title: String,
entries: Seq[(String, String)], default: String)
extends SettingWidget[ListPreference] with SettingPrefValue[String]
{
protected def preference(pf: PreferenceFragment): ListPreference =
new ListPreference(pf.getActivity()) {
setKey(key)
setTitle(title)
setSummary("%s")
setEntries(entries.map(_._1: CharSequence).toArray)
setEntryValues(entries.map(_._2: CharSequence).toArray)
setDefaultValue(default)
override protected def onSetInitialValue(restore: Boolean, any: Any) {
if (restore) {
getPersistedString(default) match {
case v if validValues(v) => setValue(v)
case _ => setValue(default)
}
} else {
setValue(default)
}
}
}
override def getValue(pref: SharedPreferences): String =
super.getValue(pref) match {
case v if validValues(v) => v
case _ => default
}
private lazy val validValues: Set[String] = entries.map(_._2).toSet
}
case class SwitchPref(key: String, title: String, summary: String, default: Boolean)
extends SettingWidget[SwitchPreference] with SettingPrefValue[Boolean]
{
protected def preference(pf: PreferenceFragment): SwitchPreference =
new SwitchPreference(pf.getActivity()) {
setKey(key)
setTitle(title)
setSummary(summary)
setDefaultValue(default)
}
}
case class ConstPref[T](default: T)
extends SettingWidget[Null] with SettingValue[T]
{
protected def preference(pf: PreferenceFragment): Null = null
def getValue(pref: SharedPreferences): T = default
}
trait SettingWidget[P <: Preference] extends Setting {
protected def preference(pf: PreferenceFragment): P
def addToGroup(pf: PreferenceFragment, root: PreferenceGroup): P = {
val widget = preference(pf)
if (widget != null) root.addPreference(widget)
widget
}
}
trait SettingPrefValue[T] extends SettingValue[T] {
def key: String
def default: T
def getValue(pref: SharedPreferences): T =
Preferences.preferenceVar(key, default)(pref)
}
trait SettingValue[T] extends Setting {
def getValue(pref: SharedPreferences): T
}
abstract class Setting {
def addToGroup(pf: PreferenceFragment, root: PreferenceGroup): Preference
}
|
liskin/locus-rflkt-addon
|
src/main/scala/cz/nomi/locusRflktAddon/Settings.scala
|
Scala
|
gpl-3.0
| 13,116
|
package com.avsystem.commons
package redis.commands
import com.avsystem.commons.misc.{Opt => _, OptArg => _, _}
import com.avsystem.commons.redis.CommandEncoder.CommandArg
import com.avsystem.commons.redis._
import com.avsystem.commons.redis.commands.ReplyDecoders._
import com.avsystem.commons.redis.protocol.SimpleStringMsg
import scala.collection.compat._
import scala.collection.mutable
trait KeyedClusterApi extends ApiSubset {
def keySlot(key: Key): Int =
Hash.slot(keyCodec.write(key))
/** Executes [[http://redis.io/commands/cluster-keyslot CLUSTER KEYSLOT]] */
def clusterKeyslot(key: Key): Result[Int] =
execute(new ClusterKeyslot(key))
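  // For reference: keySlot computes the slot locally from the encoded key via Hash.slot
  // (per the Redis Cluster specification this is CRC16 of the key modulo 16384, presumably
  // honouring {hash tags}), while clusterKeyslot asks the server for the same value.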
private final class ClusterKeyslot(key: Key) extends RedisIntCommand with NodeCommand {
val encoded: Encoded = encoder("CLUSTER", "KEYSLOT").key(key).result
}
}
trait NodeClusterApi extends KeyedClusterApi {
/** Executes [[http://redis.io/commands/cluster-addslots CLUSTER ADDSLOTS]] */
def clusterAddslots(slot: Int, slots: Int*): Result[Unit] =
execute(new ClusterAddslots(slot +:: slots))
/** Executes [[http://redis.io/commands/cluster-addslots CLUSTER ADDSLOTS]]
* or does nothing when `slots` is empty. */
def clusterAddslots(slots: Iterable[Int]): Result[Unit] =
execute(new ClusterAddslots(slots))
/** Executes [[http://redis.io/commands/cluster-bumpepoch CLUSTER BUMPEPOCH]] */
def clusterBumpepoch: Result[BumpepochResult] =
execute(ClusterBumpepoch)
/** Executes [[http://redis.io/commands/cluster-count-failure-reports CLUSTER COUNT-FAILURE-REPORTS]] */
def clusterCountFailureReports(nodeId: NodeId): Result[Long] =
execute(new ClusterCountFailureReports(nodeId))
/** Executes [[http://redis.io/commands/cluster-countkeysinslot CLUSTER COUNTKEYSINSLOT]] */
def clusterCountkeysinslot(slot: Int): Result[Long] =
execute(new ClusterCountkeysinslot(slot))
/** Executes [[http://redis.io/commands/cluster-delslots CLUSTER DELSLOTS]] */
def clusterDelslots(slot: Int, slots: Int*): Result[Unit] =
execute(new ClusterDelslots(slot +:: slots))
/** Executes [[http://redis.io/commands/cluster-delslots CLUSTER DELSLOTS]]
* or does nothing when `slots` is empty */
def clusterDelslots(slots: Iterable[Int]): Result[Unit] =
execute(new ClusterDelslots(slots))
/** Executes [[http://redis.io/commands/cluster-failover CLUSTER FAILOVER]] */
def clusterFailover: Result[Unit] = clusterFailover()
/** Executes [[http://redis.io/commands/cluster-failover CLUSTER FAILOVER]] */
def clusterFailover(option: OptArg[FailoverOption] = OptArg.Empty): Result[Unit] =
execute(new ClusterFailover(option.toOpt))
/** Executes [[http://redis.io/commands/cluster-flushslots CLUSTER FLUSHSLOTS]] */
def clusterFlushslots: Result[Unit] =
execute(ClusterFlushslots)
/** Executes [[http://redis.io/commands/cluster-forget CLUSTER FORGET]] */
def clusterForget(nodeId: NodeId): Result[Unit] =
execute(new ClusterForget(nodeId))
/** Executes [[http://redis.io/commands/cluster-getkeysinslot CLUSTER GETKEYSINSLOT]] */
def clusterGetkeysinslot(slot: Int, count: Int): Result[Seq[Key]] =
execute(new ClusterGetkeysinslot(slot, count))
/** Executes [[http://redis.io/commands/cluster-info CLUSTER INFO]] */
def clusterInfo: Result[ClusterStateInfo] =
execute(ClusterInfo)
/** Executes [[http://redis.io/commands/cluster-meet CLUSTER MEET]] */
def clusterMeet(address: NodeAddress): Result[Unit] =
execute(new ClusterMeet(address))
/** Executes [[http://redis.io/commands/cluster-myid CLUSTER MYID]] */
def clusterMyid: Result[NodeId] =
execute(ClusterMyid)
/** Executes [[http://redis.io/commands/cluster-nodes CLUSTER NODES]] */
def clusterNodes: Result[Seq[NodeInfo]] =
execute(ClusterNodes)
/** Executes [[http://redis.io/commands/cluster-replicas CLUSTER REPLICAS]] */
def clusterReplicas(nodeId: NodeId): Result[Seq[NodeInfo]] =
execute(new ClusterReplicas(nodeId))
/** Executes [[http://redis.io/commands/cluster-replicate CLUSTER REPLICATE]] */
def clusterReplicate(nodeId: NodeId): Result[Unit] =
execute(new ClusterReplicate(nodeId))
/** Executes [[http://redis.io/commands/cluster-reset CLUSTER RESET]] */
def clusterReset: Result[Unit] = clusterReset()
/** Executes [[http://redis.io/commands/cluster-reset CLUSTER RESET]] */
def clusterReset(hard: Boolean = false): Result[Unit] =
execute(new ClusterReset(hard))
/** Executes [[http://redis.io/commands/cluster-saveconfig CLUSTER SAVECONFIG]] */
def clusterSaveconfig: Result[Unit] =
execute(ClusterSaveconfig)
/** Executes [[http://redis.io/commands/cluster-set-config-epoch CLUSTER SET-CONFIG-EPOCH]] */
def clusterSetConfigEpoch(configEpoch: Long): Result[Unit] =
execute(new ClusterSetConfigEpoch(configEpoch))
/** Executes [[http://redis.io/commands/cluster-setslot CLUSTER SETSLOT]] */
def clusterSetslot(slot: Int, subcommand: SetslotCmd): Result[Unit] =
execute(new ClusterSetslot(slot, subcommand))
/** Executes [[http://redis.io/commands/cluster-slaves CLUSTER SLAVES]] */
def clusterSlaves(nodeId: NodeId): Result[Seq[NodeInfo]] =
execute(new ClusterSlaves(nodeId))
/** Executes [[http://redis.io/commands/cluster-slots CLUSTER SLOTS]] */
def clusterSlots: Result[Seq[SlotRangeMapping]] =
execute(ClusterSlots)
private final class ClusterAddslots(slots: Iterable[Int]) extends RedisUnitCommand with NodeCommand {
val encoded: Encoded = encoder("CLUSTER", "ADDSLOTS").add(slots).result
override def immediateResult: Opt[Unit] = whenEmpty(slots, ())
}
private object ClusterBumpepoch extends AbstractRedisCommand[BumpepochResult](simpleBumpepochResult) with NodeCommand {
val encoded: Encoded = encoder("CLUSTER", "BUMPEPOCH").result
}
private final class ClusterCountFailureReports(nodeId: NodeId) extends RedisLongCommand with NodeCommand {
val encoded: Encoded = encoder("CLUSTER", "COUNT-FAILURE-REPORTS").add(nodeId.raw).result
}
private final class ClusterCountkeysinslot(slot: Int) extends RedisLongCommand with NodeCommand {
val encoded: Encoded = encoder("CLUSTER", "COUNTKEYSINSLOT").add(slot).result
}
private final class ClusterDelslots(slots: Iterable[Int]) extends RedisUnitCommand with NodeCommand {
val encoded: Encoded = encoder("CLUSTER", "DELSLOTS").add(slots).result
override def immediateResult: Opt[Unit] = whenEmpty(slots, ())
}
private final class ClusterFailover(option: Opt[FailoverOption]) extends RedisUnitCommand with NodeCommand {
val encoded: Encoded = encoder("CLUSTER", "FAILOVER").optAdd(option).result
}
private object ClusterFlushslots extends RedisUnitCommand with NodeCommand {
val encoded: Encoded = encoder("CLUSTER", "FLUSHSLOTS").result
}
private final class ClusterForget(nodeId: NodeId) extends RedisUnitCommand with NodeCommand {
val encoded: Encoded = encoder("CLUSTER", "FORGET").add(nodeId.raw).result
}
private final class ClusterGetkeysinslot(slot: Int, count: Int) extends RedisDataSeqCommand[Key] with NodeCommand {
val encoded: Encoded = encoder("CLUSTER", "GETKEYSINSLOT").add(slot).add(count).result
}
private object ClusterInfo
extends AbstractRedisCommand[ClusterStateInfo](bulk(bs => ClusterStateInfo(bs.utf8String))) with NodeCommand {
val encoded: Encoded = encoder("CLUSTER", "INFO").result
}
private final class ClusterMeet(address: NodeAddress) extends RedisUnitCommand with NodeCommand {
val encoded: Encoded = encoder("CLUSTER", "MEET").add(address.ip).add(address.port).result
}
private final object ClusterMyid extends AbstractRedisCommand[NodeId](bulkAsNodeId) with NodeCommand {
val encoded: Encoded = encoder("CLUSTER", "MYID").result
}
private object ClusterNodes extends AbstractRedisCommand[Seq[NodeInfo]](bulkAsNodeInfos) with NodeCommand {
val encoded: Encoded = encoder("CLUSTER", "NODES").result
}
private final class ClusterReplicas(nodeId: NodeId) extends AbstractRedisCommand[Seq[NodeInfo]](multiBulkAsNodeInfos) with NodeCommand {
val encoded: Encoded = encoder("CLUSTER", "REPLICAS").add(nodeId.raw).result
}
private final class ClusterReplicate(nodeId: NodeId) extends RedisUnitCommand with NodeCommand {
val encoded: Encoded = encoder("CLUSTER", "REPLICATE").add(nodeId.raw).result
}
private final class ClusterReset(hard: Boolean) extends RedisUnitCommand with NodeCommand {
val encoded: Encoded = encoder("CLUSTER", "RESET").addFlag("HARD", hard).result
}
private object ClusterSaveconfig extends RedisUnitCommand with NodeCommand {
val encoded: Encoded = encoder("CLUSTER", "SAVECONFIG").result
}
private final class ClusterSetConfigEpoch(configEpoch: Long) extends RedisUnitCommand with NodeCommand {
val encoded: Encoded = encoder("CLUSTER", "SET-CONFIG-EPOCH").add(configEpoch).result
}
private final class ClusterSetslot(slot: Int, subcommand: SetslotCmd) extends RedisUnitCommand with NodeCommand {
val encoded: Encoded = encoder("CLUSTER", "SETSLOT").add(slot).add(subcommand).result
}
private final class ClusterSlaves(nodeId: NodeId) extends AbstractRedisCommand[Seq[NodeInfo]](multiBulkAsNodeInfos) with NodeCommand {
val encoded: Encoded = encoder("CLUSTER", "SLAVES").add(nodeId.raw).result
}
private object ClusterSlots
extends RedisSeqCommand[SlotRangeMapping](multiBulkAsSlotRangeMapping) with NodeCommand {
val encoded: Encoded = encoder("CLUSTER", "SLOTS").result
}
}
trait ConnectionClusterApi extends NodeClusterApi {
/** Executes [[http://redis.io/commands/readonly READONLY]] */
def readonly: Result[Unit] =
execute(Readonly)
/** Executes [[http://redis.io/commands/readwrite READWRITE]] */
def readwrite: Result[Unit] =
execute(Readwrite)
private object Readonly extends RedisUnitCommand with ConnectionCommand {
val encoded: Encoded = encoder("READONLY").result
}
private object Readwrite extends RedisUnitCommand with ConnectionCommand {
val encoded: Encoded = encoder("READWRITE").result
}
}
case object Asking extends UnsafeCommand {
val encoded: Encoded = encoder("ASKING").result
}
case class NodeId(raw: String) extends AnyVal
sealed abstract class FailoverOption(val name: String) extends NamedEnum
object FailoverOption extends NamedEnumCompanion[FailoverOption] {
case object Force extends FailoverOption("FORCE")
case object Takeover extends FailoverOption("TAKEOVER")
val values: List[FailoverOption] = caseObjects
}
sealed trait SetslotCmd
object SetslotCmd {
case class Migrating(destinationNodeId: NodeId) extends SetslotCmd
case class Importing(sourceNodeId: NodeId) extends SetslotCmd
case object Stable extends SetslotCmd
case class Node(nodeId: NodeId) extends SetslotCmd
implicit val SubcommandCommandArg: CommandArg[SetslotCmd] =
CommandArg((encoder, arg) => arg match {
case Migrating(NodeId(nodeId)) => encoder.add("MIGRATING").add(nodeId)
case Importing(NodeId(nodeId)) => encoder.add("IMPORTING").add(nodeId)
case Stable => encoder.add("STABLE")
case Node(NodeId(nodeId)) => encoder.add("NODE").add(nodeId)
})
}
case class ClusterStateInfo(info: String) extends ParsedInfo(info, "\r\n", ":") {
val stateOk: Boolean = attrMap("cluster_state") == "ok"
val slotsAssigned: Int = attrMap("cluster_slots_assigned").toInt
val slotsOk: Int = attrMap("cluster_slots_ok").toInt
val slotsPfail: Int = attrMap("cluster_slots_pfail").toInt
val slotsFail: Int = attrMap("cluster_slots_fail").toInt
val knownNodes: Int = attrMap("cluster_known_nodes").toInt
val size: Int = attrMap("cluster_size").toInt
val currentEpoch: Long = attrMap("cluster_current_epoch").toLong
val myEpoch: Long = attrMap("cluster_my_epoch").toLong
val statsMessagesSent: Long = attrMap("cluster_stats_messages_sent").toLong
val statsMessagesReceived: Long = attrMap("cluster_stats_messages_received").toLong
}
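// For reference (field layout inferred from the parsing below, shown roughly):
//   <id> <ip:port@cport> <flags> <master-id or -> <ping-sent> <pong-recv> <config-epoch> <link-state> <slots...>
// which is why splitLine(1) is split on '@' and indices 2 to 7 map to flags, master,
// ping/pong timestamps, config epoch and link state.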
case class NodeInfo(infoLine: String) {
private val splitLine: Array[String] = infoLine.split(' ')
private val splitAddr: Array[String] = splitLine(1).split('@')
val id: NodeId = NodeId(splitLine(0))
val address: NodeAddress = NodeAddress.parse(splitAddr(0))
val clusterPort: Opt[String] = splitAddr.opt.filter(_.length > 1).map(_.apply(1))
val flags: NodeFlags = NodeFlags(splitLine(2))
val master: Opt[NodeId] = Opt(splitLine(3)).filter(_ != "-").map(NodeId)
val pingSent: Long = splitLine(4).toLong
val pongRecv: Long = splitLine(5).toLong
val configEpoch: Long = splitLine(6).toLong
val connected: Boolean = splitLine(7) == "connected"
val (slots: Seq[SlotRange], importingSlots: Seq[(Int, NodeId)], migratingSlots: Seq[(Int, NodeId)]) = {
val slots = mutable.ArrayBuilder.make[SlotRange]
val importingSlots = mutable.ArrayBuilder.make[(Int, NodeId)]
val migratingSlots = mutable.ArrayBuilder.make[(Int, NodeId)]
splitLine.iterator.drop(8).foreach { str =>
(str.indexOf("-<-"), str.indexOf("->-"), str.indexOf('-')) match {
case (-1, -1, -1) =>
val slot = str.toInt
slots += SlotRange(slot, slot)
case (-1, -1, idx) =>
slots += SlotRange(str.take(idx).toInt, str.drop(idx + 1).toInt)
case (idx, -1, _) =>
importingSlots += ((str.substring(1, idx).toInt, NodeId(str.substring(idx + 1, str.length - 1))))
case (-1, idx, _) =>
migratingSlots += ((str.substring(1, idx).toInt, NodeId(str.substring(idx + 1, str.length - 1))))
case _ =>
}
}
def res[T](b: mutable.ArrayBuilder[T]): IndexedSeq[T] =
IArraySeq.unsafeWrapArray(b.result())
(res(slots), res(importingSlots), res(migratingSlots))
}
override def toString: String = infoLine
}
class NodeFlags(val raw: Int) extends AnyVal {
import NodeFlags._
def |(other: NodeFlags): NodeFlags = new NodeFlags(raw | other.raw)
def &(other: NodeFlags): NodeFlags = new NodeFlags(raw & other.raw)
def ^(other: NodeFlags): NodeFlags = new NodeFlags(raw ^ other.raw)
def unary_~ : NodeFlags = new NodeFlags(~raw)
def myself: Boolean = (this & Myself) != Noflags
def master: Boolean = (this & Master) != Noflags
def slave: Boolean = (this & Slave) != Noflags
def pfail: Boolean = (this & Pfail) != Noflags
def fail: Boolean = (this & Fail) != Noflags
def handshake: Boolean = (this & Handshake) != Noflags
def noaddr: Boolean = (this & Noaddr) != Noflags
override def toString: String =
if (this == Noflags) "noflags"
else reprValuePairs.iterator
.collect({ case (str, flags) if (this & flags) != Noflags => str })
.mkString(",")
}
object NodeFlags {
val Noflags = new NodeFlags(0)
val Myself = new NodeFlags(1 << 0)
val Master = new NodeFlags(1 << 1)
val Slave = new NodeFlags(1 << 2)
val Pfail = new NodeFlags(1 << 3)
val Fail = new NodeFlags(1 << 4)
val Handshake = new NodeFlags(1 << 5)
val Noaddr = new NodeFlags(1 << 6)
private val reprValuePairs = Seq(
"myself" -> Myself,
"master" -> Master,
"slave" -> Slave,
"fail?" -> Pfail,
"fail" -> Fail,
"handshake" -> Handshake,
"noaddr" -> Noaddr
)
def apply(str: String): NodeFlags = {
val flagSet = str.split(',').to(mutable.HashSet)
reprValuePairs.foldLeft(Noflags) {
case (res, (s, flags)) => if (flagSet(s)) res | flags else res
}
}
}
case class SlotRangeMapping(
range: SlotRange, master: NodeAddress, masterId: Opt[NodeId], slaves: Seq[(NodeAddress, Opt[NodeId])]
) {
private def nodeRepr(addr: NodeAddress, idOpt: Opt[NodeId]): String =
addr.toString + idOpt.fold("")(id => s" (${id.raw})")
override def toString: String =
s"slots: $range, master: ${nodeRepr(master, masterId)}, slaves: ${slaves.map((nodeRepr _).tupled).mkString(",")}"
}
case class SlotRange(start: Int, end: Int) {
def toRange: Range = start to end
def contains(slot: Int): Boolean = slot >= start && slot <= end
override def toString: String = if (start == end) start.toString else s"$start-$end"
}
object SlotRange {
final val LastSlot = Hash.TotalSlots - 1
final val Full = SlotRange(0, LastSlot)
}
final class BumpepochResult(implicit enumCtx: EnumCtx) extends AbstractValueEnum {
val encoded: SimpleStringMsg = SimpleStringMsg(name.toUpperCase)
}
object BumpepochResult extends AbstractValueEnumCompanion[BumpepochResult] {
final val Bumped, Still: Value = new BumpepochResult
}
|
AVSystem/scala-commons
|
commons-redis/src/main/scala/com/avsystem/commons/redis/commands/cluster.scala
|
Scala
|
mit
| 16,442
|
package slick.test.codegen
import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.concurrent.ExecutionContext.Implicits.global
import slick.codegen.SourceCodeGenerator
import slick.driver._
/** Generates code for CodeGenRoundTripTest.
*
 * This is currently generated using Derby, because Derby strips the column size of some columns,
 * which works with all backends. If the code was generated using model data where the size is
 * included, it would fail in Derby and HSQLDB. The code is tested using all enabled drivers.
 * We should also diversify the generation itself at some point. */
object GenerateRoundtripSources {
def main(args: Array[String]) {
val driver = slick.driver.H2Driver
val url = "jdbc:h2:mem:test4"
val jdbcDriver = "org.h2.Driver"
object Tables extends Tables(driver)
import Tables._
import Tables.profile.api._
val ddl = posts.schema ++ categories.schema ++ typeTest.schema ++ large.schema ++ `null`.schema ++ X.schema ++ SingleNonOptionColumn.schema ++ SelfRef.schema
val a1 = driver.createModel(ignoreInvalidDefaults=false).map(m => new SourceCodeGenerator(m) {
override def tableName = {
case n if n.toLowerCase == "null" => "null" // testing null as table name
case n => super.tableName(n)
}
})
val a2 = driver.createModel(ignoreInvalidDefaults=false).map(m => new SourceCodeGenerator(m) {
override def Table = new Table(_){
override def autoIncLastAsOption = true
}
})
val db = Database.forURL(url=url, driver=jdbcDriver, keepAliveConnection=true)
val (gen,gen2) = try Await.result(db.run(ddl.create >> (a1 zip a2)), Duration.Inf) finally db.close
val pkg = "slick.test.codegen.roundtrip"
gen.writeToFile( "slick.driver.H2Driver", args(0), pkg )
gen2.writeToFile( "slick.driver.H2Driver", args(0), pkg+"2" )
}
}
class Tables(val profile: JdbcProfile){
import profile.api._
  /** Tests single column table, scala keyword type name, non-identifier column name and all nullable columns table */
class `null`(tag: Tag) extends Table[Option[String]](tag, "null") {
def name = column[Option[String]]("na me")
def * = name
}
val `null` = TableQuery[`null`]
/** Tests table with self-referring foreign key */
class SelfRef(tag: Tag) extends Table[(Int,Option[Int])](tag, "SELF_REF") {
def id = column[Int]("id",O.AutoInc)
def parent = column[Option[Int]]("parent")
def parentFK = foreignKey("parent_fk", parent, SelfRef)(_.id.?)
def * = (id,parent)
}
val SelfRef = TableQuery[SelfRef]
  /** Tests a table with a single non-Option column */
class SingleNonOptionColumn(tag: Tag) extends Table[String](tag, "SingleNonOptionColumn") {
def name = column[String]("name")
def * = name
}
val SingleNonOptionColumn = TableQuery[SingleNonOptionColumn]
/** Tests single column table and collision with generated names */
class all(tag: Tag) extends Table[String](tag, "all") {
def dynamic = column[String]("dynamic")
def * = dynamic
}
val all = TableQuery[all]
/** Tests slick term name collision */
class X(tag: Tag) extends Table[(Int,Int,Option[Int],Int,Double,String,Option[Int],Option[Int],Option[String],Option[String],Option[String])](tag, "X") {
def pk = column[Int]("pk")
def pk2 = column[Int]("pk2")
def pkpk = primaryKey( "", (pk,pk2) ) // pk column collision
def i1 = column[Option[Int]]("index_1") // scala keyword collision
def c = column[Int]("column") // slick Table method with args collision
def p = column[Option[Int]]("posts")
def a = column[Option[Int]]("val") // scala keyword collision
def s = column[Double]("schema_name") // slick Table no-arg method collision
def sx = column[String]("schema_name_x") // column name collision after disambiguation
def t_ag = column[Option[String]]("tag") // column name collision after disambiguation
def tt = column[Option[String]]("_table_tag") // column name collision after disambiguation
def _underscore = column[Option[String]]("_underscore") // column name collision after disambiguation
def * = (pk,pk2,a,c,s,sx,i1,p,t_ag,tt,_underscore)
def idx1 = index("",i1) // idx column collision
def idx2 = index("i2",i1) // idx column collision
def idx3 = index("foo",c,unique=true)
def idx4 = index("bar",p,unique=true)
def categoryFK1 = foreignKey("fk1", pk, categories)(_.id) // dup FK collision
def categoryFK2 = foreignKey("fk2", pk2, categories)(_.id)
def postsFK = foreignKey("fk_to_posts", p, posts)(_.id.?) // fk column name collision
}
val X = TableQuery[X]
case class Category(id: Int, name: String)
class Categories(tag: Tag) extends Table[Category](tag, "categories") {
def id = column[Int]("id", O.PrimaryKey, O.AutoInc)
def name = column[String]("name", O.Length(254))
def * = (id, name) <> (Category.tupled,Category.unapply)
def idx = index("IDX_NAME",name)
}
val categories = TableQuery[Categories]
class Posts(tag: Tag) extends Table[(Int, String, Option[Int])](tag, "POSTS") {
def id = column[Int]("id")
def title = column[String]("title")
def category = column[Option[Int]]("category")
def * = (id, title, category)
def categoryFK = foreignKey("_", category, categories)(_.id.?)
}
val posts = TableQuery[Posts]
// Clob disabled because it fails in postgres and mysql, see https://github.com/slick/slick/issues/637
class TypeTest(tag: Tag) extends Table[(
String,Boolean,Byte,Short,Int,Long,Float,Double,String,java.sql.Date,java.sql.Time,java.sql.Timestamp,java.util.UUID,java.sql.Blob//,java.sql.Clob
,Option[Int]
,(
Option[Boolean],Option[Byte],Option[Short],Option[Int],Option[Long],Option[Float],Option[Double],Option[String],Option[java.sql.Date],Option[java.sql.Time],Option[java.sql.Timestamp],Option[java.util.UUID],Option[java.sql.Blob]//,Option[java.sql.Clob]
)
)](tag, "TYPE_TEST") {
def `type` = column[String]("type") // <- test escaping of keywords
def Boolean = column[Boolean]("Boolean",O.Default(true))
def Byte = column[Byte]("Byte")
def Short = column[Short]("Short")
def Int = column[Int]("Int",O.Default(-5))
def Long = column[Long]("Long",O.Default(5L))
//def java_math_BigInteger = column[java.math.BigInteger]("java_math_BigInteger")
def Float = column[Float]("Float",O.Default(9.999F))
def Double = column[Double]("Double",O.Default(9.999))
//def java_math_BigDecimal = column[java.math.BigDecimal]("java_math_BigDecimal")
def String = column[String]("String",O.Default("someDefaultString"),O.Length(254))
def java_sql_Date = column[java.sql.Date]("java_sql_Date")
def java_sql_Time = column[java.sql.Time]("java_sql_Time")
def java_sql_Timestamp = column[java.sql.Timestamp]("java_sql_Timestamp")
def java_util_UUID = column[java.util.UUID]("java_util_UUID")
def java_sql_Blob = column[java.sql.Blob]("java_sql_Blob")
//def java_sql_Clob = column[java.sql.Clob]("java_sql_Clob")
def None_Int = column[Option[Int]]("None_Int",O.Default(None))
def Option_Boolean = column[Option[Boolean]]("Option_Boolean",O.Default(Some(true)))
def Option_Byte = column[Option[Byte]]("Option_Byte")
def Option_Short = column[Option[Short]]("Option_Short")
def Option_Int = column[Option[Int]]("Option_Int",O.Default(Some(5)))
def Option_Long = column[Option[Long]]("Option_Long",O.Default(Some(5L)))
//def java_math_BigInteger = column[Option[java.math.BigInteger]]("java_math_BigInteger")
def Option_Float = column[Option[Float]]("Option_Float",O.Default(Some(9.999F)))
def Option_Double = column[Option[Double]]("Option_Double",O.Default(Some(9.999)))
//def java_math_BigDecimal = column[Option[java.math.BigDecimal]]("java_math_BigDecimal")
def Option_String = column[Option[String]]("Option_String",O.Default(Some("someDefaultString")),O.Length(254))
def Option_java_sql_Date = column[Option[java.sql.Date]]("Option_java_sql_Date")
def Option_java_sql_Time = column[Option[java.sql.Time]]("Option_java_sql_Time")
def Option_java_sql_Timestamp = column[Option[java.sql.Timestamp]]("Option_java_sql_Timestamp")
def Option_java_util_UUID = column[Option[java.util.UUID]]("Option_java_util_UUID")
def Option_java_sql_Blob = column[Option[java.sql.Blob]]("Option_java_sql_Blob")
def Option_java_sql_Option_Blob = column[Option[Option[java.sql.Blob]]]("Option_java_sql_Blob")
//def Option_java_sql_Clob = column[Option[java.sql.Clob]]("Option_java_sql_Clob")
def * = (
`type`,
Boolean,Byte,Short,Int,Long,Float,Double,String,java_sql_Date,java_sql_Time,java_sql_Timestamp,java_util_UUID,java_sql_Blob//,java_sql_Clob
,None_Int
,(
Option_Boolean,Option_Byte,Option_Short,Option_Int,Option_Long,Option_Float,Option_Double,Option_String,Option_java_sql_Date,Option_java_sql_Time,Option_java_sql_Timestamp,Option_java_util_UUID,Option_java_sql_Blob//,Option_java_sql_Clob
)
)
def pk = primaryKey("PK", (Int,Long))
}
val typeTest = TableQuery[TypeTest]
  // testing a table larger than 22 columns (code gen round trip does not preserve the structure of the * projection or the names of mapped classes)
case class Part(i1: Int, i2: Int, i3: Int, i4: Int, i5: Int, i6: Int)
case class Whole(id: Long, p1: Part, p2: Part, p3: Part, p4: Part, p5: Part, p6: Part)
class Large(tag: Tag) extends Table[Whole](tag, "LARGE") {
def id = column[Long]("id", O.PrimaryKey)
def p1i1 = column[Int]("p1i1",O.Default(11))
def p1i2 = column[Int]("p1i2",O.Default(12))
def p1i3 = column[Int]("p1i3",O.Default(13))
def p1i4 = column[Int]("p1i4",O.Default(14))
def p1i5 = column[Int]("p1i5",O.Default(15))
def p1i6 = column[Int]("p1i6",O.Default(16))
def p2i1 = column[Int]("p2i1",O.Default(21))
def p2i2 = column[Int]("p2i2",O.Default(22))
def p2i3 = column[Int]("p2i3",O.Default(23))
def p2i4 = column[Int]("p2i4",O.Default(24))
def p2i5 = column[Int]("p2i5",O.Default(25))
def p2i6 = column[Int]("p2i6",O.Default(26))
def p3i1 = column[Int]("p3i1",O.Default(31))
def p3i2 = column[Int]("p3i2",O.Default(32))
def p3i3 = column[Int]("p3i3",O.Default(33))
def p3i4 = column[Int]("p3i4",O.Default(34))
def p3i5 = column[Int]("p3i5",O.Default(35))
def p3i6 = column[Int]("p3i6",O.Default(36))
def p4i1 = column[Int]("p4i1",O.Default(41))
def p4i2 = column[Int]("p4i2",O.Default(42))
def p4i3 = column[Int]("p4i3",O.Default(43))
def p4i4 = column[Int]("p4i4",O.Default(44))
def p4i5 = column[Int]("p4i5",O.Default(45))
def p4i6 = column[Int]("p4i6",O.Default(46))
def p5i1 = column[Int]("p5i1",O.Default(51))
def p5i2 = column[Int]("p5i2",O.Default(52))
def p5i3 = column[Int]("p5i3",O.Default(53))
def p5i4 = column[Int]("p5i4",O.Default(54))
def p5i5 = column[Int]("p5i5",O.Default(55))
def p5i6 = column[Int]("p5i6",O.Default(56))
def p6i1 = column[Int]("p6i1",O.Default(61))
def p6i2 = column[Int]("p6i2",O.Default(62))
def p6i3 = column[Int]("p6i3",O.Default(63))
def p6i4 = column[Int]("p6i4",O.Default(64))
def p6i5 = column[Int]("p6i5",O.Default(65))
def p6i6 = column[Int]("p6i6",O.Default(66))
def * = (
id,
(p1i1, p1i2, p1i3, p1i4, p1i5, p1i6),
(p2i1, p2i2, p2i3, p2i4, p2i5, p2i6),
(p3i1, p3i2, p3i3, p3i4, p3i5, p3i6),
(p4i1, p4i2, p4i3, p4i4, p4i5, p4i6),
(p5i1, p5i2, p5i3, p5i4, p5i5, p5i6),
(p6i1, p6i2, p6i3, p6i4, p6i5, p6i6)
).shaped <> ({ case (id, p1, p2, p3, p4, p5, p6) =>
// We could do this without .shaped but then we'd have to write a type annotation for the parameters
Whole(id, Part.tupled.apply(p1), Part.tupled.apply(p2), Part.tupled.apply(p3), Part.tupled.apply(p4), Part.tupled.apply(p5), Part.tupled.apply(p6))
}, { w: Whole =>
def f(p: Part) = Part.unapply(p).get
Some((w.id, f(w.p1), f(w.p2), f(w.p3), f(w.p4), f(w.p5), f(w.p6)))
})
}
val large = TableQuery[Large]
}
|
jkutner/slick
|
slick-testkit/src/codegen/scala/slick/test/codegen/GenerateRoundtripSources.scala
|
Scala
|
bsd-2-clause
| 12,129
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.examples.util
import java.io.File
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{CarbonContext, SaveMode}
import org.apache.carbondata.core.util.CarbonProperties
// scalastyle:off println
object ExampleUtils {
def currentPath: String = new File(this.getClass.getResource("/").getPath + "../../")
.getCanonicalPath
val storeLocation = currentPath + "/target/store"
def createCarbonContext(appName: String): CarbonContext = {
val sc = new SparkContext(new SparkConf()
.setAppName(appName)
.setMaster("local[2]"))
sc.setLogLevel("ERROR")
println(s"Starting $appName using spark version ${sc.version}")
val cc = new CarbonContext(sc, storeLocation, currentPath + "/target/carbonmetastore")
CarbonProperties.getInstance()
.addProperty("carbon.storelocation", storeLocation)
cc
}
/**
   * This function writes a sample CarbonData file containing the following schema:
   * c1: String, c2: String, c3: Double
   * Returns the table path
*/
def writeSampleCarbonFile(cc: CarbonContext, tableName: String, numRows: Int = 1000): String = {
cc.sql(s"DROP TABLE IF EXISTS $tableName")
writeDataframe(cc, tableName, numRows, SaveMode.Overwrite)
s"$storeLocation/default/$tableName"
}
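  // For illustration (hypothetical table name):
  //   val path = ExampleUtils.writeSampleCarbonFile(cc, "sample_table", numRows = 100)
  // creates table "sample_table" with 100 rows of ("a", "b", i) and returns its store path.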
/**
   * This function appends data to the CarbonData file
   * Returns the table path
*/
def appendSampleCarbonFile(cc: CarbonContext, tableName: String, numRows: Int = 1000): String = {
writeDataframe(cc, tableName, numRows, SaveMode.Append)
s"$storeLocation/default/$tableName"
}
/**
   * create a new dataframe and write it to a CarbonData file, based on the save mode
*/
private def writeDataframe(
cc: CarbonContext, tableName: String, numRows: Int, mode: SaveMode): Unit = {
// use CarbonContext to write CarbonData files
import cc.implicits._
val sc = cc.sparkContext
val df = sc.parallelize(1 to numRows, 2)
.map(x => ("a", "b", x))
.toDF("c1", "c2", "c3")
    // save dataframe directly to carbon file without tempCSV
df.write
.format("carbondata")
.option("tableName", tableName)
.option("compress", "true")
.option("tempCSV", "false")
.mode(mode)
.save()
}
def cleanSampleCarbonFile(cc: CarbonContext, tableName: String): Unit = {
cc.sql(s"DROP TABLE IF EXISTS $tableName")
}
}
// scalastyle:on println
|
Sephiroth-Lin/incubator-carbondata
|
examples/spark/src/main/scala/org/apache/carbondata/examples/util/ExampleUtils.scala
|
Scala
|
apache-2.0
| 3,237
|
/*
* Copyright (c) 2014-2017 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and
* limitations there under.
*/
import sbt._
object Dependencies {
object V {
// Java
val dynamodb = "1.11.98"
val jsonValidator = "2.2.3"
// Scala
val spark = "2.1.0"
val json4sJackson = "3.2.11"
val scalaz7 = "7.0.9"
val scopt = "3.5.0"
val commonEnrich = "0.25.0"
val igluClient = "0.5.0"
// Scala (test only)
val specs2 = "2.3.13"
}
object Libraries {
// Java
val jsonValidator = "com.github.fge" % "json-schema-validator" % V.jsonValidator
val dynamodb = "com.amazonaws" % "aws-java-sdk-dynamodb" % V.dynamodb
// Scala
val sparkCore = "org.apache.spark" %% "spark-core" % V.spark % "provided"
val sparkSQL = "org.apache.spark" %% "spark-sql" % V.spark % "provided"
val json4sJackson = "org.json4s" %% "json4s-jackson" % V.json4sJackson
val scalaz7 = "org.scalaz" %% "scalaz-core" % V.scalaz7
val scopt = "com.github.scopt" %% "scopt" % V.scopt
val commonEnrich = "com.snowplowanalytics" %% "snowplow-common-enrich" % V.commonEnrich
val igluClient = "com.snowplowanalytics" %% "iglu-scala-client" % V.igluClient
// Scala (test only)
val specs2 = "org.specs2" %% "specs2-core" % V.specs2 % "test"
}
}
|
Propertyfinder/snowplow
|
4-storage/rdb-shredder/project/Dependencies.scala
|
Scala
|
apache-2.0
| 2,181
|
package skutek_experimental
import skutek.abstraction._
import skutek.std_effects._
import org.specs2._
class CyclicMemoTest extends Specification with CanLaunchTheMissiles {
def is = graph
def graph = br ^ "CyclicMemoizer operations should work" ! {
case object FxMemo extends CyclicMemoizer[Int, Vertex]
case object FxW extends Writer[Vector[Int]]
case class Vertex(serno: Int, outgoing: List[Edge])
case class Edge(from: () => Vertex, to: () => Vertex)
val outgoings = Vector(
List(0,1,2,3,4,5),
List(6,7),
List(7,2,1),
List(3,7),
List(),
List(6),
List(0),
List()
)
val missiles = outgoings.map(_ => Missile())
def visit(n: Int) = {
for {
_ <- missiles(n).launch_!
_ <- FxW.Tell(n)
from <- FxMemo.Recur(n)
edges <- (
for (i <- outgoings(n))
yield for (to <- FxMemo.Recur(i))
yield Edge(from, to)
).traverse
} yield Vertex(n, edges)
}
val (log, roots) =
Vector(0)
.map(FxMemo.Recur(_)).traverse
.handleWith[FxW.type](FxMemo.handler[FxW.type](visit))
.flatten
.runWith(FxW.handler)
{
def loop(todos: List[Vertex], visited: Set[Int]): Unit = {
todos match {
case Nil => ()
case x :: rest =>
val targets = x.outgoing.map(_.to())
val more = targets.filterNot(v => visited.contains(v.serno))
val visited2 = visited ++ more.map(_.serno)
loop(rest ++ more, visited2)
}
}
loop(roots.head() :: Nil, Set(roots.head().serno))
}
missiles.map(_.mustHaveLaunchedOnce).reduce(_ and _) and
(log.sorted must_== (0 until outgoings.size))
}
}
|
marcinzh/skutek
|
modules/experimental/src/test/scala/skutek_experimental/CyclicMemoTest.scala
|
Scala
|
mit
| 1,772
|
package dotty.tools.benchmarks.tuples
import org.openjdk.jmh.annotations._
import scala.runtime.DynamicTuple
@State(Scope.Thread)
class Apply {
@Param(Array("1 0"))
var sizeAndIndex: String = _
var tuple: NonEmptyTuple = _
var index: Int = _
@Setup
def setup(): Unit = {
val size = sizeAndIndex.split(' ')(0).toInt
index = sizeAndIndex.split(' ')(1).toInt
tuple = "elem" *: ()
for (i <- 1 until size)
tuple = "elem" *: tuple
}
@Benchmark
def tupleApply(): Any = {
runtime.Tuple.apply(tuple, index)
}
@Benchmark
def productElement(): Any = {
tuple.asInstanceOf[Product].productElement(index)
}
}
|
som-snytt/dotty
|
bench-run/src/main/scala/dotty/tools/benchmarks/tuples/Apply.scala
|
Scala
|
apache-2.0
| 657
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.rules.physical.stream
import org.apache.flink.table.planner.plan.nodes.FlinkConventions
import org.apache.flink.table.planner.plan.nodes.logical._
import org.apache.flink.table.planner.plan.nodes.physical.common.CommonPhysicalLookupJoin
import org.apache.flink.table.planner.plan.nodes.physical.stream.StreamPhysicalLookupJoin
import org.apache.flink.table.planner.plan.rules.physical.common.{BaseSnapshotOnCalcTableScanRule, BaseSnapshotOnTableScanRule}
import org.apache.calcite.plan.{RelOptRule, RelOptTable}
import org.apache.calcite.rex.RexProgram
/**
* Rules that convert [[FlinkLogicalJoin]] on a [[FlinkLogicalSnapshot]]
* into [[StreamPhysicalLookupJoin]]
*
* There are 2 conditions for this rule:
* 1. the root parent of [[FlinkLogicalSnapshot]] should be a TableSource which implements
* [[org.apache.flink.table.sources.LookupableTableSource]].
* 2. the period of [[FlinkLogicalSnapshot]] must be the left table's proctime attribute.
*/
object StreamPhysicalLookupJoinRule {
val SNAPSHOT_ON_TABLESCAN: RelOptRule = new SnapshotOnTableScanRule
val SNAPSHOT_ON_CALC_TABLESCAN: RelOptRule = new SnapshotOnCalcTableScanRule
class SnapshotOnTableScanRule
extends BaseSnapshotOnTableScanRule("StreamPhysicalSnapshotOnTableScanRule") {
override protected def transform(
join: FlinkLogicalJoin,
input: FlinkLogicalRel,
temporalTable: RelOptTable,
calcProgram: Option[RexProgram]): CommonPhysicalLookupJoin = {
doTransform(join, input, temporalTable, calcProgram)
}
}
class SnapshotOnCalcTableScanRule
extends BaseSnapshotOnCalcTableScanRule("StreamPhysicalSnapshotOnCalcTableScanRule") {
override protected def transform(
join: FlinkLogicalJoin,
input: FlinkLogicalRel,
temporalTable: RelOptTable,
calcProgram: Option[RexProgram]): CommonPhysicalLookupJoin = {
doTransform(join, input, temporalTable, calcProgram)
}
}
private def doTransform(
join: FlinkLogicalJoin,
input: FlinkLogicalRel,
temporalTable: RelOptTable,
calcProgram: Option[RexProgram]): StreamPhysicalLookupJoin = {
val joinInfo = join.analyzeCondition
val cluster = join.getCluster
val providedTrait = join.getTraitSet.replace(FlinkConventions.STREAM_PHYSICAL)
val requiredTrait = input.getTraitSet.replace(FlinkConventions.STREAM_PHYSICAL)
val convInput = RelOptRule.convert(input, requiredTrait)
new StreamPhysicalLookupJoin(
cluster,
providedTrait,
convInput,
temporalTable,
calcProgram,
joinInfo,
join.getJoinType)
}
}
|
apache/flink
|
flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/rules/physical/stream/StreamPhysicalLookupJoinRule.scala
|
Scala
|
apache-2.0
| 3,468
|
package org.kokho.scheduling.multicritical.system
import org.kokho.scheduling.{PeriodicJob, Task}
/**
* Represents a job of a low-criticality task.
*
* @author Mikhail Kokho
* @date 6/4/15
*/
case class LoCriticalJob(release: Int, task: LoCriticalTaskDefault) extends PeriodicJob {
override def isOfTask(thatTask: Task): Boolean =
thatTask == this.task || this.task.relatedTo.getOrElse(this.task) == thatTask
}
|
mkokho/dynoslack
|
src/main/scala/org/kokho/scheduling/multicritical/system/LoCriticalJob.scala
|
Scala
|
apache-2.0
| 423
|
package org.elasticsearch.spark.sql
import java.util.Properties
import scala.Array.fallbackCanBuildFrom
import scala.collection.JavaConverters.asScalaBufferConverter
import scala.collection.JavaConverters.propertiesAsScalaMapConverter
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.sql.MapType
import org.apache.spark.sql.catalyst.types.BinaryType
import org.apache.spark.sql.catalyst.types.BooleanType
import org.apache.spark.sql.catalyst.types.ByteType
import org.apache.spark.sql.catalyst.types.DoubleType
import org.apache.spark.sql.catalyst.types.FloatType
import org.apache.spark.sql.catalyst.types.IntegerType
import org.apache.spark.sql.catalyst.types.LongType
import org.apache.spark.sql.catalyst.types.NullType
import org.apache.spark.sql.catalyst.types.ShortType
import org.apache.spark.sql.catalyst.types.StringType
import org.apache.spark.sql.catalyst.types.StructField
import org.apache.spark.sql.catalyst.types.StructType
import org.apache.spark.sql.catalyst.types.TimestampType
import org.elasticsearch.hadoop.EsHadoopIllegalArgumentException
import org.elasticsearch.hadoop.cfg.Settings
import org.elasticsearch.hadoop.rest.RestRepository
import org.elasticsearch.hadoop.serialization.FieldType.BINARY
import org.elasticsearch.hadoop.serialization.FieldType.BOOLEAN
import org.elasticsearch.hadoop.serialization.FieldType.BYTE
import org.elasticsearch.hadoop.serialization.FieldType.DATE
import org.elasticsearch.hadoop.serialization.FieldType.DOUBLE
import org.elasticsearch.hadoop.serialization.FieldType.FLOAT
import org.elasticsearch.hadoop.serialization.FieldType.INTEGER
import org.elasticsearch.hadoop.serialization.FieldType.LONG
import org.elasticsearch.hadoop.serialization.FieldType.NULL
import org.elasticsearch.hadoop.serialization.FieldType.OBJECT
import org.elasticsearch.hadoop.serialization.FieldType.SHORT
import org.elasticsearch.hadoop.serialization.FieldType.STRING
import org.elasticsearch.hadoop.serialization.dto.mapping.Field
import org.elasticsearch.hadoop.util.Assert
import org.elasticsearch.hadoop.util.IOUtils
import org.elasticsearch.hadoop.util.StringUtils
import org.elasticsearch.hadoop.cfg.InternalConfigurationOptions
import org.elasticsearch.hadoop.serialization.dto.mapping.MappingUtils
import org.elasticsearch.spark.sql.Utils.ROOT_LEVEL_NAME
import org.elasticsearch.spark.sql.Utils.ROW_ORDER_PROPERTY
private[sql] object SchemaUtils {
case class Schema(field: Field, struct: StructType)
val readInclude = "es.read.field.include"
val readExclude = "es.read.field.exclude"
def discoverMapping(cfg: Settings): Schema = {
val field = discoverMappingAsField(cfg)
val struct = convertToStruct(field, cfg)
Schema(field, struct)
}
def discoverMappingAsField(cfg: Settings): Field = {
val repo = new RestRepository(cfg)
try {
if (repo.indexExists(true)) {
var field = repo.getMapping.skipHeaders()
val readIncludeCfg = cfg.getProperty(readInclude)
val readExcludeCfg = cfg.getProperty(readExclude)
// apply mapping filtering only when the include/exclude settings are present, to keep configuration minimal (important with large mappings)
if (StringUtils.hasText(readIncludeCfg) || StringUtils.hasText(readExcludeCfg)) {
// apply any include/exclude settings that restrict the DataFrame to just a subset of fields
val includes = StringUtils.tokenize(readIncludeCfg);
val excludes = StringUtils.tokenize(readExcludeCfg);
field = MappingUtils.filter(field, includes, excludes)
// NB: metadata field is synthetic so it doesn't have to be filtered
// its presence is controlled through the dedicated config setting
cfg.setProperty(InternalConfigurationOptions.INTERNAL_ES_TARGET_FIELDS, StringUtils.concatenate(Field.toLookupMap(field).keySet()));
}
return field
}
else {
throw new EsHadoopIllegalArgumentException(s"Cannot find mapping for ${cfg.getResourceRead} - one is required before using Spark SQL")
}
} finally {
repo.close()
}
}
private def convertToStruct(rootField: Field, cfg: Settings): StructType = {
var fields = for (fl <- rootField.properties()) yield convertField(fl)
if (cfg.getReadMetadata) {
val metadataMap = new StructField(cfg.getReadMetadataField, new MapType(StringType, StringType, true), true)
fields :+= metadataMap
}
new StructType(fields)
}
private def convertToStruct(field: Field): StructType = {
new StructType(for (fl <- field.properties()) yield convertField(fl))
}
private def convertField(field: Field): StructField = {
val dataType = Utils.extractType(field) match {
case NULL => NullType
case BINARY => BinaryType
case BOOLEAN => BooleanType
case BYTE => ByteType
case SHORT => ShortType
case INTEGER => IntegerType
case LONG => LongType
case FLOAT => FloatType
case DOUBLE => DoubleType
case STRING => StringType
case DATE => TimestampType
case OBJECT => convertToStruct(field)
// fall back to String
case _ => StringType //throw new EsHadoopIllegalStateException("Unknown field type " + field);
}
return new StructField(field.name(), dataType, true)
}
def setRowOrder(settings: Settings, struct: StructType) = {
val rowOrder = detectRowOrder(settings, struct)
// save the field in the settings to pass it to the value reader
settings.setProperty(ROW_ORDER_PROPERTY, IOUtils.propsToString(rowOrder))
}
def getRowOrder(settings: Settings) = {
val rowOrderString = settings.getProperty(ROW_ORDER_PROPERTY)
Assert.hasText(rowOrderString, "no schema/row order detected...")
val rowOrderProps = IOUtils.propsFromString(rowOrderString)
val map = new scala.collection.mutable.LinkedHashMap[String, Seq[String]]
for (prop <- rowOrderProps.asScala) {
map.put(prop._1, new ArrayBuffer ++= (StringUtils.tokenize(prop._2).asScala))
}
map
}
private def detectRowOrder(settings: Settings, struct: StructType): Properties = {
val rowOrder = new Properties
doDetectOrder(rowOrder, ROOT_LEVEL_NAME, struct)
val csv = settings.getScrollFields()
// if a projection is applied, use that instead
if (StringUtils.hasText(csv)) {
if (settings.getReadMetadata) {
rowOrder.setProperty(ROOT_LEVEL_NAME, csv + StringUtils.DEFAULT_DELIMITER + settings.getReadMetadataField)
}
else {
rowOrder.setProperty(ROOT_LEVEL_NAME, csv)
}
}
rowOrder
}
private def doDetectOrder(properties: Properties, level: String, struct: StructType) {
val list = new java.util.ArrayList[String]
for (field <- struct.fields) {
list.add(field.name)
if (field.dataType.isInstanceOf[StructType]) {
doDetectOrder(properties, field.name, field.dataType.asInstanceOf[StructType])
}
}
properties.setProperty(level, StringUtils.concatenate(list, StringUtils.DEFAULT_DELIMITER))
}
}
|
yonglehou/elasticsearch-hadoop
|
spark/sql-12/src/main/scala/org/elasticsearch/spark/sql/SchemaUtils.scala
|
Scala
|
apache-2.0
| 7,100
|
package autosteamgifts
import autosteamgifts.Implicits._
import autosteamgifts.TimeAmount._
import org.scalajs.dom.raw.{HTMLAnchorElement, HTMLElement}
import scala.scalajs.js
class Giveaway(element: HTMLElement) {
protected lazy val innerElement = element.firstElementChild
lazy val headingElement = element.query[HTMLAnchorElement](".giveaway__heading")
protected lazy val headingNameElement = headingElement.query[HTMLAnchorElement](".giveaway__heading__name")
lazy val url = URLs.extractGiveawayUrl(headingNameElement)
lazy val code = URLs.giveawayCodeFromUrl(url)
lazy val game = {
val id = element.getAttribute("data-game-id")
val name = headingNameElement.textContent
val steamUrl = Option(element.query[HTMLAnchorElement]("[href*='steampowered.com/']")).map(_.href)
val imageUrl =
Option(element.query[HTMLElement](".giveaway_image_thumbnail"))
.flatMap(URLs.extractBackgroundImage)
new Game(id, name, steamUrl, imageUrl)
}
lazy val minUserLevel = {
val RE = js.RegExp("""^Level (\\d+)\\+$""")
val minUserLevelElement = Option(element.query[HTMLElement](".giveaway__column--contributor-level"))
minUserLevelElement.fold(0) { el =>
RE.exec(el.textContent)(1).get.toInt
}
}
lazy val requiredPoints = {
val RE = js.RegExp("""^\\((\\d+)P\\)$""")
val pointsElement = element.queryAll[HTMLElement](".giveaway__heading__thin").last
RE.exec(pointsElement.textContent)(1).get.toInt
}
lazy val timeRemaining = {
val timeRemainingElement = element.query[HTMLElement]("[data-timestamp]")
val endTimestamp = timeRemainingElement.getAttribute("data-timestamp").toInt
(endTimestamp - js.Date.now() / 1000).toInt.seconds
}
lazy val giverUsername = element.query[HTMLElement](".giveaway__username").textContent
def hidingElement: Option[HTMLElement] = Option(headingElement.query[HTMLElement](".giveaway__icon[data-popup='popup--hide-games']"))
protected val EnteredClassName = "is-faded"
def entered: Boolean = innerElement.classList.contains(EnteredClassName)
}
|
erdavila/auto-steamgifts
|
src/main/scala/autosteamgifts/Giveaway.scala
|
Scala
|
mit
| 2,077
|
package picocli.examples.scala.subcommands.SubCmdsViaClasses
import java.util.Locale
import picocli.CommandLine.{Command, Parameters}
@Command(name = "language", description = Array("Resolve ISO language code (ISO 639-1 or -2, two/three letters)"))
class SubCmd2 extends Runnable {
@Parameters(arity = "1..*n", paramLabel = "<language code 1> <language code 2>",
description = Array("language code(s) to be resolved"))
val languageCodes = new Array[String](0)
def run(): Unit = {
for (code <- languageCodes) {
println(s"${code.toUpperCase()}: ".concat(new Locale(code).getDisplayLanguage))
}
}
}
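// Hedged usage sketch (not in the original example): SubCmd2 is intended to be
// attached to a parent command, but for illustration it can also be executed
// directly. The language codes passed below are arbitrary sample values.
object SubCmd2Sketch {
  import picocli.CommandLine
  def main(args: Array[String]): Unit = {
    // Parses the positional parameters and runs SubCmd2.run(), printing e.g. "EN: English".
    val exitCode = new CommandLine(new SubCmd2).execute("en", "de", "fr")
    sys.exit(exitCode)
  }
}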
|
remkop/picocli
|
picocli-examples/src/main/scala/picocli/examples/scala/subcommands/SubCmdsViaClasses/SubCmd2.scala
|
Scala
|
apache-2.0
| 626
|
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.curve
import java.time._
import java.time.temporal.ChronoUnit
import org.locationtech.geomesa.curve.TimePeriod.TimePeriod
/**
* Represents a time by an offset into a binned bucket. The bin represents days, weeks,
* months or years since the java epoch. The offset represents milliseconds, seconds, or
* hours into that bin.
*
* Times can be partitioned based on four periods:
*
* TimePeriod.Day
* bin => day
* offset => milliseconds
* max date => 2059/09/18
*
* TimePeriod.Week
* bin => week
* offset => seconds
* max date => 2598/01/04
*
* TimePeriod.Month
* bin => month
* offset => seconds
* max date => 4700/08/31
*
* TimePeriod.Year
* bin => year
* offset => minutes
* max date => 34737/12/31
*
* @param bin number of time periods from the java epoch
* @param offset precise offset into the specific time period
*/
case class BinnedTime(bin: Short, offset: Long)
object BinnedTime {
type BinnedTimeToDate = BinnedTime => ZonedDateTime
type TimeToBinnedTime = Long => BinnedTime
type DateToBinnedTime = ZonedDateTime => BinnedTime
type TimeToBin = Long => Short
type DateToBin = ZonedDateTime => Short
val Epoch: ZonedDateTime = ZonedDateTime.ofInstant(Instant.EPOCH, ZoneOffset.UTC)
// min value (inclusive)
val ZMinDate: ZonedDateTime = Epoch
// max values (exclusive)
val DaysMaxDate : ZonedDateTime = Epoch.plusDays(Short.MaxValue.toInt + 1)
val WeeksMaxDate : ZonedDateTime = Epoch.plusWeeks(Short.MaxValue.toInt + 1)
val MonthsMaxDate: ZonedDateTime = Epoch.plusMonths(Short.MaxValue.toInt + 1)
val YearsMaxDate : ZonedDateTime = Epoch.plusYears(Short.MaxValue.toInt + 1)
/**
* Gets period index (e.g. weeks since the epoch) and offset into that interval (e.g. seconds in week)
*
* @param period interval type
* @return
*/
def timeToBinnedTime(period: TimePeriod): TimeToBinnedTime = {
period match {
case TimePeriod.Day => toDayAndMillis
case TimePeriod.Week => toWeekAndSeconds
case TimePeriod.Month => toMonthAndSeconds
case TimePeriod.Year => toYearAndMinutes
}
}
/**
* Gets period index (e.g. weeks since the epoch)
*
* @param period interval type
* @return
*/
def timeToBin(period: TimePeriod): TimeToBin = {
period match {
case TimePeriod.Day => toDay
case TimePeriod.Week => toWeek
case TimePeriod.Month => toMonth
case TimePeriod.Year => toYear
}
}
/**
* Gets period index (e.g. weeks since the epoch) and offset into that interval (e.g. seconds in week)
*
* @param period interval type
* @return
*/
def dateToBinnedTime(period: TimePeriod): DateToBinnedTime = {
period match {
case TimePeriod.Day => toDayAndMillis
case TimePeriod.Week => toWeekAndSeconds
case TimePeriod.Month => toMonthAndSeconds
case TimePeriod.Year => toYearAndMinutes
}
}
/**
* Gets period index (e.g. weeks since the epoch) and offset into that interval (e.g. seconds in week)
*
* @param period interval type
* @return
*/
def dateToBin(period: TimePeriod): DateToBin = {
period match {
case TimePeriod.Day => toDay
case TimePeriod.Week => toWeek
case TimePeriod.Month => toMonth
case TimePeriod.Year => toYear
}
}
/**
* Gets a date back from a binned time
*
* @param period interval type
* @return
*/
def binnedTimeToDate(period: TimePeriod): BinnedTimeToDate = {
period match {
case TimePeriod.Day => fromDayAndMillis
case TimePeriod.Week => fromWeekAndSeconds
case TimePeriod.Month => fromMonthAndSeconds
case TimePeriod.Year => fromYearAndMinutes
}
}
/**
* Gets the max offset value for a given time period
*
* @param period interval type
* @return
*/
def maxOffset(period: TimePeriod): Long = {
period match {
case TimePeriod.Day => ChronoUnit.DAYS.getDuration.toMillis
case TimePeriod.Week => ChronoUnit.WEEKS.getDuration.toMillis / 1000L
case TimePeriod.Month => (ChronoUnit.DAYS.getDuration.toMillis / 1000L) * 31L
// based on 365 days + 1 leap day, with a fudge factor of 10 minutes to account for leap seconds added each year
case TimePeriod.Year => (ChronoUnit.DAYS.getDuration.toMinutes * 366L) + 10L
}
}
/**
* Max indexable date (exclusive) for a given time period
*
* @param period interval type
* @return
*/
def maxDate(period: TimePeriod): ZonedDateTime = {
period match {
case TimePeriod.Day => DaysMaxDate
case TimePeriod.Week => WeeksMaxDate
case TimePeriod.Month => MonthsMaxDate
case TimePeriod.Year => YearsMaxDate
}
}
/**
* Converts values extracted from a filter into valid indexable bounds
*
* @param period time period
* @return
*/
def boundsToIndexableDates(period: TimePeriod): ((Option[ZonedDateTime], Option[ZonedDateTime])) => (ZonedDateTime, ZonedDateTime) = {
val maxDateTime = maxDate(period).minus(1L, ChronoUnit.MILLIS)
bounds => {
val lo = bounds._1 match {
case None => ZMinDate
case Some(dt) if dt.isBefore(ZMinDate) => ZMinDate
case Some(dt) if dt.isAfter(maxDateTime) => maxDateTime
case Some(dt) => dt
}
val hi = bounds._2 match {
case None => maxDateTime
case Some(dt) if dt.isBefore(ZMinDate) => ZMinDate
case Some(dt) if dt.isAfter(maxDateTime) => maxDateTime
case Some(dt) => dt
}
(lo, hi)
}
}
private def toDay(time: Long): Short =
toDay(ZonedDateTime.ofInstant(Instant.ofEpochMilli(time), ZoneOffset.UTC))
private def toDay(date: ZonedDateTime): Short = {
require(!date.isBefore(ZMinDate), s"Date exceeds minimum indexable value ($ZMinDate): $date")
require(DaysMaxDate.isAfter(date), s"Date exceeds maximum indexable value ($DaysMaxDate): $date")
ChronoUnit.DAYS.between(Epoch, date).toShort
}
private def toDayAndMillis(time: Long): BinnedTime =
toDayAndMillis(ZonedDateTime.ofInstant(Instant.ofEpochMilli(time), ZoneOffset.UTC))
private def toDayAndMillis(date: ZonedDateTime): BinnedTime = {
val days = toDay(date)
val millisInDay = date.toInstant.toEpochMilli - Epoch.plus(days, ChronoUnit.DAYS).toInstant.toEpochMilli
BinnedTime(days, millisInDay)
}
private def fromDayAndMillis(date: BinnedTime): ZonedDateTime =
Epoch.plusDays(date.bin).plus(date.offset, ChronoUnit.MILLIS)
private def toWeek(time: Long): Short =
toWeek(ZonedDateTime.ofInstant(Instant.ofEpochMilli(time), ZoneOffset.UTC))
private def toWeek(date: ZonedDateTime): Short = {
require(!date.isBefore(ZMinDate), s"Date exceeds minimum indexable value ($ZMinDate): $date")
require(WeeksMaxDate.isAfter(date), s"Date exceeds maximum indexable value ($WeeksMaxDate): $date")
ChronoUnit.WEEKS.between(Epoch, date).toShort
}
private def toWeekAndSeconds(time: Long): BinnedTime =
toWeekAndSeconds(ZonedDateTime.ofInstant(Instant.ofEpochMilli(time), ZoneOffset.UTC))
private def toWeekAndSeconds(date: ZonedDateTime): BinnedTime = {
val weeks = toWeek(date)
val secondsInWeek = date.toEpochSecond - Epoch.plus(weeks, ChronoUnit.WEEKS).toEpochSecond
BinnedTime(weeks, secondsInWeek)
}
private def fromWeekAndSeconds(date: BinnedTime): ZonedDateTime =
Epoch.plusWeeks(date.bin).plus(date.offset, ChronoUnit.SECONDS)
private def toMonth(time: Long): Short =
toMonth(ZonedDateTime.ofInstant(Instant.ofEpochMilli(time), ZoneOffset.UTC))
private def toMonth(date: ZonedDateTime): Short = {
require(!date.isBefore(ZMinDate), s"Date exceeds minimum indexable value ($ZMinDate): $date")
require(MonthsMaxDate.isAfter(date), s"Date exceeds maximum indexable value ($MonthsMaxDate): $date")
ChronoUnit.MONTHS.between(Epoch, date).toShort
}
private def toMonthAndSeconds(time: Long): BinnedTime =
toMonthAndSeconds(ZonedDateTime.ofInstant(Instant.ofEpochMilli(time), ZoneOffset.UTC))
private def toMonthAndSeconds(date: ZonedDateTime): BinnedTime = {
val months = toMonth(date)
val secondsInMonth = date.toEpochSecond - Epoch.plus(months, ChronoUnit.MONTHS).toEpochSecond
BinnedTime(months, secondsInMonth)
}
private def fromMonthAndSeconds(date: BinnedTime): ZonedDateTime =
Epoch.plusMonths(date.bin).plus(date.offset, ChronoUnit.SECONDS)
private def toYear(time: Long): Short =
toYear(ZonedDateTime.ofInstant(Instant.ofEpochMilli(time), ZoneOffset.UTC))
private def toYear(date: ZonedDateTime): Short = {
require(!date.isBefore(ZMinDate), s"Date exceeds minimum indexable value ($ZMinDate): $date")
require(YearsMaxDate.isAfter(date), s"Date exceeds maximum indexable value ($YearsMaxDate): $date")
ChronoUnit.YEARS.between(Epoch, date).toShort
}
private def toYearAndMinutes(time: Long): BinnedTime =
toYearAndMinutes(ZonedDateTime.ofInstant(Instant.ofEpochMilli(time), ZoneOffset.UTC))
private def toYearAndMinutes(date: ZonedDateTime): BinnedTime = {
val years = toYear(date)
val minutesInYear = (date.toEpochSecond - Epoch.plus(years, ChronoUnit.YEARS).toEpochSecond) / 60L
BinnedTime(years, minutesInYear)
}
private def fromYearAndMinutes(date: BinnedTime): ZonedDateTime =
Epoch.plusYears(date.bin).plus(date.offset, ChronoUnit.MINUTES)
}
object TimePeriod extends Enumeration {
type TimePeriod = Value
val Day: Value = Value("day")
val Week: Value = Value("week")
val Month: Value = Value("month")
val Year: Value = Value("year")
}
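// Illustrative round-trip sketch (not part of the library API): bin a timestamp
// into (weeks since epoch, seconds into the week) and map it back to a date.
// The chosen date is arbitrary; weekly bins keep second precision, so the
// round trip is exact here.
object BinnedTimeUsageSketch extends App {
  import java.time.{ZonedDateTime, ZoneOffset}
  private val toBinned = BinnedTime.timeToBinnedTime(TimePeriod.Week)
  private val toDate   = BinnedTime.binnedTimeToDate(TimePeriod.Week)
  private val millis =
    ZonedDateTime.of(2020, 1, 15, 12, 0, 0, 0, ZoneOffset.UTC).toInstant.toEpochMilli
  private val binned = toBinned(millis)    // BinnedTime(bin = weeks since epoch, offset = seconds into week)
  println(s"$binned -> ${toDate(binned)}") // ... -> 2020-01-15T12:00Z
}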
|
locationtech/geomesa
|
geomesa-z3/src/main/scala/org/locationtech/geomesa/curve/BinnedTime.scala
|
Scala
|
apache-2.0
| 10,241
|
package models
import io.postman.generator.attributes.v0.models.AttributeName
import _root_.play.api.libs.json.{JsError, JsSuccess, JsValue, Reads}
import _root_.play.api.Logging
import io.apibuilder.spec.v0.models.Attribute
import scala.reflect.{ClassTag, classTag}
object AttributeValueReader extends Logging {
def findAndReadFirst[A : Reads : ClassTag](attributes: Seq[Attribute], attributeName: AttributeName): Option[A] = {
attributes.collectFirst {
case attr if attr.name.equalsIgnoreCase(attributeName.toString) =>
tryRead[A](attributeName, attr.value)
}.flatten
}
def tryRead[A : Reads : ClassTag](attributeName: AttributeName, json: JsValue): Option[A] = {
val reads = implicitly[Reads[A]]
reads.reads(json) match {
case JsSuccess(entity, _) =>
Some(entity)
case JsError(errors) =>
logger.warn(s"Attribute [$attributeName] value $json could not be read as ${classTag[A].runtimeClass.getName} / Errors: $errors")
None
}
}
}
|
mbryzek/apidoc-generator
|
postman-generator/src/main/scala/models/AttributeValueReader.scala
|
Scala
|
mit
| 1,016
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.api.scala
import org.apache.flink.annotation.PublicEvolving
import org.apache.flink.api.common.distributions.DataDistribution
import org.apache.flink.api.common.operators.Keys
import org.apache.flink.api.common.operators.base.PartitionOperatorBase.PartitionMethod
import org.apache.flink.api.common.typeinfo.{BasicTypeInfo, TypeInformation}
import org.apache.flink.api.java.Utils
import org.apache.flink.api.java.Utils.ChecksumHashCode
import org.apache.flink.api.java.functions.KeySelector
import org.apache.flink.api.java.operators.PartitionOperator
import org.apache.flink.api.java.utils.{DataSetUtils => jutils}
import org.apache.flink.util.AbstractID
import _root_.scala.language.implicitConversions
import _root_.scala.reflect.ClassTag
package object utils {
/**
* This class provides simple utility methods for zipping elements in a data set with an index
* or with a unique identifier, sampling elements from a data set.
*
* @param self Data Set
*/
@PublicEvolving
implicit class DataSetUtils[T: TypeInformation : ClassTag](val self: DataSet[T]) {
/**
* Method that goes over all the elements in each partition in order to retrieve
* the total number of elements.
*
* @return a data set of Tuple2 consisting of (subtask index, number of elements) mappings
*/
def countElementsPerPartition: DataSet[(Int, Long)] = {
implicit val typeInfo = createTuple2TypeInformation[Int, Long](
BasicTypeInfo.INT_TYPE_INFO.asInstanceOf[TypeInformation[Int]],
BasicTypeInfo.LONG_TYPE_INFO.asInstanceOf[TypeInformation[Long]]
)
wrap(jutils.countElementsPerPartition(self.javaSet)).map { t => (t.f0.toInt, t.f1.toLong)}
}
/**
* Method that takes the (subtask index, number of elements) mappings
* and assigns consecutive ids to all the elements of the input data set.
*
* @return a data set of tuple 2 consisting of consecutive ids and initial values.
*/
def zipWithIndex: DataSet[(Long, T)] = {
implicit val typeInfo = createTuple2TypeInformation[Long, T](
BasicTypeInfo.LONG_TYPE_INFO.asInstanceOf[TypeInformation[Long]],
implicitly[TypeInformation[T]]
)
wrap(jutils.zipWithIndex(self.javaSet)).map {
t: org.apache.flink.api.java.tuple.Tuple2[java.lang.Long, T] => (t.f0.toLong, t.f1)
}
}
/**
* Method that assigns a unique id to all the elements of the input data set.
*
* @return a data set of tuple 2 consisting of ids and initial values.
*/
def zipWithUniqueId: DataSet[(Long, T)] = {
implicit val typeInfo = createTuple2TypeInformation[Long, T](
BasicTypeInfo.LONG_TYPE_INFO.asInstanceOf[TypeInformation[Long]],
implicitly[TypeInformation[T]]
)
wrap(jutils.zipWithUniqueId(self.javaSet)).map {
t: org.apache.flink.api.java.tuple.Tuple2[java.lang.Long, T]=> (t.f0.toLong, t.f1)
}
}
// --------------------------------------------------------------------------------------------
// Sample
// --------------------------------------------------------------------------------------------
/**
* Generate a sample of DataSet by the probability fraction of each element.
*
* @param withReplacement Whether element can be selected more than once.
* @param fraction Probability that each element is chosen: should be in [0, 1] without
* replacement, and in [0, ∞) with replacement. When fraction is larger than 1,
* each element is expected to be selected multiple times into the sample on average.
* @param seed Random number generator seed.
* @return The sampled DataSet
*/
def sample(
withReplacement: Boolean,
fraction: Double,
seed: Long = Utils.RNG.nextLong())
: DataSet[T] = {
wrap(jutils.sample(self.javaSet, withReplacement, fraction, seed))
}
/**
* Generate a sample of DataSet with fixed sample size.
* <p>
* <strong>NOTE:</strong> Sample with fixed size is not as efficient as sample with fraction,
* use sample with fraction unless you need exact precision.
* <p/>
*
* @param withReplacement Whether element can be selected more than once.
* @param numSamples The expected sample size.
* @param seed Random number generator seed.
* @return The sampled DataSet
*/
def sampleWithSize(
withReplacement: Boolean,
numSamples: Int,
seed: Long = Utils.RNG.nextLong())
: DataSet[T] = {
wrap(jutils.sampleWithSize(self.javaSet, withReplacement, numSamples, seed))
}
// --------------------------------------------------------------------------------------------
// Partitioning
// --------------------------------------------------------------------------------------------
/**
* Range-partitions a DataSet on the specified tuple field positions.
*/
def partitionByRange(distribution: DataDistribution, fields: Int*): DataSet[T] = {
val op = new PartitionOperator[T](
self.javaSet,
PartitionMethod.RANGE,
new Keys.ExpressionKeys[T](fields.toArray, self.javaSet.getType),
distribution,
getCallLocationName())
wrap(op)
}
/**
* Range-partitions a DataSet on the specified fields.
*/
def partitionByRange(distribution: DataDistribution,
firstField: String,
otherFields: String*): DataSet[T] = {
val op = new PartitionOperator[T](
self.javaSet,
PartitionMethod.RANGE,
new Keys.ExpressionKeys[T](firstField +: otherFields.toArray, self.javaSet.getType),
distribution,
getCallLocationName())
wrap(op)
}
/**
* Range-partitions a DataSet using the specified key selector function.
*/
def partitionByRange[K: TypeInformation](distribution: DataDistribution,
fun: T => K): DataSet[T] = {
val keyExtractor = new KeySelector[T, K] {
val cleanFun = self.javaSet.clean(fun)
def getKey(in: T) = cleanFun(in)
}
val op = new PartitionOperator[T](
self.javaSet,
PartitionMethod.RANGE,
new Keys.SelectorFunctionKeys[T, K](
keyExtractor,
self.javaSet.getType,
implicitly[TypeInformation[K]]),
distribution,
getCallLocationName())
wrap(op)
}
// --------------------------------------------------------------------------------------------
// Checksum
// --------------------------------------------------------------------------------------------
/**
* Convenience method to get the count (number of elements) of a DataSet
* as well as the checksum (sum over element hashes).
*
* @return A ChecksumHashCode with the count and checksum of elements in the data set.
* @see [[org.apache.flink.api.java.Utils.ChecksumHashCodeHelper]]
*/
def checksumHashCode(): ChecksumHashCode = {
val id = new AbstractID().toString
self.javaSet.output(new Utils.ChecksumHashCodeHelper[T](id))
val res = self.javaSet.getExecutionEnvironment.execute()
res.getAccumulatorResult[ChecksumHashCode](id)
}
}
}
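// Hedged usage sketch (not part of the Flink API file above): once the utils
// package object is imported, any DataSet gains zipWithIndex, sample, etc.
// The example elements and the sampling fraction are arbitrary.
object DataSetUtilsSketch {
  import org.apache.flink.api.scala.utils._
  def main(args: Array[String]): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val words: DataSet[String] = env.fromElements("alpha", "beta", "gamma")
    val indexed: DataSet[(Long, String)] = words.zipWithIndex                 // (0,alpha), (1,beta), (2,gamma)
    val sampled: DataSet[String] = words.sample(withReplacement = false, fraction = 0.5)
    indexed.print()
    sampled.print()
  }
}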
|
hequn8128/flink
|
flink-scala/src/main/scala/org/apache/flink/api/scala/utils/package.scala
|
Scala
|
apache-2.0
| 8,246
|
import scala.math.Ordering
import scala.reflect.ClassTag
trait MonoSam { def apply(x: Int): String }
trait SamP[U] { def apply(x: Int): U }
class OverloadedFun[T](x: T) {
// def foo(f: T => String): String = f(x) Can't unify - need just type equality
def foo(f: Any => String): String = f(x)
def foo(f: Any => T): T = f("a")
def poly[U](f: Int => String): String = f(1)
def poly[U](f: Int => U): U = f(1)
// def polySam[U](f: MonoSam): String = f(1) Only function types supported for pretype-args
// def polySam[U](f: SamP[U]): U = f(1)
// check that we properly instantiate java.util.function.Function's type param to String
def polyJavaSam(f: String => String) = 1
def polyJavaSam(f: java.util.function.Function[String, String]) = 2
}
class StringLike(xs: String) {
def map[A](f: Char => A): Array[A] = ???
def map(f: Char => Char): String = ???
}
object Test {
val of = new OverloadedFun[Int](1)
of.foo(_.toString)
of.poly(x => x / 2 )
//of.polySam(x => x / 2 )
of.polyJavaSam(x => x)
val sl = new StringLike("a")
sl.map(_ == 'a') // : Array[Boolean]
sl.map(x => 'a') // : String
}
object sorting {
def stableSort[K: ClassTag](a: Seq[K], f: (K, K) => Boolean): Array[K] = ???
def stableSort[L: ClassTag](a: Array[L], f: (L, L) => Boolean): Unit = ???
stableSort(??? : Seq[Boolean], (x: Boolean, y: Boolean) => x && !y)
}
// trait Bijection[A, B] extends (A => B) {
// def andThen[C](g: Bijection[B, C]): Bijection[A, C] = ???
// def compose[T](g: Bijection[T, A]) = g andThen this
// }
object SI10194 {
trait X[A] {
def map[B](f: A => B): Unit
}
trait Y[A] extends X[A] {
def map[B](f: A => B)(implicit ordering: Ordering[B]): Unit
}
trait Z[A] extends Y[A]
(null: Y[Int]).map(x => x.toString) // compiled
(null: Z[Int]).map(x => x.toString) // didn't compile
}
// Perform eta-expansion of methods passed as functions to overloaded functions
trait A { def map[T](f: Int => T): Unit = () }
object B extends A {
def noover(f: SAM[Int, Any]): Unit = ()
def map[T: scala.reflect.ClassTag](f: Int => T): Unit = ()
def f(x: Int) = x
noover(f)
noover(identity)
map(f) // same param type, monomorphic method
map(identity) // same param type, polymorphic method
// must not lub to an incompatible function type
object t { def f(x: Int => Int): Int = 0; def f(y: String => Int): String = "1" }
def fun(x: Int) = x
def fun2[T] = (x: T) => 42
t.f(fun) // different param type, monomorphic method
//t.f(fun2) // different param type, polymorphic method - not possible
}
// The same for SAM types
trait SAM[-T, +R] { def apply(x: T): R }
trait A2 { def map[T](f: SAM[Int, T]): Unit = () }
object B2 extends A2 {
def noover(f: SAM[Int, Any]): Unit = ()
def map[T: scala.reflect.ClassTag](f: SAM[Int, T]): Unit = ()
def f(x: Int) = x
noover(f)
noover(identity)
map(f) // same param type, monomorphic method
//map(identity) // same param type, polymorphic method - not possible for SAMs
// must not lub to an incompatible function type
object t { def f(x: SAM[Int, Int]): Int = 0; def f(y: SAM[String, Int]): String = "1" }
def fun(x: Int) = x
def fun2[T] = (x: T) => 42
t.f(fun) // different param type, monomorphic method
//t.f(fun2) // different param type, polymorphic method - not possible
}
|
som-snytt/dotty
|
tests/pos/overloaded_ho_fun.scala
|
Scala
|
apache-2.0
| 3,336
|
/*
* Copyright (c) 2013-14 Miles Sabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package object shapeless {
def unexpected : Nothing = sys.error("Unexpected invocation")
// Basic definitions
type Id[+T] = T
type Const[C] = {
type λ[T] = C
}
type ¬[T] = T => Nothing
type ¬¬[T] = ¬[¬[T]]
type ∧[T, U] = T with U
type ∨[T, U] = ¬[¬[T] ∧ ¬[U]]
// Type-lambda for context bound
type |∨|[T, U] = {
type λ[X] = ¬¬[X] <:< (T ∨ U)
}
// Type inequalities
trait =:!=[A, B]
implicit def neq[A, B] : A =:!= B = new =:!=[A, B] {}
implicit def neqAmbig1[A] : A =:!= A = unexpected
implicit def neqAmbig2[A] : A =:!= A = unexpected
trait <:!<[A, B]
implicit def nsub[A, B] : A <:!< B = new <:!<[A, B] {}
implicit def nsubAmbig1[A, B >: A] : A <:!< B = unexpected
implicit def nsubAmbig2[A, B >: A] : A <:!< B = unexpected
// Type-lambda for context bound
type |¬|[T] = {
type λ[U] = U <:!< T
}
// Quantifiers
type ∃[P[_]] = P[T] forSome { type T }
type ∀[P[_]] = ¬[∃[({ type λ[X] = ¬[P[X]]})#λ]]
/** `Optic` definitions */
val optic = OpticDefns
val lens = OpticDefns
val prism = OpticDefns
val ^ = Path
/** `Nat` literals */
val nat = Nat
/** `Poly` definitions */
val poly = PolyDefns
import poly._
/** Dependent nullary function type. */
trait DepFn0 {
type Out
def apply(): Out
}
/** Dependent unary function type. */
trait DepFn1[T] {
type Out
def apply(t: T): Out
}
/** Dependent binary function type. */
trait DepFn2[T, U] {
type Out
def apply(t: T, u: U): Out
}
/** The SYB everything combinator */
type Everything[F <: Poly, K <: Poly, T] = Case1[EverythingAux[F, K], T]
class ApplyEverything[F <: Poly] {
def apply(k : Poly): EverythingAux[F, k.type] {} = new EverythingAux[F, k.type]
}
def everything(f: Poly): ApplyEverything[f.type] {} = new ApplyEverything[f.type]
/** The SYB everywhere combinator */
type Everywhere[F <: Poly, T] = Case1[EverywhereAux[F], T]
def everywhere(f: Poly): EverywhereAux[f.type] {} = new EverywhereAux[f.type]
}
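// Tiny sketch (not part of shapeless itself) of the type-inequality evidence
// defined above: the two ambiguous neqAmbig instances make A =:!= A unresolvable,
// so distinctPair only compiles when its argument types differ.
object TypeInequalitySketch {
  import shapeless._
  def distinctPair[A, B](a: A, b: B)(implicit ev: A =:!= B): (A, B) = (a, b)
  val ok: (Int, String) = distinctPair(1, "one")   // compiles: neq[Int, String] is found
  // distinctPair(1, 2)                            // rejected: ambiguous implicits for Int =:!= Int
}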
|
mandubian/shapeless
|
core/src/main/scala/shapeless/package.scala
|
Scala
|
apache-2.0
| 2,677
|
package razie.hosting
import play.api.mvc.{Request, RequestHeader}
import razie.OR._
import razie.cdebug
import razie.wiki.Services
import razie.wiki.model._
import razie.wiki.util.{DslProps, PlayTools}
/**
* Multihosting - website settings - will collect website properties from the topic if it has a 'website' section
*/
class Website (we:WikiPage, extra:Seq[(String,String)] = Seq()) extends DslProps(Some(we), "website,properties", extra) {
def label:String = this prop "label" OR name
def name:String = this prop "name" OR we.name //"-"
def reactor:String = this prop "reactor" OR (this prop "realm" OR "rk")
def title = this prop "title"
def css:Option[String] = this prop "css" // dark vs light
/** make a url for this realm - either configured */
def url:String = this prop "url" OR ("http://" + domain)
def domain:String = this prop "domain" OR (s"$name.dieselapps.com")
def homePage:Option[WID] = this wprop "home"
def userHomePage:Option[WID] = this wprop "userHome"
lazy val trustedSites:Array[String] = (this prop "trustedSites" OR "").split(",")
def notifyList:Option[WID] = this wprop "notifyList"
def footer:Option[WID] = this wprop "footer"
def twitter:String = this prop "bottom.Connect.Twitter" OR "coolscala"
def tos:String = this prop "tos" OR "/wiki/Terms_of_Service"
def privacy:String = this prop "privacy" OR "/wiki/Privacy_Policy"
def dieselReactor:String = this prop "dieselReactor" OR reactor
def dieselVisiblity:String = this prop "diesel.visibility" OR "public"
def dieselTrust:String = this prop "diesel.trust" OR ""
def stylesheet:Option[WID] = this wprop "stylesheet"
def join:String = this prop "join" OR "/doe/join"
def divMain:String = this prop "divMain" OR "9"
def copyright:Option[String] = this prop "copyright"
def logo:Option[String] = this prop "logo"
def adsOnList = this bprop "adsOnList" OR true
def adsAtBottom = this bprop "adsAtBottom" OR true
def adsOnSide = this bprop "adsOnSide" OR true
def adsForUsers = this bprop "adsForUsers" OR true
def noadsForPerms = (this prop "noAdsForPerms").map(_.split(",")) OR Array.empty[String]
def openMembership = this bprop "users.openMembership" OR true
def membersCanCreateTopics = this bprop "users.membersCanCreateTopics" OR true
def rightTop:Option[WID] = this wprop "rightTop"
def rightBottom:Option[WID] = this wprop "rightBottom"
def about:Option[String] = this prop "bottom.More.About" flatMap {s=>
if (s.startsWith("http") || (s startsWith "/")) Some(s)
else WID.fromPath(s).map(_.url)
}
def userTypes:List[String] = (this prop "userTypes").map(_.split(",").toList).getOrElse(Services.config.userTypes)
def userTypeDesc(ut:String):Option[String] = this prop ("userType."+ut)
def layout:String = this prop "layout" OR "Play:classicLayout"
def useWikiPrefix:Boolean = this bprop "useWikiPrefix" OR true
//todo optimize - don't parse every time
def propFilter (prefix:String) = {
propSeq.filter(_._1 startsWith (prefix)).map(t=>(t._1.replaceFirst(prefix, ""), t._2))
}
//sections should be "More" "Support" "Social"
def bottomMenu (section:String) = propFilter(s"bottom.$section.")
def navrMenu () = propFilter(s"navr.")
def navrMenuRemove () = propFilter(s"navr.remove.").map(_._1)
def navMenu () =
propFilter(s"nav.") ++
Seq("admin.badgeRefreshAllTests" -> "/diesel/statusAll")
def metas () = propFilter(s"meta.")
def navTheme:String = this prop "nav.Theme" OR "/doe/selecttheme"
def navBrand = this prop "navBrand"
def supportUrl:String = this prop "bottom.Support.Support" OR "/doe/support"
def supportEmail = this prop "support.email" OR "support@racerkidz.com"
def SUPPORT2 = this prop "support.email" OR "support@effectiveskiing.com"
}
/** multihosting utilities */
object Website {
private case class CacheEntry (w:Website, millis:Long)
private val cache = new collection.mutable.HashMap[String,CacheEntry]()
val EXP = 100000
// s is the host
def forHost (s:String):Option[Website] = {
val ce = cache.get(s)
if (ce.isEmpty) {// || System.currentTimeMillis > ce.get.millis) {
var w : Option[Website] = None
// cdebug << s" RkReactor looking for $s"
RkReactors.forHost(s).map { r=>
// cdebug << s" RkReactor found $r"
// auto-websites of type REACTOR.coolscala.com
WikiReactors.findWikiEntry(r).map { rpage=> // todo no need to reload, the reactor now has the page
// cdebug << s" Wiki found $r"
// create an entry even if no website section present
w = Some(new Website(rpage, Seq("reactor" -> r)))
cache.put(s, CacheEntry(w.get, System.currentTimeMillis()+EXP))
}
}
w
} else
// cdebug << s" Cache found $s"
ce.map(_.w)
}
def forRealm (r:String):Option[Website] = {
cache.values.find(_.w.reactor == r).map(_.w).orElse {
val web = WikiReactors(r).we.map(we=> new Website(we))
web.foreach {w=>
cache.put(w.domain, CacheEntry(w, System.currentTimeMillis()+EXP))
}
web
}
}
def clean (host:String):Unit = {
cache.remove(host)
}
def all = cache.values.map(_.w).toList
def xrealm (implicit request:RequestHeader) = (getHost flatMap Website.forHost).map(_.reactor).getOrElse(dflt.reactor)
def realm (implicit request:Request[_]) = apply(request).map(_.reactor).getOrElse(dflt.reactor)
def getRealm (implicit request:Request[_]) = realm(request)
def apply (implicit request: Request[_]):Option[Website] = getHost flatMap Website.forHost
def userTypes (implicit request: Request[_]) = apply(request).map(_.userTypes).getOrElse(Services.config.userTypes)
def userTypeDesc(ut:String)(implicit request: Request[_]) : String = apply(request).flatMap(_.userTypeDesc(ut)).getOrElse(ut)
/** find or default */
def get (implicit request: Request[_]) : Website = apply getOrElse dflt
def dflt = new Website(Wikis.rk.categories.head) //todo this is stupid - find better default
/** @deprecated use PlayTools.getHost */
def getHost (implicit request: RequestHeader) = PlayTools.getHost
}
|
razie/wikireactor
|
common/app/razie/hosting/Website.scala
|
Scala
|
apache-2.0
| 6,172
|
package com.dominikgruber.fpinscala.chapter05
import org.scalatest._
class Exercise14Spec extends FlatSpec with Matchers {
"startsWith" should "return true" in {
Stream(1, 2, 3).startsWith(Stream(1, 2)) should be (true)
}
it should "return false (I)" in {
Stream(1, 2, 3).startsWith(Stream(1, 2, 3, 4)) should be (false)
}
it should "return false (II)" in {
Stream(1, 2, 3).startsWith(Stream(0)) should be (false)
}
}
|
TheDom/functional-programming-in-scala
|
src/test/scala/com/dominikgruber/fpinscala/chapter05/Exercise14Spec.scala
|
Scala
|
mit
| 446
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.template.similarproduct
import org.apache.predictionio.controller.P2LAlgorithm
import org.apache.predictionio.controller.Params
import org.apache.predictionio.data.storage.BiMap
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.mllib.recommendation.ALS
import org.apache.spark.mllib.recommendation.{Rating => MLlibRating}
import grizzled.slf4j.Logger
import scala.collection.mutable.PriorityQueue
case class ALSAlgorithmParams(
rank: Int,
numIterations: Int,
lambda: Double,
seed: Option[Long]) extends Params
class ALSModel(
val productFeatures: Map[Int, Array[Double]],
val itemStringIntMap: BiMap[String, Int],
val items: Map[Int, Item]
) extends Serializable {
@transient lazy val itemIntStringMap = itemStringIntMap.inverse
override def toString = {
s" productFeatures: [${productFeatures.size}]" +
s"(${productFeatures.take(2).toList}...)" +
s" itemStringIntMap: [${itemStringIntMap.size}]" +
s"(${itemStringIntMap.take(2).toString}...)]" +
s" items: [${items.size}]" +
s"(${items.take(2).toString}...)]"
}
}
/**
* Use ALS to build item x feature matrix
*/
class ALSAlgorithm(val ap: ALSAlgorithmParams)
extends P2LAlgorithm[PreparedData, ALSModel, Query, PredictedResult] {
@transient lazy val logger = Logger[this.type]
def train(sc:SparkContext ,data: PreparedData): ALSModel = {
require(!data.rateEvents.take(1).isEmpty,
s"rateEvents in PreparedData cannot be empty." +
" Please check if DataSource generates TrainingData" +
" and Preprator generates PreparedData correctly.")
require(!data.users.take(1).isEmpty,
s"users in PreparedData cannot be empty." +
" Please check if DataSource generates TrainingData" +
" and Preprator generates PreparedData correctly.")
require(!data.items.take(1).isEmpty,
s"items in PreparedData cannot be empty." +
" Please check if DataSource generates TrainingData" +
" and Preprator generates PreparedData correctly.")
// create User and item's String ID to integer index BiMap
val userStringIntMap = BiMap.stringInt(data.users.keys)
val itemStringIntMap = BiMap.stringInt(data.items.keys)
// collect Item as Map and convert ID to Int index
val items: Map[Int, Item] = data.items.map { case (id, item) =>
(itemStringIntMap(id), item)
}.collectAsMap.toMap
val mllibRatings = data.rateEvents
.map { r =>
// Convert user and item String IDs to Int index for MLlib
val uindex = userStringIntMap.getOrElse(r.user, -1)
val iindex = itemStringIntMap.getOrElse(r.item, -1)
if (uindex == -1)
logger.info(s"Couldn't convert nonexistent user ID ${r.user}"
+ " to Int index.")
if (iindex == -1)
logger.info(s"Couldn't convert nonexistent item ID ${r.item}"
+ " to Int index.")
((uindex, iindex), (r.rating,r.t)) //MODIFIED
}.filter { case ((u, i), v) =>
// keep events with valid user and item index
(u != -1) && (i != -1)
}
.reduceByKey { case (v1, v2) => // MODIFIED
// if a user may rate same item with different value at different times,
// use the latest value for this case.
// Can remove this reduceByKey() if no need to support this case.
val (rating1, t1) = v1
val (rating2, t2) = v2
// keep the latest value
if (t1 > t2) v1 else v2
}
.map { case ((u, i), (rating, t)) => // MODIFIED
// MLlibRating requires integer index for user and item
MLlibRating(u, i, rating) // MODIFIED
}.cache()
// MLLib ALS cannot handle empty training data.
require(!mllibRatings.take(1).isEmpty,
s"mllibRatings cannot be empty." +
" Please check if your events contain valid user and item ID.")
// seed for MLlib ALS
val seed = ap.seed.getOrElse(System.nanoTime)
val m = ALS.train(
ratings = mllibRatings,
rank = ap.rank,
iterations = ap.numIterations,
lambda = ap.lambda,
blocks = -1,
seed = seed)
new ALSModel(
productFeatures = m.productFeatures.collectAsMap.toMap,
itemStringIntMap = itemStringIntMap,
items = items
)
}
def predict(model: ALSModel, query: Query): PredictedResult = {
val productFeatures = model.productFeatures
// convert items to Int index
val queryList: Set[Int] = query.items.map(model.itemStringIntMap.get(_))
.flatten.toSet
val queryFeatures: Vector[Array[Double]] = queryList.toVector
// productFeatures may not contain the requested item
.map { item => productFeatures.get(item) }
.flatten
val whiteList: Option[Set[Int]] = query.whiteList.map( set =>
set.map(model.itemStringIntMap.get(_)).flatten
)
val blackList: Option[Set[Int]] = query.blackList.map ( set =>
set.map(model.itemStringIntMap.get(_)).flatten
)
val ord = Ordering.by[(Int, Double), Double](_._2).reverse
val indexScores: Array[(Int, Double)] = if (queryFeatures.isEmpty) {
logger.info(s"No productFeatures vector for query items ${query.items}.")
Array[(Int, Double)]()
} else {
productFeatures.par // convert to parallel collection
.mapValues { f =>
queryFeatures.map{ qf =>
cosine(qf, f)
}.reduce(_ + _)
}
.filter(_._2 > 0) // keep items with score > 0
.seq // convert back to sequential collection
.toArray
}
val filteredScore = indexScores.view.filter { case (i, v) =>
isCandidateItem(
i = i,
items = model.items,
categories = query.categories,
queryList = queryList,
whiteList = whiteList,
blackList = blackList
)
}
val topScores = getTopN(filteredScore, query.num)(ord).toArray
val itemScores = topScores.map { case (i, s) =>
new ItemScore(
item = model.itemIntStringMap(i),
score = s
)
}
new PredictedResult(itemScores)
}
private
def getTopN[T](s: Seq[T], n: Int)(implicit ord: Ordering[T]): Seq[T] = {
val q = PriorityQueue()
for (x <- s) {
if (q.size < n)
q.enqueue(x)
else {
// q is full
if (ord.compare(x, q.head) < 0) {
q.dequeue()
q.enqueue(x)
}
}
}
q.dequeueAll.toSeq.reverse
}
private
def cosine(v1: Array[Double], v2: Array[Double]): Double = {
val size = v1.size
var i = 0
var n1: Double = 0
var n2: Double = 0
var d: Double = 0
while (i < size) {
n1 += v1(i) * v1(i)
n2 += v2(i) * v2(i)
d += v1(i) * v2(i)
i += 1
}
val n1n2 = (math.sqrt(n1) * math.sqrt(n2))
if (n1n2 == 0) 0 else (d / n1n2)
}
private
def isCandidateItem(
i: Int,
items: Map[Int, Item],
categories: Option[Set[String]],
queryList: Set[Int],
whiteList: Option[Set[Int]],
blackList: Option[Set[Int]]
): Boolean = {
whiteList.map(_.contains(i)).getOrElse(true) &&
blackList.map(!_.contains(i)).getOrElse(true) &&
// discard items in query as well
(!queryList.contains(i)) &&
// filter categories
categories.map { cat =>
items(i).categories.map { itemCat =>
// keep this item if it has overlapping categories with the query
!(itemCat.toSet.intersect(cat).isEmpty)
}.getOrElse(false) // discard this item if it has no categories
}.getOrElse(true)
}
}
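// Standalone sketch (hypothetical feature vectors; no Spark or PredictionIO needed)
// of the scoring rule used in predict() above: each candidate item is scored by the
// sum of its cosine similarities to the query items' feature vectors.
object ScoringSketch {
  private def cosineSim(v1: Array[Double], v2: Array[Double]): Double = {
    val dot  = v1.zip(v2).map { case (a, b) => a * b }.sum
    val norm = math.sqrt(v1.map(x => x * x).sum) * math.sqrt(v2.map(x => x * x).sum)
    if (norm == 0) 0.0 else dot / norm
  }
  def main(args: Array[String]): Unit = {
    val queryFeatures     = Vector(Array(1.0, 0.0), Array(0.7, 0.7))
    val candidateFeatures = Map(10 -> Array(0.9, 0.1), 11 -> Array(0.0, 1.0))
    val scores = candidateFeatures.map { case (item, f) =>
      item -> queryFeatures.map(qf => cosineSim(qf, f)).sum
    }
    // Highest-scoring items first, mirroring the reversed ordering used in predict().
    scores.toSeq.sortBy { case (_, s) => -s }.foreach { case (item, s) =>
      println(f"item $item%d score $s%.3f")
    }
  }
}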
|
himanshudhami/PredictionIO
|
examples/scala-parallel-similarproduct/add-rateevent/src/main/scala/ALSAlgorithm.scala
|
Scala
|
apache-2.0
| 8,348
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import java.lang.{Double => JDouble, Long => JLong}
import java.math.{BigDecimal => JBigDecimal}
import java.util.{Locale, TimeZone}
import scala.collection.mutable.ArrayBuffer
import scala.util.Try
import org.apache.hadoop.fs.Path
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.Resolver
import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec
import org.apache.spark.sql.catalyst.expressions.{Cast, Literal}
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.types._
// TODO: We should tighten up visibility of the classes here once we clean up Hive coupling.
object PartitionPath {
def apply(values: InternalRow, path: String): PartitionPath =
apply(values, new Path(path))
}
/**
* Holds a directory in a partitioned collection of files as well as the partition values
* in the form of a Row. Before scanning, the files at `path` need to be enumerated.
*/
case class PartitionPath(values: InternalRow, path: Path)
case class PartitionSpec(
partitionColumns: StructType,
partitions: Seq[PartitionPath])
object PartitionSpec {
val emptySpec = PartitionSpec(StructType(Seq.empty[StructField]), Seq.empty[PartitionPath])
}
object PartitioningUtils {
private[datasources] case class PartitionValues(columnNames: Seq[String], literals: Seq[Literal])
{
require(columnNames.size == literals.size)
}
import org.apache.spark.sql.catalyst.catalog.ExternalCatalogUtils.DEFAULT_PARTITION_NAME
import org.apache.spark.sql.catalyst.catalog.ExternalCatalogUtils.escapePathName
import org.apache.spark.sql.catalyst.catalog.ExternalCatalogUtils.unescapePathName
/**
* Given a group of qualified paths, tries to parse them and returns a partition specification.
* For example, given:
* {{{
* hdfs://<host>:<port>/path/to/partition/a=1/b=hello/c=3.14
* hdfs://<host>:<port>/path/to/partition/a=2/b=world/c=6.28
* }}}
* it returns:
* {{{
* PartitionSpec(
* partitionColumns = StructType(
* StructField(name = "a", dataType = IntegerType, nullable = true),
* StructField(name = "b", dataType = StringType, nullable = true),
* StructField(name = "c", dataType = DoubleType, nullable = true)),
* partitions = Seq(
* Partition(
* values = Row(1, "hello", 3.14),
* path = "hdfs://<host>:<port>/path/to/partition/a=1/b=hello/c=3.14"),
* Partition(
* values = Row(2, "world", 6.28),
* path = "hdfs://<host>:<port>/path/to/partition/a=2/b=world/c=6.28")))
* }}}
*/
private[datasources] def parsePartitions(
paths: Seq[Path],
typeInference: Boolean,
basePaths: Set[Path],
timeZoneId: String): PartitionSpec = {
parsePartitions(paths, typeInference, basePaths, DateTimeUtils.getTimeZone(timeZoneId))
}
private[datasources] def parsePartitions(
paths: Seq[Path],
typeInference: Boolean,
basePaths: Set[Path],
timeZone: TimeZone): PartitionSpec = {
// First, we need to parse every partition's path and see if we can find partition values.
val (partitionValues, optDiscoveredBasePaths) = paths.map { path =>
parsePartition(path, typeInference, basePaths, timeZone)
}.unzip
// We create pairs of (path -> path's partition value) here
// If the corresponding partition value is None, the pair will be skipped
val pathsWithPartitionValues = paths.zip(partitionValues).flatMap(x => x._2.map(x._1 -> _))
if (pathsWithPartitionValues.isEmpty) {
// This dataset is not partitioned.
PartitionSpec.emptySpec
} else {
// This dataset is partitioned. We need to check whether all partitions have the same
// partition columns and resolve potential type conflicts.
// Check if there is conflicting directory structure.
// For the paths such as:
// var paths = Seq(
// "hdfs://host:9000/invalidPath",
// "hdfs://host:9000/path/a=10/b=20",
// "hdfs://host:9000/path/a=10.5/b=hello")
// It will be recognised as conflicting directory structure:
// "hdfs://host:9000/invalidPath"
// "hdfs://host:9000/path"
// TODO: Selective case sensitivity.
val discoveredBasePaths = optDiscoveredBasePaths.flatten.map(_.toString.toLowerCase())
assert(
discoveredBasePaths.distinct.size == 1,
"Conflicting directory structures detected. Suspicious paths:\\b" +
discoveredBasePaths.distinct.mkString("\\n\\t", "\\n\\t", "\\n\\n") +
"If provided paths are partition directories, please set " +
"\\"basePath\\" in the options of the data source to specify the " +
"root directory of the table. If there are multiple root directories, " +
"please load them separately and then union them.")
val resolvedPartitionValues = resolvePartitions(pathsWithPartitionValues)
// Creates the StructType which represents the partition columns.
val fields = {
val PartitionValues(columnNames, literals) = resolvedPartitionValues.head
columnNames.zip(literals).map { case (name, Literal(_, dataType)) =>
// We always assume partition columns are nullable since we've no idea whether null values
// will be appended in the future.
StructField(name, dataType, nullable = true)
}
}
// Finally, we create `Partition`s based on paths and resolved partition values.
val partitions = resolvedPartitionValues.zip(pathsWithPartitionValues).map {
case (PartitionValues(_, literals), (path, _)) =>
PartitionPath(InternalRow.fromSeq(literals.map(_.value)), path)
}
PartitionSpec(StructType(fields), partitions)
}
}
/**
   * Parses a single partition path, returning the column names and values of each partition
   * column, as well as the path at which partition discovery stopped. For example, given:
* {{{
* path = hdfs://<host>:<port>/path/to/partition/a=42/b=hello/c=3.14
* }}}
* it returns the partition:
* {{{
* PartitionValues(
* Seq("a", "b", "c"),
* Seq(
* Literal.create(42, IntegerType),
* Literal.create("hello", StringType),
* Literal.create(3.14, DoubleType)))
* }}}
* and the path when we stop the discovery is:
* {{{
* hdfs://<host>:<port>/path/to/partition
* }}}
*/
private[datasources] def parsePartition(
path: Path,
typeInference: Boolean,
basePaths: Set[Path],
timeZone: TimeZone): (Option[PartitionValues], Option[Path]) = {
val columns = ArrayBuffer.empty[(String, Literal)]
// Old Hadoop versions don't have `Path.isRoot`
var finished = path.getParent == null
// currentPath is the current path that we will use to parse partition column value.
var currentPath: Path = path
while (!finished) {
      // Sometimes (e.g., when speculative execution is enabled), temporary directories may be
      // left uncleaned. Here we simply ignore them.
if (currentPath.getName.toLowerCase(Locale.ROOT) == "_temporary") {
return (None, None)
}
if (basePaths.contains(currentPath)) {
        // If currentPath is one of the base paths, we should stop.
finished = true
} else {
// Let's say currentPath is a path of "/table/a=1/", currentPath.getName will give us a=1.
// Once we get the string, we try to parse it and find the partition column and value.
val maybeColumn =
parsePartitionColumn(currentPath.getName, typeInference, timeZone)
maybeColumn.foreach(columns += _)
        // Now, we determine if we should stop.
        // We stop when we hit either of the following cases:
        //  - In this iteration we could not parse a partition column and its value,
        //    i.e. maybeColumn is None, and columns is not empty. We check whether columns is
        //    empty to handle cases like /table/a=1/_temporary/something (we need to find a=1
        //    in this case).
        //  - The current path represents the top-level dir, i.e.
        //    currentPath.getParent == null. For the example of "/table/a=1/",
        //    the top-level dir is "/table".
finished =
(maybeColumn.isEmpty && !columns.isEmpty) || currentPath.getParent == null
if (!finished) {
// For the above example, currentPath will be "/table/".
currentPath = currentPath.getParent
}
}
}
if (columns.isEmpty) {
(None, Some(path))
} else {
val (columnNames, values) = columns.reverse.unzip
(Some(PartitionValues(columnNames, values)), Some(currentPath))
}
}
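  // Illustrative sketch of a single-path parse; the leaf path and base path are hypothetical,
  // and this helper is not part of the original API. Walking up from the leaf file, "b=hello"
  // and "a=42" are collected, and discovery stops at the supplied base path, which is returned
  // as the second element of the tuple.
  private def exampleSinglePartitionParse(): (Option[PartitionValues], Option[Path]) = {
    parsePartition(
      new Path("hdfs://host:9000/table/a=42/b=hello/part-00000"),
      typeInference = true,
      basePaths = Set(new Path("hdfs://host:9000/table")),
      timeZone = TimeZone.getTimeZone("UTC"))
  }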
private def parsePartitionColumn(
columnSpec: String,
typeInference: Boolean,
timeZone: TimeZone): Option[(String, Literal)] = {
val equalSignIndex = columnSpec.indexOf('=')
if (equalSignIndex == -1) {
None
} else {
val columnName = unescapePathName(columnSpec.take(equalSignIndex))
assert(columnName.nonEmpty, s"Empty partition column name in '$columnSpec'")
val rawColumnValue = columnSpec.drop(equalSignIndex + 1)
assert(rawColumnValue.nonEmpty, s"Empty partition column value in '$columnSpec'")
val literal = inferPartitionColumnValue(rawColumnValue, typeInference, timeZone)
Some(columnName -> literal)
}
}
/**
* Given a partition path fragment, e.g. `fieldOne=1/fieldTwo=2`, returns a parsed spec
* for that fragment as a `TablePartitionSpec`, e.g. `Map(("fieldOne", "1"), ("fieldTwo", "2"))`.
*/
def parsePathFragment(pathFragment: String): TablePartitionSpec = {
parsePathFragmentAsSeq(pathFragment).toMap
}
/**
* Given a partition path fragment, e.g. `fieldOne=1/fieldTwo=2`, returns a parsed spec
* for that fragment as a `Seq[(String, String)]`, e.g.
* `Seq(("fieldOne", "1"), ("fieldTwo", "2"))`.
*/
def parsePathFragmentAsSeq(pathFragment: String): Seq[(String, String)] = {
pathFragment.split("/").map { kv =>
val pair = kv.split("=", 2)
(unescapePathName(pair(0)), unescapePathName(pair(1)))
}
}
/**
* This is the inverse of parsePathFragment().
*/
def getPathFragment(spec: TablePartitionSpec, partitionSchema: StructType): String = {
partitionSchema.map { field =>
escapePathName(field.name) + "=" + escapePathName(spec(field.name))
}.mkString("/")
}
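  // Illustrative sketch of the round trip between the two helpers above; the column names and
  // values are made up, and this helper is not part of the original API. parsePathFragment
  // splits the fragment into a spec, and getPathFragment re-escapes it back using the partition
  // schema's column order.
  private def examplePathFragmentRoundTrip(): Unit = {
    val spec = parsePathFragment("fieldOne=1/fieldTwo=2")
    // spec == Map("fieldOne" -> "1", "fieldTwo" -> "2")
    val schema = StructType(Seq(
      StructField("fieldOne", StringType),
      StructField("fieldTwo", StringType)))
    assert(getPathFragment(spec, schema) == "fieldOne=1/fieldTwo=2")
  }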
/**
* Normalize the column names in partition specification, w.r.t. the real partition column names
* and case sensitivity. e.g., if the partition spec has a column named `monTh`, and there is a
* partition column named `month`, and it's case insensitive, we will normalize `monTh` to
* `month`.
*/
def normalizePartitionSpec[T](
partitionSpec: Map[String, T],
partColNames: Seq[String],
tblName: String,
resolver: Resolver): Map[String, T] = {
val normalizedPartSpec = partitionSpec.toSeq.map { case (key, value) =>
val normalizedKey = partColNames.find(resolver(_, key)).getOrElse {
throw new AnalysisException(s"$key is not a valid partition column in table $tblName.")
}
normalizedKey -> value
}
if (normalizedPartSpec.map(_._1).distinct.length != normalizedPartSpec.length) {
val duplicateColumns = normalizedPartSpec.map(_._1).groupBy(identity).collect {
case (x, ys) if ys.length > 1 => x
}
throw new AnalysisException(s"Found duplicated columns in partition specification: " +
duplicateColumns.mkString(", "))
}
normalizedPartSpec.toMap
}
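  // Illustrative sketch of the normalization above; the table name, column names and value are
  // hypothetical, and this helper is not part of the original API. With the case-insensitive
  // resolver, the user-supplied key "monTh" is normalized to the real partition column "month".
  private def exampleNormalizePartitionSpec(): Map[String, String] = {
    normalizePartitionSpec(
      partitionSpec = Map("monTh" -> "7"),
      partColNames = Seq("year", "month"),
      tblName = "events",
      resolver = org.apache.spark.sql.catalyst.analysis.caseInsensitiveResolution)
    // returns Map("month" -> "7")
  }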
/**
* Resolves possible type conflicts between partitions by up-casting "lower" types. The up-
* casting order is:
* {{{
* NullType ->
* IntegerType -> LongType ->
* DoubleType -> StringType
* }}}
*/
def resolvePartitions(
pathsWithPartitionValues: Seq[(Path, PartitionValues)]): Seq[PartitionValues] = {
if (pathsWithPartitionValues.isEmpty) {
Seq.empty
} else {
// TODO: Selective case sensitivity.
val distinctPartColNames =
pathsWithPartitionValues.map(_._2.columnNames.map(_.toLowerCase())).distinct
assert(
distinctPartColNames.size == 1,
listConflictingPartitionColumns(pathsWithPartitionValues))
// Resolves possible type conflicts for each column
val values = pathsWithPartitionValues.map(_._2)
val columnCount = values.head.columnNames.size
val resolvedValues = (0 until columnCount).map { i =>
resolveTypeConflicts(values.map(_.literals(i)))
}
// Fills resolved literals back to each partition
values.zipWithIndex.map { case (d, index) =>
d.copy(literals = resolvedValues.map(_(index)))
}
}
}
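  // Illustrative sketch of conflict resolution; the paths and literals are hypothetical, and
  // this helper is not part of the original API. Column "a" was inferred as IntegerType in one
  // partition and DoubleType in another, so both literals are up-cast to DoubleType.
  private def exampleResolvePartitions(): Seq[PartitionValues] = {
    val p1 = new Path("hdfs://host:9000/table/a=10") ->
      PartitionValues(Seq("a"), Seq(Literal.create(10, IntegerType)))
    val p2 = new Path("hdfs://host:9000/table/a=10.5") ->
      PartitionValues(Seq("a"), Seq(Literal.create(10.5, DoubleType)))
    resolvePartitions(Seq(p1, p2))
  }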
private[datasources] def listConflictingPartitionColumns(
pathWithPartitionValues: Seq[(Path, PartitionValues)]): String = {
val distinctPartColNames = pathWithPartitionValues.map(_._2.columnNames).distinct
def groupByKey[K, V](seq: Seq[(K, V)]): Map[K, Iterable[V]] =
seq.groupBy { case (key, _) => key }.mapValues(_.map { case (_, value) => value })
val partColNamesToPaths = groupByKey(pathWithPartitionValues.map {
case (path, partValues) => partValues.columnNames -> path
})
val distinctPartColLists = distinctPartColNames.map(_.mkString(", ")).zipWithIndex.map {
case (names, index) =>
s"Partition column name list #$index: $names"
}
// Lists out those non-leaf partition directories that also contain files
val suspiciousPaths = distinctPartColNames.sortBy(_.length).flatMap(partColNamesToPaths)
s"Conflicting partition column names detected:\\n" +
distinctPartColLists.mkString("\\n\\t", "\\n\\t", "\\n\\n") +
"For partitioned table directories, data files should only live in leaf directories.\\n" +
"And directories at the same level should have the same partition column name.\\n" +
"Please check the following directories for unexpected files or " +
"inconsistent partition column names:\\n" +
suspiciousPaths.map("\\t" + _).mkString("\\n", "\\n", "")
}
/**
* Converts a string to a [[Literal]] with automatic type inference. Currently only supports
   * [[IntegerType]], [[LongType]], [[DoubleType]], [[DecimalType]], [[DateType]],
* [[TimestampType]], and [[StringType]].
*/
private[datasources] def inferPartitionColumnValue(
raw: String,
typeInference: Boolean,
timeZone: TimeZone): Literal = {
val decimalTry = Try {
      // `BigDecimal` conversion can fail when `raw` is not in a numeric form.
val bigDecimal = new JBigDecimal(raw)
      // This restricts decimal inference to values with no scale (e.g. `1.1` is rejected).
require(bigDecimal.scale <= 0)
// `DecimalType` conversion can fail when
// 1. The precision is bigger than 38.
// 2. scale is bigger than precision.
Literal(bigDecimal)
}
if (typeInference) {
// First tries integral types
Try(Literal.create(Integer.parseInt(raw), IntegerType))
.orElse(Try(Literal.create(JLong.parseLong(raw), LongType)))
.orElse(decimalTry)
// Then falls back to fractional types
.orElse(Try(Literal.create(JDouble.parseDouble(raw), DoubleType)))
// Then falls back to date/timestamp types
.orElse(Try(
Literal.create(
DateTimeUtils.getThreadLocalTimestampFormat(timeZone)
.parse(unescapePathName(raw)).getTime * 1000L,
TimestampType)))
.orElse(Try(
Literal.create(
DateTimeUtils.millisToDays(
DateTimeUtils.getThreadLocalDateFormat.parse(raw).getTime),
DateType)))
// Then falls back to string
.getOrElse {
if (raw == DEFAULT_PARTITION_NAME) {
Literal.create(null, NullType)
} else {
Literal.create(unescapePathName(raw), StringType)
}
}
} else {
if (raw == DEFAULT_PARTITION_NAME) {
Literal.create(null, NullType)
} else {
Literal.create(unescapePathName(raw), StringType)
}
}
}
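  // Illustrative sketch of the inference above for a few raw directory values; the values and
  // the UTC time zone are hypothetical, and this helper is not part of the original API.
  // Integral types are tried first, then decimals and doubles, then date/timestamp, and finally
  // everything falls back to a string (or a null literal for the default partition name).
  private def exampleInferPartitionColumnValue(): Seq[Literal] = {
    val utc = TimeZone.getTimeZone("UTC")
    Seq(
      inferPartitionColumnValue("42", typeInference = true, timeZone = utc), // IntegerType
      inferPartitionColumnValue("4294967297", typeInference = true, timeZone = utc), // LongType
      inferPartitionColumnValue("1.5", typeInference = true, timeZone = utc), // DoubleType
      inferPartitionColumnValue("hello", typeInference = true, timeZone = utc), // StringType
      inferPartitionColumnValue(DEFAULT_PARTITION_NAME, typeInference = true, timeZone = utc)) // NullType
  }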
private val upCastingOrder: Seq[DataType] =
Seq(NullType, IntegerType, LongType, FloatType, DoubleType, StringType)
def validatePartitionColumn(
schema: StructType,
partitionColumns: Seq[String],
caseSensitive: Boolean): Unit = {
partitionColumnsSchema(schema, partitionColumns, caseSensitive).foreach {
field => field.dataType match {
case _: AtomicType => // OK
case _ => throw new AnalysisException(s"Cannot use ${field.dataType} for partition column")
}
}
if (partitionColumns.nonEmpty && partitionColumns.size == schema.fields.length) {
throw new AnalysisException(s"Cannot use all columns for partition columns")
}
}
def partitionColumnsSchema(
schema: StructType,
partitionColumns: Seq[String],
caseSensitive: Boolean): StructType = {
val equality = columnNameEquality(caseSensitive)
StructType(partitionColumns.map { col =>
schema.find(f => equality(f.name, col)).getOrElse {
throw new AnalysisException(s"Partition column $col not found in schema $schema")
}
}).asNullable
}
private def columnNameEquality(caseSensitive: Boolean): (String, String) => Boolean = {
if (caseSensitive) {
org.apache.spark.sql.catalyst.analysis.caseSensitiveResolution
} else {
org.apache.spark.sql.catalyst.analysis.caseInsensitiveResolution
}
}
/**
* Given a collection of [[Literal]]s, resolves possible type conflicts by up-casting "lower"
* types.
*/
private def resolveTypeConflicts(literals: Seq[Literal]): Seq[Literal] = {
val desiredType = {
val topType = literals.map(_.dataType).maxBy(upCastingOrder.indexOf(_))
// Falls back to string if all values of this column are null or empty string
if (topType == NullType) StringType else topType
}
literals.map { case l @ Literal(_, dataType) =>
Literal.create(Cast(l, desiredType).eval(), desiredType)
}
}
}
|
wangyixiaohuihui/spark2-annotation
|
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala
|
Scala
|
apache-2.0
| 19,664
|
/*
* La Trobe University - Distributed Deep Learning System
* Copyright 2016 Matthias Langer (t3l@threelights.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package edu.latrobe.blaze.modules.jvm
import edu.latrobe._
import edu.latrobe.blaze._
import edu.latrobe.blaze.modules._
abstract class ZeroMean_JVM
extends ZeroMean
with MapLayer_JVM[ZeroMeanBuilder] {
final val (runningMean, runningMeanReference) = {
val ref = builder.runningMeanReference
val tmp = weightBufferBuilder.get(ref)
if (tmp.isDefined) {
val result = tmp.get.asInstanceOf[RealArrayTensor]
(result, None)
}
else {
val result = RealArrayTensor.zeros(runningMeanLayout)
val newRef = weightBufferBuilder.register(ref, result)
(result, Some(newRef))
}
}
// ---------------------------------------------------------------------------
// Weights related.
// ---------------------------------------------------------------------------
override def refresh(): Unit = {}
// ---------------------------------------------------------------------------
// Forward propagation related.
// ---------------------------------------------------------------------------
final override protected def doPredictForUnitTraining(inPlaceAllowed: Boolean,
input: Tensor,
learningRate: Real)
: (Tensor, PredictContext) = {
val out = {
if (inPlaceAllowed) {
input.asOrToRealArrayTensor
}
else {
input.toRealArrayTensor
}
}
doPredictForUnitTraining(out, learningRate)
(out, EmptyContext)
}
protected def doPredictForUnitTraining(output: RealArrayTensor,
learningRate: Real)
: Unit
final override protected def doPredictForUnitInference(inPlaceAllowed: Boolean,
input: Tensor)
: Tensor = {
val out = {
if (inPlaceAllowed) {
input.asOrToRealArrayTensor
}
else {
input.toRealArrayTensor
}
}
doPredictForUnitInference(out)
out
}
protected def doPredictForUnitInference(output: RealArrayTensor)
: Unit
final override protected def doPredictForChannelTraining(inPlaceAllowed: Boolean,
input: Tensor,
learningRate: Real)
: (Tensor, PredictContext) = {
val out = {
if (inPlaceAllowed) {
input.asOrToRealArrayTensor
}
else {
input.toRealArrayTensor
}
}
doPredictForChannelTraining(out, learningRate)
(out, EmptyContext)
}
protected def doPredictForChannelTraining(output: RealArrayTensor,
learningRate: Real)
: Unit
final override protected def doPredictForChannelInference(inPlaceAllowed: Boolean,
input: Tensor)
: Tensor = {
val out = {
if (inPlaceAllowed) {
input.asOrToRealArrayTensor
}
else {
input.toRealArrayTensor
}
}
doPredictForChannelInference(out)
out
}
protected def doPredictForChannelInference(output: RealArrayTensor)
: Unit
final override protected def doPredictForSampleTraining(inPlaceAllowed: Boolean,
input: Tensor,
learningRate: Real)
: (Tensor, PredictContext) = {
val out = {
if (inPlaceAllowed) {
input.asOrToRealArrayTensor
}
else {
input.toRealArrayTensor
}
}
doPredictForSampleTraining(out, learningRate)
(out, EmptyContext)
}
protected def doPredictForSampleTraining(output: RealArrayTensor,
learningRate: Real)
: Unit
final override protected def doPredictForSampleInference(inPlaceAllowed: Boolean,
input: Tensor)
: Tensor = {
val out = {
if (inPlaceAllowed) {
input.asOrToRealArrayTensor
}
else {
input.toRealArrayTensor
}
}
doPredictForSampleInference(out)
out
}
protected def doPredictForSampleInference(output: RealArrayTensor)
: Unit
final override protected def doPredictForBatchTraining(inPlaceAllowed: Boolean,
input: Tensor,
learningRate: Real)
: (Tensor, PredictContext) = {
val out = {
if (inPlaceAllowed) {
input.asOrToRealArrayTensor
}
else {
input.toRealArrayTensor
}
}
doPredictForBatchTraining(out, learningRate)
(out, EmptyContext)
}
protected def doPredictForBatchTraining(output: RealArrayTensor,
learningRate: Real)
: Unit
final override protected def doPredictForBatchInference(inPlaceAllowed: Boolean,
input: Tensor)
: Tensor = {
val out = {
if (inPlaceAllowed) {
input.asOrToRealArrayTensor
}
else {
input.toRealArrayTensor
}
}
doPredictForBatchInference(out)
out
}
protected def doPredictForBatchInference(output: RealArrayTensor)
: Unit
// ---------------------------------------------------------------------------
// Back propagation related.
// ---------------------------------------------------------------------------
final override protected def doDeriveInputErrorForUnit(context: PredictContext,
error: Tensor)
: RealArrayTensor = {
val err = error.asOrToRealArrayTensor
doDeriveInputErrorForUnit(context, err)
err
}
protected def doDeriveInputErrorForUnit(context: PredictContext,
error: RealArrayTensor)
: Unit
final override protected def doDeriveInputErrorForChannel(context: PredictContext,
error: Tensor)
: RealArrayTensor = {
val err = error.asOrToRealArrayTensor
doDeriveInputErrorForChannel(context, err)
err
}
protected def doDeriveInputErrorForChannel(context: PredictContext,
error: RealArrayTensor)
: Unit
final override protected def doDeriveInputErrorForSample(context: PredictContext,
error: Tensor)
: RealArrayTensor = {
val err = error.asOrToRealArrayTensor
doDeriveInputErrorForSample(context, err)
err
}
protected def doDeriveInputErrorForSample(context: PredictContext,
error: RealArrayTensor)
: Unit
final override protected def doDeriveInputErrorForBatch(context: PredictContext,
error: Tensor)
: RealArrayTensor = {
val err = error.asOrToRealArrayTensor
doDeriveInputErrorForBatch(context, err)
err
}
protected def doDeriveInputErrorForBatch(context: PredictContext,
error: RealArrayTensor)
: Unit
}
|
bashimao/ltudl
|
blaze/src/main/scala/edu/latrobe/blaze/modules/jvm/ZeroMean_JVM.scala
|
Scala
|
apache-2.0
| 8,078
|
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.kafka08.consumer
import kafka.consumer.ConsumerConfig
import org.locationtech.geomesa.kafka08.KafkaUtils08
case class Broker(host: String, port: Int) {
override def toString = s"[$host,$port]"
}
object Broker {
val defaultPort = 9092
def apply(broker: String): Broker = {
val colon = broker.lastIndexOf(':')
if (colon == -1) {
Broker(broker, defaultPort)
} else {
try {
Broker(broker.substring(0, colon), broker.substring(colon + 1).toInt)
} catch {
case e: Exception => throw new IllegalArgumentException(s"Invalid broker string '$broker'", e)
}
}
}
}
object Brokers {
def apply(brokers: String): Seq[Broker] = brokers.split(",").map(Broker.apply)
def apply(config: ConsumerConfig): Seq[Broker] = {
val brokers : String = config.props.getString(KafkaUtils08.brokerParam)
apply(brokers)
}
}
|
nagavallia/geomesa
|
geomesa-kafka/geomesa-kafka-datastore/geomesa-kafka-08-datastore/src/main/scala/org/locationtech/geomesa/kafka08/consumer/Broker.scala
|
Scala
|
apache-2.0
| 1,370
|
/*
* @author Philip Stutz
*
* Copyright 2014 University of Zurich
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.signalcollect.triplerush
import org.scalatest.FlatSpec
import org.scalatest.prop.Checkers
import com.signalcollect.triplerush.EfficientIndexPattern._
import com.signalcollect.triplerush.EfficientIndexPattern.longToIndexPattern
class QueryIdEmbeddingSpec extends FlatSpec with Checkers {
"QueryIds" should "correctly embed and recover query IDs" in {
check((queryId: Int) => {
if (queryId != Int.MinValue) {
val embedded = OperationIds.embedInLong(queryId)
assert(embedded.isOperationId)
val extracted = OperationIds.extractFromLong(embedded)
assert(extracted === queryId)
true
} else {
true
}
}, minSuccessful(10))
}
}
|
uzh/triplerush
|
src/test/scala/com/signalcollect/triplerush/QueryIdEmbeddingSpec.scala
|
Scala
|
apache-2.0
| 1,362
|
package play.utils
import java.sql._
import java.util.logging.Logger
class ProxyDriver(proxied: Driver) extends Driver {
def acceptsURL(url: String) = proxied.acceptsURL(url)
def connect(user: String, properties: java.util.Properties) = proxied.connect(user, properties)
def getMajorVersion() = proxied.getMajorVersion
def getMinorVersion() = proxied.getMinorVersion
def getPropertyInfo(user: String, properties: java.util.Properties) = proxied.getPropertyInfo(user, properties)
def jdbcCompliant() = proxied.jdbcCompliant
def getParentLogger(): Logger = null
}
|
michaelahlers/team-awesome-wedding
|
vendor/play-2.2.1/framework/src/play/src/main/scala/play/utils/ProxyDriver.scala
|
Scala
|
mit
| 580
|
package scalabpe.core
import java.util.concurrent.ArrayBlockingQueue
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.RejectedExecutionException
import java.util.concurrent.ThreadFactory
import java.util.concurrent.ThreadPoolExecutor
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.locks.ReentrantLock
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.HashMap
case class FlowCallTimeout(requestId: String)
case class FlowTimeout(requestId: String)
class InvokeInfo(val service: String, val timeout: Int, val params: HashMapStringAny, val toAddr: String = null)
object FlowActor {
val count = new AtomicInteger(1)
}
class FlowActor(val router: Router, var threadNum: Int = 0) extends Actor with Logging with Closable with Dumpable {
val queueSize = 20000
var threadFactory: ThreadFactory = _
var pool: ThreadPoolExecutor = _
val flowmaps = new ConcurrentHashMap[String, Flow]()
init
def dump() {
val buff = new StringBuilder
buff.append("pool.size=").append(pool.getPoolSize).append(",")
buff.append("pool.getQueue.size=").append(pool.getQueue.size).append(",")
buff.append("flowmaps.size=").append(flowmaps.size).append(",")
log.info(buff.toString)
}
def init() {
if (threadNum == 0)
threadNum = (router.cfgXml \\ "ThreadNum").text.toInt
threadFactory = new NamedThreadFactory("flow" + FlowActor.count.getAndIncrement())
pool = new ThreadPoolExecutor(threadNum, threadNum, 0, TimeUnit.SECONDS, new ArrayBlockingQueue[Runnable](queueSize), threadFactory)
pool.prestartAllCoreThreads()
Flow.router = router
log.info("FlowActor started")
}
def close() {
val size = flowmaps.size
val t11 = System.currentTimeMillis
while (flowmaps.size > 0) {
Thread.sleep(15)
}
val t22 = System.currentTimeMillis
if (t22 - t11 > 100)
log.warn("FlowActor long time to wait for flow finished, size={}, ts={}", size, t22 - t11)
val t1 = System.currentTimeMillis
pool.shutdown()
pool.awaitTermination(5, TimeUnit.SECONDS)
pool = null
val t2 = System.currentTimeMillis
if (t2 - t1 > 100)
log.warn("FlowActor long time to shutdown pool, ts={}", t2 - t1)
log.info("FlowActor stopped")
}
override def receive(v: Any) {
if (Flow.isSyncedInvoke.get() == true) {
try {
onReceive(v)
} catch {
case e: Exception =>
log.error("flow exception v={}", v, e)
}
} else {
if (pool == null) return
try {
pool.execute(new Runnable() {
def run() {
try {
onReceive(v)
} catch {
case e: Exception =>
log.error("flow exception v={}", v, e)
}
}
})
} catch {
case e: RejectedExecutionException =>
// ignore the message
log.error("flow queue is full")
}
}
}
def onReceive(v: Any) {
v match {
case req: Request =>
//log.info("request received, req={}",req)
val flow = createFlow(req)
if (flow != null) {
try {
saveFlow(req.requestId, flow)
flow.requestReceived()
} catch {
case e: Exception =>
if (!flow.replied) {
replyError(ResultCodes.SERVICE_INTERNALERROR, req)
}
log.error("exception in flow.requestReceived, e={}", e.getMessage, e)
removeFlow(req.requestId)
flow.cancelFlowTimer()
}
} else {
replyError(ResultCodes.SERVICE_NOT_FOUND, req)
}
case res: InvokeResult =>
//log.info("result received, res={}",res)
val flow = findFlow(Flow.parseRequestId(res.requestId))
if (flow != null) {
try {
flow.responseReceived(res)
} catch {
case e: Exception =>
log.error("exception in flow.responseReceived, e={}", e.getMessage, e)
if (!flow.replied) {
replyError(ResultCodes.SERVICE_INTERNALERROR, flow.req)
}
removeFlow(flow.req.requestId)
flow.cancelFlowTimer()
}
}
case FlowCallTimeout(requestId) =>
//log.info("asynccall timeout, requestId={}",requestId)
val flow = findFlow(Flow.parseRequestId(requestId))
if (flow != null) {
try {
flow.timeoutReceived(requestId)
} catch {
case e: Exception =>
log.error("exception in flow.timeoutReceived, e={}", e.getMessage, e)
if (!flow.replied) {
replyError(ResultCodes.SERVICE_INTERNALERROR, flow.req)
}
removeFlow(flow.req.requestId)
flow.cancelFlowTimer()
}
}
case FlowTimeout(requestId) =>
//log.info("flow timeout, requestId={}",requestId)
removeFlow(requestId)
case _ =>
log.error("unknown msg received")
}
}
def findFlow(requestId: String): Flow = {
val existedFlow = flowmaps.get(requestId)
existedFlow
}
def createFlow(req: Request): Flow = {
val (serviceName, msgName) = router.serviceIdToName(req.serviceId, req.msgId)
val clsName = "scalabpe.flow.Flow_" + serviceName + "_" + msgName
try {
val flow = Class.forName(clsName).newInstance.asInstanceOf[Flow]
Flow.updateClsStats(clsName)
flow.req = req
flow.owner = this
flow
} catch {
case e: Exception =>
log.error("class not found, clsName={},e={}", clsName, e.getMessage);
null
}
}
def saveFlow(requestId: String, flow: Flow) {
flowmaps.put(requestId, flow)
}
def removeFlow(requestId: String): Flow = {
flowmaps.remove(requestId)
}
def replyError(errorCode: Int, req: Request) {
val res = new Response(errorCode, new HashMapStringAny(), req)
router.reply(new RequestResponseInfo(req, res))
}
}
object Flow {
var router: Router = _
val callStats = new ConcurrentHashMap[String, String]()
val isSyncedInvoke = new ThreadLocal[Boolean]() {
override def initialValue(): Boolean = {
return false
}
}
def parseRequestId(requestId: String): String = {
val p = requestId.lastIndexOf(":");
if (p < 0) requestId
else requestId.substring(0, p)
}
def parseIdx(requestId: String): Int = {
val p = requestId.lastIndexOf(":");
if (p < 0)
0
else
requestId.substring(p + 1).toInt
}
def newRequestId(requestId: String, idx: Int): String = {
requestId + ":" + idx
}
def updateClsStats(clsName: String) {
if (!Router.testMode) return
val key = clsName + ":0"
val value = clsName + ":constructor:unknown:0"
callStats.putIfAbsent(key, value)
}
def updateCallStats() {
if (!Router.testMode) return
val elements = Thread.currentThread().getStackTrace()
for (e <- elements) {
val clsName = e.getClassName()
val methodName = e.getMethodName()
val fileName = e.getFileName()
val lineNumber = e.getLineNumber()
if (clsName.indexOf("scalabpe.flow.") >= 0) {
val key = clsName + ":" + lineNumber
val value = clsName + ":" + methodName + ":" + fileName + ":" + lineNumber
callStats.putIfAbsent(key, value)
}
}
}
}
abstract class Flow extends Logging {
var req: Request = _
protected[core] var owner: FlowActor = _
protected[core] var replied = false
var flowTimeout = 60000
private var flowFinished = true
private var flowTimer: QuickTimer = _
private val callTimers = new HashMap[String, QuickTimer]()
private var logVars: HashMapStringAny = null
private var lastf: () => Unit = _
private var idx = 0
protected[core] var lastresultarray: ArrayBuffer[InvokeResult] = _
protected[core] var subrequestIds: ArrayBufferString = _
protected[core] private val idxmap = new HashMap[Int, Int]()
protected[core] val lock = new ReentrantLock(false)
protected[core] val allResultsReceived = lock.newCondition();
protected[core] var replyOnError = false
protected[core] var replyOnErrorCode = 0
    // Overridable hooks (extension points)
def filterRequest(map: HashMapStringAny): Unit = {}
def baseReceive(): Unit = { receive() }
def filterInvoke(targetServiceId: Int, targetMsgId: Int, map: HashMapStringAny): Unit = {}
def filterReply(code: Int, map: HashMapStringAny): Unit = {}
def baseEndFlow(): Unit = {}
def receive(): Unit; // user defined flow
def getConfig(name: String, defaultValue: String = ""): String = {
Flow.router.getConfig(name, defaultValue)
}
def logVar(name: String, value: Any) {
if (logVars == null) logVars = new HashMapStringAny()
logVars.put(name, value)
}
private def nextRequestId(): String = {
val subrequestid = Flow.newRequestId(req.requestId, idx)
idxmap.put(idx, 0)
idx += 1
subrequestid
}
private def nextRequestIds(n: Int): ArrayBufferString = {
val ids = new ArrayBufferString();
for (i <- 0 until n) {
val subrequestid = Flow.newRequestId(req.requestId, idx)
idxmap.put(idx, i)
idx += 1
ids += subrequestid
}
ids
}
protected[core] def cancelFlowTimer(): Unit = {
flowTimer.cancel()
}
def autoReply(): Unit = {
if (replied) return
reply(lasterrorcode())
}
private def endFlow(): Unit = {
autoReply()
flowFinished = true
baseEndFlow()
if (!flowFinished) {
return
}
flowTimer.cancel()
owner.removeFlow(req.requestId)
}
private def doNothingCallback(): Unit = {}
protected[core] def requestReceived(): Unit = {
Flow.updateCallStats()
flowTimer = Flow.router.newTimer(Flow.router.timeoutId(FlowTimoutType.TYPE_FLOW, req.serviceId, req.msgId, req.requestId), flowTimeout)
flowFinished = true
lock.lock();
try {
filterRequest(req.body)
baseReceive()
if (flowFinished) {
endFlow()
}
} finally {
lock.unlock()
}
}
protected[core] def timeoutReceived(requestId: String): Unit = {
responseReceived(InvokeResult.timeout(requestId))
}
protected[core] def responseReceived(res: InvokeResult): Unit = {
lock.lock();
try {
responseReceivedInternal(res)
} finally {
lock.unlock()
}
}
def setInvokeResultInternal(res: InvokeResult, arrayidx: Int = 0) = {
lastresultarray(arrayidx) = res
}
private def responseReceivedInternal(res: InvokeResult): Unit = {
val idx = Flow.parseIdx(res.requestId)
val arrayidx = idxmap.getOrElse(idx, -1)
if (arrayidx == -1) {
return
}
idxmap.remove(idx)
lastresultarray(arrayidx) = res
val timer = callTimers.remove(res.requestId)
if (timer != None) {
timer.get.cancel()
}
val finished = lastresultarray.forall(_ != null)
if (finished) {
if (lastf != null) {
val t = lastf
lastf = null
flowFinished = true
if (!doReplyOnError()) {
t()
}
if (flowFinished) {
endFlow()
}
} else {
lock.lock();
try {
allResultsReceived.signal()
} finally {
lock.unlock();
}
}
}
}
def invokeWithNoReply(service: String, params: Tuple2[String, Any]*): Unit = {
val buff = new ArrayBuffer[Tuple2[String, Any]]()
for (t <- params) buff += t
val body = paramsToBody(buff)
val info = new InvokeInfo(service, 0, body)
invokeMulti(doNothingCallback, ArrayBuffer[InvokeInfo](info))
}
def invokeWithNoReplyWithToAddr(service: String, toAddr: String, params: Tuple2[String, Any]*): Unit = {
val buff = new ArrayBuffer[Tuple2[String, Any]]()
for (t <- params) buff += t
val body = paramsToBody(buff)
val info = new InvokeInfo(service, 0, body, toAddr)
invokeMulti(doNothingCallback, ArrayBuffer[InvokeInfo](info))
}
def invokeFuture(service: String, timeout: Int, params: Tuple2[String, Any]*): InvokeInfo = {
val buff = new ArrayBuffer[Tuple2[String, Any]]()
for (t <- params) buff += t
val body = paramsToBody(buff)
new InvokeInfo(service, timeout, body)
}
def invokeFutureWithToAddr(service: String, timeout: Int, toAddr: String, params: Tuple2[String, Any]*): InvokeInfo = {
val buff = new ArrayBuffer[Tuple2[String, Any]]()
for (t <- params) buff += t
val body = paramsToBody(buff)
new InvokeInfo(service, timeout, body, toAddr)
}
def invoke(f: () => Unit, service: String, timeout: Int, params: Tuple2[String, Any]*): Unit = {
val buff = new ArrayBuffer[Tuple2[String, Any]]()
for (t <- params) buff += t
val body = paramsToBody(buff)
val info = new InvokeInfo(service, timeout, body)
invokeMulti(f, ArrayBuffer[InvokeInfo](info))
}
def invokeAutoReply(f: () => Unit, service: String, timeout: Int, params: Tuple2[String, Any]*): Unit = {
val buff = new ArrayBuffer[Tuple2[String, Any]]()
for (t <- params) buff += t
val body = paramsToBody(buff)
val info = new InvokeInfo(service, timeout, body)
invokeMultiAutoReply(f, ArrayBuffer[InvokeInfo](info))
}
def invokeWithToAddr(f: () => Unit, service: String, timeout: Int, toAddr: String, params: Tuple2[String, Any]*): Unit = {
val buff = new ArrayBuffer[Tuple2[String, Any]]()
for (t <- params) buff += t
val body = paramsToBody(buff)
val info = new InvokeInfo(service, timeout, body, toAddr)
invokeMulti(f, ArrayBuffer[InvokeInfo](info))
}
def invoke2(f: () => Unit, info1: InvokeInfo, info2: InvokeInfo): Unit = {
val infos = ArrayBuffer[InvokeInfo](info1, info2)
invokeMulti(f, infos)
}
def invoke2AutoReply(f: () => Unit, info1: InvokeInfo, info2: InvokeInfo): Unit = {
val infos = ArrayBuffer[InvokeInfo](info1, info2)
invokeMultiAutoReply(f, infos)
}
def invoke3(f: () => Unit, info1: InvokeInfo, info2: InvokeInfo, info3: InvokeInfo): Unit = {
val infos = ArrayBuffer[InvokeInfo](info1, info2, info3)
invokeMulti(f, infos)
}
def invoke3AutoReply(f: () => Unit, info1: InvokeInfo, info2: InvokeInfo, info3: InvokeInfo): Unit = {
val infos = ArrayBuffer[InvokeInfo](info1, info2, info3)
invokeMultiAutoReply(f, infos)
}
def invoke4(f: () => Unit, info1: InvokeInfo, info2: InvokeInfo, info3: InvokeInfo, info4: InvokeInfo): Unit = {
val infos = ArrayBuffer[InvokeInfo](info1, info2, info3, info4)
invokeMulti(f, infos)
}
def invoke4AutoReply(f: () => Unit, info1: InvokeInfo, info2: InvokeInfo, info3: InvokeInfo, info4: InvokeInfo): Unit = {
val infos = ArrayBuffer[InvokeInfo](info1, info2, info3, info4)
invokeMultiAutoReply(f, infos)
}
def invoke5(f: () => Unit, info1: InvokeInfo, info2: InvokeInfo, info3: InvokeInfo, info4: InvokeInfo, info5: InvokeInfo): Unit = {
val infos = ArrayBuffer[InvokeInfo](info1, info2, info3, info4, info5)
invokeMulti(f, infos)
}
def invoke5AutoReply(f: () => Unit, info1: InvokeInfo, info2: InvokeInfo, info3: InvokeInfo, info4: InvokeInfo, info5: InvokeInfo): Unit = {
val infos = ArrayBuffer[InvokeInfo](info1, info2, info3, info4, info5)
invokeMultiAutoReply(f, infos)
}
def invoke(f: () => Unit, infos: List[InvokeInfo]): Unit = {
val buff = new ArrayBuffer[InvokeInfo]()
infos.foreach(info => buff += info)
invokeMulti(f, buff)
}
def invokeAutoReply(f: () => Unit, infos: List[InvokeInfo]): Unit = {
val buff = new ArrayBuffer[InvokeInfo]()
infos.foreach(info => buff += info)
invokeMultiAutoReply(f, buff)
}
def invokeMulti(f: () => Unit, infos: ArrayBuffer[InvokeInfo]): Unit = {
if (f == null) {
throw new IllegalArgumentException("invoke callback cannot be empty")
}
if (lastf != null) {
throw new RuntimeException("only one callback function can be used")
}
Flow.updateCallStats()
lastf = f
replyOnError = false
Flow.isSyncedInvoke.set(false)
val finished = send(infos)
if (finished) {
lastf = null
f()
} else {
flowFinished = false
}
}
def invokeMultiAutoReply(f: () => Unit, infos: ArrayBuffer[InvokeInfo]): Unit = {
if (f == null) {
throw new IllegalArgumentException("invoke callback cannot be empty")
}
if (lastf != null) {
throw new RuntimeException("only one callback function can be used")
}
Flow.updateCallStats()
lastf = f
replyOnError = true
replyOnErrorCode = 0
for (info <- infos) {
val t = info.params.i("errorCode")
if (t != 0)
replyOnErrorCode = t // use the last errorCode
}
Flow.isSyncedInvoke.set(false)
val finished = send(infos)
if (finished) {
lastf = null
if (!doReplyOnError()) {
f()
}
} else {
flowFinished = false
}
}
def doReplyOnError(): Boolean = {
if (!replyOnError) return false
replyOnError = false
var i = 0
while (i < lastresultarray.size) {
val ret = lastresultarray(i)
if (ret.code != 0) {
if (!replied) {
if (replyOnErrorCode != 0) reply(replyOnErrorCode)
else reply(ret.code)
}
return true
}
i += 1
}
false
}
def sleep(f: () => Unit, timeout: Int): Unit = {
if (f == null) {
throw new IllegalArgumentException("sleep callback cannot be empty")
}
if (lastf != null) {
throw new RuntimeException("only one callback function can be used")
}
Flow.updateCallStats()
lastf = f
replyOnError = false
flowFinished = false
Flow.isSyncedInvoke.set(false)
lastresultarray = ArrayBuffer.fill[InvokeResult](1)(null)
val subrequestId = nextRequestId()
val timeoutId = Flow.router.timeoutId(FlowTimoutType.TYPE_CALL, req.serviceId, req.msgId, subrequestId)
val timer = Flow.router.newTimer(timeoutId, timeout)
callTimers.put(subrequestId, timer)
}
protected[core] def send(infos: ArrayBuffer[InvokeInfo]): Boolean = {
lastresultarray = ArrayBuffer.fill[InvokeResult](infos.size)(null)
subrequestIds = nextRequestIds(infos.size)
var i = 0
while (i < infos.size) {
val info = infos(i)
val subrequestId = subrequestIds(i)
val ret = send(info, subrequestId)
if (ret != null) {
val idx = Flow.parseIdx(subrequestId)
idxmap.remove(idx)
lastresultarray(i) = ret
} else if (info.timeout <= 0) {
val idx = Flow.parseIdx(subrequestId)
idxmap.remove(idx)
lastresultarray(i) = InvokeResult.success(req.requestId)
} else {
val timeoutId = Flow.router.timeoutId(FlowTimoutType.TYPE_CALL, req.serviceId, req.msgId, subrequestId)
val timer = Flow.router.newTimer(timeoutId, info.timeout)
callTimers.put(subrequestId, timer)
}
i += 1
}
val finished = lastresultarray.forall(_ != null)
finished
}
private def send(info: InvokeInfo, subrequestId: String): InvokeResult = {
val (serviceId, msgId) = Flow.router.serviceNameToId(info.service)
if (serviceId == 0) {
log.error("service not found, service=%s".format(info.service))
// this error will not be recorded in csos log
return new InvokeResult(subrequestId, ResultCodes.SERVICE_NOT_FOUND, new HashMapStringAny())
}
filterInvoke(serviceId, msgId, info.params)
val (newbody, ec) = Flow.router.encodeRequest(serviceId, msgId, info.params)
if (ec != 0) {
//log.error("encode request error, serviceId="+serviceId+", msgId="+msgId)
// this error will not be recorded in csos log
return new InvokeResult(subrequestId, ec, new HashMapStringAny())
}
var connId = req.connId
if (info.timeout <= 0) {
connId = Router.DO_NOT_REPLY
} else {
if (connId == Router.DO_NOT_REPLY) connId = "subflow:0"
}
var encoding = req.encoding
if (info.params != null) {
val s = info.params.s("head.encoding")
if (s != null && s != "")
encoding = AvenueCodec.parseEncoding(s)
}
val newreq = new Request(
subrequestId,
connId,
req.sequence,
encoding,
serviceId,
msgId,
req.xhead,
newbody,
owner)
newreq.toAddr = info.toAddr
newreq.parentServiceId = req.serviceId
newreq.parentMsgId = req.msgId
if (info.timeout > 0) {
newreq.expireTimeout = info.timeout
}
val ret = Flow.router.send(newreq)
if (ret != null)
ret
else if (info.timeout <= 0)
InvokeResult.success(subrequestId)
else
null
}
def reply(code: Int) {
replyWithMap(code, new HashMapStringAny())
}
def reply(code: Int, params: Tuple2[String, Any]*) {
val buff = new ArrayBuffer[Tuple2[String, Any]]()
for (t <- params) buff += t
val body = paramsToBody(buff, false)
replyWithMap(code, body)
}
private def replyWithMap(code: Int, params: HashMapStringAny): Unit = {
if (replied) {
throw new RuntimeException("flow already replied")
}
Flow.updateCallStats()
filterReply(code, params)
val res = new Response(code, params, req)
Flow.router.reply(new RequestResponseInfo(req, res, logVars))
replied = true
}
protected[core] def paramsToBody(params: ArrayBuffer[Tuple2[String, Any]], xheadSupport: Boolean = true): HashMapStringAny = {
val body = new HashMapStringAny()
for ((key, value) <- params) {
value match {
case req: Request =>
copy(body, xheadSupport, key, req.body)
case ir: InvokeResult =>
copy(body, xheadSupport, key, ir.res)
case map: HashMapStringAny =>
copy(body, xheadSupport, key, map)
if (key != "*")
body.put(key, map)
case _ =>
copy(body, xheadSupport, key, value)
}
}
body
}
private def checkIsXhead(xheadSupport: Boolean, name: String): Tuple2[String, Boolean] = {
if (xheadSupport && name.startsWith("xhead.")) {
val t = (name.substring(6), true)
t
} else {
val t = (name, false)
t
}
}
private def copy(toMap: HashMapStringAny, xheadSupport: Boolean, name: String, value: Any): Unit = {
if (value == null) return
val (key, isXhead) = checkIsXhead(xheadSupport, name.trim)
if (isXhead) {
req.xhead.put(key, value)
} else {
toMap.put(key, value)
}
}
private def copy(toMap: HashMapStringAny, xheadSupport: Boolean, names: String, fromMap: HashMapStringAny): Unit = {
if (names == "*") {
toMap ++= fromMap
return
}
val ss = CachedSplitter.commaSplitter.strToArray(names)
for (s <- ss) {
var fromKey = ""
var toKey = ""
val p = s.indexOf(":")
if (p <= 0) {
toKey = s.trim
fromKey = null
} else {
toKey = s.substring(0, p).trim
fromKey = s.substring(p + 1).trim
}
val (key, isXhead) = checkIsXhead(xheadSupport, toKey)
if (fromKey == null) fromKey = key
val v = fromMap.getOrElse(fromKey, null)
if (v != null) {
if (isXhead)
req.xhead.put(key, v)
else
toMap.put(key, v)
}
}
}
def lasterrorcode(): Int = {
if (lastresultarray == null || lastresultarray.size == 0) {
return 0
}
if (lastresultarray.size == 1) {
return lastresultarray(0).code
}
var lastcode = 0
for (i <- 0 until lastresultarray.size) {
if (lastresultarray(i).code != 0) {
lastcode = lastresultarray(i).code
}
}
lastcode
}
def allresults(): ArrayBuffer[InvokeResult] = lastresultarray
def lastresult(): InvokeResult = lastresultarray(0)
def lastresults2(): Tuple2[InvokeResult, InvokeResult] = {
(lastresultarray(0), lastresultarray(1))
}
def lastresults3(): Tuple3[InvokeResult, InvokeResult, InvokeResult] = {
(lastresultarray(0), lastresultarray(1), lastresultarray(2))
}
def lastresults4(): Tuple4[InvokeResult, InvokeResult, InvokeResult, InvokeResult] = {
(lastresultarray(0), lastresultarray(1), lastresultarray(2), lastresultarray(3))
}
def lastresults5(): Tuple5[InvokeResult, InvokeResult, InvokeResult, InvokeResult, InvokeResult] = {
(lastresultarray(0), lastresultarray(1), lastresultarray(2), lastresultarray(3), lastresultarray(4))
}
}
abstract class SyncedFlow extends Flow {
def syncedInvoke(service: String, timeout: Int, params: Tuple2[String, Any]*): InvokeResult = {
val buff = new ArrayBuffer[Tuple2[String, Any]]()
for (t <- params) buff += t
val body = paramsToBody(buff)
val info = new InvokeInfo(service, timeout, body)
val rets = syncedInvokeMulti(ArrayBuffer[InvokeInfo](info))
rets(0)
}
def syncedInvokeWithToAddr(service: String, timeout: Int, toAddr: String, params: Tuple2[String, Any]*): InvokeResult = {
val buff = new ArrayBuffer[Tuple2[String, Any]]()
for (t <- params) buff += t
val body = paramsToBody(buff)
val info = new InvokeInfo(service, timeout, body, toAddr)
val rets = syncedInvokeMulti(ArrayBuffer[InvokeInfo](info))
rets(0)
}
def syncedInvoke2(info1: InvokeInfo, info2: InvokeInfo): Tuple2[InvokeResult, InvokeResult] = {
val infos = ArrayBuffer[InvokeInfo](info1, info2)
val rets = syncedInvokeMulti(infos)
(rets(0), rets(1))
}
def syncedInvoke3(info1: InvokeInfo, info2: InvokeInfo, info3: InvokeInfo): Tuple3[InvokeResult, InvokeResult, InvokeResult] = {
val infos = ArrayBuffer[InvokeInfo](info1, info2, info3)
val rets = syncedInvokeMulti(infos)
(rets(0), rets(1), rets(2))
}
def syncedInvoke4(info1: InvokeInfo, info2: InvokeInfo, info3: InvokeInfo, info4: InvokeInfo): Tuple4[InvokeResult, InvokeResult, InvokeResult, InvokeResult] = {
val infos = ArrayBuffer[InvokeInfo](info1, info2, info3, info4)
val rets = syncedInvokeMulti(infos)
(rets(0), rets(1), rets(2), rets(3))
}
def syncedInvoke5(info1: InvokeInfo, info2: InvokeInfo, info3: InvokeInfo, info4: InvokeInfo, info5: InvokeInfo): Tuple5[InvokeResult, InvokeResult, InvokeResult, InvokeResult, InvokeResult] = {
val infos = ArrayBuffer[InvokeInfo](info1, info2, info3, info4, info5)
val rets = syncedInvokeMulti(infos)
(rets(0), rets(1), rets(2), rets(3), rets(4))
}
def maxTimeout(infos: ArrayBuffer[InvokeInfo]): Int = {
var timeout = 0
for (info <- infos) {
if (info.timeout > timeout) timeout = info.timeout
}
timeout
}
def syncedInvoke(infos: List[InvokeInfo]): ArrayBuffer[InvokeResult] = {
val buff = new ArrayBuffer[InvokeInfo]()
infos.foreach(info => buff += info)
syncedInvokeMulti(buff)
}
def syncedInvokeMulti(infos: ArrayBuffer[InvokeInfo]): ArrayBuffer[InvokeResult] = {
Flow.updateCallStats()
Flow.isSyncedInvoke.set(true)
val finished = send(infos)
Flow.isSyncedInvoke.set(false)
if (!finished) {
lock.lock();
try {
val ok = allResultsReceived.await(maxTimeout(infos), TimeUnit.MILLISECONDS)
if (!ok) {
var i = 0
while (i < lastresultarray.size) {
if (lastresultarray(i) == null) {
val idx = Flow.parseIdx(subrequestIds(i))
idxmap.remove(idx)
lastresultarray(i) = InvokeResult.timeout(subrequestIds(i))
}
i += 1
}
}
} finally {
lock.unlock();
}
}
lastresultarray
}
}
|
bruceran/scalabpe
|
src/scalabpe/core/actor_flow.scala
|
Scala
|
apache-2.0
| 31,408
|
package dbpedia.dataparsers
import java.lang.StringBuilder
import dbpedia.dataparsers.util.{RdfNamespace, StringUtils}
import dbpedia.dataparsers.util.StringUtils.{escape, replacements}
class DBpediaNamespace(namespace: String) extends RdfNamespace(null, namespace, true) {
override protected def append(sb: StringBuilder, suffix: String): Unit = {
escape(sb, suffix, DBpediaNamespace.iriEscapes)
}
}
object DBpediaNamespace {
// for this list of characters, see RFC 3987 and https://sourceforge.net/mailarchive/message.php?msg_id=28982391
private val iriEscapes = {
val chars = ('\\u0000' to '\\u001F').mkString + "\\"#%<>?[\\\\]^`{|}" + ('\\u007F' to '\\u009F').mkString
val replace = replacements('%', chars)
// don't escape space, replace it by underscore
replace(' ') = "_"
replace
}
private def ns(namespace: String): DBpediaNamespace = {
new DBpediaNamespace(namespace)
}
  val ONTOLOGY = ns("http://dbpedia.org/ontology/")
  val DATATYPE = ns("http://dbpedia.org/datatype/")
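  // Illustrative sketch of the escaping policy above; the suffix and the expected output are
  // assumptions, and this helper is not part of the original API. Spaces become underscores and
  // reserved IRI characters such as '?' are percent-encoded, while other characters pass through.
  private def exampleEscapedSuffix(): String = {
    val sb = new StringBuilder
    escape(sb, "Category:Foo Bar?", iriEscapes)
    sb.toString // expected to be something like "Category:Foo_Bar%3F"
  }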
}
|
FnOio/dbpedia-parsing-functions-scala
|
src/main/scala/dbpedia/dataparsers/DBpediaNamespace.scala
|
Scala
|
gpl-2.0
| 1,024
|
class C1(p: Int)
class C2 extends C1(1) {
println(/* resolved: false */ p)
}
|
ilinum/intellij-scala
|
testdata/resolve2/inheritance/element/ClassParameter.scala
|
Scala
|
apache-2.0
| 79
|
package com.twitter.util
import org.specs.SpecificationWithJUnit
class RingBufferSpec extends SpecificationWithJUnit {
"RingBuffer" should {
"empty" in {
val buf = new RingBuffer[String](4)
buf.length mustEqual 0
buf.size mustEqual 0
buf.isEmpty mustEqual true
buf(0) must throwA[IndexOutOfBoundsException]
buf.next must throwA[NoSuchElementException]
buf.iterator.hasNext mustEqual false
}
"handle single element" in {
val buf = new RingBuffer[String](4)
buf += "a"
buf.size mustEqual 1
buf(0) mustEqual "a"
buf.toList mustEqual List("a")
}
"handle multiple element" in {
val buf = new RingBuffer[String](4)
buf ++= List("a", "b", "c")
buf.size mustEqual 3
buf(0) mustEqual "a"
buf(1) mustEqual "b"
buf(2) mustEqual "c"
buf.toList mustEqual List("a", "b", "c")
buf.next mustEqual "a"
buf.size mustEqual 2
buf.next mustEqual "b"
buf.size mustEqual 1
buf.next mustEqual "c"
buf.size mustEqual 0
}
"handle overwrite/rollover" in {
val buf = new RingBuffer[String](4)
buf ++= List("a", "b", "c", "d", "e", "f")
buf.size mustEqual 4
buf(0) mustEqual "c"
buf.toList mustEqual List("c", "d", "e", "f")
}
"removeWhere" in {
val buf = new RingBuffer[Int](6)
buf ++= (0 until 10)
buf.toList mustEqual List(4, 5, 6, 7, 8, 9)
buf.removeWhere(_ % 3 == 0)
buf.toList mustEqual List(4, 5, 7, 8)
}
}
}
|
mosesn/util
|
util-core/src/test/scala/com/twitter/util/RingBufferSpec.scala
|
Scala
|
apache-2.0
| 1,547
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.cluster
import scala.concurrent.{Future, ExecutionContext}
import org.apache.spark.{Logging, SparkContext}
import org.apache.spark.rpc._
import org.apache.spark.scheduler.cluster.CoarseGrainedClusterMessages._
import org.apache.spark.scheduler.TaskSchedulerImpl
import org.apache.spark.ui.JettyUtils
import org.apache.spark.util.{ThreadUtils, RpcUtils}
import scala.util.control.NonFatal
/**
* Abstract Yarn scheduler backend that contains common logic
* between the client and cluster Yarn scheduler backends.
*/
private[spark] abstract class YarnSchedulerBackend(
scheduler: TaskSchedulerImpl,
sc: SparkContext)
extends CoarseGrainedSchedulerBackend(scheduler, sc.env.rpcEnv) {
if (conf.getOption("spark.scheduler.minRegisteredResourcesRatio").isEmpty) {
minRegisteredRatio = 0.8
}
protected var totalExpectedExecutors = 0
private val yarnSchedulerEndpoint = rpcEnv.setupEndpoint(
YarnSchedulerBackend.ENDPOINT_NAME, new YarnSchedulerEndpoint(rpcEnv))
private implicit val askTimeout = RpcUtils.askTimeout(sc.conf)
/**
* Request executors from the ApplicationMaster by specifying the total number desired.
* This includes executors already pending or running.
*/
override def doRequestTotalExecutors(requestedTotal: Int): Boolean = {
yarnSchedulerEndpoint.askWithRetry[Boolean](RequestExecutors(requestedTotal))
}
/**
* Request that the ApplicationMaster kill the specified executors.
*/
override def doKillExecutors(executorIds: Seq[String]): Boolean = {
yarnSchedulerEndpoint.askWithRetry[Boolean](KillExecutors(executorIds))
}
override def sufficientResourcesRegistered(): Boolean = {
totalRegisteredExecutors.get() >= totalExpectedExecutors * minRegisteredRatio
}
/**
* Add filters to the SparkUI.
*/
private def addWebUIFilter(
filterName: String,
filterParams: Map[String, String],
proxyBase: String): Unit = {
if (proxyBase != null && proxyBase.nonEmpty) {
System.setProperty("spark.ui.proxyBase", proxyBase)
}
val hasFilter =
filterName != null && filterName.nonEmpty &&
filterParams != null && filterParams.nonEmpty
if (hasFilter) {
logInfo(s"Add WebUI Filter. $filterName, $filterParams, $proxyBase")
conf.set("spark.ui.filters", filterName)
filterParams.foreach { case (k, v) => conf.set(s"spark.$filterName.param.$k", v) }
scheduler.sc.ui.foreach { ui => JettyUtils.addFilters(ui.getHandlers, conf) }
}
}
/**
* An [[RpcEndpoint]] that communicates with the ApplicationMaster.
*/
private class YarnSchedulerEndpoint(override val rpcEnv: RpcEnv)
extends ThreadSafeRpcEndpoint with Logging {
private var amEndpoint: Option[RpcEndpointRef] = None
private val askAmThreadPool =
ThreadUtils.newDaemonCachedThreadPool("yarn-scheduler-ask-am-thread-pool")
implicit val askAmExecutor = ExecutionContext.fromExecutor(askAmThreadPool)
override def receive: PartialFunction[Any, Unit] = {
case RegisterClusterManager(am) =>
logInfo(s"ApplicationMaster registered as $am")
amEndpoint = Some(am)
case AddWebUIFilter(filterName, filterParams, proxyBase) =>
addWebUIFilter(filterName, filterParams, proxyBase)
}
override def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] = {
case r: RequestExecutors =>
amEndpoint match {
case Some(am) =>
Future {
context.reply(am.askWithRetry[Boolean](r))
} onFailure {
case NonFatal(e) =>
logError(s"Sending $r to AM was unsuccessful", e)
context.sendFailure(e)
}
case None =>
logWarning("Attempted to request executors before the AM has registered!")
context.reply(false)
}
case k: KillExecutors =>
amEndpoint match {
case Some(am) =>
Future {
context.reply(am.askWithRetry[Boolean](k))
} onFailure {
case NonFatal(e) =>
logError(s"Sending $k to AM was unsuccessful", e)
context.sendFailure(e)
}
case None =>
logWarning("Attempted to kill executors before the AM has registered!")
context.reply(false)
}
}
override def onDisconnected(remoteAddress: RpcAddress): Unit = {
if (amEndpoint.exists(_.address == remoteAddress)) {
logWarning(s"ApplicationMaster has disassociated: $remoteAddress")
}
}
override def onStop(): Unit = {
askAmThreadPool.shutdownNow()
}
}
}
private[spark] object YarnSchedulerBackend {
val ENDPOINT_NAME = "YarnScheduler"
}
|
andrewor14/iolap
|
core/src/main/scala/org/apache/spark/scheduler/cluster/YarnSchedulerBackend.scala
|
Scala
|
apache-2.0
| 5,585
|
package rpg
import org.specs2._
class CharacterChecksSpec extends Specification { def is = s2"""
Character Check Specification
simple checks should work in
simple attribute checks $e1
simple skill checks $e2
"""
// -----------------------------------------------------------------------------------------------
// tests
// -----------------------------------------------------------------------------------------------
import Test._
def e1 = {
val bob = new Character("Bob")
val res = bob check Stamina
res must beAnInstanceOf[Result]
}
def e2 = {
val bob = new Character("Bob")
val res = bob check Running
res must beAnInstanceOf[Result]
}
/*
def ey = {
val bob = new Character("Bob")
val res = bob check Running vs 2 under { _ - 3 }
res must beAnInstanceOf[?]
}
*//*
def ez = {
val bob = new Character("Bob")
val res = bob check Running vs Level(2, "jogger") under Circumstances("limps") { _ - 3 }
res must beAnInstanceOf[?]
}
*/
}
|
wookietreiber/arpgt
|
src/test/scala/CharacterChecksSpec.scala
|
Scala
|
gpl-3.0
| 1,250
|
/*
* Copyright 2015 eleflow.com.br.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml
import com.cloudera.sparkts.models.{UberHoltWintersModel, TimeSeriesModel, UberArimaModel}
import eleflow.uberdata.enums.SupportedAlgorithm
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.ml.param.ParamMap
import org.apache.spark.ml.param.shared.{HasFeaturesCol, HasLabelCol}
import org.apache.spark.mllib.linalg.{Vector, VectorUDT, Vectors}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, Row}
import org.apache.spark.sql.Dataset
import org.apache.spark.mllib.util.MLUtils
import scala.reflect.ClassTag
/**
* Created by dirceu on 01/06/16.
*/
class ForecastBestModel[L](
override val uid: String,
val models: RDD[
(L, (TimeSeriesModel, Seq[(TimeSeriesModel, ModelParamEvaluation[L])]))
]
)(implicit kt: ClassTag[L])
extends ForecastBaseModel[ForecastBestModel[L]]
with HasLabelCol
with HasFeaturesCol
with ForecastPipelineStage {
override def copy(extra: ParamMap): ForecastBestModel[L] = {
val newModel = copyValues(new ForecastBestModel[L](uid, models), extra)
newModel.setParent(parent)
}
override def transformSchema(schema: StructType): StructType = {
super.transformSchema(schema).add(StructField("featuresValidation", new org.apache.spark.ml.linalg.VectorUDT))
}
def evaluateParams(models: Seq[(TimeSeriesModel, ModelParamEvaluation[L])],
features: Vector,
nFut: Broadcast[Int]): Seq[Object] = {
val (bestModel, modelParamEvaluation) = models.head
try {
modelParamEvaluation.algorithm match {
case SupportedAlgorithm.Arima =>
val a = bestModel.asInstanceOf[UberArimaModel]
val (featuresPrediction, forecastPrediction) =
a.forecast(features, nFut.value).toArray.splitAt(features.size)
Seq(
Vectors.dense(forecastPrediction).asML,
SupportedAlgorithm.Arima.toString,
a.params,
Vectors.dense(featuresPrediction).asML
)
case SupportedAlgorithm.HoltWinters =>
val h = bestModel.asInstanceOf[UberHoltWintersModel]
val forecast = Vectors.dense(new Array[Double](nFut.value))
h.forecast(features, forecast)
Seq(
forecast.asML,
SupportedAlgorithm.HoltWinters.toString,
h.params,
features.asML
)
case SupportedAlgorithm.MovingAverage8 =>
val windowSize = modelParamEvaluation.params.toSeq
.map(f => (f.param.name, f.value.asInstanceOf[Int]))
.toMap
val h = bestModel.asInstanceOf[UberHoltWintersModel]
val forecast =
Vectors.dense(new Array[Double](windowSize.values.head))
h.forecast(features, forecast)
val movingAverageForecast = Vectors.dense(
MovingAverageCalc.simpleMovingAverageArray(
forecast.toArray,
windowSize.values.head
)
)
Seq(
movingAverageForecast.asML,
SupportedAlgorithm.MovingAverage8.toString,
windowSize.map(f => (f._1, f._2.toString)),
features.asML
)
}
} catch {
case e: Exception =>
log.error("Error when predicting ")
e.printStackTrace()
evaluateParams(models.tail, features, nFut)
}
}
override def transform(dataSet: Dataset[_]): DataFrame = {
val schema = dataSet.schema
val predSchema = transformSchema(schema)
val scContext = dataSet.sqlContext.sparkContext
    // TODO: make invalid models return absurd (sentinel) numbers
val joined = models.join(dataSet.rdd.map{case (r: Row) => (r.getAs[L]($(labelCol)), r)})
val featuresColName =
dataSet.sqlContext.sparkContext.broadcast($(featuresCol))
val nFut = scContext.broadcast($(nFutures))
val predictions = joined.map {
case (id, ((bestModel, metrics), row)) =>
val features = row.getAs[org.apache.spark.ml.linalg.Vector](featuresColName.value)
val prediction = {
evaluateParams(metrics, org.apache.spark.mllib.linalg.Vectors.fromML(features), nFut)
}
Row(row.toSeq ++ prediction: _*)
}
dataSet.sqlContext.createDataFrame(predictions, predSchema)
}
}
|
eleflow/uberdata
|
iuberdata_core/src/main/scala/org/apache/spark/ml/ForecastBestModel.scala
|
Scala
|
apache-2.0
| 4,929
|
package at.logic.gapt.proofs.lk.algorithms
import at.logic.gapt.expr._
import BetaReduction._
import at.logic.gapt.proofs.lk._
import at.logic.gapt.proofs.lk.algorithms.ProofTransformationUtils.computeMap
import at.logic.gapt.proofs.lk.base._
import at.logic.gapt.proofs.occurrences._
object applySubstitution {
// TODO: finish refactoring rules like this! there is still redundancy in handleRule!
def handleWeakening( new_parent: ( LKProof, Map[FormulaOccurrence, FormulaOccurrence] ),
subst: Substitution,
old_parent: LKProof,
old_proof: LKProof,
constructor: ( LKProof, HOLFormula ) => LKProof with PrincipalFormulas,
m: FormulaOccurrence ) = {
val new_proof = constructor( new_parent._1, betaNormalize( subst( m.formula ) ) )
( new_proof, computeMap( old_parent.root.antecedent ++ old_parent.root.succedent, old_proof, new_proof, new_parent._2 ) + ( ( m, new_proof.prin.head ) ) )
}
def handleContraction( new_parent: ( LKProof, Map[FormulaOccurrence, FormulaOccurrence] ),
old_parent: LKProof,
old_proof: LKProof,
a1: FormulaOccurrence,
a2: FormulaOccurrence,
constructor: ( LKProof, FormulaOccurrence, FormulaOccurrence ) => LKProof ) = {
val new_proof = constructor( new_parent._1, new_parent._2( a1 ), new_parent._2( a2 ) )
( new_proof, computeMap( old_parent.root.antecedent ++ old_parent.root.succedent, old_proof, new_proof, new_parent._2 ) )
}
def handleBinaryProp( new_parent_1: ( LKProof, Map[FormulaOccurrence, FormulaOccurrence] ),
new_parent_2: ( LKProof, Map[FormulaOccurrence, FormulaOccurrence] ),
a1: FormulaOccurrence,
a2: FormulaOccurrence,
old_parent_1: LKProof,
old_parent_2: LKProof,
old_proof: LKProof,
constructor: ( LKProof, LKProof, FormulaOccurrence, FormulaOccurrence ) => LKProof ) = {
val new_proof = constructor( new_parent_1._1, new_parent_2._1, new_parent_1._2( a1 ), new_parent_2._2( a2 ) )
( new_proof, computeMap( old_parent_1.root.antecedent ++ old_parent_1.root.succedent ++ old_parent_2.root.antecedent ++ old_parent_2.root.succedent,
old_proof, new_proof, new_parent_1._2 ++ new_parent_2._2 ) )
}
def handleEquationRule(
constructor: ( LKProof, LKProof, FormulaOccurrence, FormulaOccurrence, HOLFormula ) => LKProof,
p1: LKProof,
p2: LKProof,
old_proof: LKProof,
new_p1: ( LKProof, Map[FormulaOccurrence, FormulaOccurrence] ),
new_p2: ( LKProof, Map[FormulaOccurrence, FormulaOccurrence] ),
s: Sequent,
a1: FormulaOccurrence,
a2: FormulaOccurrence,
m: HOLFormula ) = {
val new_proof = constructor( new_p1._1, new_p2._1, a1, a2, m )
( new_proof, computeMap( p1.root.antecedent ++ p1.root.succedent ++ p2.root.antecedent ++ p2.root.succedent,
old_proof, new_proof, new_p1._2 ++ new_p2._2 ) )
}
def handleRule( proof: LKProof, new_parents: List[( LKProof, Map[FormulaOccurrence, FormulaOccurrence] )],
subst: Substitution ): ( LKProof, Map[FormulaOccurrence, FormulaOccurrence] ) = {
proof match {
case Axiom( so ) => {
val ant_occs = so.antecedent
val succ_occs = so.succedent
val a = Axiom( ant_occs.map( fo => betaNormalize( subst( fo.formula ) ) ),
succ_occs.map( fo => betaNormalize( subst( fo.formula ) ) ) )
require( a.root.antecedent.length >= ant_occs.length, "cannot create translation map: old proof antecedent is shorter than new one" )
require( a.root.succedent.length >= succ_occs.length, "cannot create translation map: old proof succedent is shorter than new one" )
val map = Map[FormulaOccurrence, FormulaOccurrence]() ++
( ant_occs zip a.root.antecedent ) ++ ( succ_occs zip a.root.succedent )
( a, map )
}
case WeakeningLeftRule( p, s, m ) => handleWeakening( new_parents.head, subst, p, proof, WeakeningLeftRule.apply, m )
case WeakeningRightRule( p, s, m ) => handleWeakening( new_parents.head, subst, p, proof, WeakeningRightRule.apply, m )
case ContractionLeftRule( p, s, a1, a2, m ) => handleContraction( new_parents.head, p, proof, a1, a2, ContractionLeftRule.apply )
case ContractionRightRule( p, s, a1, a2, m ) => handleContraction( new_parents.head, p, proof, a1, a2, ContractionRightRule.apply )
case CutRule( p1, p2, s, a1, a2 ) => {
val new_p1 = new_parents.head
val new_p2 = new_parents.last
val new_proof = CutRule( new_p1._1, new_p2._1, new_p1._2( a1 ), new_p2._2( a2 ) )
( new_proof, computeMap(
p1.root.antecedent ++ ( p1.root.succedent.filter( _ != a1 ) ) ++
( p2.root.antecedent.filter( _ != a2 ) ) ++ p2.root.succedent,
proof, new_proof, new_p1._2 ++ new_p2._2 ) )
}
case AndRightRule( p1, p2, s, a1, a2, m ) => handleBinaryProp( new_parents.head, new_parents.last, a1, a2, p1, p2, proof, AndRightRule.apply )
case AndLeft1Rule( p, s, a, m ) => {
val f = m.formula match { case And( _, w ) => w }
val new_parent = new_parents.head
val new_proof = AndLeft1Rule( new_parent._1, new_parent._2( a ), betaNormalize( subst( f ) ) )
( new_proof, computeMap( p.root.antecedent ++ p.root.succedent, proof, new_proof, new_parent._2 ) )
}
case AndLeft2Rule( p, s, a, m ) => {
val f = m.formula match { case And( w, _ ) => w }
val new_parent = new_parents.head
val new_proof = AndLeft2Rule( new_parent._1, betaNormalize( subst( f ) ), new_parent._2( a ) )
( new_proof, computeMap( p.root.antecedent ++ p.root.succedent, proof, new_proof, new_parent._2 ) )
}
case OrLeftRule( p1, p2, s, a1, a2, m ) => handleBinaryProp( new_parents.head, new_parents.last, a1, a2, p1, p2, proof, OrLeftRule.apply )
case OrRight1Rule( p, s, a, m ) => {
val f = m.formula match { case Or( _, w ) => w }
val new_parent = new_parents.head
val new_proof = OrRight1Rule( new_parent._1, new_parent._2( a ), betaNormalize( subst( f ) ) )
( new_proof, computeMap( p.root.antecedent ++ p.root.succedent, proof, new_proof, new_parent._2 ) )
}
case OrRight2Rule( p, s, a, m ) => {
val f = m.formula match { case Or( w, _ ) => w }
val new_parent = new_parents.head
val new_proof = OrRight2Rule( new_parent._1, betaNormalize( subst( f ) ), new_parent._2( a ) )
( new_proof, computeMap( p.root.antecedent ++ p.root.succedent, proof, new_proof, new_parent._2 ) )
}
case ImpLeftRule( p1, p2, s, a1, a2, m ) => {
val new_p1 = new_parents.head
val new_p2 = new_parents.last
val new_proof = ImpLeftRule( new_p1._1, new_p2._1, new_p1._2( a1 ), new_p2._2( a2 ) )
( new_proof, computeMap( p1.root.antecedent ++ p1.root.succedent ++ p2.root.antecedent ++ p2.root.succedent,
proof, new_proof, new_p1._2 ++ new_p2._2 ) )
}
case ImpRightRule( p, s, a1, a2, m ) => {
val new_parent = new_parents.head
val new_proof = ImpRightRule( new_parent._1,
new_parent._2( a1 ),
new_parent._2( a2 ) )
( new_proof, computeMap( p.root.antecedent ++ p.root.succedent, proof, new_proof, new_parent._2 ) )
}
case NegLeftRule( p, s, a, m ) => {
val new_parent = new_parents.head
val new_proof = NegLeftRule( new_parent._1, new_parent._2( a ) )
( new_proof, computeMap( p.root.antecedent ++ p.root.succedent, proof, new_proof, new_parent._2 ) )
}
case NegRightRule( p, s, a, m ) => {
val new_parent = new_parents.head
val new_proof = NegRightRule( new_parent._1, new_parent._2( a ) )
( new_proof, computeMap( p.root.antecedent ++ p.root.succedent, proof, new_proof, new_parent._2 ) )
}
case DefinitionRightRule( p, s, a, m ) => {
val new_parent = new_parents.head
val new_proof = DefinitionRightRule( new_parent._1, new_parent._2( a ), betaNormalize( subst( m.formula ) ) )
( new_proof, computeMap( p.root.antecedent ++ p.root.succedent, proof, new_proof, new_parent._2 ) )
}
case DefinitionLeftRule( p, s, a, m ) => {
val new_parent = new_parents.head
val new_proof = DefinitionLeftRule( new_parent._1, new_parent._2( a ), betaNormalize( subst( m.formula ) ) )
( new_proof, computeMap( p.root.antecedent ++ p.root.succedent, proof, new_proof, new_parent._2 ) )
}
case EquationLeft1Rule( p1, p2, s, a1, a2, _, m ) =>
handleEquationRule( EquationLeftMacroRule.apply, p1, p2, proof, new_parents.head, new_parents.last, s,
new_parents.head._2( a1 ), new_parents.last._2( a2 ),
betaNormalize( subst( m.formula ) ) )
case EquationLeft2Rule( p1, p2, s, a1, a2, _, m ) =>
handleEquationRule( EquationLeftMacroRule.apply, p1, p2, proof, new_parents.head, new_parents.last, s,
new_parents.head._2( a1 ), new_parents.last._2( a2 ),
betaNormalize( subst( m.formula ) ) )
case EquationRight1Rule( p1, p2, s, a1, a2, _, m ) =>
handleEquationRule( EquationRightMacroRule.apply, p1, p2, proof, new_parents.head, new_parents.last, s,
new_parents.head._2( a1 ), new_parents.last._2( a2 ),
betaNormalize( subst( m.formula ) ) )
case EquationRight2Rule( p1, p2, s, a1, a2, _, m ) =>
handleEquationRule( EquationRightMacroRule.apply, p1, p2, proof, new_parents.head, new_parents.last, s,
new_parents.head._2( a1 ), new_parents.last._2( a2 ),
betaNormalize( subst( m.formula ) ) )
case ForallLeftRule( p, s, a, m, t ) => {
val new_parent = new_parents.head
val new_proof = ForallLeftRule( new_parent._1, new_parent._2( a ), betaNormalize( subst( m.formula ) ), betaNormalize( subst( t ) ) )
( new_proof, computeMap( p.root.antecedent ++ p.root.succedent, proof, new_proof, new_parent._2 ) )
}
case ExistsRightRule( p, s, a, m, t ) => {
val new_parent = new_parents.head
val new_proof = ExistsRightRule( new_parent._1, new_parent._2( a ), betaNormalize( subst( m.formula ) ), betaNormalize( subst( t ) ) )
( new_proof, computeMap( p.root.antecedent ++ p.root.succedent, proof, new_proof, new_parent._2 ) )
}
case ExistsLeftRule( p, s, a, m, v ) => {
assert( !subst.map.contains( v ) )
val new_parent = new_parents.head
val new_proof = ExistsLeftRule( new_parent._1, new_parent._2( a ), betaNormalize( subst( m.formula ) ), v )
( new_proof, computeMap( p.root.antecedent ++ p.root.succedent, proof, new_proof, new_parent._2 ) )
}
case ForallRightRule( p, s, a, m, v ) => {
assert( !subst.map.contains( v ) )
val new_parent = new_parents.head
val new_proof = ForallRightRule( new_parent._1, new_parent._2( a ), betaNormalize( subst( m.formula ) ), v )
( new_proof, computeMap( p.root.antecedent ++ p.root.succedent, proof, new_proof, new_parent._2 ) )
}
/*
case ForallSkLeftRule( p, s, a, m, t ) => {
val new_parent = new_parents.head
val label_removed = m.skolem_label.diff(a.skolem_label).nonEmpty || a.skolem_label.diff(m.skolem_label).nonEmpty
val new_proof = ForallSkLeftRule( new_parent._1, new_parent._2( a ).asInstanceOf[LabelledFormulaOccurrence], subst.applyAndBetaNormalize( m.formula ).asInstanceOf[Formula], subst.applyAndBetaNormalize( t ), label_removed )
( new_proof, computeMap( p.root.antecedent ++ p.root.succedent, proof, new_proof, new_parent._2 ) )
}
case ExistsSkRightRule( p, s, a, m, t ) => {
val new_parent = new_parents.head
val label_removed = m.skolem_label.diff(a.skolem_label).nonEmpty || a.skolem_label.diff(m.skolem_label).nonEmpty
val new_proof = ExistsSkRightRule( new_parent._1, new_parent._2( a ).asInstanceOf[LabelledFormulaOccurrence], subst.applyAndBetaNormalize( m.formula ).asInstanceOf[Formula], subst.applyAndBetaNormalize( t ), label_removed )
( new_proof, computeMap( p.root.antecedent ++ p.root.succedent, proof, new_proof, new_parent._2 ) )
}
case ExistsSkLeftRule( p, s, a, m, v ) => {
val new_parent = new_parents.head
val new_proof = ExistsSkLeftRule( new_parent._1, new_parent._2( a ).asInstanceOf[LabelledFormulaOccurrence], subst.applyAndBetaNormalize( m.formula ).asInstanceOf[Formula], v )
( new_proof, computeMap( p.root.antecedent ++ p.root.succedent, proof, new_proof, new_parent._2 ) )
}
case ForallSkRightRule( p, s, a, m, v ) => {
val new_parent = new_parents.head
val new_proof = ForallSkRightRule( new_parent._1, new_parent._2( a ).asInstanceOf[LabelledFormulaOccurrence], subst.applyAndBetaNormalize( m.formula ).asInstanceOf[Formula], v )
( new_proof, computeMap( p.root.antecedent ++ p.root.succedent, proof, new_proof, new_parent._2 ) )
}
*/
}
}
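  /**
   * Applies the substitution `subst` to every formula of `proof`, rebuilding the proof
   * recursively from its axioms (beta-normalizing the substituted formulas), and returns
   * the new proof together with a map from old to new formula occurrences.
   */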
def apply( proof: LKProof, subst: Substitution ): ( LKProof, Map[FormulaOccurrence, FormulaOccurrence] ) =
proof match {
case Axiom( _ ) => handleRule( proof, Nil, subst )
case UnaryLKProof( _, p, _, _, _ ) => handleRule( proof, apply( p, subst ) :: Nil, subst )
// case UnaryLKSKProof(_, p, _, _, _) => handleRule( proof, apply( p, subst )::Nil, subst )
case BinaryLKProof( _, p1, p2, _, _, _, _ ) =>
handleRule( proof, apply( p1, subst ) :: apply( p2, subst ) :: Nil, subst )
}
}
|
gisellemnr/gapt
|
src/main/scala/at/logic/gapt/proofs/lk/algorithms/substitution.scala
|
Scala
|
gpl-3.0
| 13,803
|
package com.twitter.zipkin.hadoop
import org.specs.Specification
import com.twitter.zipkin.hadoop.sources.Util
import com.twitter.zipkin.gen
import com.twitter.zipkin.gen.{AnnotationType, Annotation}
import scala.collection.JavaConverters._
class UtilSpec extends Specification {
"Util.getServiceName" should {
"yield None if the list is empty" in {
val l : List[Annotation] = List()
Util.getServiceName(l) must be_==(None)
}
"yield Some(service name) if present" in {
val endpoint = new gen.Endpoint(123, 666, "service")
val endpoint1 = new gen.Endpoint(123, 666, "service1")
val l : List[Annotation] = List(new gen.Annotation(1000, "cr").setHost(endpoint), new gen.Annotation(2000, "ss").setHost(endpoint1))
Util.getServiceName(l) must be_==(Some("service1"))
}
}
"Util.getBestClientSendName" should {
"yield client name if parentID == 0" in {
Util.getBestClientSideName((0, "client", "service")) must be_==("client")
Util.getBestClientSideName((0, "client", null)) must be_==("client")
}
"yield Unknown Service Name if service name is null and pid != 0" in {
Util.getBestClientSideName((1, "client", null)) must be_==("Unknown Service Name")
}
"yield service name otherwise" in {
Util.getBestClientSideName((1, "client", "service")) must be_==("service")
Util.getBestClientSideName((1, null, "service")) must be_==("service")
}
}
"Util.repeatSpan" should {
"repeat a SpanServiceName correctly" in {
val endpoint = new gen.Endpoint(123, 666, "service")
val span = new gen.SpanServiceName(12345, "methodcall", 666,
List(new gen.Annotation(1000, "sr").setHost(endpoint), new gen.Annotation(2000, "cr").setHost(endpoint)).asJava,
List(new gen.BinaryAnnotation("hi", null, AnnotationType.BOOL)).asJava, "service").setParent_id(0)
val span1 = new gen.SpanServiceName(12345, "methodcall", 667,
List(new gen.Annotation(1000, "sr").setHost(endpoint), new gen.Annotation(2000, "cr").setHost(endpoint)).asJava,
List(new gen.BinaryAnnotation("hi", null, AnnotationType.BOOL)).asJava, "service").setParent_id(1)
Util.repeatSpan(span, 1, 666, 0) must beEqualTo(List((span, 666),(span1, 667)))
}
"repeat a Span correctly" in {
val endpoint = new gen.Endpoint(123, 666, "service")
val span = new gen.Span(12345, "methodcall", 666,
List(new gen.Annotation(1000, "sr").setHost(endpoint), new gen.Annotation(2000, "cr").setHost(endpoint)).asJava,
List(new gen.BinaryAnnotation("hi", null, AnnotationType.BOOL)).asJava).setParent_id(0)
val span1 = new gen.Span(12345, "methodcall", 667,
List(new gen.Annotation(1000, "sr").setHost(endpoint), new gen.Annotation(2000, "cr").setHost(endpoint)).asJava,
List(new gen.BinaryAnnotation("hi", null, AnnotationType.BOOL)).asJava).setParent_id(1)
Util.repeatSpan(span, 1, 666, 0) must beEqualTo(List((span, 666),(span1, 667)))
}
}
"Util.getSpanIDtoNames" should {
"Get correct (id, service name) pairs" in {
val endpoint = new gen.Endpoint(123, 666, "service")
val endpoint1 = new gen.Endpoint(123, 666, "service1")
val span = new gen.SpanServiceName(12345, "methodcall", 666,
List(new gen.Annotation(1000, "sr").setHost(endpoint), new gen.Annotation(2000, "cr").setHost(endpoint)).asJava,
List(new gen.BinaryAnnotation("hi", null, AnnotationType.BOOL)).asJava, "service").setParent_id(0)
val span1 = new gen.SpanServiceName(12345, "methodcall", 667,
List(new gen.Annotation(1000, "sr").setHost(endpoint1), new gen.Annotation(2000, "cr").setHost(endpoint1)).asJava,
List(new gen.BinaryAnnotation("hi", null, AnnotationType.BOOL)).asJava, "service1").setParent_id(1)
Util.getSpanIDtoNames(List((span, 666), (span1, 667))) must beEqualTo(List((666, "service"), (667, "service1")))
}
}
}
|
dsias/zipkin
|
zipkin-hadoop/src/test/scala/com/twitter/zipkin/hadoop/UtilSpec.scala
|
Scala
|
apache-2.0
| 3,939
|
package lila.relation
import akka.actor.ActorSelection
import scala.util.Success
import lila.db.api._
import lila.db.Implicits._
import lila.game.GameRepo
import lila.hub.actorApi.relation.ReloadOnlineFriends
import lila.hub.actorApi.timeline.{ Propagate, Follow => FollowUser }
import lila.user.tube.userTube
import lila.user.{ User => UserModel, UserRepo }
import tube.relationTube
final class RelationApi(
cached: Cached,
actor: ActorSelection,
bus: lila.common.Bus,
getOnlineUserIds: () => Set[String],
timeline: ActorSelection,
reporter: ActorSelection,
followable: String => Fu[Boolean],
maxFollow: Int,
maxBlock: Int) {
def followers(userId: ID) = cached followers userId
def following(userId: ID) = cached following userId
def blockers(userId: ID) = cached blockers userId
def blocking(userId: ID) = cached blocking userId
def blocks(userId: ID) = blockers(userId) ⊹ blocking(userId)
def nbFollowers(userId: ID) = followers(userId) map (_.size)
def nbFollowing(userId: ID) = following(userId) map (_.size)
def nbBlocking(userId: ID) = blocking(userId) map (_.size)
def nbBlockers(userId: ID) = blockers(userId) map (_.size)
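  // "friends" are mutual follows: the intersection of who the user follows and who follows them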
def friends(userId: ID) = following(userId) zip followers(userId) map {
case (f1, f2) => f1 intersect f2
}
def areFriends(u1: ID, u2: ID) = friends(u1) map (_ contains u2)
def follows(u1: ID, u2: ID) = following(u1) map (_ contains u2)
def blocks(u1: ID, u2: ID) = blocking(u1) map (_ contains u2)
def relation(u1: ID, u2: ID): Fu[Option[Relation]] = cached.relation(u1, u2)
def onlinePopularUsers(max: Int): Fu[List[UserModel]] =
(getOnlineUserIds().toList map { id =>
nbFollowers(id) map (id -> _)
}).sequenceFu map (_ sortBy (-_._2) take max map (_._1)) flatMap UserRepo.byOrderedIds
def follow(u1: ID, u2: ID): Funit =
if (u1 == u2) funit
else followable(u2) zip relation(u1, u2) zip relation(u2, u1) flatMap {
case ((false, _), _) => funit
case ((_, Some(Follow)), _) => funit
case ((_, _), Some(Block)) => funit
case _ => RelationRepo.follow(u1, u2) >> limitFollow(u1) >>
refresh(u1, u2) >>-
(timeline ! Propagate(
FollowUser(u1, u2)
).toFriendsOf(u1).toUsers(List(u2)))
}
private def limitFollow(u: ID) = nbFollowing(u) flatMap { nb =>
(nb >= maxFollow) ?? RelationRepo.drop(u, true, nb - maxFollow + 1)
}
private def limitBlock(u: ID) = nbBlocking(u) flatMap { nb =>
(nb >= maxBlock) ?? RelationRepo.drop(u, false, nb - maxBlock + 1)
}
def block(u1: ID, u2: ID): Funit =
if (u1 == u2) funit
else relation(u1, u2) flatMap {
case Some(Block) => funit
case _ => RelationRepo.block(u1, u2) >> limitBlock(u1) >> refresh(u1, u2) >>-
bus.publish(lila.hub.actorApi.relation.Block(u1, u2), 'relation) >>-
(nbBlockers(u2) zip nbFollowers(u2))
}
def unfollow(u1: ID, u2: ID): Funit =
if (u1 == u2) funit
else relation(u1, u2) flatMap {
case Some(Follow) => RelationRepo.unfollow(u1, u2) >> refresh(u1, u2)
case _ => funit
}
def unblock(u1: ID, u2: ID): Funit =
if (u1 == u2) funit
else relation(u1, u2) flatMap {
case Some(Block) => RelationRepo.unblock(u1, u2) >> refresh(u1, u2) >>-
bus.publish(lila.hub.actorApi.relation.UnBlock(u1, u2), 'relation)
case _ => funit
}
private def refresh(u1: ID, u2: ID): Funit =
cached.invalidate(u1, u2) >>-
List(u1, u2).foreach(actor ! ReloadOnlineFriends(_))
}
|
danilovsergey/i-bur
|
modules/relation/src/main/RelationApi.scala
|
Scala
|
mit
| 3,556
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util
import java.util.NoSuchElementException
import scala.collection.mutable.Buffer
import org.scalatest.FunSuite
import org.scalatest.matchers.ShouldMatchers
class NextIteratorSuite extends FunSuite with ShouldMatchers {
test("one iteration") {
val i = new StubIterator(Buffer(1))
i.hasNext should be === true
i.next should be === 1
i.hasNext should be === false
intercept[NoSuchElementException] { i.next() }
}
test("two iterations") {
val i = new StubIterator(Buffer(1, 2))
i.hasNext should be === true
i.next should be === 1
i.hasNext should be === true
i.next should be === 2
i.hasNext should be === false
intercept[NoSuchElementException] { i.next() }
}
test("empty iteration") {
val i = new StubIterator(Buffer())
i.hasNext should be === false
intercept[NoSuchElementException] { i.next() }
}
test("close is called once for empty iterations") {
val i = new StubIterator(Buffer())
i.hasNext should be === false
i.hasNext should be === false
i.closeCalled should be === 1
}
test("close is called once for non-empty iterations") {
val i = new StubIterator(Buffer(1, 2))
i.next should be === 1
i.next should be === 2
// close isn't called until we check for the next element
i.closeCalled should be === 0
i.hasNext should be === false
i.closeCalled should be === 1
i.hasNext should be === false
i.closeCalled should be === 1
}
class StubIterator(ints: Buffer[Int]) extends NextIterator[Int] {
var closeCalled = 0
override def getNext() = {
if (ints.size == 0) {
finished = true
0
} else {
ints.remove(0)
}
}
override def close() {
closeCalled += 1
}
}
}
|
sryza/spark
|
core/src/test/scala/org/apache/spark/util/NextIteratorSuite.scala
|
Scala
|
apache-2.0
| 2,606
|
import scala.quoted.*
import scala.deriving.*
case class Foo(i: Int)
case class Box[A](x: A)
object Macro {
inline def foo[T]: String =
${ fooImpl[T] }
def fooImpl[T](implicit t: Type[T], qctx: Quotes): Expr[String] = {
import quotes.reflect.*
val sym = TypeTree.of[T].symbol
if sym.isClassDef then '{ "symbol" }
else if sym.isNoSymbol then '{ "no symbol" }
else '{ "match error" }
}
}
|
dotty-staging/dotty
|
tests/run-macros/no-symbol/1.scala
|
Scala
|
apache-2.0
| 420
|
package com.ergodicity.marketdb
import model.{TradePayload, Security, Market}
import org.scalatest.WordSpec
import org.joda.time.DateTime
import sbinary._
import Operations._
import com.ergodicity.marketdb.model.TradeProtocol._
import org.slf4j.LoggerFactory
class TradeProtocolSpec extends WordSpec {
val log = LoggerFactory.getLogger(classOf[TradeProtocolSpec])
"TradeProtocol" must {
val market = Market("RTS")
val security = Security("RTS 3.12")
val now = new DateTime
"serialized and deserialized to/from byte array" in {
val payload = TradePayload(market, security, 11l, BigDecimal("1.111"), 1, now, false)
val binary = toByteArray(payload)
log.info("ByteArray length: " + binary.length)
log.info("Array: " + showArray(binary))
val fromBinary = fromByteArray[TradePayload](binary)
log.info("From binary: " + fromBinary)
assert(fromBinary match {
case TradePayload(mrkt, sec, id, prc, amnt, t, ns) =>
mrkt == market && sec == security && prc == BigDecimal("1.111") && id == 11l && amnt == 1 && t == now && ns == false
case _ => false
})
}
"serialize and deserialize to/from List" in {
val payload1 = TradePayload(market, security, 11l, BigDecimal("1.111"), 1, now, false)
val payload2 = TradePayload(market, security, 12l, BigDecimal("1.111"), 1, now, false)
val list = List(payload1, payload2)
val binary = toByteArray(list)
log.info("ByteArray length: " + binary.length)
log.info("Array: " + showArray(binary))
val fromBinary = fromByteArray[List[TradePayload]](binary)
log.info("From binary: " + fromBinary)
assert(fromBinary.size == 2)
}
}
// utility methods for printing a byte array
def showArray(b: Array[Byte]) = b.map(showByte).mkString(" ")
  def showByte(b: Byte) = pad(((b + 256) % 256).toHexString) // signed byte -> unsigned hex
def pad(s: String) = if (s.length == 1) "0" + s else s
}
|
ezhulenev/marketdb
|
marketdb-api/src/test/scala/com/ergodicity/marketdb/TradeProtocolSpec.scala
|
Scala
|
mit
| 1,957
|
package hr.element.geom
case class Parallelogram(p1: Point, p2: Point, p3: Point) extends Shape {
  val p4 = p3 + p1 - p2 // the fourth vertex: p1 translated by the vector from p2 to p3 (corner opposite p2)
def points = IndexedSeq(p1, p2, p3, p4)
}
|
melezov/ocd-transform
|
src/main/scala/hr/element/geom/Parallelogram.scala
|
Scala
|
bsd-3-clause
| 167
|
package org.apache.kafka.cep
import org.apache.kafka.cep.utils.Observed
class ExpiredEventDetector(val underlyingDetector: Detector)(implicit system: CEP)
extends Detector(1) {
underlyingDetector.addObserver(ExpiredEventDetector.this)
override def handle(observed: Observed, event: Any) = observed match {
case `underlyingDetector` if (!event.asInstanceOf[Event].isComplete) ⇒ {
val underlyingEvent = event.asInstanceOf[Event]
getFutureEvent(underlyingEvent.id, underlyingEvent.timestamp, 1) match {
case None ⇒ {}
case Some(future) ⇒ {
update(future, underlyingEvent.ageInMillis)
mergeFutureEvent(underlyingEvent, 0, 1)
}
}
}
case _ ⇒ {}
}
}
|
michal-harish/kafka-cep
|
src/main/scala/org/apache/kafka/cep/ExpiredEventDetector.scala
|
Scala
|
apache-2.0
| 734
|
@main def Test = {
val a = 5
val x = 1
+ `a` * 6
assert(x == 1, x)
}
|
dotty-staging/dotty
|
tests/run/i7031.scala
|
Scala
|
apache-2.0
| 82
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package unit.kafka
import java.io.{FileOutputStream, File}
import java.security.Permission
import kafka.Kafka
import org.junit.{After, Before, Test}
import junit.framework.Assert._
class KafkaTest {
val originalSecurityManager: SecurityManager = System.getSecurityManager
class ExitCalled extends SecurityException {
}
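  // Replaces the JVM exit check with an exception so tests can assert that Kafka tried to exit
  // without actually terminating the test JVM.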
private class NoExitSecurityManager extends SecurityManager {
override def checkExit(status: Int): Unit = {
throw new ExitCalled
}
override def checkPermission(perm : Permission): Unit = {
}
override def checkPermission(perm : Permission, context: Object): Unit = {
}
}
@Before
def setSecurityManager() : Unit = {
System.setSecurityManager(new NoExitSecurityManager)
}
@After
def setOriginalSecurityManager() : Unit = {
System.setSecurityManager(originalSecurityManager)
}
@Test
def testGetKafkaConfigFromArgs(): Unit = {
val propertiesFile = prepareDefaultConfig()
// We should load configuration file without any arguments
val config1 = Kafka.getKafkaConfigFromArgs(Array(propertiesFile))
assertEquals(1, config1.brokerId)
// We should be able to override given property on command line
val config2 = Kafka.getKafkaConfigFromArgs(Array(propertiesFile, "--override", "broker.id=2"))
assertEquals(2, config2.brokerId)
// We should be also able to set completely new property
val config3 = Kafka.getKafkaConfigFromArgs(Array(propertiesFile, "--override", "log.cleanup.policy=compact"))
assertEquals(1, config3.brokerId)
assertEquals("compact", config3.logCleanupPolicy)
// We should be also able to set several properties
val config4 = Kafka.getKafkaConfigFromArgs(Array(propertiesFile, "--override", "log.cleanup.policy=compact", "--override", "broker.id=2"))
assertEquals(2, config4.brokerId)
assertEquals("compact", config4.logCleanupPolicy)
}
@Test(expected = classOf[ExitCalled])
def testGetKafkaConfigFromArgsWrongSetValue(): Unit = {
val propertiesFile = prepareDefaultConfig()
Kafka.getKafkaConfigFromArgs(Array(propertiesFile, "--override", "a=b=c"))
}
@Test(expected = classOf[ExitCalled])
def testGetKafkaConfigFromArgsNonArgsAtTheEnd(): Unit = {
val propertiesFile = prepareDefaultConfig()
Kafka.getKafkaConfigFromArgs(Array(propertiesFile, "--override", "broker.id=1", "broker.id=2"))
}
@Test(expected = classOf[ExitCalled])
def testGetKafkaConfigFromArgsNonArgsOnly(): Unit = {
val propertiesFile = prepareDefaultConfig()
Kafka.getKafkaConfigFromArgs(Array(propertiesFile, "broker.id=1", "broker.id=2"))
}
@Test(expected = classOf[ExitCalled])
  def testGetKafkaConfigFromArgsNonArgsAtTheBeginning(): Unit = {
val propertiesFile = prepareDefaultConfig()
Kafka.getKafkaConfigFromArgs(Array(propertiesFile, "broker.id=1", "--override", "broker.id=2"))
}
def prepareDefaultConfig(): String = {
prepareConfig(Array("broker.id=1", "zookeeper.connect=somewhere"))
}
def prepareConfig(lines : Array[String]): String = {
val file = File.createTempFile("kafkatest", ".properties")
file.deleteOnExit()
val writer = new FileOutputStream(file)
lines.foreach { l =>
writer.write(l.getBytes)
writer.write("\\n".getBytes)
}
writer.close
file.getAbsolutePath
}
}
|
crashlytics/kafka
|
core/src/test/scala/unit/kafka/KafkaConfigTest.scala
|
Scala
|
apache-2.0
| 4,134
|
package org.eknet.spray.openid.provider
import scala.concurrent.ExecutionContext
import akka.util.Timeout
import spray.routing._
import spray.routing.directives.PathDirectives
class ProviderRoute(endpointSettings: EndpointSettings, discoverySettings: DiscoverySettings) extends PathDirectives with RouteConcatenation {
val endpoint = new EndpointRoute(endpointSettings)
val discovery = new DiscoveryRoute(discoverySettings)
def route(implicit ec: ExecutionContext, to: Timeout): Route = {
discoverySettings.endpointPath {
endpoint.route
} ~
discovery.route
}
}
|
eikek/spray-openid
|
src/main/scala/org/eknet/spray/openid/provider/ProviderRoute.scala
|
Scala
|
apache-2.0
| 591
|
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.fp
import slamdata.Predef._
package object ski extends Serializable {
// NB: Unicode has double-struck and bold versions of the letters, which might
// be more appropriate, but the code points are larger than 2 bytes, so
// Scala doesn't handle them.
/** Probably not useful; implemented here mostly because it's amusing. */
def σ[A, B, C](x: A => B => C, y: A => B, z: A): C = x(z)(y(z))
/** A shorter name for the constant function of 1, 2, 3, or 6 args.
* NB: the argument is eager here, so use `_ => ...` instead if you need it
* to be thunked.
*/
def κ[A, B](x: B): A => B = _ => x
def κ2[A, B, C](x: C): (A, B) => C = (_, _) => x
def κ3[A, B, C, D](x: D): (A, B, C) => D = (_, _, _) => x
def κ6[A, B, C, D, E, F, G](x: G): (A, B, C, D, E, F) => G = (_, _, _, _, _, _) => x
/** A shorter name for the identity function. */
def ι[A]: A => A = x => x
}
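// Editorial sketch, not part of the original file: a minimal, hedged illustration of the
// combinators defined above. `SkiUsageSketch` and its members are names invented for this
// example only.
private object SkiUsageSketch {
  import ski._
  val alwaysZero: String => Int = κ(0) // constant function: ignores its argument (note: the argument is evaluated eagerly)
  val same: Int => Int = ι             // identity
  val holds: Boolean = alwaysZero("ignored") == 0 && same(42) == 42
}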
|
jedesah/Quasar
|
foundation/src/main/scala/quasar/fp/ski/package.scala
|
Scala
|
apache-2.0
| 1,611
|
package lila.game
import chess.Clock
import lila.common.LightUser
import play.twirl.api.Html
object Namer {
def players(game: Game, withRatings: Boolean = true)(implicit lightUser: String => Option[LightUser]): (Html, Html) =
player(game.firstPlayer, withRatings) -> player(game.secondPlayer, withRatings)
def player(p: Player, withRating: Boolean = true, withTitle: Boolean = true)(implicit lightUser: String => Option[LightUser]) = Html {
p.aiLevel.fold(
p.userId.flatMap(lightUser).fold(lila.user.User.anonymous) { user =>
if (withRating) s"${withTitle.fold(user.titleNameHtml, user.name)} (${ratingString(p)})"
else withTitle.fold(user.titleName, user.name)
}) { level => s"A.I. level $level" }
}
private def ratingString(p: Player) = p.rating match {
case Some(rating) => s"$rating${if (p.provisional) "?" else ""}"
case _ => "?"
}
def playerString(p: Player, withRating: Boolean = true, withTitle: Boolean = true)(implicit lightUser: String => Option[LightUser]) =
    player(p, withRating, withTitle)(lightUser).body.replace(" ", "\u00a0") // non-breaking space, so names do not wrap
}
|
Enigmahack/lila
|
modules/game/src/main/Namer.scala
|
Scala
|
mit
| 1,138
|
package swiss.sib.analytics.server.logs.custom.uniprot
case class UniProtEntryPropery(accession: String, database: String);
|
sib-swiss/server-log-analytics
|
src/main/scala/swiss/sib/analytics/server/logs/custom/uniprot/UniProtEntryPropery.scala
|
Scala
|
gpl-2.0
| 125
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.netty
import java.nio.ByteBuffer
import scala.collection.JavaConverters._
import org.apache.spark.Logging
import org.apache.spark.network.BlockDataManager
import org.apache.spark.network.buffer.{ManagedBuffer, NioManagedBuffer}
import org.apache.spark.network.client.{RpcResponseCallback, TransportClient}
import org.apache.spark.network.server.{OneForOneStreamManager, RpcHandler, StreamManager}
import org.apache.spark.network.shuffle.protocol.{BlockTransferMessage, OpenBlocks, StreamHandle, UploadBlock}
import org.apache.spark.serializer.Serializer
import org.apache.spark.storage.{BlockId, StorageLevel}
/**
* Serves requests to open blocks by simply registering one chunk per block requested.
* Handles opening and uploading arbitrary BlockManager blocks.
*
* Opened blocks are registered with the "one-for-one" strategy, meaning each Transport-layer Chunk
* is equivalent to one Spark-level shuffle block.
*/
class NettyBlockRpcServer(
appId: String,
serializer: Serializer,
blockManager: BlockDataManager)
extends RpcHandler with Logging {
private val streamManager = new OneForOneStreamManager()
override def receive(
client: TransportClient,
messageBytes: Array[Byte],
responseContext: RpcResponseCallback): Unit = {
val message = BlockTransferMessage.Decoder.fromByteArray(messageBytes)
logTrace(s"Received request: $message")
message match {
case openBlocks: OpenBlocks =>
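        // One-for-one strategy: register exactly one transport-layer chunk per requested
        // block, then return a handle the client uses to fetch those chunks.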
val blocks: Seq[ManagedBuffer] =
openBlocks.blockIds.map(BlockId.apply).map(blockManager.getBlockData)
val streamId = streamManager.registerStream(appId, blocks.iterator.asJava)
logTrace(s"Registered streamId $streamId with ${blocks.size} buffers")
responseContext.onSuccess(new StreamHandle(streamId, blocks.size).toByteArray)
case uploadBlock: UploadBlock =>
// StorageLevel is serialized as bytes using our JavaSerializer.
val level: StorageLevel =
serializer.newInstance().deserialize(ByteBuffer.wrap(uploadBlock.metadata))
val data = new NioManagedBuffer(ByteBuffer.wrap(uploadBlock.blockData))
blockManager.putBlockData(BlockId(uploadBlock.blockId), data, level)
responseContext.onSuccess(new Array[Byte](0))
}
}
override def getStreamManager(): StreamManager = streamManager
}
|
pronix/spark
|
core/src/main/scala/org/apache/spark/network/netty/NettyBlockRpcServer.scala
|
Scala
|
apache-2.0
| 3,180
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import org.apache.spark.annotation.DeveloperApi
/**
* :: DeveloperApi ::
* A result of a job in the DAGScheduler.
*/
@DeveloperApi
sealed trait JobResult
@DeveloperApi
case object JobSucceeded extends JobResult
private[spark] case class JobFailed(exception: Exception) extends JobResult
|
sh-cho/cshSpark
|
scheduler/JobResult.scala
|
Scala
|
apache-2.0
| 1,131
|
package bootstrap.liftweb
import javax.mail.internet.MimeMessage
import code.managers.ClusterRefs
import code.rest.TestRest
import net.liftweb._
import common._
import http._
import net.liftweb.util.Props
import util._
import code.config._
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.util.{Failure, Success}
/**
* A class that's instantiated early and run. It allows the application
* to modify lift's environment
*/
class Boot extends Loggable {
implicit val timeout = Timeout(30 seconds)
def boot {
logger.info("Run Mode: "+Props.mode.toString)
// where to search snippet
LiftRules.addToPackages("code")
// set the default htmlProperties
LiftRules.htmlProperties.default.set((r: Req) => new Html5Properties(r.userAgent))
// Build SiteMap
LiftRules.setSiteMap(Site.siteMap)
// Error handler
ErrorHandler.init
// 404 handler
LiftRules.uriNotFound.prepend(NamedPF("404handler") {
case (req, failure) =>
NotFoundAsTemplate(ParsePath(List("404"), "html", false, false))
})
// Show the spinny image when an Ajax call starts
LiftRules.ajaxStart = Full(() => LiftRules.jsArtifacts.show("ajax-spinner").cmd)
// Make the spinny image go away when it ends
LiftRules.ajaxEnd = Full(() => LiftRules.jsArtifacts.hide("ajax-spinner").cmd)
// Force the request to be UTF-8
LiftRules.early.append(_.setCharacterEncoding("UTF-8"))
// Mailer
Mailer.devModeSend.default.set((m: MimeMessage) => logger.info("Dev mode message:\n" + prettyPrint(m)))
Mailer.testModeSend.default.set((m: MimeMessage) => logger.info("Test mode message:\n" + prettyPrint(m)))
// Cluster
LiftRules.statelessDispatch.append(TestRest)
// Init
ClusterRefs.actorSystem
}
private def prettyPrint(m: MimeMessage): String = {
val stringBuilder = new StringBuilder
val headerLines = m.getAllHeaderLines
while (headerLines.hasMoreElements)
stringBuilder ++= headerLines.nextElement.toString + "\n"
val out =
s"""
|$stringBuilder
|-----------------------------
|${m.getContent}
""".stripMargin
out
}
}
|
rhyolight/htm-moclu
|
moclu-http/src/main/scala/bootstrap/liftweb/Boot.scala
|
Scala
|
agpl-3.0
| 2,271
|
//optimiseHeapOptions optimiseHeapBooleans optimiseHeapStrings
class Foo(s: Option[String])
//---
{
final class Foo private (private[this] val _s: _root_.scala.Array[_root_.scala.Byte], private[this] val _bitmask: _root_.scala.Long) {
override def equals(thatAny: _root_.scala.Any): _root_.scala.Boolean = thatAny match {
case that: Foo =>
(this eq that) || this.s == that.s
case _ =>
false
}
override def hashCode: _root_.scala.Int = s.hashCode
override def toString: _root_.java.lang.String = "Foo(" + s.toString + ")"
def copy(s: Option[String] = this.s): Foo = Foo(s)
def s: Option[String] = if (this._s == null) {
_root_.scala.None
} else {
_root_.scala.Some(new String(this._s))
}
}
object Foo {
def unapply(that: Foo): _root_.scala.Option[Option[String]] = _root_.scala.Some(that.s)
override def toString: _root_.java.lang.String = "Foo"
def apply(s: Option[String]): Foo = {
val packed = pack(s)
val created = new Foo(packed._1, packed._2)
created
}
private def pack(s: Option[String]): (_root_.scala.Array[_root_.scala.Byte], _root_.scala.Long) = {
var _bitmask: _root_.scala.Long = 0L
val _s = if (s == _root_.scala.None) {
null
} else {
s.get.getBytes
}
(_s, _bitmask)
}
}
}
|
vovapolu/scala-data-classes
|
src/test/resources/generatedTests/HeapOptimizationNoBitmaskGen.scala
|
Scala
|
lgpl-3.0
| 1,361
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.runtime.harness
import java.time.{Instant, ZoneId}
import java.util.concurrent.ConcurrentLinkedQueue
import org.apache.flink.api.scala._
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.streaming.runtime.streamrecord.StreamRecord
import org.apache.flink.table.api.scala._
import org.apache.flink.table.runtime.types.CRow
import org.apache.flink.types.Row
import org.junit.Test
import scala.collection.mutable
class MatchHarnessTest extends HarnessTestBase {
import RecordBuilder._
@Test
def testAccessingProctime(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
val tEnv = StreamTableEnvironment.create(env)
val data = new mutable.MutableList[(Int, String)]
val t = env.fromCollection(data).toTable(tEnv, 'id, 'name, 'proctime.proctime)
tEnv.registerTable("MyTable", t)
val sqlQuery =
s"""
|SELECT *
|FROM MyTable
|MATCH_RECOGNIZE (
| ORDER BY proctime
| MEASURES
| MATCH_PROCTIME() as proctime,
| HOUR(MATCH_PROCTIME()) as currentHour
| PATTERN (A)
| DEFINE
| A AS A.name LIKE '%a%'
|) AS T
|""".stripMargin
val harness = createHarnessTester[Byte, Row, CRow](
tEnv.sqlQuery(sqlQuery).toAppendStream[Row],
"GlobalCepOperator")
harness.open()
val expectedOutput = new ConcurrentLinkedQueue[Object]()
val now = Instant.ofEpochSecond(1000000)
harness.setProcessingTime(now.toEpochMilli)
harness.processElement(row(Int.box(1), "a").asRecord())
val currentHour = now.atZone(ZoneId.of("GMT")).getHour
// MATCH_PROCTIME is not materialized, therefore it is null, HOUR(MATCH_PROCTIME) is
// materialized
expectedOutput.add(cRow(null, Long.box(currentHour)).asRecord())
verify(expectedOutput, harness.getOutput)
}
private class RecordBuilder[T](record: T) {
def asRecord() : StreamRecord[T] = {
new StreamRecord[T](record)
}
}
private object RecordBuilder {
def row(values: AnyRef*) : RecordBuilder[Row] = {
new RecordBuilder[Row](Row.of(values : _*))
}
def cRow(values: AnyRef*) : RecordBuilder[CRow] = {
new RecordBuilder[CRow](CRow(values : _*))
}
}
}
|
ueshin/apache-flink
|
flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/runtime/harness/MatchHarnessTest.scala
|
Scala
|
apache-2.0
| 3,135
|
package de.qlextension
import java.net.URLClassLoader
import java.net.URL
import java.io.File
import java.io.FileReader
import java.io.FileNotFoundException
import scala.collection.JavaConverters._
import org.nlogo.api.DefaultClassManager
import org.nlogo.api.PrimitiveManager
import org.nlogo.api.Primitive
import com.typesafe.config.ConfigFactory
/**
*
* This class is the only element of ql.jar.
* It initialises the ql-extension.
*
* The main logic is implemented in the qlearning.jar.
*
* This class is separated from the main logic because of problems regarding the class path.
* If the additional jars are not in the system class loader, they must be reloaded when a HeadlessWorkspace loads the extension.
* This leads to additional Akka ActorSystems, which impedes the communication between the HeadlessWorkspaces and main workspace (which loaded the extension).
*
* There are two ways to prevent this behaviour:
* 1. Adding the jars to the variable 'Class-Path' of the manifest file of NetLogo.jar.
* 2. Adding the jars to the class path at runtime: http://stackoverflow.com/questions/1010919/adding-files-to-java-classpath-at-runtime
*
* The latter is tried in the following.
* However, this is a hack that is not directly supported by Java.
 * Problems might arise if a SecurityManager is installed.
*
* If problems arise, try the first way.
*
*/
object QLExtension {
// name of NetLogo extension
val EXTENSION_NAME = "ql"
// name of section in configuration file
val cfgstr = "netlogo"
// path of configuration file
val confFile = new File("extensions/ql/application.conf")
// check whether confFile exists
try {
new FileReader(confFile)
} catch {
case e: FileNotFoundException =>
System.err.println("FileNotFoundException: extensions/ql/application.conf")
exit(0)
}
// load configuration file
val config = ConfigFactory.parseFile(confFile)
val inParallelMode = config.getBoolean("netlogo.enable-parallel-mode")
// additional jars that are needed
val defaultJarList = List[String]("extensions/ql/ql.jar",
"extensions/ql/qlearning.jar",
"extensions/ql/akka-actor-2.0.5.jar",
"extensions/ql/akka-agent-2.0.5.jar",
"extensions/ql/config-1.0.2.jar",
"extensions/ql/colt-1.2.0.jar",
"extensions/ql/scala-stm_2.9.1-0.5.jar")
val jarList = if (inParallelMode) defaultJarList ++ config.getStringList("netlogo.parallel.additional-jars").asScala else defaultJarList
val sysloader = ClassLoader.getSystemClassLoader().asInstanceOf[URLClassLoader]
// adding the jars to the system class loader
val sysclass = classOf[URLClassLoader]
try {
val method = sysclass.getDeclaredMethod("addURL", classOf[URL])
method.setAccessible(true)
jarList.foreach(jarName => {
val file = new File(jarName)
// check whether jar exists
new FileReader(file)
// load jar
method.invoke(sysloader, file.toURL())
})
} catch {
case e: FileNotFoundException =>
val newLine = System.getProperty("line.separator")
System.err.println("FileNotFoundException: Check if all required jars exists: " + newLine +
jarList.tail.foldLeft(jarList.first)((s, el) => s + "," + newLine + el)) + "." + newLine +
exit(0)
case t: Throwable =>
val newLine = System.getProperty("line.separator")
System.err.println("Setting additional jars failed. A SecurityManager may prevent the adding of jars to the class path at runtime." + newLine +
"Manually add the names of the jars to the variable 'Class-Path' of the manifest file of NetLogo.jar.")
t.printStackTrace()
}
}
/**
* the extension class needed by NetLogo
*/
class QLExtension extends DefaultClassManager {
import QLExtension._
private def getPrimitive(name: String) = {
sysloader.loadClass(name).getConstructor().newInstance().asInstanceOf[Primitive]
}
override def load(manager: PrimitiveManager) {
manager.addPrimitive("init", getPrimitive("de.qlearning.Init"))
manager.addPrimitive("decay-exploration", getPrimitive("de.qlearning.DecreaseExperimenting"))
manager.addPrimitive("add-agent", getPrimitive("de.qlearning.AddAgent"))
manager.addPrimitive("remove-agent", getPrimitive("de.qlearning.RemoveAgent"))
manager.addPrimitive("create-group", getPrimitive("de.qlearning.CreateGroup"))
manager.addPrimitive("set-group-structure", getPrimitive("de.qlearning.NewGroupStructure"))
manager.addPrimitive("start", getPrimitive("de.qlearning.Start"))
manager.addPrimitive("stop", getPrimitive("de.qlearning.Stop"))
manager.addPrimitive("get-group-list", getPrimitive("de.qlearning.GetGroupList"))
manager.addPrimitive("get-agents", getPrimitive("de.qlearning.GetAgents"))
manager.addPrimitive("get-decisions", getPrimitive("de.qlearning.GetDecisions"))
manager.addPrimitive("set-rewards", getPrimitive("de.qlearning.SetRewards"))
manager.addPrimitive("set-new-states", getPrimitive("de.qlearning.SetNewStates"))
manager.addPrimitive("get-performance", getPrimitive("de.qlearning.GetPerformance"))
manager.addPrimitive("one-of", getPrimitive("de.qlearning.OneOf"))
manager.addPrimitive("set-reward", getPrimitive("de.qlearning.SetReward"))
manager.addPrimitive("set-reward-and-state", getPrimitive("de.qlearning.SetRewardAndState"))
}
override def additionalJars: java.util.List[String] = jarList.asJava
override def clearAll() {
// nothing to do
}
}
|
JZschache/NetLogo-QLearning-Extension
|
src/de/qlextension/QLExtension.scala
|
Scala
|
gpl-2.0
| 5,678
|
/**
* Copyright 2011-2012 eBusiness Information, Groupe Excilys (www.excilys.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.excilys.ebi.gatling.charts.component.impl
import com.excilys.ebi.gatling.charts.component.{ ComponentLibrary, Component }
import com.excilys.ebi.gatling.charts.series.Series
/**
* Mock implementation that is removed from the binary.
* A unique implementation is expected to be present in the classpath.
*
* @author stephanelandelle
*/
class ComponentLibraryImpl extends ComponentLibrary {
def getAllSessionsJs(runStart: Long, series: Series[Int, Int]): String = throw new UnsupportedOperationException
def getActiveSessionsChartComponent(runStart: Long, series: Seq[Series[Int, Int]]): Component = throw new UnsupportedOperationException
def getRequestsChartComponent(runStart: Long, allRequests: Series[Int, Int], failedRequests: Series[Int, Int], succeededRequests: Series[Int, Int], pieSeries: Series[String, Int]): Component = throw new UnsupportedOperationException
def getTransactionsChartComponent(runStart: Long, allTransactions: Series[Int, Int], failedTransactions: Series[Int, Int], succeededTransactions: Series[Int, Int], pieSeries: Series[String, Int]): Component = throw new UnsupportedOperationException
def getRequestDetailsResponseTimeChartComponent(runStart: Long, responseTimesSuccess: Series[Int, (Int, Int)], responseTimesFailures: Series[Int, (Int, Int)]): Component = throw new UnsupportedOperationException
def getRequestDetailsResponseTimeDistributionChartComponent(responseTimesSuccess: Series[Int, Int], responseTimesFailures: Series[Int, Int]): Component = throw new UnsupportedOperationException
def getRequestDetailsLatencyChartComponent(runStart: Long, latencySuccess: Series[Int, (Int, Int)], latencyFailures: Series[Int, (Int, Int)]): Component = throw new UnsupportedOperationException
def getRequestDetailsScatterChartComponent(successData: Series[Int, Int], failuresData: Series[Int, Int]): Component = throw new UnsupportedOperationException
def getRequestDetailsIndicatorChartComponent: Component = throw new UnsupportedOperationException
def getNumberOfRequestsChartComponent: Component = throw new UnsupportedOperationException
}
|
Tjoene/thesis
|
Case_Programs/gatling-1.4.0/gatling-charts/src/main/scala/com/excilys/ebi/gatling/charts/component/impl/ComponentLibraryImpl.scala
|
Scala
|
gpl-2.0
| 2,743
|
package dotty.tools.scaladoc
package tasty
import scala.quoted._
object SyntheticsSupport:
extension (using Quotes)(t: reflect.TypeRepr)
def isCompiletimeAppliedType: Boolean = t.hackIsCompiletimeAppliedType(t)
private def hackIsCompiletimeAppliedType(rtpe: reflect.TypeRepr): Boolean =
import dotty.tools.dotc
given ctx: dotc.core.Contexts.Context = quotes.asInstanceOf[scala.quoted.runtime.impl.QuotesImpl].ctx
val tpe = rtpe.asInstanceOf[dotc.core.Types.Type]
ctx.definitions.isCompiletimeAppliedType(tpe.typeSymbol)
end extension
extension (using Quotes)(s: reflect.Symbol)
def isSyntheticFunc: Boolean =
import reflect._
s.flags.is(Flags.Synthetic) || s.flags.is(Flags.FieldAccessor) || s.isDefaultHelperMethod
def isSuperBridgeMethod: Boolean = s.name.contains("$super$")
def isDefaultHelperMethod: Boolean = ".*\\$default\\$\\d+$".r.matches(s.name)
def isOpaque: Boolean =
import reflect._
s.flags.is(Flags.Opaque)
def isInfix: Boolean = hackIsInfix(s)
def getmembers: List[reflect.Symbol] = hackGetmembers(s)
end extension
def isValidPos(using Quotes)(pos: reflect.Position) =
if hackExists(pos) then pos.start != pos.end else false
def isSyntheticField(using Quotes)(c: reflect.Symbol) =
import reflect._
c.flags.is(Flags.CaseAccessor) || (c.flags.is(Flags.Module) && !c.flags.is(Flags.Given))
def constructorWithoutParamLists(using Quotes)(c: reflect.ClassDef): Boolean =
!isValidPos(c.constructor.pos) || {
val end = c.constructor.pos.end
val typesEnd = c.constructor.leadingTypeParams.lastOption.fold(end - 1)(_.pos.end)
val classDefTree = c.constructor.show
c.constructor.leadingTypeParams.nonEmpty && end <= typesEnd + 1
}
// TODO: #49 Remove it after TASTY-Reflect release with published flag Extension
private def hackIsInfix(using Quotes)(rsym: reflect.Symbol): Boolean = {
import reflect._
import dotty.tools.dotc
given ctx: dotc.core.Contexts.Context = quotes.asInstanceOf[scala.quoted.runtime.impl.QuotesImpl].ctx
val sym = rsym.asInstanceOf[dotc.core.Symbols.Symbol]
ctx.definitions.isInfix(sym)
}
  /* We need to filter out symbols with certain flag sets here, because these symbols come from the compiler and TASTY can't handle them well.
  They are valdefs that describe case companion objects and enum cases.
  TASTY crashes when calling _.tree on them.
*/
private def hackGetmembers(using Quotes)(rsym: reflect.Symbol): List[reflect.Symbol] = {
import reflect._
import dotty.tools.dotc
given ctx: dotc.core.Contexts.Context = quotes.asInstanceOf[scala.quoted.runtime.impl.QuotesImpl].ctx
val sym = rsym.asInstanceOf[dotc.core.Symbols.Symbol]
sym.namedType.allMembers.iterator.map(_.symbol)
.collect {
case sym if
(!sym.is(dotc.core.Flags.ModuleVal) || sym.is(dotc.core.Flags.Given)) &&
!sym.flags.isAllOf(dotc.core.Flags.Enum | dotc.core.Flags.Case | dotc.core.Flags.JavaStatic) =>
sym.asInstanceOf[Symbol]
}.toList
}
private def hackGetSupertypes(using Quotes)(rdef: reflect.ClassDef) = {
import reflect._
import dotty.tools.dotc
given dotc.core.Contexts.Context = quotes.asInstanceOf[scala.quoted.runtime.impl.QuotesImpl].ctx
val classdef = rdef.asInstanceOf[dotc.ast.tpd.TypeDef]
val ref = classdef.symbol.info.asInstanceOf[dotc.core.Types.ClassInfo].appliedRef
val baseTypes: List[(dotc.core.Symbols.Symbol, dotc.core.Types.Type)] =
ref.baseClasses.map(b => b -> ref.baseType(b))
baseTypes.asInstanceOf[List[(Symbol, TypeRepr)]]
}
private def hackExists(using Quotes)(rpos: reflect.Position) = {
import reflect._
import dotty.tools.dotc
import dotty.tools.dotc.util.Spans._
given dotc.core.Contexts.Context = quotes.asInstanceOf[scala.quoted.runtime.impl.QuotesImpl].ctx
val pos = rpos.asInstanceOf[dotc.util.SourcePosition]
pos.exists
}
def getSupertypes(using Quotes)(c: reflect.ClassDef) = hackGetSupertypes(c).tail
def typeForClass(using Quotes)(c: reflect.ClassDef): reflect.TypeRepr =
import reflect._
import dotty.tools.dotc
given dotc.core.Contexts.Context = quotes.asInstanceOf[scala.quoted.runtime.impl.QuotesImpl].ctx
val cSym = c.symbol.asInstanceOf[dotc.core.Symbols.Symbol]
cSym.typeRef.appliedTo(cSym.typeParams.map(_.typeRef)).asInstanceOf[TypeRepr]
def memberInfo(using Quotes)(c: reflect.ClassDef, symbol: reflect.Symbol): reflect.TypeRepr =
import reflect._
import dotty.tools.dotc
given dotc.core.Contexts.Context = quotes.asInstanceOf[scala.quoted.runtime.impl.QuotesImpl].ctx
typeForClass(c).asInstanceOf[dotc.core.Types.Type]
.memberInfo(symbol.asInstanceOf[dotc.core.Symbols.Symbol])
.asInstanceOf[TypeRepr]
|
dotty-staging/dotty
|
scaladoc/src/dotty/tools/scaladoc/tasty/SyntheticSupport.scala
|
Scala
|
apache-2.0
| 4,851
|
/**
* sbt-osgi-manager - OSGi development bridge based on Bnd and Tycho.
*
* Copyright (c) 2016 Alexey Aksenov ezh@ezh.msk.ru
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sbt.osgi.manager.tycho
import java.io.File
import java.net.URI
import java.util.ArrayList
import org.apache.maven.model.{ Dependency ⇒ MavenDependency }
import org.eclipse.equinox.p2.metadata.IInstallableUnit
import org.eclipse.tycho.core.shared.TargetEnvironment
import org.eclipse.tycho.osgi.adapters.MavenLoggerAdapter
import org.eclipse.tycho.p2.resolver.facade.P2ResolutionResult
import org.eclipse.tycho.p2.target.facade.TargetPlatformConfigurationStub
import org.scalatest.{ FreeSpec, Matchers }
import org.slf4j.LoggerFactory
import sbt.{ AttributeEntry, AttributeMap, BasicCommands, Build, BuildStreams, BuildStructure, BuildUnit, BuildUtil, ConsoleOut, Def, DetectedAutoPlugin, DetectedModules, DetectedPlugins, File, GlobalLogging, KeyIndex, Keys, Load, LoadedDefinitions, LoadedPlugins, MainLogging, PartBuildUnit, Plugin, PluginData, Project, ProjectRef, Scope, SessionSettings, Settings, State, StructureIndex, This }
import sbt.osgi.manager.{ Dependency, Environment, OSGi, OSGiConf, OSGiKey, Plugin, Test }
import sbt.toGroupID
import scala.collection.{ breakOut, immutable }
import scala.collection.JavaConversions.{ asScalaBuffer, asScalaSet, collectionAsScalaIterable }
import scala.language.implicitConversions
class MavenTest extends FreeSpec with Matchers {
val folder = Test.createTempFolder() getOrElse { throw new IllegalStateException("Unable to create temporary directory") }
val mavenFolder = new File(folder, "maven")
val settings = Project.inConfig(OSGiConf)(Seq(
OSGiKey.osgiMavenDirectory := mavenFolder))
"test" in {
Test.withImplementation(ResolveP2, new TestResolveP2) {
info("Maven environment located at folder: " + mavenFolder)
Test.removeAll(mavenFolder)
implicit val arg = Plugin.TaskArgument(FakeState.state, ProjectRef(FakeState.testProject.base.toURI(), FakeState.testProject.id))
val mavenHome = Maven.prepareHome()
info("Maven home is " + mavenHome)
val bridge = Maven()
bridge should not be (null)
val dependencies = Seq(Dependency.convertDependency(OSGi.ECLIPSE_PLUGIN % "org.eclipse.ui" % OSGi.ANY_VERSION.toString()))
val rawRepositories = Seq(("Eclipse P2 update site", new URI("http://eclipse.ialto.com/eclipse/updates/4.2/R-4.2.1-201209141800/")))
val targetPlatformConfiguration = new TargetPlatformConfigurationStub()
val repositories = ResolveP2.addRepositoriesToTargetPlatformConfiguration(targetPlatformConfiguration, rawRepositories, bridge)
val environment = sbt.osgi.manager.Environment.Execution.JavaSE6
val targetPlatform = ResolveP2.createTargetPlatform(targetPlatformConfiguration, environment, Seq.empty, true, bridge)
targetPlatform should not be (null)
repositories should not be (null)
repositories should have size (1)
info("Repositories: " + repositories.mkString(","))
val resolver = bridge.p2ResolverFactory.createResolver(new MavenLoggerAdapter(bridge.plexus.getLogger, true))
resolver should not be (null)
resolver.addDependency(dependencies.head.getType(), dependencies.head.getArtifactId(), dependencies.head.getVersion())
val resolutionResults: Seq[P2ResolutionResult] = Environment.all.flatMap {
case (tOS, tWS, tARCH) ⇒
try {
val environmentList = new ArrayList[TargetEnvironment]()
environmentList.add(new TargetEnvironment(tOS.value, tWS.value, tARCH.value))
resolver.setEnvironments(environmentList)
resolver.resolveDependencies(targetPlatform, null).toSeq
} catch {
case e: RuntimeException ⇒
arg.log.info(e.getMessage)
Seq(ResolveP2.EmptyP2ResolutionResult: P2ResolutionResult)
}
}
resolutionResults should not be ('empty)
val artifacts = resolutionResults.map(_.getArtifacts).flatten.groupBy(_.getId).map(_._2.head)(breakOut).sortBy(_.getId)
// Process results
val rePerDependencyMap = ResolveP2.inner.asInstanceOf[TestResolveP2].collectArtifactsPerDependency(dependencies, artifacts)
artifacts.foreach { entry ⇒
val originModuleIds = rePerDependencyMap.get(entry).map(dependencies ⇒ dependencies.flatMap(Dependency.getOrigin)) getOrElse Seq()
entry.getInstallableUnits().map(_ match {
case riu: IInstallableUnit if originModuleIds.nonEmpty && originModuleIds.exists(_.withSources) ⇒
info("Collect P2 IU %s with source code".format(riu))
case riu: IInstallableUnit if originModuleIds.nonEmpty ⇒
info("Collect P2 IU %s".format(riu))
case riu: IInstallableUnit ⇒
info("Collect an unbound installable unit: " + riu)
case ru ⇒
info("Skip an unknown reactor unit: " + ru)
})
}
artifacts.size should be(63)
}
}
object FakeState {
lazy val settings: Seq[Def.Setting[_]] = MavenTest.this.settings
val base = new File("").getAbsoluteFile
val testProject = Project("test-project", base)
val currentProject = Map(testProject.base.toURI -> testProject.id)
val currentEval: () ⇒ sbt.compiler.Eval = () ⇒ Load.mkEval(Nil, base, Nil)
val sessionSettings = SessionSettings(base.toURI, currentProject, Nil, Map.empty, Nil, currentEval)
val delegates: (Scope) ⇒ Seq[Scope] = scope ⇒ Seq(scope, Scope(This, scope.config, This, This))
val scopeLocal: Def.ScopeLocal = _ ⇒ Nil
val data: Settings[Scope] = Def.make(settings)(delegates, scopeLocal, Def.showFullKey)
val extra: KeyIndex ⇒ BuildUtil[_] = (keyIndex) ⇒ BuildUtil(base.toURI, Map.empty, keyIndex, data)
val structureIndex: StructureIndex = Load.structureIndex(data, settings, extra, Map.empty)
val streams: (State) ⇒ BuildStreams.Streams = null
val loadedDefinitions: LoadedDefinitions = new LoadedDefinitions(
base, Nil, ClassLoader.getSystemClassLoader, Nil, Seq(testProject), Nil)
val pluginData = PluginData(Nil, Nil, None, None, Nil)
val detectedModules: DetectedModules[Plugin] = new DetectedModules(Nil)
val builds: DetectedModules[Build] = new DetectedModules[Build](Nil)
val detectedAutoPlugins: Seq[DetectedAutoPlugin] = Seq.empty
val detectedPlugins = new DetectedPlugins(detectedModules, detectedAutoPlugins, builds)
val loadedPlugins = new LoadedPlugins(base, pluginData, ClassLoader.getSystemClassLoader, detectedPlugins)
val buildUnit = new BuildUnit(base.toURI, base, loadedDefinitions, loadedPlugins)
val (partBuildUnit: PartBuildUnit, _) = Load.loaded(buildUnit)
val loadedBuildUnit = Load.resolveProjects(base.toURI, partBuildUnit, _ ⇒ testProject.id)
val units = Map(base.toURI -> loadedBuildUnit)
val buildStructure = new BuildStructure(units, base.toURI, settings, data, structureIndex, streams, delegates, scopeLocal)
val attributes = AttributeMap.empty ++ AttributeMap(
AttributeEntry(Keys.sessionSettings, sessionSettings),
AttributeEntry(Keys.stateBuildStructure, buildStructure))
val initialGlobalLogging = GlobalLogging.initial(MainLogging.globalDefault(ConsoleOut.systemOut), File.createTempFile("sbt", ".log"), ConsoleOut.systemOut)
val commandDefinitions = BasicCommands.allBasicCommands
val state = State(null, commandDefinitions, Set.empty, None, Seq.empty, State.newHistory,
attributes, initialGlobalLogging, State.Continue)
}
class TestResolveP2 extends ResolveP2 {
override def collectArtifactsPerDependency(dependencies: Seq[MavenDependency],
artifacts: Seq[P2ResolutionResult.Entry])(implicit arg: Plugin.TaskArgument): immutable.HashMap[P2ResolutionResult.Entry, Seq[MavenDependency]] =
super.collectArtifactsPerDependency(dependencies, artifacts)
}
}
|
digimead/sbt-osgi-manager
|
src/test/scala/sbt/osgi/manager/tycho/MavenTest.scala
|
Scala
|
apache-2.0
| 8,460
|
/*
* Copyright 2013 http4s.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.http4s
trait QValuePlatform
|
http4s/http4s
|
core/shared/src/main/scala-3/org/http4s/QValuePlatform.scala
|
Scala
|
apache-2.0
| 636
|
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalactic
import org.scalatest._
import scala.collection.mutable.WrappedArray
class PrettyMethodsSpec extends Spec with Matchers {
object `Trait PrettyMethods` {
object `should by default allow you to call pretty on anything and get default Prettifier output,` {
import PrettyMethods._
def `putting double quotes around strings` {
"hi".pretty should be ("\\"hi\\"")
}
def `putting single quotes around chars` {
'h'.pretty should be ("'h'")
}
def `putting print arrays` {
Array(1, 2, 3).pretty should be ("Array(1, 2, 3)")
}
def `putting print wrapped arrays` {
WrappedArray.make(Array(1, 2, 3)).pretty should be ("Array(1, 2, 3)")
}
def `putting the Unit value` {
().pretty should be ("<(), the Unit value>")
}
def `putting call toString on anything not specially treated` {
List("1", "2", "3").pretty should be ("List(\\"1\\", \\"2\\", \\"3\\")")
}
}
/* This proved that I got rid of the Any => String conversion, but by not compiling.
def `should not simply convert Any to String` {
new ConversionCheckedTripleEquals {
import PrettyMethods._
"2" should === (2)
}
}
*/
}
}
|
travisbrown/scalatest
|
src/test/scala/org/scalactic/PrettyMethodsSpec.scala
|
Scala
|
apache-2.0
| 1,858
|
/* __ *\\
** ________ ___ / / ___ __ ____ Scala.js Test Suite **
** / __/ __// _ | / / / _ | __ / // __/ (c) 2013-2015, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ |/_// /_\\ \\ http://scala-js.org/ **
** /____/\\___/_/ |_/____/_/ | |__/ /____/ **
** |/____/ **
\\* */
package org.scalajs.testsuite.javalib.util
import java.{util => ju}
import org.junit.Test
import org.junit.Assert._
import scala.collection.JavaConversions._
import scala.reflect.ClassTag
class LinkedHashSetTest extends HashSetTest {
override def factory: LinkedHashSetFactory = new LinkedHashSetFactory
@Test def should_iterate_over_elements_in_an_ordered_manner(): Unit = {
val hs = factory.empty[String]
val l1 = List[String]("ONE", "TWO", (null: String))
assertTrue(hs.addAll(asJavaCollection(l1)))
assertEquals(3, hs.size)
val iter1 = hs.iterator()
val result1 = {
for (i <- 0 until 3) yield {
assertTrue(iter1.hasNext())
val value = iter1.next()
assertEquals(l1(i), value)
value
}
}
assertFalse(iter1.hasNext())
assertEquals(l1, result1)
val l2 = l1 :+ "THREE"
assertTrue(hs.add(l2(3)))
val iter2 = hs.iterator()
val result2 = {
for (i <- 0 until 4) yield {
assertTrue(iter2.hasNext())
val value = iter2.next()
assertEquals(l2(i), value)
value
}
}
assertFalse(iter2.hasNext())
assertTrue(result2.equals(l2))
}
}
object LinkedHashSetFactory extends HashSetFactory {
def allFactories: Iterator[LinkedHashSetFactory] =
Iterator(new LinkedHashSetFactory)
}
class LinkedHashSetFactory extends HashSetFactory {
override def implementationName: String =
"java.util.LinkedHashSet"
override def empty[E: ClassTag]: ju.LinkedHashSet[E] =
new ju.LinkedHashSet[E]()
}
|
mdedetrich/scala-js
|
test-suite/shared/src/test/scala/org/scalajs/testsuite/javalib/util/LinkedHashSetTest.scala
|
Scala
|
bsd-3-clause
| 2,077
|
package actors.persistent.arrivals
import akka.actor.Props
import akka.persistence.{Recovery, SnapshotSelectionCriteria}
import drt.shared.{FeedSource, SDateLike}
import org.slf4j.{Logger, LoggerFactory}
import server.protobuf.messages.FlightsMessage.FlightsDiffMessage
object ArrivalsReadActor {
def props(pointInTime: SDateLike, persistenceId: String, feedSource: FeedSource): Props = Props(
new ArrivalsReadActor(pointInTime, persistenceId, feedSource)
)
}
class ArrivalsReadActor(pointInTime: SDateLike, persistenceIdString: String, feedSource: FeedSource) extends ArrivalsActor(() => pointInTime, Int.MaxValue, feedSource) {
override def persistenceId: String = persistenceIdString
def now: () => SDateLike = () => pointInTime
override val snapshotBytesThreshold: Int = 0
val log: Logger = LoggerFactory.getLogger(getClass)
def consumeDiffsMessage(diffsMessage: FlightsDiffMessage): Unit = consumeUpdates(diffsMessage)
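  // During recovery only events persisted at or before the requested point in time are
  // consumed, so the recovered state reflects the feed as it was at that moment.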
override def processRecoveryMessage: PartialFunction[Any, Unit] = {
case diff@FlightsDiffMessage(Some(createdMillis), _, _, _) =>
if (createdMillis <= pointInTime.millisSinceEpoch) consumeDiffsMessage(diff)
case _ =>
}
override def recovery: Recovery = {
val criteria = SnapshotSelectionCriteria(maxTimestamp = pointInTime.millisSinceEpoch)
Recovery(fromSnapshot = criteria, replayMax = 10000)
}
}
|
UKHomeOffice/drt-scalajs-spa-exploration
|
server/src/main/scala/actors/persistent/arrivals/ArrivalsReadActor.scala
|
Scala
|
apache-2.0
| 1,380
|
package ionroller
import org.joda.time.DateTime
final case class EnvironmentUnusedDetails(at: DateTime)
|
yonglehou/ionroller
|
core/src/main/scala/ionroller/EnvironmentUnusedDetails.scala
|
Scala
|
mit
| 107
|
package uk.gov.gds.ier.transaction.forces.rank
import uk.gov.gds.ier.validation.{ErrorTransformForm, ErrorMessages, FormKeys}
import play.api.data.Forms._
import uk.gov.gds.ier.model._
import scala.Some
import play.api.data.validation.{Invalid, Valid, Constraint}
import uk.gov.gds.ier.transaction.forces.InprogressForces
trait RankForms extends RankConstraints {
self: FormKeys
with ErrorMessages =>
lazy val rankMapping = mapping(
keys.serviceNumber.key -> optional(nonEmptyText),
keys.rank.key -> optional(nonEmptyText)
) (
(serviceNumber, rank) => Rank(serviceNumber, rank)
) (
rank => Some(rank.serviceNumber, rank.rank)
) verifying serviceNumberAndRankRequired
val rankForm = ErrorTransformForm(
mapping(
keys.rank.key -> optional(rankMapping)
) (
rank => InprogressForces(rank = rank)
) (
inprogressApplication => Some(inprogressApplication.rank)
) verifying rankObjectRequired
)
}
trait RankConstraints {
self: ErrorMessages
with FormKeys =>
lazy val rankObjectRequired = Constraint[InprogressForces](keys.rank.key) {
application => application.rank match {
case Some(rank) => Valid
case None => Invalid(
"Please answer this question",
keys.rank.rank,
keys.rank.serviceNumber
)
}
}
lazy val serviceNumberAndRankRequired = Constraint[Rank](keys.rank.key) {
rank => rank match {
case Rank(Some(serviceNumber), None) =>
Invalid("Please answer this question",keys.rank.rank)
case Rank(None, Some(rank)) =>
Invalid("Please answer this question",keys.rank.serviceNumber)
case _ => Valid
}
}
}
|
alphagov/ier-frontend
|
app/uk/gov/gds/ier/transaction/forces/rank/RankForms.scala
|
Scala
|
mit
| 1,677
|
/**
* Copyright 2016, deepsense.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.deepsense.deeplang.doperables.spark.wrappers.models
import org.apache.spark.ml.clustering.{DistributedLDAModel, LocalLDAModel, LDA => SparkLDA, LDAModel => SparkLDAModel}
import io.deepsense.deeplang.ExecutionContext
import io.deepsense.deeplang.doperables.SparkModelWrapper
import io.deepsense.deeplang.doperables.report.CommonTablesGenerators.SparkSummaryEntry
import io.deepsense.deeplang.doperables.report.{CommonTablesGenerators, Report}
import io.deepsense.deeplang.doperables.serialization.SerializableSparkModel
import io.deepsense.deeplang.doperables.spark.wrappers.params.common.{HasFeaturesColumnParam, HasSeedParam}
class LDAModel extends SparkModelWrapper[SparkLDAModel, SparkLDA]
with HasFeaturesColumnParam
with HasSeedParam {
val params: Array[io.deepsense.deeplang.params.Param[_]] = Array(
featuresColumn,
seed)
override def report: Report = {
val vocabularySize =
SparkSummaryEntry(
name = "vocabulary size",
value = sparkModel.vocabSize,
description = "The number of terms in the vocabulary.")
val estimatedDocConcentration =
SparkSummaryEntry(
name = "estimated doc concentration",
value = sparkModel.estimatedDocConcentration,
description = "Value for `doc concentration` estimated from data.")
super.report
.withAdditionalTable(CommonTablesGenerators.modelSummary(
List(
vocabularySize,
estimatedDocConcentration)))
}
override protected def loadModel(
ctx: ExecutionContext,
path: String): SerializableSparkModel[SparkLDAModel] = {
try {
new SerializableSparkModel(LocalLDAModel.load(path))
} catch {
case e: IllegalArgumentException =>
logger.warn(s"LocalLDAModel.load($path) failed. Trying to load DistributedLDAModel.", e)
new SerializableSparkModel(DistributedLDAModel.load(path))
}
}
}
|
deepsense-io/seahorse-workflow-executor
|
deeplang/src/main/scala/io/deepsense/deeplang/doperables/spark/wrappers/models/LDAModel.scala
|
Scala
|
apache-2.0
| 2,512
|
/**
* Copyright (c) 2012 Alexey Aksenov ezh@ezh.msk.ru
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.digimead.digi.lib.ctrl.dialog.filechooser
import scala.ref.WeakReference
import org.digimead.digi.lib.ctrl.ext.XResource
import org.digimead.digi.lib.ctrl.dialog.FileChooser
import android.view.View
import android.widget.Button
trait FCCancel {
this: FileChooser =>
private lazy val cancel = new WeakReference(extContent.map(l => l.findViewById(XResource.getId(l.getContext,
"filechooser_cancel")).asInstanceOf[Button]).getOrElse(null))
def initializeCancel() = cancel.get.foreach {
cancel =>
log.debug("FCCancel::initializeCancel")
cancel.setVisibility(View.GONE)
}
}
|
ezh/digi-lib-ctrl
|
src/main/scala/org/digimead/digi/lib/ctrl/dialog/filechooser/FCCancel.scala
|
Scala
|
apache-2.0
| 1,232
|
trait F1[/* - */T, /* + */ R]
object Test {
import scala.annotation.unchecked._
private[this] type VariantF1[-T, +R] = F1[T @uncheckedVariance, R @uncheckedVariance]
trait C[+T] { def foo: VariantF1[Any, T] }
}
|
felixmulder/scala
|
test/pending/pos/t8079c.scala
|
Scala
|
bsd-3-clause
| 218
|
package Week3
/**
* Created by gustavo on 26/04/16.
*/
abstract class IntSet {
def incl(x: Int): IntSet
def contains(x: Int): Boolean
def union(other: IntSet): IntSet
}
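// A minimal sketch of possible concrete implementations for the abstract class above.
// Not part of the original exercise file; the names Empty and NonEmpty are illustrative choices.
object Empty extends IntSet {
  def incl(x: Int): IntSet = new NonEmpty(x, Empty, Empty)
  def contains(x: Int): Boolean = false
  def union(other: IntSet): IntSet = other
}
class NonEmpty(elem: Int, left: IntSet, right: IntSet) extends IntSet {
  def incl(x: Int): IntSet =
    if (x < elem) new NonEmpty(elem, left.incl(x), right)
    else if (x > elem) new NonEmpty(elem, left, right.incl(x))
    else this
  def contains(x: Int): Boolean =
    if (x < elem) left.contains(x)
    else if (x > elem) right.contains(x)
    else true
  // Fold both subtrees into `other`, then re-add this element.
  def union(other: IntSet): IntSet =
    left.union(right.union(other)).incl(elem)
}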
|
guhemama/moocs
|
Functional.Programming.in.Scala.Coursera/Week3/src/Week3/IntSet.scala
|
Scala
|
bsd-3-clause
| 180
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.physical
import org.apache.spark.sql.catalyst.plans.physical.SinglePartition
import org.apache.spark.sql.execution.{ColumnarBatchScan, LeafExecNode, WholeStageCodegenExec}
import org.apache.spark.sql.execution.streaming.continuous._
import org.apache.spark.sql.sources.v2.DataSourceV2
import org.apache.spark.sql.sources.v2.reader._
import org.apache.spark.sql.sources.v2.reader.streaming.{ContinuousPartitionReaderFactory, ContinuousReadSupport, MicroBatchReadSupport}
/**
* Physical plan node for scanning data from a data source.
*/
// TODO: micro-batch should be handled by `DataSourceV2ScanExec`, after we finish the API refactor
// completely.
case class DataSourceV2StreamingScanExec(
output: Seq[AttributeReference],
@transient source: DataSourceV2,
@transient options: Map[String, String],
@transient pushedFilters: Seq[Expression],
@transient readSupport: ReadSupport,
@transient scanConfig: ScanConfig)
extends LeafExecNode with DataSourceV2StringFormat with ColumnarBatchScan {
override def simpleString: String = "ScanV2 " + metadataString
// TODO: unify the equal/hashCode implementation for all data source v2 query plans.
override def equals(other: Any): Boolean = other match {
case other: DataSourceV2StreamingScanExec =>
output == other.output && readSupport.getClass == other.readSupport.getClass &&
options == other.options
case _ => false
}
override def hashCode(): Int = {
Seq(output, source, options).hashCode()
}
override def outputPartitioning: physical.Partitioning = readSupport match {
case _ if partitions.length == 1 =>
SinglePartition
case s: OldSupportsReportPartitioning =>
new DataSourcePartitioning(
s.outputPartitioning(scanConfig), AttributeMap(output.map(a => a -> a.name)))
case _ => super.outputPartitioning
}
private lazy val partitions: Seq[InputPartition] = readSupport.planInputPartitions(scanConfig)
private lazy val readerFactory = readSupport match {
case r: MicroBatchReadSupport => r.createReaderFactory(scanConfig)
case r: ContinuousReadSupport => r.createContinuousReaderFactory(scanConfig)
case _ => throw new IllegalStateException("unknown read support: " + readSupport)
}
override val supportsBatch: Boolean = {
require(partitions.forall(readerFactory.supportColumnarReads) ||
!partitions.exists(readerFactory.supportColumnarReads),
"Cannot mix row-based and columnar input partitions.")
partitions.exists(readerFactory.supportColumnarReads)
}
private lazy val inputRDD: RDD[InternalRow] = readSupport match {
case _: ContinuousReadSupport =>
assert(!supportsBatch,
"continuous stream reader does not support columnar read yet.")
EpochCoordinatorRef.get(
sparkContext.getLocalProperty(ContinuousExecution.EPOCH_COORDINATOR_ID_KEY),
sparkContext.env)
.askSync[Unit](SetReaderPartitions(partitions.size))
new ContinuousDataSourceRDD(
sparkContext,
sqlContext.conf.continuousStreamingExecutorQueueSize,
sqlContext.conf.continuousStreamingExecutorPollIntervalMs,
partitions,
schema,
readerFactory.asInstanceOf[ContinuousPartitionReaderFactory])
case _ =>
new DataSourceRDD(
sparkContext, partitions, readerFactory.asInstanceOf[PartitionReaderFactory], supportsBatch)
}
override def inputRDDs(): Seq[RDD[InternalRow]] = Seq(inputRDD)
override protected def doExecute(): RDD[InternalRow] = {
if (supportsBatch) {
WholeStageCodegenExec(this)(codegenStageId = 0).execute()
} else {
val numOutputRows = longMetric("numOutputRows")
inputRDD.map { r =>
numOutputRows += 1
r
}
}
}
}
|
mdespriee/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DataSourceV2StreamingScanExec.scala
|
Scala
|
apache-2.0
| 4,803
|
package com.countrygamer.arcanacraft.common.item.pendant
import baubles.api.{BaubleType, IBauble}
import com.countrygamer.arcanacraft.common.item.ItemArcanaCraft
import net.minecraft.entity.EntityLivingBase
import net.minecraft.item.ItemStack
/**
*
*
* @author CountryGamer
*/
class ItemPendant(name: String) extends ItemArcanaCraft(name) with IBauble {
// Default Constructor
{
}
// End Constructor
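	// The pendant occupies the amulet bauble slot; it can always be equipped and
	// unequipped, and the tick/equip/unequip hooks are intentionally left as no-ops.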
override def getBaubleType(itemstack: ItemStack): BaubleType = {
BaubleType.AMULET
}
override def onWornTick(itemstack: ItemStack, player: EntityLivingBase): Unit = {
}
override def canUnequip(itemstack: ItemStack, player: EntityLivingBase): Boolean = {
true
}
override def onUnequipped(itemstack: ItemStack, player: EntityLivingBase): Unit = {
}
override def onEquipped(itemstack: ItemStack, player: EntityLivingBase): Unit = {
}
override def canEquip(itemstack: ItemStack, player: EntityLivingBase): Boolean = {
true
}
}
|
TheTemportalist/ArcanaCraft
|
src/main/scala/com/countrygamer/arcanacraft/common/item/pendant/ItemPendant.scala
|
Scala
|
apache-2.0
| 960
|
package common.ConfHelper
import common.FileHelper.FileHelper
import scala.collection.mutable.ArrayBuffer
import scala.io.Source
/**
* Created by horatio on 10/28/15.
*/
object ConfigHelper {
var conf: Option[DynConfig] = None
def getConf(): DynConfig = {
conf match {
case None =>
val conf_ = DynConfigFactory.load("./DynConfig/dynamic.conf")
conf = Some(conf_)
conf_
case Some(conf) => conf
}
}
def getMap(path: String, separator: String): Map[String, String] = {
import scala.collection.mutable.{Map => muMap}
val conf = muMap[String, String]()
try {
if (!FileHelper.fileIsExist(path)) println(s"${path} is not found")
else {
val source = Source.fromFile(path)
val lines = source.getLines()
for (line <- lines) {
val l = line.trim
if (l != "" && l.length > 1 && l.charAt(0) != '#') {
val fields = l.split(separator)
if (!conf.contains(fields(0).trim)) conf += (fields(0).trim -> fields(1).trim)
}
}
source.close()
}
} catch {
case ex: Exception =>
println(s"ConfigHelper: getMap: ${ex.getMessage()}")
}
conf.toMap
}
}
object DynConfigFactory {
def load(configFile: String): DynConfig = {
if (!FileHelper.fileIsExist(configFile))
      new DynConfig(new ArrayBuffer[String]()) /* return an empty config rather than null */
else {
val lines = FileHelper.getFileLinesTrim(configFile)
new DynConfig(lines)
}
}
}
class DynConfig(kvs: ArrayBuffer[String]) {
import scala.collection.mutable.{Map => muMap}
val conf = muMap[String, String]()
  initConfig(kvs)
def getString(key: String): String = {
if (conf.contains(key)) conf(key).toString
else Unit.toString
}
private def rmQuotes(str: String): String = {
if (str.length > 0) {
if (str.charAt(0) == '\\"' && str.charAt(str.length - 1) == '\\"')
str.substring(1, str.length - 1)
else str
    } else str
}
  private def initConfig(kvs: ArrayBuffer[String]) {
    kvs.foreach { kv =>
      if (kv.trim.length > 0 && kv.trim.charAt(0) != '#') { /* skip blank lines and comment lines */
val fields = kv.trim().split("=", 2)
if (!conf.contains(fields(0).trim())) {
conf += (fields(0).trim -> rmQuotes(fields(1).trim()))
}
}
}
}
}
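// Usage sketch (not part of the original file): the configuration path and the keys
// below are illustrative assumptions, not values taken from the project.
object ConfigHelperUsage {
  def main(args: Array[String]): Unit = {
    // Plain key/value files are read into an immutable Map, split on the given separator.
    val props: Map[String, String] = ConfigHelper.getMap("./conf/app.properties", "=")
    println(props.getOrElse("host", "localhost"))
    // The dynamic configuration is loaded once and cached by getConf().
    val dyn: DynConfig = ConfigHelper.getConf()
    println(dyn.getString("cluster.name"))
  }
}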
|
bash-horatio/ESJ
|
app/common/ConfHelper/ConfigHelper.scala
|
Scala
|
apache-2.0
| 2,368
|
/*
* Copyright (c) 2015,
* Ilya Sergey, Christopher Earl, Matthew Might and David Van Horn
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the project "Reachability" nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.ucombinator.scheme.transform
import org.ucombinator.scheme.syntax._
import org.ucombinator.scheme.parsing.RnRSParser
class Preamblifier extends ProgramTransformer {
def apply(prog: Program): Program = {
    // Adds the standard preamble:
// cons, car, cdr, pair?
prog match {
case Program(decs, defs, init) => {
val p = new RnRSParser
var newDecs = decs
var newDefs = defs
var newInit = init
newDecs = TypeDec(SName.from("pair"), StrictStruct(List(SName.from("car"), SName.from("cdr")))) :: newDecs
newDefs = p.parseDef("(define (car p) (struct-get p car pair))") :: newDefs
newDefs = p.parseDef("(define (cdr p) (struct-get p cdr pair))") :: newDefs
newDefs = p.parseDef("(define (cons a b) (make-struct pair a b))") :: newDefs
newDefs = p.parseDef("(define (pair? p) ((type? pair) p))") :: newDefs
Program(newDecs, newDefs, newInit)
}
}
}
}
|
ilyasergey/reachability
|
src/org/ucombinator/scheme/transform/Preamblifier.scala
|
Scala
|
bsd-3-clause
| 2,606
|
package io.finch.request
import com.twitter.finagle.httpx.Request
import com.twitter.io.Buf.ByteArray
import com.twitter.util.{Await, Future, Try}
import org.scalatest.{FlatSpec, Matchers}
import items._
class BodySpec extends FlatSpec with Matchers {
val foo = "foo"
val fooBytes = foo.getBytes("UTF-8")
"A RequiredArrayBody" should "be properly read if it exists" in {
val request: Request = requestWithBody(fooBytes)
val futureResult: Future[Array[Byte]] = binaryBody(request)
Await.result(futureResult) shouldBe fooBytes
}
it should "produce an error if the body is empty" in {
val request: Request = requestWithBody(Array[Byte]())
val futureResult: Future[Array[Byte]] = binaryBody(request)
a [NotPresent] shouldBe thrownBy(Await.result(futureResult))
}
it should "have a corresponding RequestItem" in {
binaryBody.item shouldBe BodyItem
}
"An OptionalArrayBody" should "be properly read if it exists" in {
val request: Request = requestWithBody(fooBytes)
val futureResult: Future[Option[Array[Byte]]] = binaryBodyOption(request)
Await.result(futureResult).get shouldBe fooBytes
}
it should "produce an error if the body is empty" in {
val request: Request = requestWithBody(Array[Byte]())
val futureResult: Future[Option[Array[Byte]]] = binaryBodyOption(request)
Await.result(futureResult) shouldBe None
}
it should "have a corresponding RequestItem" in {
binaryBodyOption.item shouldBe BodyItem
}
"A RequiredStringBody" should "be properly read if it exists" in {
val request: Request = requestWithBody(foo)
val futureResult: Future[String] = body(request)
Await.result(futureResult) shouldBe foo
}
it should "produce an error if the body is empty" in {
val request: Request = requestWithBody("")
val futureResult: Future[String] = body(request)
a [NotPresent] shouldBe thrownBy(Await.result(futureResult))
}
"An OptionalStringBody" should "be properly read if it exists" in {
val request: Request = requestWithBody(foo)
val futureResult: Future[Option[String]] = bodyOption(request)
Await.result(futureResult) shouldBe Some(foo)
}
it should "produce an error if the body is empty" in {
val request: Request = requestWithBody("")
val futureResult: Future[Option[String]] = bodyOption(request)
Await.result(futureResult) shouldBe None
}
"RequiredArrayBody Reader" should "work without parentheses at call site" in {
val reader = for {
body <- binaryBody
} yield body
val request: Request = requestWithBody(fooBytes)
Await.result(reader(request)) shouldBe fooBytes
}
"RequiredBody and OptionalBody" should "work with no request type available" in {
implicit val decodeInt = new DecodeRequest[Int] {
def apply(req: String): Try[Int] = Try(req.toInt)
}
val req = requestWithBody("123")
val ri: RequestReader[Int] = body.as[Int]
val i: Future[Int] = body.as[Int].apply(req)
val oi: RequestReader[Option[Int]] = bodyOption.as[Int]
val o = bodyOption.as[Int].apply(req)
Await.result(ri(req)) shouldBe 123
Await.result(i) shouldBe 123
Await.result(oi(req)) shouldBe Some(123)
Await.result(o) shouldBe Some(123)
}
it should "work with custom request and its implicit view to Request" in {
implicit val decodeDouble = new DecodeRequest[Double] { // custom encoder
def apply(req: String): Try[Double] = Try(req.toDouble)
}
case class CReq(http: Request) // custom request
implicit val cReqEv = (req: CReq) => req.http // implicit view
val req = CReq(requestWithBody("42.0"))
val rd: RequestReader[Double] = body.as[Double]
val d = body.as[Double].apply(req)
val od: RequestReader[Option[Double]] = bodyOption.as[Double]
val o: Future[Option[Double]] = bodyOption.as[Double].apply(req)
Await.result(rd(req)) shouldBe 42.0
Await.result(d) shouldBe 42.0
Await.result(od(req)) shouldBe Some(42.0)
Await.result(o) shouldBe Some(42.0)
}
it should "fail if the decoding of the body fails" in {
implicit val decodeInt = new DecodeRequest[Int] {
def apply(req: String): Try[Int] = Try(req.toInt)
}
val req = requestWithBody("foo")
val ri: RequestReader[Int] = body.as[Int]
val i: Future[Int] = body.as[Int].apply(req)
val oi: RequestReader[Option[Int]] = bodyOption.as[Int]
val o: Future[Option[Int]] = bodyOption.as[Int].apply(req)
a [NotParsed] shouldBe thrownBy(Await.result(ri(req)))
a [NotParsed] shouldBe thrownBy(Await.result(i))
a [NotParsed] shouldBe thrownBy(Await.result(oi(req)))
a [NotParsed] shouldBe thrownBy(Await.result(o))
}
private[this] def requestWithBody(body: String): Request = {
requestWithBody(body.getBytes("UTF-8"))
}
private[this] def requestWithBody(body: Array[Byte]): Request = {
val r = Request()
r.content = ByteArray.Owned(body)
r.contentLength = body.length.toLong
r
}
}
|
peel/finch
|
core/src/test/scala/io/finch/request/BodySpec.scala
|
Scala
|
apache-2.0
| 4,993
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.stream.sql
import org.apache.flink.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.api.internal.TableEnvironmentInternal
import org.apache.flink.table.planner.plan.optimize.RelNodeBlockPlanBuilder
import org.apache.flink.table.planner.runtime.utils.JavaUserDefinedScalarFunctions.NonDeterministicUdf
import org.apache.flink.table.planner.utils.{TableFunc1, TableTestBase}
import org.apache.flink.table.types.logical.{BigIntType, IntType, VarCharType}
import org.junit.Test
class DagOptimizationTest extends TableTestBase {
private val util = streamTestUtil()
util.addTableSource[(Int, Long, String)]("MyTable", 'a, 'b, 'c)
util.addTableSource[(Int, Long, String)]("MyTable1", 'd, 'e, 'f)
val STRING = new VarCharType(VarCharType.MAX_LENGTH)
val LONG = new BigIntType()
val INT = new IntType()
@Test
def testSingleSink1(): Unit = {
val table = util.tableEnv.sqlQuery("SELECT c, COUNT(a) AS cnt FROM MyTable GROUP BY c")
val retractSink = util.createRetractTableSink(Array("c", "cnt"), Array(STRING, LONG))
util.verifyPlanInsert(table, retractSink, "retractSink", ExplainDetail.CHANGELOG_MODE)
}
@Test
def testSingleSink2(): Unit = {
val table1 = util.tableEnv.sqlQuery("SELECT a as a1, b FROM MyTable WHERE a <= 10")
util.tableEnv.registerTable("table1", table1)
val table2 = util.tableEnv.sqlQuery("SELECT a, b, c FROM MyTable WHERE a >= 0")
util.tableEnv.registerTable("table2", table2)
val table3 = util.tableEnv.sqlQuery("SELECT a AS a2, c FROM table2 WHERE b >= 5")
util.tableEnv.registerTable("table3", table3)
val table4 = util.tableEnv.sqlQuery("SELECT a AS a3, c as c1 FROM table2 WHERE b < 5")
util.tableEnv.registerTable("table4", table4)
val table5 = util.tableEnv.sqlQuery("SELECT a1, b, c as c2 FROM table1, table3 WHERE a1 = a2")
util.tableEnv.registerTable("table5", table5)
val table6 = util.tableEnv.sqlQuery("SELECT a1, b, c1 FROM table4, table5 WHERE a1 = a3")
val appendSink = util.createAppendTableSink(Array("a1", "b", "c1"), Array(INT, LONG, STRING))
util.verifyPlanInsert(table6, appendSink, "appendSink", ExplainDetail.CHANGELOG_MODE)
}
@Test
def testSingleSink3(): Unit = {
util.addDataStream[(Int, Long, String, Double, Boolean)]("MyTable2", 'a, 'b, 'c, 'd, 'e)
val table1 = util.tableEnv.sqlQuery("SELECT a AS a1, b as b1 FROM MyTable WHERE a <= 10")
util.tableEnv.registerTable("table1", table1)
val table2 = util.tableEnv.sqlQuery("SELECT a, b1 FROM table1, MyTable2 WHERE a = a1")
util.tableEnv.registerTable("table2", table2)
val table3 = util.tableEnv.sqlQuery("SELECT * FROM table1 UNION ALL SELECT * FROM table2")
val appendSink = util.createAppendTableSink(Array("a1", "b1"), Array(INT, LONG))
util.verifyPlanInsert(table3, appendSink, "appendSink", ExplainDetail.CHANGELOG_MODE)
}
@Test
def testSingleSink4(): Unit = {
val table1 = util.tableEnv.sqlQuery("SELECT a as a1, b FROM MyTable WHERE a <= 10")
util.tableEnv.registerTable("table1", table1)
val table2 = util.tableEnv.sqlQuery("SELECT a, b, c FROM MyTable WHERE a >= 0")
util.tableEnv.registerTable("table2", table2)
val table3 = util.tableEnv.sqlQuery("SELECT a AS a2, c FROM table2 WHERE b >= 5")
util.tableEnv.registerTable("table3", table3)
val table4 = util.tableEnv.sqlQuery("SELECT a AS a3, c AS c1 FROM table2 WHERE b < 5")
util.tableEnv.registerTable("table4", table4)
val table5 = util.tableEnv.sqlQuery("SELECT a1, b, c AS c2 from table1, table3 WHERE a1 = a2")
util.tableEnv.registerTable("table5", table5)
val table6 = util.tableEnv.sqlQuery("SELECT a3, b as b1, c1 FROM table4, table5 WHERE a1 = a3")
util.tableEnv.registerTable("table6", table6)
val table7 = util.tableEnv.sqlQuery("SELECT a1, b1, c1 FROM table1, table6 WHERE a1 = a3")
val appendSink = util.createAppendTableSink(Array("a", "b", "c"), Array(INT, LONG, STRING))
util.verifyPlanInsert(table7, appendSink, "appendSink", ExplainDetail.CHANGELOG_MODE)
}
@Test
def testSingleSinkWithUDTF(): Unit = {
util.addTableSource[(Int, Long, Int, String, Long)]("MyTable2", 'i, 'j, 'k, 'l, 'm)
util.addFunction("split", new TableFunc1)
val sqlQuery =
"""
|select * from
| (SELECT * FROM MyTable, MyTable1, MyTable2 WHERE b = e AND a = i) t,
| LATERAL TABLE(split(c)) as T(s)
""".stripMargin
val table = util.tableEnv.sqlQuery(sqlQuery)
val appendSink = util.createAppendTableSink(
Array("a", "b", "c", "d", "e", "f", "i", "j", "k", "l", "m", "s"),
Array(INT, LONG, STRING, INT, LONG, STRING, INT, LONG, INT, STRING, LONG, STRING))
util.verifyPlanInsert(table, appendSink, "appendSink", ExplainDetail.CHANGELOG_MODE)
}
@Test
def testSingleSinkSplitOnUnion(): Unit = {
util.tableEnv.getConfig.getConfiguration.setBoolean(
RelNodeBlockPlanBuilder.TABLE_OPTIMIZER_UNIONALL_AS_BREAKPOINT_ENABLED, false)
val sqlQuery = "SELECT SUM(a) AS total_sum FROM " +
"(SELECT a, c FROM MyTable UNION ALL SELECT d, f FROM MyTable1)"
val table = util.tableEnv.sqlQuery(sqlQuery)
val retractSink = util.createRetractTableSink(Array("total_sum"), Array(INT))
util.verifyPlanInsert(table, retractSink, "retractSink", ExplainDetail.CHANGELOG_MODE)
}
@Test
def testMultiSinks1(): Unit = {
val stmtSet = util.tableEnv.createStatementSet()
util.tableEnv.getConfig.getConfiguration.setBoolean(
RelNodeBlockPlanBuilder.TABLE_OPTIMIZER_REUSE_OPTIMIZE_BLOCK_WITH_DIGEST_ENABLED, true)
val table1 = util.tableEnv.sqlQuery("SELECT SUM(a) AS sum_a, c FROM MyTable GROUP BY c")
util.tableEnv.registerTable("table1", table1)
val table2 = util.tableEnv.sqlQuery("SELECT SUM(sum_a) AS total_sum FROM table1")
val table3 = util.tableEnv.sqlQuery("SELECT MIN(sum_a) AS total_min FROM table1")
val retractSink1 = util.createRetractTableSink(Array("total_sum"), Array(INT))
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"retractSink1", retractSink1)
stmtSet.addInsert("retractSink1", table2)
val retractSink2 = util.createRetractTableSink(Array("total_min"), Array(INT))
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"retractSink2", retractSink2)
stmtSet.addInsert("retractSink2", table3)
util.verifyPlan(stmtSet, ExplainDetail.CHANGELOG_MODE)
}
@Test
def testMultiSinks2(): Unit = {
val stmtSet = util.tableEnv.createStatementSet()
util.tableEnv.getConfig.getConfiguration.setBoolean(
RelNodeBlockPlanBuilder.TABLE_OPTIMIZER_UNIONALL_AS_BREAKPOINT_ENABLED, true)
util.addTableSource[(Int, Long, String, Double, Boolean)]("MyTable2", 'a, 'b, 'c, 'd, 'e)
val table1 = util.tableEnv.sqlQuery("SELECT a as a1, b as b1 FROM MyTable WHERE a <= 10")
util.tableEnv.registerTable("table1", table1)
val table2 = util.tableEnv.sqlQuery("SELECT a, b1 from table1, MyTable2 where a = a1")
util.tableEnv.registerTable("table2", table2)
val table3 = util.tableEnv.sqlQuery("SELECT * FROM table1 UNION ALL SELECT * FROM table2")
val appendSink1 = util.createAppendTableSink(Array("a", "b1"), Array(INT, LONG))
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"appendSink1", appendSink1)
stmtSet.addInsert("appendSink1", table3)
val appendSink2 = util.createAppendTableSink(Array("a", "b1"), Array(INT, LONG))
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"appendSink2", appendSink2)
stmtSet.addInsert("appendSink2", table3)
util.verifyPlan(stmtSet, ExplainDetail.CHANGELOG_MODE)
}
@Test
def testMultiSinks3(): Unit = {
val stmtSet = util.tableEnv.createStatementSet()
util.tableEnv.getConfig.getConfiguration.setBoolean(
RelNodeBlockPlanBuilder.TABLE_OPTIMIZER_UNIONALL_AS_BREAKPOINT_ENABLED, true)
util.addTableSource[(Int, Long, String, Double, Boolean)]("MyTable2", 'a, 'b, 'c, 'd, 'e)
val table1 = util.tableEnv.sqlQuery("SELECT a AS a1, b AS b1 FROM MyTable WHERE a <= 10")
util.tableEnv.registerTable("table1", table1)
val table2 = util.tableEnv.sqlQuery("SELECT a, b1 FROM table1, MyTable2 WHERE a = a1")
util.tableEnv.registerTable("table2", table2)
val table3 = util.tableEnv.sqlQuery("SELECT * FROM table1 UNION ALL SELECT * FROM table2")
val appendSink1 = util.createAppendTableSink(Array("a", "b1"), Array(INT, LONG))
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"appendSink1", appendSink1)
stmtSet.addInsert("appendSink1", table2)
val appendSink2 = util.createAppendTableSink(Array("a", "b1"), Array(INT, LONG))
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"appendSink2", appendSink2)
stmtSet.addInsert("appendSink2", table3)
util.verifyPlan(stmtSet, ExplainDetail.CHANGELOG_MODE)
}
@Test
def testMultiSinks4(): Unit = {
val stmtSet = util.tableEnv.createStatementSet()
val table1 = util.tableEnv.sqlQuery("SELECT a as a1, b FROM MyTable WHERE a <= 10")
util.tableEnv.registerTable("table1", table1)
val table2 = util.tableEnv.sqlQuery("SELECT a, b, c FROM MyTable WHERE a >= 0")
util.tableEnv.registerTable("table2", table2)
val table3 = util.tableEnv.sqlQuery("SELECT a as a2, c FROM table2 WHERE b >= 5")
util.tableEnv.registerTable("table3", table3)
val table4 = util.tableEnv.sqlQuery("SELECT a as a3, c as c1 FROM table2 WHERE b < 5")
util.tableEnv.registerTable("table4", table4)
val table5 = util.tableEnv.sqlQuery("SELECT a1, b, c as c2 FROM table1, table3 WHERE a1 = a2")
util.tableEnv.registerTable("table5", table5)
val table6 = util.tableEnv.sqlQuery("SELECT a1, b, c1 FROM table4, table5 WHERE a1 = a3")
val appendSink1 = util.createAppendTableSink(Array("a1", "b", "c2"), Array(INT, LONG, STRING))
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"appendSink1", appendSink1)
stmtSet.addInsert("appendSink1", table5)
val appendSink2 = util.createAppendTableSink(Array("a1", "b", "c1"), Array(INT, LONG, STRING))
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"appendSink2", appendSink2)
stmtSet.addInsert("appendSink2", table6)
util.verifyPlan(stmtSet, ExplainDetail.CHANGELOG_MODE)
}
@Test
def testMultiSinks5(): Unit = {
val stmtSet = util.tableEnv.createStatementSet()
util.tableEnv.getConfig.getConfiguration.setBoolean(
RelNodeBlockPlanBuilder.TABLE_OPTIMIZER_REUSE_OPTIMIZE_BLOCK_WITH_DIGEST_ENABLED, true)
// test with non-deterministic udf
util.tableEnv.registerFunction("random_udf", new NonDeterministicUdf())
val table1 = util.tableEnv.sqlQuery("SELECT random_udf(a) AS a, c FROM MyTable")
util.tableEnv.registerTable("table1", table1)
val table2 = util.tableEnv.sqlQuery("SELECT SUM(a) AS total_sum FROM table1")
val table3 = util.tableEnv.sqlQuery("SELECT MIN(a) AS total_min FROM table1")
val retractSink1 = util.createRetractTableSink(Array("total_sum"), Array(INT))
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"retractSink1", retractSink1)
stmtSet.addInsert("retractSink1", table2)
val retractSink2 = util.createRetractTableSink(Array("total_min"), Array(INT))
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"retractSink2", retractSink2)
stmtSet.addInsert("retractSink2", table3)
util.verifyPlan(stmtSet, ExplainDetail.CHANGELOG_MODE)
}
@Test
def testMultiSinksWithUDTF(): Unit = {
val stmtSet = util.tableEnv.createStatementSet()
util.tableEnv.getConfig.getConfiguration.setBoolean(
RelNodeBlockPlanBuilder.TABLE_OPTIMIZER_UNIONALL_AS_BREAKPOINT_ENABLED, false)
util.addFunction("split", new TableFunc1)
val sqlQuery1 =
"""
|SELECT a, b - MOD(b, 300) AS b, c FROM MyTable
|WHERE b >= UNIX_TIMESTAMP('${startTime}')
""".stripMargin
val table1 = util.tableEnv.sqlQuery(sqlQuery1)
util.tableEnv.registerTable("table1", table1)
val sqlQuery2 =
"SELECT a, b, c1 AS c FROM table1, LATERAL TABLE(split(c)) AS T(c1) WHERE c <> '' "
val table2 = util.tableEnv.sqlQuery(sqlQuery2)
util.tableEnv.registerTable("table2", table2)
val sqlQuery3 = "SELECT a, b, COUNT(DISTINCT c) AS total_c FROM table2 GROUP BY a, b"
val table3 = util.tableEnv.sqlQuery(sqlQuery3)
util.tableEnv.registerTable("table3", table3)
val sqlQuery4 = "SELECT a, total_c FROM table3 UNION ALL SELECT a, 0 AS total_c FROM table1"
val table4 = util.tableEnv.sqlQuery(sqlQuery4)
util.tableEnv.registerTable("table4", table4)
val sqlQuery5 = "SELECT * FROM table4 WHERE a > 50"
val table5 = util.tableEnv.sqlQuery(sqlQuery5)
val retractSink1 = util.createRetractTableSink(Array("a", "total_c"), Array(INT, LONG))
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"retractSink1", retractSink1)
stmtSet.addInsert("retractSink1", table5)
val sqlQuery6 = "SELECT * FROM table4 WHERE a < 50"
val table6 = util.tableEnv.sqlQuery(sqlQuery6)
val retractSink2 = util.createRetractTableSink(Array("a", "total_c"), Array(INT, LONG))
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"retractSink2", retractSink2)
stmtSet.addInsert("retractSink2", table6)
util.verifyPlan(stmtSet, ExplainDetail.CHANGELOG_MODE)
}
@Test
def testMultiSinksSplitOnUnion1(): Unit = {
val stmtSet = util.tableEnv.createStatementSet()
util.tableEnv.getConfig.getConfiguration.setBoolean(
RelNodeBlockPlanBuilder.TABLE_OPTIMIZER_UNIONALL_AS_BREAKPOINT_ENABLED, false)
val table = util.tableEnv.sqlQuery(
"SELECT a, c FROM MyTable UNION ALL SELECT d, f FROM MyTable1")
util.tableEnv.registerTable("TempTable", table)
val table1 = util.tableEnv.sqlQuery("SELECT SUM(a) AS total_sum FROM TempTable")
val upsertSink = util.createUpsertTableSink(Array(), Array("total_sum"), Array(INT))
util.tableEnv.asInstanceOf[TableEnvironmentInternal]
.registerTableSinkInternal("upsertSink", upsertSink)
stmtSet.addInsert("upsertSink", table1)
val table3 = util.tableEnv.sqlQuery("SELECT MIN(a) AS total_min FROM TempTable")
val retractSink = util.createRetractTableSink(Array("total_min"), Array(INT))
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"retractSink", retractSink)
stmtSet.addInsert("retractSink", table3)
util.verifyPlan(stmtSet, ExplainDetail.CHANGELOG_MODE)
}
@Test
def testMultiSinksSplitOnUnion2(): Unit = {
val stmtSet = util.tableEnv.createStatementSet()
util.tableEnv.getConfig.getConfiguration.setBoolean(
RelNodeBlockPlanBuilder.TABLE_OPTIMIZER_REUSE_OPTIMIZE_BLOCK_WITH_DIGEST_ENABLED, true)
util.tableEnv.getConfig.getConfiguration.setBoolean(
RelNodeBlockPlanBuilder.TABLE_OPTIMIZER_UNIONALL_AS_BREAKPOINT_ENABLED, false)
util.addTableSource[(Int, Long, String)]("MyTable2", 'a, 'b, 'c)
val sqlQuery1 =
"""
|SELECT a, c FROM MyTable
|UNION ALL
|SELECT d, f FROM MyTable1
|UNION ALL
|SELECT a, c FROM MyTable2
""".stripMargin
val table = util.tableEnv.sqlQuery(sqlQuery1)
util.tableEnv.registerTable("TempTable", table)
val table1 = util.tableEnv.sqlQuery("SELECT SUM(a) AS total_sum FROM TempTable")
val retractSink1 = util.createRetractTableSink(Array("total_sum"), Array(INT))
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"retractSink1", retractSink1)
stmtSet.addInsert("retractSink1", table1)
val table2 = util.tableEnv.sqlQuery("SELECT MIN(a) AS total_min FROM TempTable")
val retractSink2 = util.createRetractTableSink(Array("total_min"), Array(INT))
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"retractSink2", retractSink2)
stmtSet.addInsert("retractSink2", table2)
val sqlQuery2 = "SELECT a FROM (SELECT a, c FROM MyTable UNION ALL SELECT d, f FROM MyTable1)"
val table3 = util.tableEnv.sqlQuery(sqlQuery2)
val appendSink3 = util.createAppendTableSink(Array("a"), Array(INT))
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"appendSink3", appendSink3)
stmtSet.addInsert("appendSink3", table3)
util.verifyPlan(stmtSet, ExplainDetail.CHANGELOG_MODE)
}
@Test
def testMultiSinksSplitOnUnion3(): Unit = {
val stmtSet = util.tableEnv.createStatementSet()
util.tableEnv.getConfig.getConfiguration.setBoolean(
RelNodeBlockPlanBuilder.TABLE_OPTIMIZER_UNIONALL_AS_BREAKPOINT_ENABLED, false)
util.addTableSource[(Int, Long, String)]("MyTable2", 'a, 'b, 'c)
val sqlQuery1 = "SELECT a, c FROM MyTable UNION ALL SELECT d, f FROM MyTable1"
val table = util.tableEnv.sqlQuery(sqlQuery1)
util.tableEnv.registerTable("TempTable", table)
val appendSink = util.createAppendTableSink(Array("a", "c"), Array(INT, STRING))
util.tableEnv.asInstanceOf[TableEnvironmentInternal]
.registerTableSinkInternal("appendSink", appendSink)
stmtSet.addInsert("appendSink", table)
val sqlQuery2 = "SELECT a, c FROM TempTable UNION ALL SELECT a, c FROM MyTable2"
val table1 = util.tableEnv.sqlQuery(sqlQuery2)
util.tableEnv.registerTable("TempTable1", table1)
val table2 = util.tableEnv.sqlQuery("SELECT SUM(a) AS total_sum FROM TempTable1")
val retractSink = util.createRetractTableSink(Array("total_sum"), Array(INT))
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"retractSink", retractSink)
stmtSet.addInsert("retractSink", table2)
val table3 = util.tableEnv.sqlQuery("SELECT MIN(a) AS total_min FROM TempTable1")
val upsertSink = util.createUpsertTableSink(Array(), Array("total_min"), Array(INT))
util.tableEnv.asInstanceOf[TableEnvironmentInternal]
.registerTableSinkInternal("upsertSink", upsertSink)
stmtSet.addInsert("upsertSink", table3)
util.verifyPlan(stmtSet, ExplainDetail.CHANGELOG_MODE)
}
@Test
def testMultiSinksSplitOnUnion4(): Unit = {
val stmtSet = util.tableEnv.createStatementSet()
util.tableEnv.getConfig.getConfiguration.setBoolean(
RelNodeBlockPlanBuilder.TABLE_OPTIMIZER_UNIONALL_AS_BREAKPOINT_ENABLED, false)
util.addTableSource[(Int, Long, String)]("MyTable2", 'a, 'b, 'c)
val sqlQuery =
"""
|SELECT a, c FROM MyTable
|UNION ALL
|SELECT d, f FROM MyTable1
|UNION ALL
|SELECT a, c FROM MyTable2
""".stripMargin
val table = util.tableEnv.sqlQuery(sqlQuery)
util.tableEnv.registerTable("TempTable", table)
val table1 = util.tableEnv.sqlQuery("SELECT SUM(a) AS total_sum FROM TempTable")
val upsertSink = util.createUpsertTableSink(Array(), Array("total_sum"), Array(INT))
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"upsertSink", upsertSink)
stmtSet.addInsert("upsertSink", table1)
val table2 = util.tableEnv.sqlQuery("SELECT MIN(a) AS total_min FROM TempTable")
val retractSink = util.createRetractTableSink(Array("total_min"), Array(INT))
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"retractSink", retractSink)
stmtSet.addInsert("retractSink", table2)
util.verifyPlan(stmtSet, ExplainDetail.CHANGELOG_MODE)
}
@Test
def testUnionAndAggWithDifferentGroupings(): Unit = {
val sqlQuery =
"""
|SELECT b, c, SUM(a) AS a_sum FROM MyTable GROUP BY b, c
|UNION ALL
|SELECT 1 AS b, c, SUM(a) AS a_sum FROM MyTable GROUP BY c
""".stripMargin
val table = util.tableEnv.sqlQuery(sqlQuery)
val upsertSink = util.createUpsertTableSink(Array(), Array("b", "c", "a_sum"),
Array(LONG, STRING, INT))
util.verifyPlanInsert(table, upsertSink, "upsertSink", ExplainDetail.CHANGELOG_MODE)
}
@Test
def testUpdateAsRetractConsumedAtSinkBlock(): Unit = {
val stmtSet = util.tableEnv.createStatementSet()
val table = util.tableEnv.sqlQuery("SELECT a, b, c FROM MyTable")
util.tableEnv.registerTable("TempTable", table)
val sqlQuery =
s"""
|SELECT * FROM (
| SELECT a, b, c,
| ROW_NUMBER() OVER (PARTITION BY b ORDER BY c DESC) as rank_num
| FROM TempTable)
|WHERE rank_num <= 10
""".stripMargin
val table1 = util.tableEnv.sqlQuery(sqlQuery)
val retractSink = util.createRetractTableSink(
Array("a", "b", "c", "rank_num"), Array(INT, LONG, STRING, LONG))
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"retractSink", retractSink)
stmtSet.addInsert("retractSink", table1)
val upsertSink = util.createUpsertTableSink(Array(), Array("a", "b"), Array(INT, LONG))
val table2 = util.tableEnv.sqlQuery("SELECT a, b FROM TempTable WHERE a < 6")
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"upsertSink", upsertSink)
stmtSet.addInsert("upsertSink", table2)
util.verifyPlan(stmtSet, ExplainDetail.CHANGELOG_MODE)
}
@Test
def testUpdateAsRetractConsumedAtSourceBlock(): Unit = {
val stmtSet = util.tableEnv.createStatementSet()
val sqlQuery =
s"""
|SELECT * FROM (
| SELECT a, b, c,
| ROW_NUMBER() OVER (PARTITION BY b ORDER BY c DESC) as rank_num
| FROM MyTable)
|WHERE rank_num <= 10
""".stripMargin
val table = util.tableEnv.sqlQuery(sqlQuery)
util.tableEnv.registerTable("TempTable", table)
val table1 = util.tableEnv.sqlQuery("SELECT a FROM TempTable WHERE a > 6")
val retractSink = util.createRetractTableSink(Array("a"), Array(INT))
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"retractSink", retractSink)
stmtSet.addInsert("retractSink", table1)
val table2 = util.tableEnv.sqlQuery("SELECT a, b FROM TempTable WHERE a < 6")
val upsertSink = util.createUpsertTableSink(Array(), Array("a", "b"), Array(INT, LONG))
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"upsertSink", upsertSink)
stmtSet.addInsert("upsertSink", table2)
util.verifyPlan(stmtSet, ExplainDetail.CHANGELOG_MODE)
}
@Test
def testMultiLevelViews(): Unit = {
val stmtSet = util.tableEnv.createStatementSet()
util.tableEnv.getConfig.getConfiguration.setBoolean(
RelNodeBlockPlanBuilder.TABLE_OPTIMIZER_UNIONALL_AS_BREAKPOINT_ENABLED, false)
val table1 = util.tableEnv.sqlQuery("SELECT a, b, c FROM MyTable WHERE c LIKE '%hello%'")
util.tableEnv.registerTable("TempTable1", table1)
val appendSink = util.createAppendTableSink(Array("a", "b", "c"), Array(INT, LONG, STRING))
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"appendSink", appendSink)
stmtSet.addInsert("appendSink", table1)
val table2 = util.tableEnv.sqlQuery("SELECT a, b, c FROM MyTable WHERE c LIKE '%world%'")
util.tableEnv.registerTable("TempTable2", table2)
val sqlQuery =
"""
|SELECT b, COUNT(a) AS cnt FROM (
| (SELECT * FROM TempTable1)
| UNION ALL
| (SELECT * FROM TempTable2)
|) t
|GROUP BY b
""".stripMargin
val table3 = util.tableEnv.sqlQuery(sqlQuery)
util.tableEnv.registerTable("TempTable3", table3)
val table4 = util.tableEnv.sqlQuery("SELECT b, cnt FROM TempTable3 WHERE b < 4")
val retractSink = util.createRetractTableSink(Array("b", "cnt"), Array(LONG, LONG))
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"retractSink", retractSink)
stmtSet.addInsert("retractSink", table4)
val table5 = util.tableEnv.sqlQuery("SELECT b, cnt FROM TempTable3 WHERE b >=4 AND b < 6")
val upsertSink = util.createUpsertTableSink(Array(), Array("b", "cnt"), Array(LONG, LONG))
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"upsertSink", upsertSink)
stmtSet.addInsert("upsertSink", table5)
util.verifyPlan(stmtSet, ExplainDetail.CHANGELOG_MODE)
}
@Test
def testSharedUnionNode(): Unit = {
val stmtSet = util.tableEnv.createStatementSet()
util.tableEnv.getConfig.getConfiguration.setBoolean(
RelNodeBlockPlanBuilder.TABLE_OPTIMIZER_UNIONALL_AS_BREAKPOINT_ENABLED, false)
val table1 = util.tableEnv.sqlQuery("SELECT a, b, c FROM MyTable WHERE c LIKE '%hello%'")
util.tableEnv.registerTable("TempTable1", table1)
val appendSink = util.createAppendTableSink(Array("a", "b", "c"), Array(INT, LONG, STRING))
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"appendSink", appendSink)
stmtSet.addInsert("appendSink", table1)
val table2 = util.tableEnv.sqlQuery("SELECT a, b, c FROM MyTable WHERE c LIKE '%world%'")
util.tableEnv.registerTable("TempTable2", table2)
val sqlQuery1 =
"""
|SELECT * FROM TempTable1
|UNION ALL
|SELECT * FROM TempTable2
""".stripMargin
val table3 = util.tableEnv.sqlQuery(sqlQuery1)
util.tableEnv.registerTable("TempTable3", table3)
val table4 = util.tableEnv.sqlQuery("SELECT * FROM TempTable3 WHERE b >= 5")
val retractSink1 = util.createRetractTableSink(Array("a", "b", "c"), Array(INT, LONG, STRING))
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"retractSink1", retractSink1)
stmtSet.addInsert("retractSink1", table4)
val table5 = util.tableEnv.sqlQuery("SELECT b, count(a) as cnt FROM TempTable3 GROUP BY b")
util.tableEnv.registerTable("TempTable4", table5)
val table6 = util.tableEnv.sqlQuery("SELECT b, cnt FROM TempTable4 WHERE b < 4")
val retractSink2 = util.createRetractTableSink(Array("b", "cnt"), Array(LONG, LONG))
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"retractSink2", retractSink2)
stmtSet.addInsert("retractSink2", table6)
util.tableEnv.sqlQuery("SELECT b, cnt FROM TempTable4 WHERE b >=4 AND b < 6")
val upsertSink = util.createUpsertTableSink(Array(), Array("b", "cnt"), Array(LONG, LONG))
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"upsertSink", upsertSink)
stmtSet.addInsert("upsertSink", table6)
util.verifyPlan(stmtSet, ExplainDetail.CHANGELOG_MODE)
}
}
|
greghogan/flink
|
flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/stream/sql/DagOptimizationTest.scala
|
Scala
|
apache-2.0
| 27,661
|
package fi.onesto.sbt.mobilizer
import org.slf4j.helpers.MessageFormatter
import sbt.Level
import sbt.Level._
import org.slf4j.Marker
final class Slf4jSbtLogger(
private[this] val underlying: sbt.AbstractLogger,
private[this] val name: String,
private[this] val minimumLevel: Level.Value)
extends org.slf4j.helpers.MarkerIgnoringBase {
private[this] val serialVersionUID: Long = 1L
private[this] def adjustLevel(level: Level.Value): Level.Value = {
if (level.id >= minimumLevel.id) {
level
} else {
Level.Debug
}
}
private[this] def fmt(str: String, arg: Any): String = {
val result = MessageFormatter.format(str, arg)
Option(result.getThrowable).map{t => s"${result.getMessage}: $t"}.getOrElse(result.getMessage)
}
private[this] def fmt(str: String, arg1: Any, arg2: Any): String = {
val result = MessageFormatter.format(str, arg1, arg2)
Option(result.getThrowable).map{t => s"${result.getMessage}: $t"}.getOrElse(result.getMessage)
}
private[this] def fmt(str: String, args: Seq[AnyRef]): String = {
val result = MessageFormatter.arrayFormat(str, args.toArray)
Option(result.getThrowable).map{t => s"${result.getMessage}: $t"}.getOrElse(result.getMessage)
}
override def isTraceEnabled: Boolean = underlying.atLevel(Debug)
override def isTraceEnabled(marker: Marker): Boolean = underlying.atLevel(Debug)
override def trace(msg: String): Unit = underlying.log(adjustLevel(Debug), s"$name $msg")
override def trace(msg: String, arg: AnyRef): Unit = underlying.log(adjustLevel(Debug), s"$name ${fmt(msg, arg)}")
override def trace(msg: String, arg1: AnyRef, arg2: AnyRef): Unit = underlying.log(adjustLevel(Debug), s"$name ${fmt(msg, arg1, arg2)}")
override def trace(msg: String, arguments: AnyRef*): Unit = underlying.log(adjustLevel(Debug), s"$name ${fmt(msg, arguments)}")
override def trace(msg: String, t: Throwable): Unit = underlying.log(adjustLevel(Debug), s"$name $msg: ${t.toString}")
override def isDebugEnabled: Boolean = underlying.atLevel(Debug)
override def isDebugEnabled(marker: Marker): Boolean = underlying.atLevel(Debug)
override def debug(msg: String): Unit = underlying.log(adjustLevel(Debug), s"$name $msg")
override def debug(msg: String, arg: AnyRef): Unit = underlying.log(adjustLevel(Debug), s"$name ${fmt(msg, arg)}")
override def debug(msg: String, arg1: AnyRef, arg2: AnyRef): Unit = underlying.log(adjustLevel(Debug), s"$name ${fmt(msg, arg1, arg2)}")
override def debug(msg: String, arguments: AnyRef*): Unit = underlying.log(adjustLevel(Debug), s"$name ${fmt(msg, arguments)}")
  override def debug(msg: String, t: Throwable): Unit = underlying.log(adjustLevel(Debug), s"$name $msg: ${t.toString}")
override def isInfoEnabled: Boolean = underlying.atLevel(Info)
override def isInfoEnabled(marker: Marker): Boolean = underlying.atLevel(Info)
override def info(msg: String): Unit = underlying.log(adjustLevel(Info), s"$name $msg")
override def info(msg: String, arg: AnyRef): Unit = underlying.log(adjustLevel(Info), s"$name ${fmt(msg, arg)}")
override def info(msg: String, arg1: AnyRef, arg2: AnyRef): Unit = underlying.log(adjustLevel(Info), s"$name ${fmt(msg, arg1, arg2)}")
override def info(msg: String, arguments: AnyRef*): Unit = underlying.log(adjustLevel(Info), s"$name ${fmt(msg, arguments)}")
  override def info(msg: String, t: Throwable): Unit = underlying.log(adjustLevel(Info), s"$name $msg: ${t.toString}")
override def isWarnEnabled: Boolean = underlying.atLevel(Warn)
override def isWarnEnabled(marker: Marker): Boolean = underlying.atLevel(Warn)
override def warn(msg: String): Unit = underlying.log(adjustLevel(Warn), s"$name $msg")
override def warn(msg: String, arg: AnyRef): Unit = underlying.log(adjustLevel(Warn), s"$name ${fmt(msg, arg)}")
override def warn(msg: String, arg1: AnyRef, arg2: AnyRef): Unit = underlying.log(adjustLevel(Warn), s"$name ${fmt(msg, arg1, arg2)}")
override def warn(msg: String, arguments: AnyRef*): Unit = underlying.log(adjustLevel(Warn), s"$name ${fmt(msg, arguments)}")
  override def warn(msg: String, t: Throwable): Unit = underlying.log(adjustLevel(Warn), s"$name $msg: ${t.toString}")
override def isErrorEnabled: Boolean = underlying.atLevel(Error)
override def isErrorEnabled(marker: Marker): Boolean = underlying.atLevel(Error)
override def error(msg: String): Unit = underlying.log(adjustLevel(Error), s"$name $msg")
override def error(msg: String, arg: AnyRef): Unit = underlying.log(adjustLevel(Error), s"$name ${fmt(msg, arg)}")
override def error(msg: String, arg1: AnyRef, arg2: AnyRef): Unit = underlying.log(adjustLevel(Error), s"$name ${fmt(msg, arg1, arg2)}")
override def error(msg: String, arguments: AnyRef*): Unit = underlying.log(adjustLevel(Error), s"$name ${fmt(msg, arguments)}")
  override def error(msg: String, t: Throwable): Unit = underlying.log(adjustLevel(Error), s"$name $msg: ${t.toString}")
}
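// Editor's sketch (not part of the original file): one plausible way to construct the
// adapter from a task-scoped sbt logger. The "[mobilizer]" prefix and the Info threshold
// are illustrative assumptions, not values taken from this project.
object Slf4jSbtLoggerSketch {
  def wrap(underlying: sbt.AbstractLogger): org.slf4j.Logger =
    new Slf4jSbtLogger(underlying, "[mobilizer]", Level.Info)
}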
|
onesto/sbt-mobilizer
|
src/main/scala/fi/onesto/sbt/mobilizer/Slf4jSbtLogger.scala
|
Scala
|
mit
| 5,368
|
/**
* Copyright 2011-2016 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import io.gatling.core.Predef._
class Assertions extends Simulation {
val scn = scenario("foo").inject(atOnceUsers(1))
//#setUp
setUp(scn).assertions(
global.responseTime.max.lessThan(50),
global.successfulRequests.percent.greaterThan(95)
)
//#setUp
//#details
details("Search" / "Index")
//#details
//#examples
// Assert that the max response time of all requests is less than 100 ms
setUp(scn).assertions(global.responseTime.max.lessThan(100))
// Assert that every request has no more than 5% of failing requests
setUp(scn).assertions(forAll.failedRequests.percent.lessThan(5))
// Assert that the percentage of failed requests named "Index" in the group "Search"
// is exactly 0 %
setUp(scn).assertions(details("Search" / "Index").failedRequests.percent.is(0))
  // Assert that the rate of requests per second for the group "Search" is between 100 and 1000
setUp(scn).assertions(details("Search").requestsPerSec.between(100, 1000))
//#examples
}
|
GabrielPlassard/gatling
|
src/sphinx/general/code/Assertions.scala
|
Scala
|
apache-2.0
| 1,595
|
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.spark.jts
import org.apache.spark.sql.SQLContext
package object udf {
/** Registration delegation function, called by `initJTS`.
* New UDF sets should be added here. */
private[jts] def registerFunctions(sqlContext: SQLContext): Unit = {
GeometricAccessorFunctions.registerFunctions(sqlContext)
GeometricCastFunctions.registerFunctions(sqlContext)
GeometricConstructorFunctions.registerFunctions(sqlContext)
GeometricOutputFunctions.registerFunctions(sqlContext)
GeometricProcessingFunctions.registerFunctions(sqlContext)
SpatialRelationFunctions.registerFunctions(sqlContext)
}
}
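// Editor's sketch (not part of the original file): planner-side code can reach the
// bundles above through the package-private hook, while user code normally goes via the
// `initJTS` entry point mentioned in the comment. The SQL function names in the comment
// below (st_point, st_asText) are examples of what the registered bundles are expected
// to provide and are not verified here.
object UdfRegistrationSketch {
  def registerAll(sqlContext: SQLContext): Unit = {
    udf.registerFunctions(sqlContext)
    // afterwards, e.g.: sqlContext.sql("SELECT st_asText(st_point(1, 2))")
  }
}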
|
aheyne/geomesa
|
geomesa-spark/geomesa-spark-jts/src/main/scala/org/locationtech/geomesa/spark/jts/udf/package.scala
|
Scala
|
apache-2.0
| 1,114
|
package cosbench_ng
import com.typesafe.config.ConfigFactory
import akka.actor.{ ActorSystem, PoisonPill }
import akka.event.Logging.DebugLevel
import akka.serialization.SerializerWithStringManifest
import MyProtoBufMsg._
//Cluster imports
import akka.cluster.{ Cluster, ClusterEvent }
import akka.cluster.ClusterEvent._
import akka.cluster.singleton._
// log4j
import org.slf4j.{LoggerFactory}
import ch.qos.logback.classic.Level
object MyConfig {
val config = ConfigFactory.load().getConfig("Cosbench_ng.common")
var cl: Option[Config] = None // parsed command line
var rawCl: Option[Array[String]] = None // raw cmd line
// internal config
val maxThreads : Int = config.getInt("maxThreads")
}
case class Config(
bucketName : String = "Vishnu_test",
cmd : String = "PUT", // "PUT" or "GET" - action to execute
testTag : String = "NOTAG",
opsRate : Int = 200, // target ops per second
maxOps : Long = 5000, // total ops
objSize : Long = 1, // Obj size in KB
rangeReadStart : Long = -1, // range read start. -1 = no range read
rangeReadEnd : Long = -1, // range read end
endpoint : String = "https://s3.amazonaws.com",
region : String = "us-east-1",
aidSkey : (String, String) = ("aid", "skey"),
fakeS3Latency : Long = -1, // fake s3 latency
runToCompletion : Boolean = false, // don't exit, but wait for everything to complete
minSlaves : Long = 0, // minimum slaves to wait before we start work
debug : Int = 0
)
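// Editor's sketch (not part of the original file): an illustrative Config for a ranged
// GET run; every value below is an assumption chosen only to exercise the fields above.
object ConfigSketch {
  val rangedGet: Config = Config(
    cmd            = "GET",
    bucketName     = "demo-bucket",   // hypothetical bucket
    opsRate        = 100,             // target ops per second
    maxOps         = 1000,
    objSize        = 1024,            // KB
    rangeReadStart = 0,
    rangeReadEnd   = 4095,
    aidSkey        = ("ACCESS_KEY_ID", "SECRET_KEY")
  )
}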
class ConfigMsg (c: Config) extends java.io.Serializable { val config = c }
// Replaced with Protobuf messages
//object MyCmd { def apply(s: Int, e: Int) = new MyProtoBufMsg.MyCmd(s,e) }
//class MyCmd(val start : Int = 0, val end : Int = 99) extends java.io.Serializable {}
object MyCmdIter { def apply(i: Int, inc: Int) = new MyCmdIter(i,inc) }
class MyCmdIter(val start: Int, val inc: Int) extends Iterator[MyCmd] {
var index : Option[MyCmd] = None
def hasNext = true
def next = {
val nI = index.getOrElse(MyCmd(start,start+inc))
index = Some(MyCmd(nI.start+inc+1, nI.end+inc+1))
nI
}
}
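// Editor's sketch (not part of the original file): MyCmdIter slices the operation id
// space into consecutive MyCmd ranges, e.g. MyCmdIter(0, 100) yields (0,100), (101,201),
// (202,302), ...; the batch size and count below are illustrative.
object MyCmdIterSketch {
  def firstBatches(n: Int): List[MyCmd] = MyCmdIter(0, 100).take(n).toList
}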
//case class StatList (sl : List[Stats])
//case class FinalStatList (sl : List[FinalStat])
class Stats() extends java.io.Serializable
class GoodStat (val rspStarted: Double, val rspComplete: Double) extends Stats // status = failed_op or successful_op
class BadStat() extends Stats
object GoodStat { def apply(rs: Double, rc: Double) = new GoodStat(rs,rc) }
object BadStat { def apply() = new BadStat() }
object FinalStat { def apply( r: Int, s: Int, t: Int) = new FinalStat(r,s,t) }
class FinalStat(val opsStartedNCompleted: Int, val opsCompletedStatsNSent : Int, val opsNStarted : Int) extends Stats
/* MyCmd Serializer */
class MyCmdSerializer extends SerializerWithStringManifest {
val identifier = 11065789
override def manifest(o: AnyRef): String = o.getClass.getName
final val MyCmdManifest = "MyProtoBufMsg.MyCmd"
final val StatListMsgManifest = "MyProtoBufMsg.StatListMsg"
final val GoodStatMsgManifest = "MyProtoBufMsg.GoodStatMsg"
final val BadStatMsgManifest = "MyProtoBufMsg.BadStatMsg"
final val SlaveStatusMsgManifest = "MyProtoBufMsg.SlaveStatusMsg"
override def fromBinary(bytes: Array[Byte], manifest: String): AnyRef = manifest match {
case MyCmdManifest => MyCmd.parseFrom(bytes)
case GoodStatMsgManifest => GoodStatMsg.parseFrom(bytes)
case BadStatMsgManifest => BadStatMsg.parseFrom(bytes)
case StatListMsgManifest => StatListMsg.parseFrom(bytes)
case SlaveStatusMsgManifest => SlaveStatusMsg.parseFrom(bytes)
}
override def toBinary(o: AnyRef): Array[Byte] = o match {
case x: MyCmd => x.toByteArray
case x: GoodStatMsg => x.toByteArray
case x: BadStatMsg => x.toByteArray
case x: StatListMsg => x.toByteArray
case x: SlaveStatusMsg => x.toByteArray
}
}
|
vardhanv/cosbench_ng
|
common/src/main/scala/Config.scala
|
Scala
|
mit
| 4,082
|
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package play.api.mvc.request
import javax.inject.Inject
import play.api.http.{ HttpConfiguration, SecretConfiguration }
import play.api.libs.crypto.CookieSignerProvider
import play.api.libs.typedmap.TypedMap
import play.api.mvc._
import play.core.system.RequestIdProvider
/**
* A `RequestFactory` provides logic for creating requests.
*/
trait RequestFactory {
/**
* Create a `RequestHeader`.
*/
def createRequestHeader(
connection: RemoteConnection,
method: String,
target: RequestTarget,
version: String,
headers: Headers,
attrs: TypedMap): RequestHeader
/**
* Creates a `RequestHeader` based on the values of an
* existing `RequestHeader`. The factory may modify the copied
* values to produce a modified `RequestHeader`.
*/
def copyRequestHeader(rh: RequestHeader): RequestHeader = {
createRequestHeader(rh.connection, rh.method, rh.target, rh.version, rh.headers, rh.attrs)
}
/**
* Create a `Request` with a body. By default this just calls
* `createRequestHeader(...).withBody(body)`.
*/
def createRequest[A](
connection: RemoteConnection,
method: String,
target: RequestTarget,
version: String,
headers: Headers,
attrs: TypedMap,
body: A): Request[A] =
createRequestHeader(connection, method, target, version, headers, attrs).withBody(body)
/**
* Creates a `Request` based on the values of an
* existing `Request`. The factory may modify the copied
* values to produce a modified `Request`.
*/
def copyRequest[A](r: Request[A]): Request[A] = {
createRequest[A](r.connection, r.method, r.target, r.version, r.headers, r.attrs, r.body)
}
}
object RequestFactory {
/**
* A `RequestFactory` that creates a request with the arguments given, without
* any additional modification.
*/
val plain = new RequestFactory {
override def createRequestHeader(
connection: RemoteConnection,
method: String,
target: RequestTarget,
version: String,
headers: Headers,
attrs: TypedMap): RequestHeader =
new RequestHeaderImpl(connection, method, target, version, headers, attrs)
}
}
/**
* The default [[RequestFactory]] used by a Play application. This
* `RequestFactory` adds the following typed attributes to requests:
* - request id
* - cookie
* - session cookie
* - flash cookie
*/
class DefaultRequestFactory @Inject() (
cookieHeaderEncoding: CookieHeaderEncoding,
sessionBaker: SessionCookieBaker,
flashBaker: FlashCookieBaker) extends RequestFactory {
def this(config: HttpConfiguration) = this(
new DefaultCookieHeaderEncoding(config.cookies),
new DefaultSessionCookieBaker(config.session, config.secret, new CookieSignerProvider(config.secret).get),
new DefaultFlashCookieBaker(config.flash, config.secret, new CookieSignerProvider(config.secret).get)
)
override def createRequestHeader(
connection: RemoteConnection,
method: String,
target: RequestTarget,
version: String,
headers: Headers,
attrs: TypedMap): RequestHeader = {
val requestId: Long = RequestIdProvider.freshId()
val cookieCell = new LazyCell[Cookies] {
override protected def emptyMarker: Cookies = null
override protected def create: Cookies =
cookieHeaderEncoding.fromCookieHeader(headers.get(play.api.http.HeaderNames.COOKIE))
}
val sessionCell = new LazyCell[Session] {
override protected def emptyMarker: Session = null
override protected def create: Session = sessionBaker.decodeFromCookie(cookieCell.value.get(sessionBaker.COOKIE_NAME))
}
val flashCell = new LazyCell[Flash] {
override protected def emptyMarker: Flash = null
override protected def create: Flash = flashBaker.decodeFromCookie(cookieCell.value.get(flashBaker.COOKIE_NAME))
}
val updatedAttrMap = attrs + (
RequestAttrKey.Id -> requestId,
RequestAttrKey.Cookies -> cookieCell,
RequestAttrKey.Session -> sessionCell,
RequestAttrKey.Flash -> flashCell
)
new RequestHeaderImpl(connection, method, target, version, headers, updatedAttrMap)
}
}
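// Editor's sketch (not part of the original file): a minimal factory that decorates
// RequestFactory.plain, tagging every request with an attribute. The TypedKey name and
// the tag value are purely illustrative assumptions.
object TaggingRequestFactorySketch {
  private val Tag = play.api.libs.typedmap.TypedKey[String]("requestTag")
  val factory: RequestFactory = new RequestFactory {
    override def createRequestHeader(
      connection: RemoteConnection,
      method: String,
      target: RequestTarget,
      version: String,
      headers: Headers,
      attrs: TypedMap): RequestHeader =
      RequestFactory.plain.createRequestHeader(
        connection, method, target, version, headers, attrs + (Tag -> "sketch"))
  }
}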
|
ktoso/playframework
|
framework/src/play/src/main/scala/play/api/mvc/request/RequestFactory.scala
|
Scala
|
apache-2.0
| 4,219
|
package im.actor.server.dialog
import akka.actor.ActorSystem
import akka.http.scaladsl.util.FastFuture
import im.actor.api.rpc.PeersImplicits
import im.actor.api.rpc.counters.{ ApiAppCounters, UpdateCountersChanged }
import im.actor.api.rpc.messaging._
import im.actor.server.db.DbExtension
import im.actor.server.messaging.PushText
import im.actor.server.model.Peer
import im.actor.server.sequence.{ PushData, PushRules, SeqState, SeqUpdatesExtension }
import im.actor.server.user.UserExtension
import scala.concurrent.{ ExecutionContext, Future }
//default extension
final class ActorDelivery()(implicit val system: ActorSystem)
extends DeliveryExtension
with PushText
with PeersImplicits {
implicit val ec: ExecutionContext = system.dispatcher
implicit val seqUpdatesExt: SeqUpdatesExtension = SeqUpdatesExtension(system)
private val userExt = UserExtension(system)
private val dialogExt = DialogExtension(system)
override def receiverDelivery(
receiverUserId: Int,
senderUserId: Int,
peer: Peer,
randomId: Long,
timestamp: Long,
message: ApiMessage,
isFat: Boolean
): Future[Unit] = {
val receiverUpdate = UpdateMessage(
peer = peer.asStruct,
senderUserId = senderUserId,
date = timestamp,
randomId = randomId,
message = message,
attributes = None,
quotedMessage = None
)
for {
senderName ← userExt.getName(senderUserId, receiverUserId)
(pushText, censoredPushText) ← getPushText(peer, receiverUserId, senderName, message)
_ ← seqUpdatesExt.deliverSingleUpdate(
receiverUserId,
receiverUpdate,
PushRules(isFat = isFat).withData(
PushData()
.withText(pushText)
.withCensoredText(censoredPushText)
.withPeer(peer)
),
deliveryId = s"msg_${peer.toString}_$randomId"
)
} yield ()
}
override def sendCountersUpdate(userId: Int): Future[Unit] =
for {
counter ← dialogExt.getUnreadTotal(userId)
_ ← sendCountersUpdate(userId, counter)
} yield ()
override def sendCountersUpdate(userId: Int, counter: Int): Future[Unit] = {
val counterUpdate = UpdateCountersChanged(ApiAppCounters(Some(counter)))
seqUpdatesExt.deliverSingleUpdate(userId, counterUpdate, reduceKey = Some("counters_changed")) map (_ ⇒ ())
}
override def senderDelivery(
senderUserId: Int,
senderAuthSid: Int,
peer: Peer,
randomId: Long,
timestamp: Long,
message: ApiMessage,
isFat: Boolean
): Future[SeqState] = {
val apiPeer = peer.asStruct
val senderUpdate = UpdateMessage(
peer = apiPeer,
senderUserId = senderUserId,
date = timestamp,
randomId = randomId,
message = message,
attributes = None,
quotedMessage = None
)
val senderClientUpdate = UpdateMessageSent(apiPeer, randomId, timestamp)
seqUpdatesExt.deliverMappedUpdate(
userId = senderUserId,
default = Some(senderUpdate),
custom = Map(senderAuthSid → senderClientUpdate),
pushRules = PushRules(isFat = isFat, excludeAuthSids = Seq(senderAuthSid)),
deliveryId = s"msg_${peer.toString}_$randomId"
)
}
override def notifyReceive(userId: Int, peer: Peer, date: Long, now: Long): Future[Unit] = {
val update = UpdateMessageReceived(peer.asStruct, date, now)
userExt.broadcastUserUpdate(
userId,
update,
pushText = None,
isFat = false,
reduceKey = Some(s"receive_${peer.toString}"),
deliveryId = None
) map (_ ⇒ ())
}
override def notifyRead(userId: Int, peer: Peer, date: Long, now: Long): Future[Unit] = {
val update = UpdateMessageRead(peer.asStruct, date, now)
seqUpdatesExt.deliverSingleUpdate(
userId = userId,
update = update,
reduceKey = Some(s"read_${peer.toString}")
) map (_ ⇒ ())
}
override def read(readerUserId: Int, readerAuthSid: Int, peer: Peer, date: Long, unreadCount: Int): Future[Unit] =
for {
_ ← seqUpdatesExt.deliverSingleUpdate(
userId = readerUserId,
update = UpdateMessageReadByMe(peer.asStruct, date, Some(unreadCount)),
reduceKey = Some(s"read_by_me_${peer.toString}")
)
} yield ()
}
|
ljshj/actor-platform
|
actor-server/actor-core/src/main/scala/im/actor/server/dialog/ActorDelivery.scala
|
Scala
|
mit
| 4,347
|
/*
Copyright 2012 Georgia Tech Research Institute
Author: lance.gatlin@gtri.gatech.edu
This file is part of org.gtri.util.xmlbuilder library.
org.gtri.util.xmlbuilder library is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
org.gtri.util.xmlbuilder library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with org.gtri.util.xmlbuilder library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.gtri.util.xmlbuilder.impl.events
import org.gtri.util.issue.api.DiagnosticLocator
import org.gtri.util.xmlbuilder.api.{XmlContract, XmlEvent}
import org.gtri.util.scala.exelog.noop._
object AddXmlTextEvent {
implicit val thisclass = classOf[AddXmlTextEvent]
implicit val log = Logger.getLog(thisclass)
}
case class AddXmlTextEvent(text : String, locator : DiagnosticLocator) extends XmlEvent {
import AddXmlTextEvent._
def pushTo(contract: XmlContract) {
log.block("pushTo", Seq("contract" -> contract)) {
+"Pushing AddXmlTextEvent to XmlContract"
~s"contract.addXmlText($text)"
contract.addXmlText(text)
}
}
}
|
gtri-iead/org.gtri.util.xmlbuilder
|
impl/src/main/scala/org/gtri/util/xmlbuilder/impl/events/AddXmlTextEvent.scala
|
Scala
|
gpl-3.0
| 1,532
|